mariagrandury committed on
Commit • a695c54
1 Parent(s): 26ae10f
Add initial eval requests
Browse files
- .gitignore +1 -0
- 01-ai/Yi-1.5-9B_eval_request_False_bfloat16_Original.json +1 -0
- HiTZ/latxa-7b-v1.2_eval_request_False_bfloat16_Original.json +1 -0
- bertin-project/bertin-gpt-j-6B_eval_request_False_float32_Original.json +1 -0
- demo-leaderboard/gpt2-demo_eval_request_False_float32_Original.json +1 -0
- google/gemma-2-2b_eval_request_False_float32_Original.json +1 -0
- gplsi/Aitana-6.3B_eval_request_False_bfloat16_Original.json +1 -0
- meta-llama/Meta-Llama-3.1-8B_eval_request_False_bfloat16_Original.json +1 -0
- microsoft/phi-1_5_eval_request_False_float16_Original.json +1 -0
- mistralai/Mistral-7B-v0.3_eval_request_False_bfloat16_Original.json +1 -0
- occiglot/occiglot-7b-es-en_eval_request_False_float32_Original.json +1 -0
- projecte-aina/FLOR-6.3B_eval_request_False_float16_Original.json +1 -0
- projecte-aina/aguila-7b_eval_request_False_float16_Original.json +1 -0
- proxectonos/Carballo-bloom-1.3B_eval_request_False_float16_Original.json +1 -0
- scripts/generate.py +72 -0
- scripts/models.csv +29 -0
- tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json +1 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+scripts/
01-ai/Yi-1.5-9B_eval_request_False_bfloat16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "01-ai/Yi-1.5-9B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T10:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 44, "params": 7.25, "license": "apache-2.0", "architecture": "LlamaForCausalLM", "sender": "mariagrandury"}
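The request files in this commit all share the flat JSON shape shown above. A minimal sketch of reading one back (assuming the repository root as the working directory; this snippet is illustrative and not part of the commit):

import json

# Load one of the request files added in this commit and print a few of the
# fields the leaderboard reads (model id, precision, eval status).
path = "01-ai/Yi-1.5-9B_eval_request_False_bfloat16_Original.json"
with open(path) as f:
    request = json.load(f)

print(request["model"], request["precision"], request["status"])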
HiTZ/latxa-7b-v1.2_eval_request_False_bfloat16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "HiTZ/latxa-7b-v1.2", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-20T21:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 1, "params": 7.0, "license": "llama2", "architecture": "LlamaForCausalLM", "sender": "mariagrandury"}
bertin-project/bertin-gpt-j-6B_eval_request_False_float32_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "bertin-project/bertin-gpt-j-6B", "base_model": "", "revision": "main", "private": false, "precision": "float32", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T08:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 17, "params": 6.0, "license": "apache-2.0", "architecture": "GPTJForCausalLM", "sender": "mariagrandury"}
demo-leaderboard/gpt2-demo_eval_request_False_float32_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "demo-leaderboard/gpt2-demo", "base_model": "", "revision": "ac3299b02780836378b9e1e68c6eead546e89f90", "private": false, "precision": "float32", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-20T21:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 7.0, "license": "custom", "architecture": "", "sender": "mariagrandury"}
google/gemma-2-2b_eval_request_False_float32_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "google/gemma-2-2b", "base_model": "", "revision": "main", "private": false, "precision": "float32", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-20T21:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 342, "params": 2.61, "license": "gemma", "architecture": "Gemma2ForCausalLM", "sender": "mariagrandury"}
gplsi/Aitana-6.3B_eval_request_False_bfloat16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "gplsi/Aitana-6.3B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T05:19:20Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 0, "params": 6.25, "license": "apache-2.0", "architecture": "BloomForCausalLM", "sender": "mariagrandury"}
meta-llama/Meta-Llama-3.1-8B_eval_request_False_bfloat16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "meta-llama/Meta-Llama-3.1-8B", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T09:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 877, "params": 8.03, "license": "llama3.1", "architecture": "LlamaForCausalLM", "sender": "mariagrandury"}
microsoft/phi-1_5_eval_request_False_float16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "microsoft/phi-1_5", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T12:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 1310, "params": 1.42, "license": "mit", "architecture": "PhiForCausalLM", "sender": "mariagrandury"}
mistralai/Mistral-7B-v0.3_eval_request_False_bfloat16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "mistralai/Mistral-7B-v0.3", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T10:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 363, "params": 7.25, "license": "apache-2.0", "architecture": "MistralForCausalLM", "sender": "mariagrandury"}
occiglot/occiglot-7b-es-en_eval_request_False_float32_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "occiglot/occiglot-7b-es-en", "base_model": "", "revision": "main", "private": false, "precision": "float32", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T03:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 4, "params": 7.24, "license": "apache-2.0", "architecture": "MistralForCausalLM", "sender": "mariagrandury"}
projecte-aina/FLOR-6.3B_eval_request_False_float16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "projecte-aina/FLOR-6.3B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T00:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 29, "params": 6.25, "license": "apache-2.0", "architecture": "BloomForCausalLM", "sender": "mariagrandury"}
projecte-aina/aguila-7b_eval_request_False_float16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "projecte-aina/aguila-7b", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-25T14:39:00Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 54, "params": 6.85, "license": "apache-2.0", "architecture": "RWForCausalLM", "sender": "mariagrandury"}
proxectonos/Carballo-bloom-1.3B_eval_request_False_float16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "proxectonos/Carballo-bloom-1.3B", "base_model": "", "revision": "main", "private": false, "precision": "float16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-20T22:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 5, "params": 1.31, "license": "mit", "architecture": "BloomForCausalLM", "sender": "mariagrandury"}
scripts/generate.py
ADDED
@@ -0,0 +1,72 @@
+import json
+import os
+from datetime import datetime
+
+import pandas as pd
+
+
+def generate_request(model_id, precision, model_type, params, index):
+    # Build one leaderboard eval request; `index` staggers submitted_time by
+    # one hour per model so requests do not share a timestamp.
+    data = {
+        "model": model_id,
+        "base_model": "",
+        "revision": "main",
+        "private": False,
+        "precision": precision,
+        "weight_type": "Original",
+        "status": "FINISHED",
+        "submitted_time": (datetime.now() + pd.Timedelta(hours=index)).strftime(
+            "%Y-%m-%dT%H:%M:%SZ"
+        ),
+        # Prefix pretrained models with the green-circle marker used by the
+        # leaderboard; leave other model types as-is.
+        "model_type": f"\ud83d\udfe2 : {model_type}" if model_type == "pretrained" else model_type,
+        "likes": 0,
+        "params": params,
+        "license": "custom",
+        "architecture": "",
+        "sender": "mariagrandury",
+    }
+
+    # model_id is "<org>/<name>", so this also creates the <org>/ directory
+    # that the request file below is written into.
+    os.makedirs(f"{model_id}", exist_ok=True)
+    with open(f"{model_id}_eval_request_False_{precision}_Original.json", "w") as f:
+        json.dump(data, f)
+
+
+def generate_requests(selection: str = "all"):
+    df = pd.read_csv("scripts/models.csv")
+    df = df[["model_id", "precision", "model_type", "params", "iberobench"]]
+
+    if selection == "pretrained":
+        df = df[df["model_type"] == "pretrained"]
+    elif selection == "pretrained_new":
+        # Pretrained models not yet evaluated on IberoBench
+        # (the iberobench column stores "Yes"/"No" strings).
+        df = df[df["model_type"] == "pretrained"]
+        df = df[df["iberobench"] == "No"]
+    elif selection == "instruction":
+        df = df[df["model_type"] == "instruction-tuned"]
+
+    for index, row in df.iterrows():
+        model_id, precision, model_type, params, iberobench = row
+        generate_request(
+            model_id=model_id,
+            precision=precision,
+            model_type=model_type,
+            params=params,
+            index=index,
+        )
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(description="Generate model requests.")
+    parser.add_argument("--pretrained", action="store_true")
+    parser.add_argument("--pretrained_new", action="store_true")
+    parser.add_argument("--instruction", action="store_true")
+    args = parser.parse_args()
+
+    if args.pretrained:
+        generate_requests("pretrained")
+    elif args.pretrained_new:
+        generate_requests("pretrained_new")
+    elif args.instruction:
+        generate_requests("instruction")
+    else:
+        # No flag: fall back to the default selection (all rows).
+        generate_requests()
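Usage note (a sketch, not part of the committed script): the intended entry point is the CLI, e.g. `python scripts/generate.py --pretrained_new` run from the repository root; generate_request can also be called directly, as below. The import assumes scripts/ is importable from the repository root, and the argument values are taken from scripts/models.csv.

# Hypothetical direct call to generate a single request file.
from scripts.generate import generate_request

generate_request(
    model_id="tiiuae/falcon-7b",
    precision="bfloat16",
    model_type="pretrained",
    params=7.0,
    index=0,
)
# Writes tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json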
scripts/models.csv
ADDED
@@ -0,0 +1,29 @@
+status,model_id,model_url,iberobench,model_type,params,precision,gated,remote,avg,logs,error
+Not started,meta-llama/Meta-Llama-3.1-8B-Instruct,https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct,No,instruction-tuned,8.03,bfloat16,True,,,,
+Not started,mistralai/Mistral-7B-Instruct-v0.3,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3,No,instruction-tuned,7.25,bfloat16,True,,,,
+Not started,google/gemma-2-2b-it,https://huggingface.co/google/gemma-2-2b-it,No,instruction-tuned,2.61,bfloat16,True,,,,"Model ""google/gemma-2-2b-it"" was not found on hub! 'gemma2'"
+Not started,google/gemma-2-9b-it,https://huggingface.co/google/gemma-2-9b-it,No,instruction-tuned,9.24,bfloat16,True,,,,
+In progress,google/gemma-2-2b,https://huggingface.co/google/gemma-2-2b,Yes,pretrained,2.61,float32,True,,,,
+In progress,google/gemma-2-9b,https://huggingface.co/google/gemma-2-9b,Yes,pretrained,9.24,float32,True,,,,
+Not started,microsoft/Phi-3.5-mini-instruct,https://huggingface.co/microsoft/Phi-3.5-mini-instruct,No,instruction-tuned,3.82,bfloat16,False,True,,,"Model ""microsoft/Phi-3.5-mini-instruct"" needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard."
+Not started,microsoft/Phi-3-small-128k-instruct,https://huggingface.co/microsoft/Phi-3-small-128k-instruct,No,instruction-tuned,7.39,bfloat16,False,True,,,
+Not started,tiiuae/falcon-7b-instruct,https://huggingface.co/tiiuae/falcon-7b-instruct,No,instruction-tuned,7,bfloat16,False,False,,,
+Not started,01-ai/Yi-1.5-9B-Chat,https://huggingface.co/01-ai/Yi-1.5-9B-Chat,No,instruction-tuned,8.83,bfloat16,False,False,,,
+Not started,internlm/internlm2_5-7b-chat,https://huggingface.co/internlm/internlm2_5-7b-chat,No,instruction-tuned,7.74,bfloat16,False,True,,,
+In progress,HiTZ/latxa-7b-v1.2,https://huggingface.co/HiTZ/latxa-7b-v1.2,Yes,pretrained,7,bfloat16,False,,,,
+In progress,proxectonos/Carballo-bloom-1.3B,https://huggingface.co/proxectonos/Carballo-bloom-1.3B,Yes,pretrained,1.31,float16,False,,,,"Model ""proxectonos/Carballo-bloom-1.3B"" 's tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured? unhashable type: 'dict'"
+Not started,projecte-aina/aguila-7b,https://huggingface.co/projecte-aina/aguila-7b,No,pretrained,6.85,float16,False,True,,,
+In progress,projecte-aina/FLOR-6.3B,https://huggingface.co/projecte-aina/FLOR-6.3B,Yes,pretrained,6.25,float16,True,,,,
+Not started,projecte-aina/FLOR-6.3B-Instructed,https://huggingface.co/projecte-aina/FLOR-6.3B-Instructed,No,instruction-tuned,6.25,float16,True,,,,
+Not started,gplsi/Aitana-6.3B,https://huggingface.co/gplsi/Aitana-6.3B,No,pretrained,6.25,bfloat16,False,,,,Eval error: 'ScalarNode' object is not callable
+In progress,occiglot/occiglot-7b-es-en,https://huggingface.co/occiglot/occiglot-7b-es-en,Yes,pretrained,7.24,float32,False,,,,
+Not started,occiglot/occiglot-7b-es-en-instruct,https://huggingface.co/occiglot/occiglot-7b-es-en-instruct,No,instruction-tuned,7.24,float32,False,,,,
+Not started,LenguajeNaturalAI/leniachat-gemma-2b-v0,https://huggingface.co/LenguajeNaturalAI/leniachat-gemma-2b-v0,No,instruction-tuned,2.51,bfloat16,False,,,,"Model ""LenguajeNaturalAI/leniachat-gemma-2b-v0"" was not found on hub! 'gemma'"
+Not started,LenguajeNaturalAI/leniachat-qwen2-1.5B-v0,https://huggingface.co/LenguajeNaturalAI/leniachat-qwen2-1.5B-v0,No,instruction-tuned,1.54,bfloat16,False,,,,"Model ""LenguajeNaturalAI/leniachat-qwen2-1.5B-v0"" was not found on hub! 'qwen2'"
+Not started,bertin-project/Gromenauer-7B-Instruct,https://huggingface.co/bertin-project/Gromenauer-7B-Instruct,No,instruction-tuned,7.24,float32,False,,,,
+Not started,bertin-project/bertin-gpt-j-6B,https://huggingface.co/bertin-project/bertin-gpt-j-6B,No,pretrained,6,float32,False,,,,
+In progress,meta-llama/Meta-Llama-3.1-8B,https://huggingface.co/meta-llama/Meta-Llama-3.1-8B,Yes,pretrained,8.03,bfloat16,True,,,,
+In progress,mistralai/Mistral-7B-v0.3,https://huggingface.co/mistralai/Mistral-7B-v0.3,Yes,pretrained,7.25,bfloat16,True,,,,
+Not started,01-ai/Yi-1.5-9B,https://huggingface.co/01-ai/Yi-1.5-9B,No,pretrained,8.83,bfloat16,False,,,,
+Not started,microsoft/phi-1_5,https://huggingface.co/microsoft/phi-1_5,No,pretrained,1.42,float16,False,,,,
+Not started,tiiuae/falcon-7b,https://huggingface.co/tiiuae/falcon-7b,No,pretrained,7,bfloat16,False,,,,
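To make the CSV columns concrete, a short pandas sketch (assuming the repository root as the working directory; illustrative only) reproducing the "pretrained_new" selection used by scripts/generate.py:

import pandas as pd

# Pretrained models whose iberobench column is "No", i.e. not yet evaluated.
df = pd.read_csv("scripts/models.csv")
subset = df[(df["model_type"] == "pretrained") & (df["iberobench"] == "No")]
print(subset[["model_id", "precision", "params"]])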
tiiuae/falcon-7b_eval_request_False_bfloat16_Original.json
ADDED
@@ -0,0 +1 @@
+{"model": "tiiuae/falcon-7b", "base_model": "", "revision": "main", "private": false, "precision": "bfloat16", "weight_type": "Original", "status": "FINISHED", "submitted_time": "2024-09-21T13:47:19Z", "model_type": "\ud83d\udfe2 : pretrained", "likes": 1070, "params": 7.0, "license": "apache-2.0", "architecture": "FalconForCausalLM", "sender": "mariagrandury"}