Upload results for model google/gemma-2b-it

#50
data/google/gemma-2b-it/base/24-03-17-10:11:14.json ADDED
@@ -0,0 +1,212 @@
+ {
+   "results": {
+     "ipsum-suscipit-6540_logiqa2_base": {
+       "acc,none": 0.27735368956743,
+       "acc_stderr,none": 0.011295144105469071,
+       "alias": "ipsum-suscipit-6540_logiqa2_base"
+     },
+     "ipsum-suscipit-6540_logiqa_base": {
+       "acc,none": 0.25559105431309903,
+       "acc_stderr,none": 0.01744771697469749,
+       "alias": "ipsum-suscipit-6540_logiqa_base"
+     },
+     "ipsum-suscipit-6540_lsat-ar_base": {
+       "acc,none": 0.19130434782608696,
+       "acc_stderr,none": 0.025991852462828487,
+       "alias": "ipsum-suscipit-6540_lsat-ar_base"
+     },
+     "ipsum-suscipit-6540_lsat-lr_base": {
+       "acc,none": 0.21568627450980393,
+       "acc_stderr,none": 0.018230445049830818,
+       "alias": "ipsum-suscipit-6540_lsat-lr_base"
+     },
+     "ipsum-suscipit-6540_lsat-rc_base": {
+       "acc,none": 0.25650557620817843,
+       "acc_stderr,none": 0.02667594824667508,
+       "alias": "ipsum-suscipit-6540_lsat-rc_base"
+     }
+   },
+   "configs": {
+     "ipsum-suscipit-6540_logiqa2_base": {
+       "task": "ipsum-suscipit-6540_logiqa2_base",
+       "group": "logikon-bench",
+       "dataset_path": "cot-leaderboard/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "ipsum-suscipit-6540-logiqa2/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "ipsum-suscipit-6540_logiqa_base": {
+       "task": "ipsum-suscipit-6540_logiqa_base",
+       "group": "logikon-bench",
+       "dataset_path": "cot-leaderboard/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "ipsum-suscipit-6540-logiqa/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "ipsum-suscipit-6540_lsat-ar_base": {
+       "task": "ipsum-suscipit-6540_lsat-ar_base",
+       "group": "logikon-bench",
+       "dataset_path": "cot-leaderboard/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "ipsum-suscipit-6540-lsat-ar/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "ipsum-suscipit-6540_lsat-lr_base": {
+       "task": "ipsum-suscipit-6540_lsat-lr_base",
+       "group": "logikon-bench",
+       "dataset_path": "cot-leaderboard/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "ipsum-suscipit-6540-lsat-lr/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     },
+     "ipsum-suscipit-6540_lsat-rc_base": {
+       "task": "ipsum-suscipit-6540_lsat-rc_base",
+       "group": "logikon-bench",
+       "dataset_path": "cot-leaderboard/cot-eval-traces",
+       "dataset_kwargs": {
+         "data_files": {
+           "test": "ipsum-suscipit-6540-lsat-rc/test-00000-of-00001.parquet"
+         }
+       },
+       "test_split": "test",
+       "doc_to_text": "def doc_to_text(doc) -> str:\n \"\"\"\n Answer the following question about the given passage.\n \n Passage: <passage>\n \n Question: <question>\n A. <choice1>\n B. <choice2>\n C. <choice3>\n D. <choice4>\n [E. <choice5>]\n \n Answer:\n \"\"\"\n k = len(doc[\"options\"])\n choices = [\"a\", \"b\", \"c\", \"d\", \"e\"][:k]\n prompt = \"Answer the following question about the given passage.\\n\\n\"\n prompt = \"Passage: \" + doc[\"passage\"] + \"\\n\\n\"\n prompt += \"Question: \" + doc[\"question\"] + \"\\n\"\n for choice, option in zip(choices, doc[\"options\"]):\n prompt += f\"{choice.upper()}. {option}\\n\"\n prompt += \"\\n\"\n prompt += \"Answer:\"\n return prompt\n",
+       "doc_to_target": "{{answer}}",
+       "doc_to_choice": "{{options}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false,
+       "metadata": {
+         "version": 0.0
+       }
+     }
+   },
+   "versions": {
+     "ipsum-suscipit-6540_logiqa2_base": 0.0,
+     "ipsum-suscipit-6540_logiqa_base": 0.0,
+     "ipsum-suscipit-6540_lsat-ar_base": 0.0,
+     "ipsum-suscipit-6540_lsat-lr_base": 0.0,
+     "ipsum-suscipit-6540_lsat-rc_base": 0.0
+   },
+   "n-shot": {
+     "ipsum-suscipit-6540_logiqa2_base": 0,
+     "ipsum-suscipit-6540_logiqa_base": 0,
+     "ipsum-suscipit-6540_lsat-ar_base": 0,
+     "ipsum-suscipit-6540_lsat-lr_base": 0,
+     "ipsum-suscipit-6540_lsat-rc_base": 0
+   },
+   "config": {
+     "model": "vllm",
+     "model_args": "pretrained=google/gemma-2b-it,revision=main,dtype=bfloat16,tensor_parallel_size=1,gpu_memory_utilization=0.5,trust_remote_code=true,max_length=2048",
+     "batch_size": "auto",
+     "batch_sizes": [],
+     "device": null,
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000,
+     "gen_kwargs": null
+   },
+   "git_hash": "3cf3403"
+ }
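
For quick inspection, the following is a minimal Python sketch, not part of the uploaded file, showing how one might load the results JSON added by this PR and print per-task accuracy with its standard error. The path is the file added above; the key names ("results", "acc,none", "acc_stderr,none") follow the structure shown in the diff.

    import json

    # Path of the results file added in this PR; adjust if it is stored elsewhere locally.
    path = "data/google/gemma-2b-it/base/24-03-17-10:11:14.json"

    with open(path) as f:
        report = json.load(f)

    # Each task entry carries "acc,none" (mean accuracy) and "acc_stderr,none" (its standard error).
    for task, scores in report["results"].items():
        print(f"{task}: acc={scores['acc,none']:.4f} +/- {scores['acc_stderr,none']:.4f}")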