ybelkada DhiyaEddine committed on
Commit
8c5250f
1 Parent(s): 2a75b44

Update README.md (#3)

Browse files

- Update README.md (76441592e88f59bff4e182d507060625d99fc6b0)


Co-authored-by: Rhaiem <DhiyaEddine@users.noreply.huggingface.co>

Files changed (1) hide show
  1. README.md +19 -19
README.md CHANGED
@@ -107,7 +107,7 @@ We report in the following table our internal pipeline benchmarks:
107
  <td rowspan="3">General</td>
108
  <td>MMLU (5-shot)</td>
109
  <td>-</td>
110
- <td>-</td>
111
  <td>-</td>
112
  <td>68.5%</td>
113
  <td>-</td>
@@ -115,24 +115,24 @@ We report in the following table our internal pipeline benchmarks:
115
  <tr>
116
  <td>MMLU-PRO (5-shot)</td>
117
  <td>32.4%</td>
118
- <td>-</td>
119
  <td>-</td>
120
  <td>29.6%</td>
121
- <td>-</td>
122
  </tr>
123
  <tr>
124
  <td>IFEval</td>
125
  <td>69.9%</td>
126
- <td>-</td>
127
  <td>-</td>
128
  <td>78.6%</td>
129
- <td>-</td>
130
  </tr>
131
  <tr>
132
  <td rowspan="2">Math</td>
133
  <td>GSM8K (5-shot)</td>
134
  <td>-</td>
135
- <td>-</td>
136
  <td>-</td>
137
  <td>-</td>
138
  <td>-</td>
@@ -140,16 +140,16 @@ We report in the following table our internal pipeline benchmarks:
140
  <tr>
141
  <td>MATH(4-shot)</td>
142
  <td>-</td>
 
143
  <td>-</td>
144
  <td>-</td>
145
- <td>-</td>
146
- <td>-</td>
147
  </tr>
148
  <tr>
149
  <td rowspan="4">Reasoning</td>
150
  <td>Arc Challenge (25-shot)</td>
151
  <td>-</td>
152
- <td>-</td>
153
  <td>-</td>
154
  <td>-</td>
155
  <td>-</td>
@@ -157,32 +157,32 @@ We report in the following table our internal pipeline benchmarks:
157
  <tr>
158
  <td>GPQA (0-shot)</td>
159
  <td>10.3%</td>
160
- <td>-</td>
161
  <td>-</td>
162
  <td>2.4%</td>
163
- <td>-</td>
164
  </tr>
165
  <tr>
166
  <td>MUSR (0-shot)</td>
167
  <td>8.2%</td>
168
- <td>-</td>
169
  <td>-</td>
170
  <td>8.4%</td>
171
- <td>-</td>
172
  </tr>
173
  <tr>
174
  <td>BBH (3-shot)</td>
175
  <td>33.3%</td>
176
- <td>-</td>
177
  <td>-</td>
178
  <td>29.9%</td>
179
- <td>-</td>
180
  </tr>
181
  <tr>
182
  <td rowspan="4">CommonSense Understanding</td>
183
  <td>PIQA (0-shot)</td>
184
  <td>-</td>
185
- <td>-</td>
186
  <td>-</td>
187
  <td>-</td>
188
  <td>-</td>
@@ -190,7 +190,7 @@ We report in the following table our internal pipeline benchmarks:
190
  <tr>
191
  <td>SciQ (0-shot)</td>
192
  <td>-</td>
193
- <td>-</td>
194
  <td>-</td>
195
  <td>-</td>
196
  <td>-</td>
@@ -198,7 +198,7 @@ We report in the following table our internal pipeline benchmarks:
198
  <tr>
199
  <td>Winogrande (0-shot)</td>
200
  <td>-</td>
201
- <td>-</td>
202
  <td>-</td>
203
  <td>-</td>
204
  <td>-</td>
@@ -206,7 +206,7 @@ We report in the following table our internal pipeline benchmarks:
206
  <tr>
207
  <td>OpenbookQA (0-shot)</td>
208
  <td>-</td>
209
- <td>-</td>
210
  <td>-</td>
211
  <td>-</td>
212
  <td>-</td>
 
107
  <td rowspan="3">General</td>
108
  <td>MMLU (5-shot)</td>
109
  <td>-</td>
110
+ <td>68.7%</td>
111
  <td>-</td>
112
  <td>68.5%</td>
113
  <td>-</td>
 
115
  <tr>
116
  <td>MMLU-PRO (5-shot)</td>
117
  <td>32.4%</td>
118
+ <td>31.6%</td>
119
  <td>-</td>
120
  <td>29.6%</td>
121
+ <td>26.3%</td>
122
  </tr>
123
  <tr>
124
  <td>IFEval</td>
125
  <td>69.9%</td>
126
+ <td>65.7%</td>
127
  <td>-</td>
128
  <td>78.6%</td>
129
+ <td>71.7%</td>
130
  </tr>
131
  <tr>
132
  <td rowspan="2">Math</td>
133
  <td>GSM8K (5-shot)</td>
134
  <td>-</td>
135
+ <td>74.9%</td>
136
  <td>-</td>
137
  <td>-</td>
138
  <td>-</td>
 
140
  <tr>
141
  <td>MATH(4-shot)</td>
142
  <td>-</td>
143
+ <td>6.9%</td>
144
  <td>-</td>
145
  <td>-</td>
146
+ <td>27.3%</td>
 
147
  </tr>
148
  <tr>
149
  <td rowspan="4">Reasoning</td>
150
  <td>Arc Challenge (25-shot)</td>
151
  <td>-</td>
152
+ <td>54.3%</td>
153
  <td>-</td>
154
  <td>-</td>
155
  <td>-</td>
 
157
  <tr>
158
  <td>GPQA (0-shot)</td>
159
  <td>10.3%</td>
160
+ <td>11.1%</td>
161
  <td>-</td>
162
  <td>2.4%</td>
163
+ <td>7.2%</td>
164
  </tr>
165
  <tr>
166
  <td>MUSR (0-shot)</td>
167
  <td>8.2%</td>
168
+ <td>12.2%</td>
169
  <td>-</td>
170
  <td>8.4%</td>
171
+ <td>8.3%</td>
172
  </tr>
173
  <tr>
174
  <td>BBH (3-shot)</td>
175
  <td>33.3%</td>
176
+ <td>35.3%</td>
177
  <td>-</td>
178
  <td>29.9%</td>
179
+ <td>25.2%</td>
180
  </tr>
181
  <tr>
182
  <td rowspan="4">CommonSense Understanding</td>
183
  <td>PIQA (0-shot)</td>
184
  <td>-</td>
185
+ <td>82.3%</td>
186
  <td>-</td>
187
  <td>-</td>
188
  <td>-</td>
 
190
  <tr>
191
  <td>SciQ (0-shot)</td>
192
  <td>-</td>
193
+ <td>94.9%</td>
194
  <td>-</td>
195
  <td>-</td>
196
  <td>-</td>
 
198
  <tr>
199
  <td>Winogrande (0-shot)</td>
200
  <td>-</td>
201
+ <td>64.5%</td>
202
  <td>-</td>
203
  <td>-</td>
204
  <td>-</td>
 
206
  <tr>
207
  <td>OpenbookQA (0-shot)</td>
208
  <td>-</td>
209
+ <td>34.6%</td>
210
  <td>-</td>
211
  <td>-</td>
212
  <td>-</td>