nicholasKluge committed on
Commit
96651f8
1 Parent(s): 2361d3d

Upload results-pt-dpo.json with huggingface_hub

Files changed (1)
  1. results-pt-dpo.json +1303 -0
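
The commit message above is the default message huggingface_hub generates for a file upload. As a minimal sketch (not the uploader's actual script), an upload like this one can be reproduced with HfApi.upload_file; the repo id and repo type below are placeholders, since the target repository is not shown in this commit view:

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="results-pt-dpo.json",  # local file to push
    path_in_repo="results-pt-dpo.json",     # destination path inside the repo
    repo_id="user/repo",                    # placeholder: actual repo not shown here
    repo_type="dataset",                    # assumption: results files often live in dataset repos
)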
results-pt-dpo.json ADDED
@@ -0,0 +1,1303 @@
{
  "results": {
    "assin2_rte": {
      "f1_macro,all": 0.6778064209006744,
      "f1_macro_stderr,all": 0.006813205507001969,
      "acc,all": 0.6907679738562091,
      "acc_stderr,all": 0.006616054688908941,
      "alias": "assin2_rte"
    },
    "assin2_sts": {
      "pearson,all": 0.08876834100904987,
      "pearson_stderr,all": 0.013601135542002981,
      "mse,all": 1.4245261437908499,
      "mse_stderr,all": "N/A",
      "alias": "assin2_sts"
    },
    "bluex": {
      "acc,all": 0.26008344923504867,
      "acc_stderr,all": 0.009452533294580755,
      "acc,exam_id__USP_2023": 0.3181818181818182,
      "acc_stderr,exam_id__USP_2023": 0.04052437035850368,
      "acc,exam_id__UNICAMP_2021_2": 0.29411764705882354,
      "acc_stderr,exam_id__UNICAMP_2021_2": 0.03678406842196161,
      "acc,exam_id__USP_2021": 0.19230769230769232,
      "acc_stderr,exam_id__USP_2021": 0.03147378966283576,
      "acc,exam_id__UNICAMP_2019": 0.26,
      "acc_stderr,exam_id__UNICAMP_2019": 0.03585259404178158,
      "acc,exam_id__UNICAMP_2020": 0.32727272727272727,
      "acc_stderr,exam_id__UNICAMP_2020": 0.03655630277825746,
      "acc,exam_id__USP_2018": 0.18518518518518517,
      "acc_stderr,exam_id__USP_2018": 0.030596318881900852,
      "acc,exam_id__USP_2024": 0.17073170731707318,
      "acc_stderr,exam_id__USP_2024": 0.03378335774802068,
      "acc,exam_id__USP_2019": 0.225,
      "acc_stderr,exam_id__USP_2019": 0.03818292580701932,
      "acc,exam_id__UNICAMP_2021_1": 0.32608695652173914,
      "acc_stderr,exam_id__UNICAMP_2021_1": 0.03987627273416352,
      "acc,exam_id__UNICAMP_2022": 0.28205128205128205,
      "acc_stderr,exam_id__UNICAMP_2022": 0.04167152779427693,
      "acc,exam_id__UNICAMP_2023": 0.23255813953488372,
      "acc_stderr,exam_id__UNICAMP_2023": 0.03716515848328401,
      "acc,exam_id__UNICAMP_2024": 0.37777777777777777,
      "acc_stderr,exam_id__UNICAMP_2024": 0.04167427031074604,
      "acc,exam_id__USP_2020": 0.25,
      "acc_stderr,exam_id__USP_2020": 0.03348879615541624,
      "acc,exam_id__USP_2022": 0.20408163265306123,
      "acc_stderr,exam_id__USP_2022": 0.03327189738781395,
      "acc,exam_id__UNICAMP_2018": 0.25925925925925924,
      "acc_stderr,exam_id__UNICAMP_2018": 0.03440732040215374,
      "alias": "bluex"
    },
    "enem_challenge": {
      "alias": "enem",
      "acc,all": 0.2106368089573128,
      "acc_stderr,all": 0.006221926886428458,
      "acc,exam_id__2023": 0.1925925925925926,
      "acc_stderr,exam_id__2023": 0.019642560525314663,
      "acc,exam_id__2016": 0.2231404958677686,
      "acc_stderr,exam_id__2016": 0.021884506550252786,
      "acc,exam_id__2011": 0.20512820512820512,
      "acc_stderr,exam_id__2011": 0.021535615178630927,
      "acc,exam_id__2014": 0.2018348623853211,
      "acc_stderr,exam_id__2014": 0.02227514909979303,
      "acc,exam_id__2016_2": 0.18699186991869918,
      "acc_stderr,exam_id__2016_2": 0.020236833190250755,
      "acc,exam_id__2013": 0.23148148148148148,
      "acc_stderr,exam_id__2013": 0.023403559519633636,
      "acc,exam_id__2022": 0.19548872180451127,
      "acc_stderr,exam_id__2022": 0.0197906677810766,
      "acc,exam_id__2017": 0.25862068965517243,
      "acc_stderr,exam_id__2017": 0.023462571285656597,
      "acc,exam_id__2009": 0.19130434782608696,
      "acc_stderr,exam_id__2009": 0.021192171541977615,
      "acc,exam_id__2012": 0.20689655172413793,
      "acc_stderr,exam_id__2012": 0.02164777225944937,
      "acc,exam_id__2010": 0.18803418803418803,
      "acc_stderr,exam_id__2010": 0.02084241608479345,
      "acc,exam_id__2015": 0.25210084033613445,
      "acc_stderr,exam_id__2015": 0.02302550703391277
    },
    "faquad_nli": {
      "f1_macro,all": 0.4396551724137931,
      "f1_macro_stderr,all": 0.0035796984729087084,
      "acc,all": 0.7846153846153846,
      "acc_stderr,all": 0.011396120309131327,
      "alias": "faquad_nli"
    },
    "hatebr_offensive": {
      "alias": "hatebr_offensive_binary",
      "f1_macro,all": 0.3128178313648294,
      "f1_macro_stderr,all": 0.004586812204783609,
      "acc,all": 0.45285714285714285,
      "acc_stderr,all": 0.009431973605901981
    },
    "oab_exams": {
      "acc,all": 0.26469248291571756,
      "acc_stderr,all": 0.005428752815268037,
      "acc,exam_id__2013-12": 0.275,
      "acc_stderr,exam_id__2013-12": 0.02884127035012301,
      "acc,exam_id__2015-18": 0.2,
      "acc_stderr,exam_id__2015-18": 0.025728334229438835,
      "acc,exam_id__2017-24": 0.275,
      "acc_stderr,exam_id__2017-24": 0.028829140552922117,
      "acc,exam_id__2012-06a": 0.25,
      "acc_stderr,exam_id__2012-06a": 0.027921672471627818,
      "acc,exam_id__2016-19": 0.2564102564102564,
      "acc_stderr,exam_id__2016-19": 0.0285751477773131,
      "acc,exam_id__2011-04": 0.2625,
      "acc_stderr,exam_id__2011-04": 0.028487240212834847,
      "acc,exam_id__2016-20a": 0.225,
      "acc_stderr,exam_id__2016-20a": 0.026968247099866234,
      "acc,exam_id__2011-03": 0.25252525252525254,
      "acc_stderr,exam_id__2011-03": 0.02517984462055549,
      "acc,exam_id__2012-08": 0.325,
      "acc_stderr,exam_id__2012-08": 0.03021485499773354,
      "acc,exam_id__2012-09": 0.2987012987012987,
      "acc_stderr,exam_id__2012-09": 0.030147139671855144,
      "acc,exam_id__2016-20": 0.225,
      "acc_stderr,exam_id__2016-20": 0.026862967483280497,
      "acc,exam_id__2016-21": 0.2875,
      "acc_stderr,exam_id__2016-21": 0.029172210122811244,
      "acc,exam_id__2017-22": 0.2375,
      "acc_stderr,exam_id__2017-22": 0.027472473430105962,
      "acc,exam_id__2018-25": 0.275,
      "acc_stderr,exam_id__2018-25": 0.028925570890046506,
      "acc,exam_id__2013-11": 0.325,
      "acc_stderr,exam_id__2013-11": 0.030308086980619833,
      "acc,exam_id__2010-02": 0.25,
      "acc_stderr,exam_id__2010-02": 0.025044458442253486,
      "acc,exam_id__2012-07": 0.3125,
      "acc_stderr,exam_id__2012-07": 0.029891600574488583,
      "acc,exam_id__2015-17": 0.24358974358974358,
      "acc_stderr,exam_id__2015-17": 0.02805128221433091,
      "acc,exam_id__2010-01": 0.23529411764705882,
      "acc_stderr,exam_id__2010-01": 0.026627603995255614,
      "acc,exam_id__2014-14": 0.3125,
      "acc_stderr,exam_id__2014-14": 0.02988020366791311,
      "acc,exam_id__2015-16": 0.225,
      "acc_stderr,exam_id__2015-16": 0.02698715637840771,
      "acc,exam_id__2012-06": 0.2375,
      "acc_stderr,exam_id__2012-06": 0.027469022000536025,
      "acc,exam_id__2011-05": 0.25,
      "acc_stderr,exam_id__2011-05": 0.027880232512179248,
      "acc,exam_id__2014-15": 0.3333333333333333,
      "acc_stderr,exam_id__2014-15": 0.03076413057616716,
      "acc,exam_id__2013-10": 0.3125,
      "acc_stderr,exam_id__2013-10": 0.029820761573406854,
      "acc,exam_id__2014-13": 0.25,
      "acc_stderr,exam_id__2014-13": 0.02798578403861933,
      "acc,exam_id__2017-23": 0.225,
      "acc_stderr,exam_id__2017-23": 0.026939295289475033,
      "alias": "oab_exams"
    },
    "portuguese_hate_speech": {
      "alias": "portuguese_hate_speech_binary",
      "f1_macro,all": 0.412292817679558,
      "f1_macro_stderr,all": 0.0038204811103947646,
      "acc,all": 0.7015276145710928,
      "acc_stderr,all": 0.011059935171649318
    },
    "tweetsentbr": {
      "f1_macro,all": 0.22033834583581732,
      "f1_macro_stderr,all": 0.003670253679085497,
      "acc,all": 0.45671641791044776,
      "acc_stderr,all": 0.007871122638710882,
      "alias": "tweetsentbr"
    }
  },
  "configs": {
    "assin2_rte": {
      "task": "assin2_rte",
      "group": ["pt_benchmark", "assin2"],
      "dataset_path": "assin2",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
      "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
      "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [1, 3251, 2, 3252, 3, 4, 5, 6, 3253, 7, 3254, 3255, 3256, 8, 9, 10, 3257, 11, 3258, 12, 13, 14, 15, 3259, 3260, 3261, 3262, 3263, 16, 17, 3264, 18, 3265, 3266, 3267, 19, 20, 3268, 3269, 21, 3270, 3271, 22, 3272, 3273, 23, 3274, 24, 25, 3275],
          "id_column": "sentence_pair_id"
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.1}
    },
    "assin2_sts": {
      "task": "assin2_sts",
      "group": ["pt_benchmark", "assin2"],
      "dataset_path": "assin2",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
      "doc_to_target": "<function assin2_float_to_pt_str at 0x1493ec0de5c0>",
      "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [1, 3251, 2, 3252, 3, 4, 5, 6, 3253, 7, 3254, 3255, 3256, 8, 9, 10, 3257, 11, 3258, 12, 13, 14, 15, 3259, 3260, 3261, 3262, 3263, 16, 17, 3264, 18, 3265, 3266, 3267, 19, 20, 3268, 3269, 21, 3270, 3271, 22, 3272, 3273, 23, 3274, 24, 25, 3275],
          "id_column": "sentence_pair_id"
        }
      },
      "num_fewshot": 10,
      "metric_list": [
        {"metric": "pearson", "aggregation": "pearsonr", "higher_is_better": true},
        {"metric": "mse", "aggregation": "mean_squared_error", "higher_is_better": false}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "number_filter", "type": "float", "range_min": 1.0, "range_max": 5.0, "on_outside_range": "clip", "fallback": 5.0},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.1}
    },
    "bluex": {
      "task": "bluex",
      "group": ["pt_benchmark", "vestibular"],
      "dataset_path": "eduagarcia-temp/BLUEX_without_images",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function enem_doc_to_text at 0x1493ec0ddb20>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["USP_2018_3", "UNICAMP_2018_2", "USP_2018_35", "UNICAMP_2018_16", "USP_2018_89"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "normalize_spaces"},
            {"function": "remove_accents"},
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D", "E"],
              "regex_patterns": [
                "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
                "\\b([ABCDE])\\.",
                "\\b([ABCDE]) ?[.):-]",
                "\\b([ABCDE])$",
                "\\b([ABCDE])\\b"
              ]
            },
            {"function": "take_first"}
          ],
          "group_by": {"column": "exam_id"}
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function enem_doc_to_text at 0x1493ec0dde40>",
      "metadata": {"version": 1.1}
    },
    "enem_challenge": {
      "task": "enem_challenge",
      "task_alias": "enem",
      "group": ["pt_benchmark", "vestibular"],
      "dataset_path": "eduagarcia/enem_challenge",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function enem_doc_to_text at 0x1493ec0de020>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["2022_21", "2022_88", "2022_143"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "normalize_spaces"},
            {"function": "remove_accents"},
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D", "E"],
              "regex_patterns": [
                "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
                "\\b([ABCDE])\\.",
                "\\b([ABCDE]) ?[.):-]",
                "\\b([ABCDE])$",
                "\\b([ABCDE])\\b"
              ]
            },
            {"function": "take_first"}
          ],
          "group_by": {"column": "exam_id"}
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function enem_doc_to_text at 0x1493ec0de200>",
      "metadata": {"version": 1.1}
    },
    "faquad_nli": {
      "task": "faquad_nli",
      "group": ["pt_benchmark"],
      "dataset_path": "ruanchaves/faquad-nli",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
      "doc_to_target": "{{['Não', 'Sim'][label]}}",
      "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n",
        "sampler_config": {
          "fewshot_indices": [1893, 949, 663, 105, 1169, 2910, 2227, 2813, 974, 558, 1503, 1958, 2918, 601, 1560, 984, 2388, 995, 2233, 1982, 165, 2788, 1312, 2285, 522, 1113, 1670, 323, 236, 1263, 1562, 2519, 1049, 432, 1167, 1394, 2022, 2551, 2194, 2187, 2282, 2816, 108, 301, 1185, 1315, 1420, 2436, 2322, 766]
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.1}
    },
    "hatebr_offensive": {
      "task": "hatebr_offensive",
      "task_alias": "hatebr_offensive_binary",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/portuguese_benchmark",
      "dataset_name": "HateBR_offensive_binary",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
      "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
      "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [48, 44, 36, 20, 3511, 88, 3555, 16, 56, 3535, 60, 40, 3527, 4, 76, 3579, 3523, 3551, 68, 3503, 84, 3539, 64, 3599, 80, 3563, 3559, 3543, 3547, 3587, 3595, 3575, 3567, 3591, 24, 96, 92, 3507, 52, 72, 8, 3571, 3515, 3519, 3531, 28, 32, 0, 12, 3583],
          "id_column": "idx"
        }
      },
      "num_fewshot": 25,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    },
    "oab_exams": {
      "task": "oab_exams",
      "group": ["legal_benchmark", "pt_benchmark"],
      "dataset_path": "eduagarcia/oab_exams",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function doc_to_text at 0x1493ec0ded40>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["2010-01_1", "2010-01_11", "2010-01_13", "2010-01_23", "2010-01_26", "2010-01_28", "2010-01_38", "2010-01_48", "2010-01_58", "2010-01_68", "2010-01_76", "2010-01_83", "2010-01_85", "2010-01_91", "2010-01_99"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "normalize_spaces"},
            {"function": "remove_accents"},
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D"],
              "regex_patterns": [
                "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
                "\\b([ABCD])\\.",
                "\\b([ABCD]) ?[.):-]",
                "\\b([ABCD])$",
                "\\b([ABCD])\\b"
              ]
            },
            {"function": "take_first"}
          ],
          "group_by": {"column": "exam_id"}
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function doc_to_text at 0x1493ec0defc0>",
      "metadata": {"version": 1.5}
    },
    "portuguese_hate_speech": {
      "task": "portuguese_hate_speech",
      "task_alias": "portuguese_hate_speech_binary",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/portuguese_benchmark",
      "dataset_name": "Portuguese_Hate_Speech_binary",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
      "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
      "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [52, 50, 39, 28, 3, 105, 22, 25, 60, 11, 66, 41, 9, 4, 91, 42, 7, 20, 76, 1, 104, 13, 67, 54, 97, 27, 24, 14, 16, 48, 53, 40, 34, 49, 32, 119, 114, 2, 58, 83, 18, 36, 5, 6, 10, 35, 38, 0, 21, 46],
          "id_column": "idx"
        }
      },
      "num_fewshot": 25,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    },
    "tweetsentbr": {
      "task": "tweetsentbr",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/tweetsentbr_fewshot",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
      "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
      "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n"
      },
      "num_fewshot": 25,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Positivo", "Neutro", "Negativo"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    }
  },
  "versions": {
    "assin2_rte": 1.1,
    "assin2_sts": 1.1,
    "bluex": 1.1,
    "enem_challenge": 1.1,
    "faquad_nli": 1.1,
    "hatebr_offensive": 1.0,
    "oab_exams": 1.5,
    "portuguese_hate_speech": 1.0,
    "tweetsentbr": 1.0
  },
  "n-shot": {
    "assin2_rte": 15,
    "assin2_sts": 10,
    "bluex": 3,
    "enem_challenge": 3,
    "faquad_nli": 15,
    "hatebr_offensive": 25,
    "oab_exams": 3,
    "portuguese_hate_speech": 25,
    "tweetsentbr": 25
  },
  "model_meta": {
    "truncated": 2,
    "non_truncated": 14148,
    "padded": 0,
    "non_padded": 14150,
    "fewshots_truncated": 4,
    "has_chat_template": false,
    "chat_type": null,
    "n_gpus": 1,
    "accelerate_num_process": null,
    "model_sha": "None",
    "model_dtype": "torch.float32",
    "model_memory_footprint": 4400229248,
    "model_num_parameters": 1100056576,
    "model_is_loaded_in_4bit": null,
    "model_is_loaded_in_8bit": null,
    "model_is_quantized": null,
    "model_device": "cuda:0",
    "batch_size": 32,
    "max_length": 2048,
    "max_ctx_length": 2016,
    "max_gen_toks": 32
  },
  "task_model_meta": {
    "assin2_rte": {"sample_size": 2448, "truncated": 0, "non_truncated": 2448, "padded": 0, "non_padded": 2448, "fewshots_truncated": 0, "mean_seq_length": 924.4232026143791, "min_seq_length": 909, "max_seq_length": 963, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 15.0, "mean_effective_fewshot_size": 15.0},
    "assin2_sts": {"sample_size": 2448, "truncated": 0, "non_truncated": 2448, "padded": 0, "non_padded": 2448, "fewshots_truncated": 0, "mean_seq_length": 659.4232026143791, "min_seq_length": 644, "max_seq_length": 698, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 10.0, "mean_effective_fewshot_size": 10.0},
    "bluex": {"sample_size": 719, "truncated": 0, "non_truncated": 719, "padded": 0, "non_padded": 719, "fewshots_truncated": 0, "mean_seq_length": 1170.817802503477, "min_seq_length": 904, "max_seq_length": 1801, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 3.0, "mean_effective_fewshot_size": 3.0},
    "enem_challenge": {"sample_size": 1429, "truncated": 2, "non_truncated": 1427, "padded": 0, "non_padded": 1429, "fewshots_truncated": 4, "mean_seq_length": 1007.4177746675997, "min_seq_length": 829, "max_seq_length": 2484, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 3.0, "mean_effective_fewshot_size": 2.9972008397480754},
    "faquad_nli": {"sample_size": 650, "truncated": 0, "non_truncated": 650, "padded": 0, "non_padded": 650, "fewshots_truncated": 0, "mean_seq_length": 968.1338461538462, "min_seq_length": 936, "max_seq_length": 1034, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 15.0, "mean_effective_fewshot_size": 15.0},
    "hatebr_offensive": {"sample_size": 1400, "truncated": 0, "non_truncated": 1400, "padded": 0, "non_padded": 1400, "fewshots_truncated": 0, "mean_seq_length": 867.4407142857143, "min_seq_length": 852, "max_seq_length": 1061, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 25.0, "mean_effective_fewshot_size": 25.0},
    "oab_exams": {"sample_size": 2195, "truncated": 0, "non_truncated": 2195, "padded": 0, "non_padded": 2195, "fewshots_truncated": 0, "mean_seq_length": 832.024145785877, "min_seq_length": 659, "max_seq_length": 1108, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 3.0, "mean_effective_fewshot_size": 3.0},
    "portuguese_hate_speech": {"sample_size": 851, "truncated": 0, "non_truncated": 851, "padded": 0, "non_padded": 851, "fewshots_truncated": 0, "mean_seq_length": 1219.021151586369, "min_seq_length": 1192, "max_seq_length": 1255, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 25.0, "mean_effective_fewshot_size": 25.0},
    "tweetsentbr": {"sample_size": 2010, "truncated": 0, "non_truncated": 2010, "padded": 0, "non_padded": 2010, "fewshots_truncated": 0, "mean_seq_length": 1154.4194029850746, "min_seq_length": 1137, "max_seq_length": 1211, "max_ctx_length": 2016, "max_gen_toks": 32, "mean_original_fewshots_size": 25.0, "mean_effective_fewshot_size": 25.0}
  },
  "config": {
    "model": "huggingface",
    "model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/Tucano-1b1-DPO",
    "batch_size": "auto",
    "batch_sizes": [],
    "device": "cuda:0",
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000,
    "gen_kwargs": null
  },
  "git_hash": null
}
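
For readers who want to work with this report, a minimal sketch of loading it and pulling out the headline metrics with Python's standard json module; the file path assumes a local copy of the uploaded file:

import json

# Load the evaluation report.
with open("results-pt-dpo.json", "r", encoding="utf-8") as f:
    report = json.load(f)

# Metric keys follow the lm-evaluation-harness "metric,filter" convention,
# e.g. "acc,all" is accuracy under the "all" filter pipeline.
for task, metrics in report["results"].items():
    print(f"{task}: acc={metrics.get('acc,all')} f1_macro={metrics.get('f1_macro,all')}")

# Per-task few-shot counts and sample sizes are stored alongside the scores.
print(report["n-shot"]["assin2_rte"], report["task_model_meta"]["assin2_rte"]["sample_size"])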