Fixing some errors in the leaderboard evaluation results in the ModelCard YAML

#2
Files changed (1)
  1. README.md +27 -1
README.md CHANGED
@@ -77,6 +77,19 @@ model-index:
     - type: f1_macro
       value: 88.52
       name: f1-macro
+    source:
+      url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicolasdec/CabraQwen7b
+      name: Open Portuguese LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: Assin2 STS
+      type: eduagarcia/portuguese_benchmark
+      split: test
+      args:
+        num_few_shot: 15
+    metrics:
     - type: pearson
       value: 76.17
       name: pearson
@@ -104,7 +117,7 @@ model-index:
       name: Text Generation
     dataset:
       name: HateBR Binary
-      type: eduagarcia/portuguese_benchmark
+      type: ruanchaves/hatebr
       split: test
       args:
         num_few_shot: 25
@@ -112,6 +125,19 @@ model-index:
     - type: f1_macro
       value: 76.32
       name: f1-macro
+    source:
+      url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=nicolasdec/CabraQwen7b
+      name: Open Portuguese LLM Leaderboard
+  - task:
+      type: text-generation
+      name: Text Generation
+    dataset:
+      name: PT Hate Speech Binary
+      type: hate_speech_portuguese
+      split: test
+      args:
+        num_few_shot: 25
+    metrics:
     - type: f1_macro
       value: 69.69
       name: f1-macro
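
For reference, the `model-index` block edited above lives in the YAML front matter of the model card's README.md. Below is a minimal sketch (not part of this PR; the `front_matter` helper and the local `README.md` path are assumptions) that parses that front matter with PyYAML and lists each result's dataset `type`, few-shot count, and leaderboard `source`, so mismatches like the wrong HateBR dataset type fixed here are easy to spot:

```python
import re
import yaml

def front_matter(readme_path: str) -> dict:
    """Return the YAML front matter of a model card README as a dict."""
    with open(readme_path, encoding="utf-8") as f:
        text = f.read()
    match = re.match(r"^---\n(.*?)\n---\n", text, re.DOTALL)
    return yaml.safe_load(match.group(1)) if match else {}

meta = front_matter("README.md")
for entry in meta.get("model-index", []):
    for result in entry.get("results", []):
        dataset = result.get("dataset", {})
        source = result.get("source", {})
        print(
            dataset.get("name"),
            dataset.get("type"),
            dataset.get("args", {}).get("num_few_shot"),
            source.get("url", "<no source>"),
        )
```

With the patch applied, each entry should report its actual benchmark dataset (e.g. `ruanchaves/hatebr` rather than `eduagarcia/portuguese_benchmark` for HateBR Binary) and a `source` URL pointing to the Open Portuguese LLM Leaderboard.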