Update submissions page
- app.py +23 -23
- src/about.py +1 -5
app.py
CHANGED
@@ -164,30 +164,30 @@ with demo:
         with gr.Row():
             gr.Markdown(EVALUATION_REQUESTS_TEXT, elem_classes="markdown-text")
 
-        with gr.Column():
-            with gr.Accordion(
-                f"✅ Finished ({len(finished_eval_requests_df)})",
-                open=False,
-            ):
-                with gr.Row():
-                    finished_eval_table = gr.components.Dataframe(
-                        value=finished_eval_requests_df,
-                        headers=EVAL_COLS,
-                        datatype=EVAL_TYPES,
-                        row_count=5,
-                    )
+        # with gr.Column():
+        #     with gr.Accordion(
+        #         f"✅ Finished ({len(finished_eval_requests_df)})",
+        #         open=False,
+        #     ):
+        #         with gr.Row():
+        #             finished_eval_table = gr.components.Dataframe(
+        #                 value=finished_eval_requests_df,
+        #                 headers=EVAL_COLS,
+        #                 datatype=EVAL_TYPES,
+        #                 row_count=5,
+        #             )
 
-            with gr.Accordion(
-                f"⏳ Pending ({len(pending_eval_requests_df)})",
-                open=False,
-            ):
-                with gr.Row():
-                    pending_eval_table = gr.components.Dataframe(
-                        value=pending_eval_requests_df,
-                        headers=EVAL_COLS,
-                        datatype=EVAL_TYPES,
-                        row_count=5,
-                    )
+        #     with gr.Accordion(
+        #         f"⏳ Pending ({len(pending_eval_requests_df)})",
+        #         open=False,
+        #     ):
+        #         with gr.Row():
+        #             pending_eval_table = gr.components.Dataframe(
+        #                 value=pending_eval_requests_df,
+        #                 headers=EVAL_COLS,
+        #                 datatype=EVAL_TYPES,
+        #                 row_count=5,
+        #             )
         with gr.Row():
             gr.Markdown("# ✉️ Submission", elem_classes="markdown-text")
 
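For reference, the block this commit comments out is the usual Gradio Accordion-plus-Dataframe pattern for showing the evaluation queue. A minimal, self-contained sketch of that pattern follows; the column names, datatypes, and sample rows are stand-ins, not the Space's real EVAL_COLS, EVAL_TYPES, or request dataframes:

```python
# Minimal sketch of the Accordion + Dataframe pattern commented out above.
# EVAL_COLS, EVAL_TYPES, and the two dataframes below are placeholders.
import gradio as gr
import pandas as pd

EVAL_COLS = ["model", "status", "submitted_time"]  # placeholder column names
EVAL_TYPES = ["markdown", "str", "str"]            # placeholder column datatypes

finished_eval_requests_df = pd.DataFrame(
    [["org/model-a", "FINISHED", "2024-01-01"]], columns=EVAL_COLS
)
pending_eval_requests_df = pd.DataFrame(
    [["org/model-b", "PENDING", "2024-01-02"]], columns=EVAL_COLS
)

with gr.Blocks() as demo:
    with gr.Column():
        # Collapsed accordion listing evaluations that already finished.
        with gr.Accordion(f"✅ Finished ({len(finished_eval_requests_df)})", open=False):
            with gr.Row():
                gr.components.Dataframe(
                    value=finished_eval_requests_df,
                    headers=EVAL_COLS,
                    datatype=EVAL_TYPES,
                    row_count=5,
                )
        # Collapsed accordion listing evaluations still waiting in the queue.
        with gr.Accordion(f"⏳ Pending ({len(pending_eval_requests_df)})", open=False):
            with gr.Row():
                gr.components.Dataframe(
                    value=pending_eval_requests_df,
                    headers=EVAL_COLS,
                    datatype=EVAL_TYPES,
                    row_count=5,
                )

if __name__ == "__main__":
    demo.launch()
```

Commenting the block out removes the Finished/Pending queue tables from the submissions page while leaving the surrounding Markdown rows in place.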
src/about.py
CHANGED
@@ -45,9 +45,8 @@ To reproduce our results, here is the commands you can run:
 """
 
 EVALUATION_REQUESTS_TEXT = """
-##
+## Make sure you can load your model and tokenizer using AutoClasses
 
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
 ```python
 from transformers import AutoConfig, AutoModel, AutoTokenizer
 config = AutoConfig.from_pretrained("your model name", revision=revision)
@@ -57,9 +56,6 @@ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
 If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
 
 Note: make sure your model is public.
-
-### 2) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
 """
 EVALUATION_SCRIPT = ''
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
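The Python check that EVALUATION_REQUESTS_TEXT asks submitters to run is only partly visible above (the AutoModel call sits between the two hunks). A minimal sketch of the full check, assuming a placeholder model id and revision, might look like this:

```python
# Sketch of the AutoClasses pre-submission check described in EVALUATION_REQUESTS_TEXT.
# "org/your-model" and "main" are placeholders; use the public model id and the exact
# revision you intend to submit.
from transformers import AutoConfig, AutoModel, AutoTokenizer

model_id = "org/your-model"  # placeholder model name
revision = "main"            # placeholder revision (branch or commit hash)

# If any of these calls raises, debug the upload before submitting the model.
config = AutoConfig.from_pretrained(model_id, revision=revision)
model = AutoModel.from_pretrained(model_id, revision=revision)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)

print(type(config).__name__, type(model).__name__, type(tokenizer).__name__)
```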