brunneis committed on
Commit
f550eb4
β€’
1 Parent(s): 48d94be

Update submissions page

Browse files
Files changed (2) hide show
  1. app.py +23 -23
  2. src/about.py +1 -5
app.py CHANGED
@@ -164,30 +164,30 @@ with demo:
164
  with gr.Row():
165
  gr.Markdown(EVALUATION_REQUESTS_TEXT, elem_classes="markdown-text")
166
 
167
- with gr.Column():
168
- with gr.Accordion(
169
- f"βœ… Finished ({len(finished_eval_requests_df)})",
170
- open=False,
171
- ):
172
- with gr.Row():
173
- finished_eval_table = gr.components.Dataframe(
174
- value=finished_eval_requests_df,
175
- headers=EVAL_COLS,
176
- datatype=EVAL_TYPES,
177
- row_count=5,
178
- )
179
 
180
- with gr.Accordion(
181
- f"⏳ Pending ({len(pending_eval_requests_df)})",
182
- open=False,
183
- ):
184
- with gr.Row():
185
- pending_eval_table = gr.components.Dataframe(
186
- value=pending_eval_requests_df,
187
- headers=EVAL_COLS,
188
- datatype=EVAL_TYPES,
189
- row_count=5,
190
- )
191
  with gr.Row():
192
  gr.Markdown("# βœ‰οΈ Submission", elem_classes="markdown-text")
193
 
 
164
  with gr.Row():
165
  gr.Markdown(EVALUATION_REQUESTS_TEXT, elem_classes="markdown-text")
166
 
167
+ # with gr.Column():
168
+ # with gr.Accordion(
169
+ # f"βœ… Finished ({len(finished_eval_requests_df)})",
170
+ # open=False,
171
+ # ):
172
+ # with gr.Row():
173
+ # finished_eval_table = gr.components.Dataframe(
174
+ # value=finished_eval_requests_df,
175
+ # headers=EVAL_COLS,
176
+ # datatype=EVAL_TYPES,
177
+ # row_count=5,
178
+ # )
179
 
180
+ # with gr.Accordion(
181
+ # f"⏳ Pending ({len(pending_eval_requests_df)})",
182
+ # open=False,
183
+ # ):
184
+ # with gr.Row():
185
+ # pending_eval_table = gr.components.Dataframe(
186
+ # value=pending_eval_requests_df,
187
+ # headers=EVAL_COLS,
188
+ # datatype=EVAL_TYPES,
189
+ # row_count=5,
190
+ # )
191
  with gr.Row():
192
  gr.Markdown("# βœ‰οΈ Submission", elem_classes="markdown-text")
193
 
src/about.py CHANGED
@@ -45,9 +45,8 @@ To reproduce our results, here is the commands you can run:
45
  """
46
 
47
  EVALUATION_REQUESTS_TEXT = """
48
- ## Some good practices before submitting a model
49
 
50
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
51
  ```python
52
  from transformers import AutoConfig, AutoModel, AutoTokenizer
53
  config = AutoConfig.from_pretrained("your model name", revision=revision)
@@ -57,9 +56,6 @@ tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
57
  If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
58
 
59
  Note: make sure your model is public.
60
-
61
- ### 2) Fill up your model card
62
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
63
  """
64
  EVALUATION_SCRIPT = ''
65
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 
45
  """
46
 
47
  EVALUATION_REQUESTS_TEXT = """
48
+ ## Make sure you can load your model and tokenizer using AutoClasses
49
 
 
50
  ```python
51
  from transformers import AutoConfig, AutoModel, AutoTokenizer
52
  config = AutoConfig.from_pretrained("your model name", revision=revision)
 
56
  If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
57
 
58
  Note: make sure your model is public.
 
 
 
59
  """
60
  EVALUATION_SCRIPT = ''
61
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"