Tianyi (Alex) Qiu committed
Commit 5a15668 · 1 Parent(s): 2dc771a

move to legacy & change submission

Files changed (3)
  1. app.py +31 -176
  2. src/legacy/app.py +331 -0
  3. src/submission/submit.py +6 -6
app.py CHANGED
@@ -64,15 +64,8 @@ leaderboard_df = original_df.copy()
 def update_table(
     hidden_df: pd.DataFrame,
     columns: list,
-    type_query: list,
-    precision_query: str,
-    size_query: list,
-    show_deleted: bool,
-    query: str,
 ):
-    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
-    filtered_df = filter_queries(query, filtered_df)
-    df = select_columns(filtered_df, columns)
+    df = select_columns(hidden_df, columns)
     return df
 
 
@@ -91,33 +84,6 @@ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
     ]
     return filtered_df
 
-
-def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
-    # final_df = []
-    # if query != "":
-    #     queries = [q.strip() for q in query.split(";")]
-    #     for _q in queries:
-    #         _q = _q.strip()
-    #         if _q != "":
-    #             temp_filtered_df = search_table(filtered_df, _q)
-    #             if len(temp_filtered_df) > 0:
-    #                 final_df.append(temp_filtered_df)
-    #     if len(final_df) > 0:
-    #         filtered_df = pd.concat(final_df)
-    #         filtered_df = filtered_df.drop_duplicates(
-    #             subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
-    #         )
-
-    return filtered_df
-
-
-def filter_models(
-    df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
-) -> pd.DataFrame:
-    # Show all models
-    return df
-
-
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
@@ -126,56 +92,21 @@ with demo:
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Row():
-                with gr.Column():
-                    # with gr.Row():
-                    #     search_bar = gr.Textbox(
-                    #         placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
-                    #         show_label=False,
-                    #         elem_id="search-bar",
-                    #     )
-                    with gr.Row():
-                        shown_columns = gr.CheckboxGroup(
-                            choices=[
-                                c.name
-                                for c in fields(AutoEvalColumn)
-                                if not c.hidden and not c.never_hidden
-                            ],
-                            value=[
-                                c.name
-                                for c in fields(AutoEvalColumn)
-                                if c.displayed_by_default and not c.hidden and not c.never_hidden
-                            ],
-                            label="Select columns to show",
-                            elem_id="column-select",
-                            interactive=True,
-                        )
-                    # with gr.Row():
-                    #     deleted_models_visibility = gr.Checkbox(
-                    #         value=False, label="Show gated/private/deleted models", interactive=True
-                    #     )
-                # with gr.Column(min_width=320):
-                #     #with gr.Box(elem_id="box-filter"):
-                #     filter_columns_type = gr.CheckboxGroup(
-                #         label="Model types",
-                #         choices=[t.to_str() for t in ModelType],
-                #         value=[t.to_str() for t in ModelType],
-                #         interactive=True,
-                #         elem_id="filter-columns-type",
-                #     )
-                #     filter_columns_precision = gr.CheckboxGroup(
-                #         label="Precision",
-                #         choices=[i.value.name for i in Precision],
-                #         value=[i.value.name for i in Precision],
-                #         interactive=True,
-                #         elem_id="filter-columns-precision",
-                #     )
-                #     filter_columns_size = gr.CheckboxGroup(
-                #         label="Model sizes (in billions of parameters)",
-                #         choices=list(NUMERIC_INTERVALS.keys()),
-                #         value=list(NUMERIC_INTERVALS.keys()),
-                #         interactive=True,
-                #         elem_id="filter-columns-size",
-                #     )
+                shown_columns = gr.CheckboxGroup(
+                    choices=[
+                        c.name
+                        for c in fields(AutoEvalColumn)
+                        if not c.hidden and not c.never_hidden
+                    ],
+                    value=[
+                        c.name
+                        for c in fields(AutoEvalColumn)
+                        if c.displayed_by_default and not c.hidden and not c.never_hidden
+                    ],
+                    label="Select columns to show",
+                    elem_id="column-select",
+                    interactive=True,
+                )
 
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[
@@ -196,127 +127,51 @@ with demo:
                 datatype=TYPES,
                 visible=False,
             )
-            # search_bar.submit(
-            #     update_table,
-            #     [
-            #         hidden_leaderboard_table_for_search,
-            #         shown_columns,
-            #         filter_columns_type,
-            #         filter_columns_precision,
-            #         filter_columns_size,
-            #         deleted_models_visibility,
-            #         search_bar,
-            #     ],
-            #     leaderboard_table,
-            # )
-            for selector in [shown_columns]: # removed: filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility
+            for selector in [shown_columns]:
                 selector.change(
                     update_table,
                     [
                         hidden_leaderboard_table_for_search,
                         shown_columns,
-                        # filter_columns_type,
-                        # filter_columns_precision,
-                        # filter_columns_size,
-                        # deleted_models_visibility,
-                        # search_bar,
                     ],
                     leaderboard_table,
                     queue=True,
                 )
 
-        with gr.TabItem("About", elem_id="llm-benchmark-tab-table", id=2):
-            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-        with gr.TabItem("Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        with gr.TabItem("Submit Algorithm", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
             with gr.Row():
-                gr.Markdown("# Submit your model here!", elem_classes="markdown-text")
+                gr.Markdown("# Submission Form", elem_classes="markdown-text")
 
             with gr.Row():
                 with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
+                    submission_file = gr.File(label="Evaluation result (JSON file generated by `run_benchmark.py`, one algorithm on all challenges)")
 
                 with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+                    algo_name = gr.Textbox(label="Algorithm display name")
+                    algo_info = gr.Textbox(label="Comments & extra information")
+                    algo_link = gr.Textbox(label="One external link (e.g. GitHub repo, paper, project page)")
+                    submitter_email = gr.Textbox(label="Email address for contact (will be kept confidential)")
 
-            submit_button = gr.Button("Submit Eval")
+            submit_button = gr.Button("Submit Algorithm")
             submission_result = gr.Markdown()
             submit_button.click(
                 add_new_eval,
                 [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
+                    submission_file,
+                    algo_name,
+                    algo_info,
+                    algo_link,
+                    submitter_email,
                 ],
                 submission_result,
             )
 
     with gr.Row():
-        with gr.Accordion("Citation", open=False):
+        with gr.Accordion("About & Citation", open=False):
             citation_button = gr.Textbox(
                 value=CITATION_BUTTON_TEXT,
                 label=CITATION_BUTTON_LABEL,
src/legacy/app.py ADDED
@@ -0,0 +1,331 @@
+import subprocess
+import gradio as gr
+import pandas as pd
+from apscheduler.schedulers.background import BackgroundScheduler
+from huggingface_hub import snapshot_download
+
+from src.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    LLM_BENCHMARKS_TEXT,
+    TITLE,
+)
+from src.display.css_html_js import custom_css
+from src.display.utils import (
+    BENCHMARK_COLS,
+    COLS,
+    EVAL_COLS,
+    EVAL_TYPES,
+    NUMERIC_INTERVALS,
+    TYPES,
+    AutoEvalColumn,
+    ModelType,
+    fields,
+    WeightType,
+    Precision
+)
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
+from src.populate import get_evaluation_queue_df, get_leaderboard_df
+from src.submission.submit import add_new_eval
+
+
+def restart_space():
+    API.restart_space(repo_id=REPO_ID)
+
+try:
+    print(EVAL_REQUESTS_PATH)
+    snapshot_download(
+        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+    )
+except Exception:
+    restart_space()
+try:
+    print(EVAL_RESULTS_PATH)
+    snapshot_download(
+        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+    )
+except Exception:
+    restart_space()
+
+
+raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+leaderboard_df = original_df.copy()
+
+(
+    finished_eval_queue_df,
+    running_eval_queue_df,
+    pending_eval_queue_df,
+) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+
+
+# Searching and filtering
+def update_table(
+    hidden_df: pd.DataFrame,
+    columns: list,
+    type_query: list,
+    precision_query: str,
+    size_query: list,
+    show_deleted: bool,
+    query: str,
+):
+    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted)
+    filtered_df = filter_queries(query, filtered_df)
+    df = select_columns(filtered_df, columns)
+    return df
+
+
+def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
+    return df[(df[AutoEvalColumn.model.name].str.contains(query, case=False))]
+
+
+def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
+    always_here_cols = [
+        AutoEvalColumn.model_type_symbol.name,
+        AutoEvalColumn.model.name,
+    ]
+    # We use COLS to maintain sorting
+    filtered_df = df[
+        always_here_cols + [c for c in COLS if c in df.columns and c in columns]
+    ]
+    return filtered_df
+
+
+def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
+    # final_df = []
+    # if query != "":
+    #     queries = [q.strip() for q in query.split(";")]
+    #     for _q in queries:
+    #         _q = _q.strip()
+    #         if _q != "":
+    #             temp_filtered_df = search_table(filtered_df, _q)
+    #             if len(temp_filtered_df) > 0:
+    #                 final_df.append(temp_filtered_df)
+    #     if len(final_df) > 0:
+    #         filtered_df = pd.concat(final_df)
+    #         filtered_df = filtered_df.drop_duplicates(
+    #             subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
+    #         )
+
+    return filtered_df
+
+
+def filter_models(
+    df: pd.DataFrame, type_query: list, size_query: list, precision_query: list, show_deleted: bool
+) -> pd.DataFrame:
+    # Show all models
+    return df
+
+
+demo = gr.Blocks(css=custom_css)
+with demo:
+    gr.HTML(TITLE)
+    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
+
+    with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        with gr.TabItem("Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
+            with gr.Row():
+                with gr.Column():
+                    # with gr.Row():
+                    #     search_bar = gr.Textbox(
+                    #         placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
+                    #         show_label=False,
+                    #         elem_id="search-bar",
+                    #     )
+                    with gr.Row():
+                        shown_columns = gr.CheckboxGroup(
+                            choices=[
+                                c.name
+                                for c in fields(AutoEvalColumn)
+                                if not c.hidden and not c.never_hidden
+                            ],
+                            value=[
+                                c.name
+                                for c in fields(AutoEvalColumn)
+                                if c.displayed_by_default and not c.hidden and not c.never_hidden
+                            ],
+                            label="Select columns to show",
+                            elem_id="column-select",
+                            interactive=True,
+                        )
+                    # with gr.Row():
+                    #     deleted_models_visibility = gr.Checkbox(
+                    #         value=False, label="Show gated/private/deleted models", interactive=True
+                    #     )
+                # with gr.Column(min_width=320):
+                #     #with gr.Box(elem_id="box-filter"):
+                #     filter_columns_type = gr.CheckboxGroup(
+                #         label="Model types",
+                #         choices=[t.to_str() for t in ModelType],
+                #         value=[t.to_str() for t in ModelType],
+                #         interactive=True,
+                #         elem_id="filter-columns-type",
+                #     )
+                #     filter_columns_precision = gr.CheckboxGroup(
+                #         label="Precision",
+                #         choices=[i.value.name for i in Precision],
+                #         value=[i.value.name for i in Precision],
+                #         interactive=True,
+                #         elem_id="filter-columns-precision",
+                #     )
+                #     filter_columns_size = gr.CheckboxGroup(
+                #         label="Model sizes (in billions of parameters)",
+                #         choices=list(NUMERIC_INTERVALS.keys()),
+                #         value=list(NUMERIC_INTERVALS.keys()),
+                #         interactive=True,
+                #         elem_id="filter-columns-size",
+                #     )
+
+            leaderboard_table = gr.components.Dataframe(
+                value=leaderboard_df[
+                    [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
+                    + shown_columns.value
+                ],
+                headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
+                datatype=TYPES,
+                elem_id="leaderboard-table",
+                interactive=False,
+                visible=True,
+            )
+
+            # Dummy leaderboard for handling the case when the user uses backspace key
+            hidden_leaderboard_table_for_search = gr.components.Dataframe(
+                value=original_df[COLS],
+                headers=COLS,
+                datatype=TYPES,
+                visible=False,
+            )
+            # search_bar.submit(
+            #     update_table,
+            #     [
+            #         hidden_leaderboard_table_for_search,
+            #         shown_columns,
+            #         filter_columns_type,
+            #         filter_columns_precision,
+            #         filter_columns_size,
+            #         deleted_models_visibility,
+            #         search_bar,
+            #     ],
+            #     leaderboard_table,
+            # )
+            for selector in [shown_columns]: # removed: filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility
+                selector.change(
+                    update_table,
+                    [
+                        hidden_leaderboard_table_for_search,
+                        shown_columns,
+                        # filter_columns_type,
+                        # filter_columns_precision,
+                        # filter_columns_size,
+                        # deleted_models_visibility,
+                        # search_bar,
+                    ],
+                    leaderboard_table,
+                    queue=True,
+                )
+
+        with gr.TabItem("About", elem_id="llm-benchmark-tab-table", id=2):
+            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+        with gr.TabItem("Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+            with gr.Column():
+                with gr.Row():
+                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+                with gr.Column():
+                    with gr.Accordion(
+                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            finished_eval_table = gr.components.Dataframe(
+                                value=finished_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+                    with gr.Accordion(
+                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            running_eval_table = gr.components.Dataframe(
+                                value=running_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+
+                    with gr.Accordion(
+                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                        open=False,
+                    ):
+                        with gr.Row():
+                            pending_eval_table = gr.components.Dataframe(
+                                value=pending_eval_queue_df,
+                                headers=EVAL_COLS,
+                                datatype=EVAL_TYPES,
+                                row_count=5,
+                            )
+            with gr.Row():
+                gr.Markdown("# Submit your model here!", elem_classes="markdown-text")
+
+            with gr.Row():
+                with gr.Column():
+                    model_name_textbox = gr.Textbox(label="Model name")
+                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                    model_type = gr.Dropdown(
+                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                        label="Model type",
+                        multiselect=False,
+                        value=None,
+                        interactive=True,
+                    )
+
+                with gr.Column():
+                    precision = gr.Dropdown(
+                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                        label="Precision",
+                        multiselect=False,
+                        value="float16",
+                        interactive=True,
+                    )
+                    weight_type = gr.Dropdown(
+                        choices=[i.value.name for i in WeightType],
+                        label="Weights type",
+                        multiselect=False,
+                        value="Original",
+                        interactive=True,
+                    )
+                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+            submit_button = gr.Button("Submit Eval")
+            submission_result = gr.Markdown()
+            submit_button.click(
+                add_new_eval,
+                [
+                    model_name_textbox,
+                    base_model_name_textbox,
+                    revision_name_textbox,
+                    precision,
+                    weight_type,
+                    model_type,
+                ],
+                submission_result,
+            )
+
+    with gr.Row():
+        with gr.Accordion("Citation", open=False):
+            citation_button = gr.Textbox(
+                value=CITATION_BUTTON_TEXT,
+                label=CITATION_BUTTON_LABEL,
+                lines=20,
+                elem_id="citation-button",
+                show_copy_button=True,
+            )
+
+scheduler = BackgroundScheduler()
+scheduler.add_job(restart_space, "interval", seconds=1800)
+scheduler.start()
+demo.queue(default_concurrency_limit=40).launch()
src/submission/submit.py CHANGED
@@ -15,13 +15,13 @@ REQUESTED_MODELS = None
 USERS_TO_SUBMISSION_DATES = None
 
 def add_new_eval(
-    model: str,
-    base_model: str,
-    revision: str,
-    precision: str,
-    weight_type: str,
-    model_type: str,
+    submission_file: str,
+    algo_name: str,
+    algo_info: str,
+    algo_link: str,
+    submitter_email: str,
 ):
+    return 'Success!'
     global REQUESTED_MODELS
     global USERS_TO_SUBMISSION_DATES
     if not REQUESTED_MODELS:
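
Note on the new handler: as committed, add_new_eval returns 'Success!' immediately, so the legacy model-submission logic below it never executes. Purely as a hedged sketch (not the repository's implementation), a file-based handler wired to the new form would typically validate the upload before acknowledging it. The snippet assumes the gr.File input arrives as a local file path, and the save_submission step is a hypothetical placeholder for whatever persistence the project eventually adopts.

    import json
    from datetime import datetime, timezone


    def add_new_eval_sketch(submission_file, algo_name, algo_info, algo_link, submitter_email):
        """Hypothetical validation-first variant of the new handler (not the committed code)."""
        # Assumption: the gr.File component hands us a local file path.
        try:
            with open(submission_file, "r", encoding="utf-8") as f:
                results = json.load(f)
        except (TypeError, OSError, json.JSONDecodeError):
            return "Submission failed: please upload the JSON file produced by `run_benchmark.py`."

        # Bundle the parsed results with the metadata collected from the form.
        entry = {
            "algo_name": algo_name,
            "algo_info": algo_info,
            "algo_link": algo_link,
            "submitter_email": submitter_email,
            "submitted_at": datetime.now(timezone.utc).isoformat(),
            "results": results,
        }

        # save_submission(entry) is a hypothetical persistence step (e.g. an upload to
        # the evaluation-queue dataset); it is not defined anywhere in this commit.
        # save_submission(entry)

        return "Success!"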