xinchen9 committed
Commit 8e09f14 • 1 Parent(s): 8235a54

[Update]Comment all
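
In effect, the commit disables the whole leaderboard UI, so `demo` renders only the title and introduction while the restart scheduler keeps running. A minimal, self-contained sketch of what remains active after this change — the `TITLE`/`INTRODUCTION_TEXT` values and the `restart_space` body below are placeholder assumptions; in app.py they are defined earlier in the file:

```python
import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler

# Placeholder stand-ins for values defined earlier in app.py (assumptions).
TITLE = "<h1>UnlearnDiffAtk Benchmark</h1>"
INTRODUCTION_TEXT = "Intro text shown under the title."

def restart_space():
    # In app.py this restarts the Space, typically via
    # huggingface_hub.HfApi().restart_space(repo_id=...).
    pass

demo = gr.Blocks()

with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    # Everything that used to follow here (tabs, tables, submission form)
    # is commented out by this commit.

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)  # every 30 minutes
scheduler.start()
demo.launch()
```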

Files changed (1)
  1. app.py +201 -201
app.py CHANGED
@@ -137,207 +137,207 @@ with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

-    with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("UnlearnDiffAtk Benchmark", elem_id="UnlearnDiffAtk-benchmark-tab-table", id=0):
-            with gr.Row():
-                with gr.Column():
-                    with gr.Row():
-                        search_bar = gr.Textbox(
-                            placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
-                            show_label=False,
-                            elem_id="search-bar",
-                        )
-                    with gr.Row():
-                        shown_columns = gr.CheckboxGroup(
-                            choices=[
-                                c.name
-                                for c in fields(AutoEvalColumn)
-                                if not c.hidden and not c.never_hidden
-                            ],
-                            value=[
-                                c.name
-                                for c in fields(AutoEvalColumn)
-                                if c.displayed_by_default and not c.hidden and not c.never_hidden
-                            ],
-                            label="Select columns to show",
-                            elem_id="column-select",
-                            interactive=True,
-                        )
-                    with gr.Row():
-                        deleted_models_visibility = gr.Checkbox(
-                            value=False, label="Show gated/private/deleted models", interactive=True
-                        )
-                with gr.Column(min_width=320):
-                    #with gr.Box(elem_id="box-filter"):
-                    filter_columns_type = gr.CheckboxGroup(
-                        label="Unlearning types",
-                        choices=[t.to_str() for t in ModelType],
-                        value=[t.to_str() for t in ModelType],
-                        interactive=True,
-                        elem_id="filter-columns-type",
-                    )
-                    filter_columns_precision = gr.CheckboxGroup(
-                        label="Precision",
-                        choices=[i.value.name for i in Precision],
-                        value=[i.value.name for i in Precision],
-                        interactive=True,
-                        elem_id="filter-columns-precision",
-                    )
-                    filter_columns_size = gr.CheckboxGroup(
-                        label="Model sizes (in billions of parameters)",
-                        choices=list(NUMERIC_INTERVALS.keys()),
-                        value=list(NUMERIC_INTERVALS.keys()),
-                        interactive=True,
-                        elem_id="filter-columns-size",
-                    )
-
-            leaderboard_table = gr.components.Dataframe(
-                value=leaderboard_df[
-                    [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
-                    + shown_columns.value
-                ],
-                headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
-                datatype=TYPES,
-                elem_id="leaderboard-table",
-                interactive=False,
-                visible=True,
-            )
-
-            # Dummy leaderboard for handling the case when the user uses backspace key
-            hidden_leaderboard_table_for_search = gr.components.Dataframe(
-                value=original_df[COLS],
-                headers=COLS,
-                datatype=TYPES,
-                visible=False,
-            )
-            search_bar.submit(
-                update_table,
-                [
-                    hidden_leaderboard_table_for_search,
-                    shown_columns,
-                    filter_columns_type,
-                    filter_columns_precision,
-                    filter_columns_size,
-                    deleted_models_visibility,
-                    search_bar,
-                ],
-                leaderboard_table,
-            )
-            for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
-                selector.change(
-                    update_table,
-                    [
-                        hidden_leaderboard_table_for_search,
-                        shown_columns,
-                        filter_columns_type,
-                        filter_columns_precision,
-                        filter_columns_size,
-                        deleted_models_visibility,
-                        search_bar,
-                    ],
-                    leaderboard_table,
-                    queue=True,
-                )
-
-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
-
-    with gr.Row():
-        with gr.Accordion("📙 Citation", open=False):
-            citation_button = gr.Textbox(
-                value=CITATION_BUTTON_TEXT,
-                label=CITATION_BUTTON_LABEL,
-                lines=10,
-                elem_id="citation-button",
-                show_copy_button=True,
-            )
+    # with gr.Tabs(elem_classes="tab-buttons") as tabs:
+        # with gr.TabItem("UnlearnDiffAtk Benchmark", elem_id="UnlearnDiffAtk-benchmark-tab-table", id=0):
+            # with gr.Row():
+                # with gr.Column():
+                    # with gr.Row():
+                        # search_bar = gr.Textbox(
+                            # placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
+                            # show_label=False,
+                            # elem_id="search-bar",
+                        # )
+                    # with gr.Row():
+                        # shown_columns = gr.CheckboxGroup(
+                            # choices=[
+                                # c.name
+                                # for c in fields(AutoEvalColumn)
+                                # if not c.hidden and not c.never_hidden
+                            # ],
+                            # value=[
+                                # c.name
+                                # for c in fields(AutoEvalColumn)
+                                # if c.displayed_by_default and not c.hidden and not c.never_hidden
+                            # ],
+                            # label="Select columns to show",
+                            # elem_id="column-select",
+                            # interactive=True,
+                        # )
+                    # with gr.Row():
+                        # deleted_models_visibility = gr.Checkbox(
+                            # value=False, label="Show gated/private/deleted models", interactive=True
+                        # )
+                # with gr.Column(min_width=320):
+                    # #with gr.Box(elem_id="box-filter"):
+                    # filter_columns_type = gr.CheckboxGroup(
+                        # label="Unlearning types",
+                        # choices=[t.to_str() for t in ModelType],
+                        # value=[t.to_str() for t in ModelType],
+                        # interactive=True,
+                        # elem_id="filter-columns-type",
+                    # )
+                    # filter_columns_precision = gr.CheckboxGroup(
+                        # label="Precision",
+                        # choices=[i.value.name for i in Precision],
+                        # value=[i.value.name for i in Precision],
+                        # interactive=True,
+                        # elem_id="filter-columns-precision",
+                    # )
+                    # filter_columns_size = gr.CheckboxGroup(
+                        # label="Model sizes (in billions of parameters)",
+                        # choices=list(NUMERIC_INTERVALS.keys()),
+                        # value=list(NUMERIC_INTERVALS.keys()),
+                        # interactive=True,
+                        # elem_id="filter-columns-size",
+                    # )
+
+            # leaderboard_table = gr.components.Dataframe(
+                # value=leaderboard_df[
+                    # [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
+                    # + shown_columns.value
+                # ],
+                # headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
+                # datatype=TYPES,
+                # elem_id="leaderboard-table",
+                # interactive=False,
+                # visible=True,
+            # )
+
+            # # Dummy leaderboard for handling the case when the user uses backspace key
+            # hidden_leaderboard_table_for_search = gr.components.Dataframe(
+                # value=original_df[COLS],
+                # headers=COLS,
+                # datatype=TYPES,
+                # visible=False,
+            # )
+            # search_bar.submit(
+                # update_table,
+                # [
+                    # hidden_leaderboard_table_for_search,
+                    # shown_columns,
+                    # filter_columns_type,
+                    # filter_columns_precision,
+                    # filter_columns_size,
+                    # deleted_models_visibility,
+                    # search_bar,
+                # ],
+                # leaderboard_table,
+            # )
+            # for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
+                # selector.change(
+                    # update_table,
+                    # [
+                        # hidden_leaderboard_table_for_search,
+                        # shown_columns,
+                        # filter_columns_type,
+                        # filter_columns_precision,
+                        # filter_columns_size,
+                        # deleted_models_visibility,
+                        # search_bar,
+                    # ],
+                    # leaderboard_table,
+                    # queue=True,
+                # )
+
+        # with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+            # gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+            # with gr.Column():
+                # with gr.Row():
+                    # gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+                # with gr.Column():
+                    # with gr.Accordion(
+                        # f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+                        # open=False,
+                    # ):
+                        # with gr.Row():
+                            # finished_eval_table = gr.components.Dataframe(
+                                # value=finished_eval_queue_df,
+                                # headers=EVAL_COLS,
+                                # datatype=EVAL_TYPES,
+                                # row_count=5,
+                            # )
+                    # with gr.Accordion(
+                        # f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+                        # open=False,
+                    # ):
+                        # with gr.Row():
+                            # running_eval_table = gr.components.Dataframe(
+                                # value=running_eval_queue_df,
+                                # headers=EVAL_COLS,
+                                # datatype=EVAL_TYPES,
+                                # row_count=5,
+                            # )
+
+                    # with gr.Accordion(
+                        # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+                        # open=False,
+                    # ):
+                        # with gr.Row():
+                            # pending_eval_table = gr.components.Dataframe(
+                                # value=pending_eval_queue_df,
+                                # headers=EVAL_COLS,
+                                # datatype=EVAL_TYPES,
+                                # row_count=5,
+                            # )
+            # with gr.Row():
+                # gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+            # with gr.Row():
+                # with gr.Column():
+                    # model_name_textbox = gr.Textbox(label="Model name")
+                    # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                    # model_type = gr.Dropdown(
+                        # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                        # label="Model type",
+                        # multiselect=False,
+                        # value=None,
+                        # interactive=True,
+                    # )
+
+                # with gr.Column():
+                    # precision = gr.Dropdown(
+                        # choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                        # label="Precision",
+                        # multiselect=False,
+                        # value="float16",
+                        # interactive=True,
+                    # )
+                    # weight_type = gr.Dropdown(
+                        # choices=[i.value.name for i in WeightType],
+                        # label="Weights type",
+                        # multiselect=False,
+                        # value="Original",
+                        # interactive=True,
+                    # )
+                    # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+            # submit_button = gr.Button("Submit Eval")
+            # submission_result = gr.Markdown()
+            # submit_button.click(
+                # add_new_eval,
+                # [
+                    # model_name_textbox,
+                    # base_model_name_textbox,
+                    # revision_name_textbox,
+                    # precision,
+                    # weight_type,
+                    # model_type,
+                # ],
+                # submission_result,
+            # )
+
+    # with gr.Row():
+        # with gr.Accordion("📙 Citation", open=False):
+            # citation_button = gr.Textbox(
+                # value=CITATION_BUTTON_TEXT,
+                # label=CITATION_BUTTON_LABEL,
+                # lines=10,
+                # elem_id="citation-button",
+                # show_copy_button=True,
+            # )

     scheduler = BackgroundScheduler()
     scheduler.add_job(restart_space, "interval", seconds=1800)
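
For reference, the disabled block's core mechanism is one small Gradio pattern repeated per control: a hidden Dataframe holds the unfiltered data, and `search_bar.submit` / `selector.change` pass it, together with the widget values, through `update_table` into the visible table. A hedged sketch of that wiring, reduced to a single search box over toy data — the real `update_table` signature and filter logic live elsewhere in app.py, so this version is an assumption for illustration:

```python
import gradio as gr
import pandas as pd

# Toy stand-in for the leaderboard data.
df = pd.DataFrame({"Model": ["model-a", "model-b"], "Score": [0.71, 0.64]})

def update_table(hidden_df: pd.DataFrame, query: str) -> pd.DataFrame:
    # Keep rows whose model name matches any `;`-separated query.
    queries = [q.strip() for q in query.split(";") if q.strip()]
    if not queries:
        return hidden_df
    mask = hidden_df["Model"].apply(lambda name: any(q in name for q in queries))
    return hidden_df[mask]

with gr.Blocks() as demo:
    search_bar = gr.Textbox(show_label=False, placeholder="Search (separate queries with `;`)...")
    # Hidden copy of the full table, so backspacing in the search box
    # always re-filters from the original data (the "dummy leaderboard" trick).
    hidden_table = gr.Dataframe(value=df, visible=False)
    table = gr.Dataframe(value=df, interactive=False)
    # Same inputs -> update_table -> visible table flow as the original code.
    search_bar.submit(update_table, [hidden_table, search_bar], table)

demo.launch()
```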