App debug mode #14
opened by AppleSwing
app.py CHANGED
@@ -3,10 +3,11 @@
 import os
 import datetime
 import socket
+from threading import Thread

 import gradio as gr
 import pandas as pd
-
+import time
 from apscheduler.schedulers.background import BackgroundScheduler

 from huggingface_hub import snapshot_download
@@ -38,11 +39,23 @@ from src.display.utils import (
     Precision,
 )

-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO, DEBUG_QUEUE_REPO, DEBUG_RESULTS_REPO
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
 from src.utils import get_dataset_summary_table

+def get_args():
+    import argparse
+
+    parser = argparse.ArgumentParser(description="Run the LLM Leaderboard")
+    parser.add_argument("--debug", action="store_true", help="Run in debug mode")
+    return parser.parse_args()
+
+args = get_args()
+if args.debug:
+    print("Running in debug mode")
+    QUEUE_REPO = DEBUG_QUEUE_REPO
+    RESULTS_REPO = DEBUG_RESULTS_REPO

 def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
     try:
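Note: the widened import assumes `DEBUG_QUEUE_REPO` and `DEBUG_RESULTS_REPO` already exist in `src/envs.py`; that companion change is not shown in this diff. A minimal sketch of what it might look like (the repo ids below are hypothetical placeholders, not taken from this PR):

    # src/envs.py -- assumed companion change; repo ids are hypothetical
    DEBUG_QUEUE_REPO = "my-org/debug-requests"    # hypothetical: queue repo used only by --debug runs
    DEBUG_RESULTS_REPO = "my-org/debug-results"   # hypothetical: results repo used only by --debug runs

Keeping the debug repos separate means a `--debug` run never reads from or writes to the production queue and results.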
@@ -154,7 +167,7 @@ demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
+
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("MOE-LLM-GPU-Poor-Leaderboard Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Row():
@@ -233,7 +246,7 @@ with demo:
                 interactive=False,
                 visible=True,
             )  # column_widths=["2%", "20%"]
-
+
             # Dummy leaderboard for handling the case when the user uses backspace key
             hidden_leaderboard_table_for_search = gr.components.Dataframe(
                 value=original_df[COLS] if original_df.empty is False else original_df,
@@ -288,7 +301,7 @@ with demo:

             gr.Markdown(LLM_BENCHMARKS_DETAILS, elem_classes="markdown-text")
             gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
-
+
         with gr.TabItem("Submit a model ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
@@ -373,7 +386,7 @@ with demo:
             ],
             submission_result,
         )
-
+
         with gr.Row():
             with gr.Accordion("Citing this leaderboard", open=False):
                 citation_button = gr.Textbox(
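One caveat worth flagging on the debug switch: `QUEUE_REPO = DEBUG_QUEUE_REPO` rebinds the name inside `app.py` only. Any module that grabbed its own copy via `from src.envs import QUEUE_REPO` (the submission path may do exactly this) will still see the production value. A short sketch of the distinction, using this repo's module names:

    # Rebinding a from-imported name is local to the importing module:
    from src.envs import QUEUE_REPO, DEBUG_QUEUE_REPO
    QUEUE_REPO = DEBUG_QUEUE_REPO      # changes app.py's binding only

    # Mutating the module attribute is visible to code that reads it at call time:
    import src.envs
    src.envs.QUEUE_REPO = src.envs.DEBUG_QUEUE_REPO

Even the attribute mutation will not reach modules that copied the name at import time, so it may be worth double-checking how `src.submission.submit` reads `QUEUE_REPO`.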
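For completeness, the new flag is exercised as `python app.py --debug`; without it the app keeps the production repos. One hedged suggestion, not part of this PR: `parse_args()` exits with an error on any unrecognized argument, so if the hosting environment ever launches `app.py` with extra flags, `parse_known_args()` is a drop-in tolerant variant:

    def get_args():
        import argparse

        parser = argparse.ArgumentParser(description="Run the LLM Leaderboard")
        parser.add_argument("--debug", action="store_true", help="Run in debug mode")
        # parse_known_args() returns (namespace, leftover_args) and ignores
        # anything it does not recognize instead of calling sys.exit().
        args, _unknown = parser.parse_known_args()
        return args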
|