Spaces:
Running
Running
add report submitted model
Browse files- app.py +3 -2
- watch_leaderboard.py +64 -0
app.py
CHANGED
@@ -14,7 +14,7 @@ from src.about import (
|
|
14 |
)
|
15 |
from src.display.css_html_js import custom_css
|
16 |
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
|
17 |
-
|
18 |
|
19 |
def restart_space():
|
20 |
API.restart_space(repo_id=REPO_ID)
|
@@ -78,6 +78,7 @@ with demo:
|
|
78 |
|
79 |
|
80 |
scheduler = BackgroundScheduler()
|
81 |
-
scheduler.add_job(
|
|
|
82 |
scheduler.start()
|
83 |
demo.queue(default_concurrency_limit=40).launch()
|
|
|
14 |
)
|
15 |
from src.display.css_html_js import custom_css
|
16 |
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
|
17 |
+
from watch_leaderboard import watch_submit_queue
|
18 |
|
19 |
def restart_space():
    """Restart this Space via the Hub API (Space identified by REPO_ID)."""
    API.restart_space(repo_id=REPO_ID)
|
|
|
78 |
|
79 |
|
80 |
scheduler = BackgroundScheduler()
|
81 |
+
scheduler.add_job(watch_submit_queue, "interval", seconds=3500)
|
82 |
+
scheduler.add_job(restart_space, "interval", seconds=3600)
|
83 |
scheduler.start()
|
84 |
demo.queue(default_concurrency_limit=40).launch()
|
watch_leaderboard.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import os
from collections import defaultdict
from typing import Any, Dict, List, Tuple

import requests
from huggingface_hub import snapshot_download

from src.envs import EVAL_REQUESTS_PATH, QUEUE_REPO
|
9 |
+
|
10 |
+
def webhook_bot(msg: str) -> None:
    """Post *msg* to the Discord webhook named by DISCORD_WEBHOOK_URL.

    Best-effort notifier: when the environment variable is unset or empty
    the call is silently skipped, so the watcher runs fine without a webhook.

    Args:
        msg: Message content to send (Discord ``content`` field).
    """
    bot_url = os.environ.get("DISCORD_WEBHOOK_URL", "")
    # Truthiness check instead of `!= ""`; skip the network call entirely
    # when no webhook is configured.
    if bot_url:
        # Timeout so a slow/unreachable webhook cannot hang the scheduler job.
        requests.post(bot_url, json={"content": msg}, timeout=30)
14 |
+
|
15 |
+
def read_all_pending_model(EVAL_REQUESTS_PATH: str) -> Dict[str, List[Tuple[Any, str]]]:
    """Group eval-request JSON files by model and keep models with a PENDING request.

    Scans files exactly one directory level below ``EVAL_REQUESTS_PATH``
    (presumably the queue layout is ``<org>/<request>.json`` — TODO confirm
    against the queue repo structure).

    Args:
        EVAL_REQUESTS_PATH: Root directory of the downloaded request queue.

    Returns:
        Mapping of model name -> list of ``(request_info_dict, abs_file_path)``
        for every model that has at least one request with ``status == "PENDING"``.
        All of that model's requests are included, not only the pending ones.
    """
    depth = 1
    alls: Dict[str, List[Tuple[Any, str]]] = defaultdict(list)
    for root, _, files in os.walk(EVAL_REQUESTS_PATH):
        # Depth is measured by separator count relative to the root path;
        # NOTE(review): a trailing os.sep on the root would skew this — same
        # as the original behavior, kept intact.
        current_depth = root.count(os.sep) - EVAL_REQUESTS_PATH.count(os.sep)
        if current_depth != depth:
            continue  # guard clause: only files exactly one level down
        for file in files:
            if not file.endswith(".json"):
                continue
            file_abs_path = os.path.join(root, file)
            # Explicit encoding: JSON is UTF-8 regardless of platform locale.
            with open(file_abs_path, "r", encoding="utf-8") as f:
                info = json.load(f)
            alls[info["model"]].append((info, file_abs_path))

    # any() short-circuits on the first PENDING entry instead of scanning
    # every request for the model as the original flag-loop did.
    return {
        model: entries
        for model, entries in alls.items()
        if any(info["status"] == "PENDING" for info, _ in entries)
    }
39 |
+
|
40 |
+
def watch_submit_queue() -> None:
    """Scheduled job: refresh the local queue snapshot and report pending models.

    Downloads the request-queue dataset into ``EVAL_REQUESTS_PATH``, collects
    the unique model names that still have a PENDING request, and sends a
    summary to the Discord webhook via :func:`webhook_bot`.

    Any exception is logged and swallowed so one failed run cannot kill the
    background scheduler.
    """
    try:
        snapshot_download(
            repo_id=QUEUE_REPO,
            local_dir=EVAL_REQUESTS_PATH,
            repo_type="dataset",
            tqdm_class=None,
            etag_timeout=30,
        )

        alls = read_all_pending_model(EVAL_REQUESTS_PATH)
        # Accumulate into a set directly instead of list-then-set, and do not
        # rebind the outer loop variable (the original shadowed `model_name`
        # with info["model"] inside its own loop body).
        pending: set = set()
        for rows in alls.values():
            for info, _filepath in rows:
                if info["status"] == "PENDING":
                    # Use the name recorded in the request file itself.
                    pending.add(info["model"])
        pending_model = list(pending)
        pending_model_str = "\n".join(pending_model)
        webhook_bot(f"Leaderboard model pending: {len(pending_model)}\n### Models\n{pending_model_str}")
    except Exception as e:
        # Job boundary: log and keep the scheduler alive (best-effort watcher).
        print(f"Watch submit queue error: {e}")