import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.benchmarks import DOMAIN_COLS_QA, LANG_COLS_QA, metric_list
from src.display.css_html_js import custom_css
from src.display.utils import (
    QA_BENCHMARK_COLS,
    COLS,
    TYPES,
    AutoEvalColumnQA,
    fields,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.leaderboard.read_evals import get_raw_eval_results
from src.populate import get_leaderboard_df
from utils import update_table, update_metric
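

# Restart this Space via the Hub API; scheduled to run periodically below.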
def restart_space():
    API.restart_space(repo_id=REPO_ID)
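

# The block below originally synced the evaluation request/result datasets
# from the Hub at startup, restarting the Space on any failure; it is kept
# commented out here.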
# try:
#     print(EVAL_REQUESTS_PATH)
#     snapshot_download(
#         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30,
#         token=TOKEN
#     )
# except Exception:
#     restart_space()

# try:
#     print(EVAL_RESULTS_PATH)
#     snapshot_download(
#         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30,
#         token=TOKEN
#     )
# except Exception:
#     restart_space()
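
# Load the raw evaluation results and build the initial QA leaderboard,
# with nDCG@3 as the default metric.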
raw_data_qa = get_raw_eval_results(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH)
original_df_qa = get_leaderboard_df(raw_data_qa, COLS, QA_BENCHMARK_COLS, task='qa', metric='ndcg_at_3')
print(f'data loaded: {len(raw_data_qa)}, {original_df_qa.shape}')

leaderboard_df = original_df_qa.copy()
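

# Callback for the metric dropdown: recompute the leaderboard from the raw
# results using the newly selected metric and the current filter state.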
def update_metric_qa(
    metric: str,
    domains: list,
    langs: list,
    reranking_model: list,
    query: str,
):
    return update_metric(raw_data_qa, metric, domains, langs, reranking_model, query)


# (
#     finished_eval_queue_df,
#     running_eval_queue_df,
#     pending_eval_queue_df,
# ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
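
# Build the Gradio UI: a QA leaderboard tab with search, domain/language/
# reranking-model filters and a metric selector, plus an About tab.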
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("QA", elem_id="llm-benchmark-tab-table", id=0):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        search_bar = gr.Textbox(
                            placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
                            show_label=False,
                            elem_id="search-bar",
                        )
                    # select domain
                    with gr.Row():
                        selected_domains = gr.CheckboxGroup(
                            choices=DOMAIN_COLS_QA,
                            value=DOMAIN_COLS_QA,
                            label="Select the domains",
                            elem_id="domain-column-select",
                            interactive=True,
                        )
                    # select language
                    with gr.Row():
                        selected_langs = gr.CheckboxGroup(
                            choices=LANG_COLS_QA,
                            value=LANG_COLS_QA,
                            label="Select the languages",
                            elem_id="language-column-select",
                            interactive=True,
                        )
                    # select reranking models
                    reranking_models = list(frozenset([eval_result.reranking_model for eval_result in raw_data_qa]))
                    with gr.Row():
                        selected_rerankings = gr.CheckboxGroup(
                            choices=reranking_models,
                            value=reranking_models,
                            label="Select the reranking models",
                            elem_id="reranking-select",
                            interactive=True,
                        )
                with gr.Column(min_width=320):
                    selected_metric = gr.Dropdown(
                        choices=metric_list,
                        value=metric_list[1],
                        label="Select the metric",
                        interactive=True,
                        elem_id="metric-select",
                    )
            # The displayed table is recomputed from raw_data when
            # selected_metric changes (see the metric listener below)
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df,
                # headers=shown_columns,
                # datatype=TYPES,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )
            # Hidden, unfiltered copy of the leaderboard, passed to update_table
            # as the source so that deleting search text (e.g. with the
            # backspace key) can restore previously filtered-out rows
            hidden_leaderboard_table_for_search = gr.components.Dataframe(
                value=leaderboard_df,
                # headers=COLS,
                # datatype=TYPES,
                visible=False,
            )
            # Set search_bar listener
            search_bar.submit(
                update_table,
                [
                    hidden_leaderboard_table_for_search,
                    selected_domains,
                    selected_langs,
                    selected_rerankings,
                    search_bar,
                ],
                leaderboard_table,
            )
            # Set column-wise listeners
            for selector in [
                selected_domains, selected_langs, selected_rerankings
            ]:
                selector.change(
                    update_table,
                    [
                        hidden_leaderboard_table_for_search,
                        selected_domains,
                        selected_langs,
                        selected_rerankings,
                        search_bar,
                    ],
                    leaderboard_table,
                    queue=True,
                )
            # Set metric listener
            selected_metric.change(
                update_metric_qa,
                [
                    selected_metric,
                    selected_domains,
                    selected_langs,
                    selected_rerankings,
                    search_bar,
                ],
                leaderboard_table,
                queue=True,
            )
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
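

# Schedule a full Space restart every 30 minutes (restart_space above).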
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch()