# comparator / app.py
# (Hugging Face Space file-viewer residue — uploader "albertvillanova",
# commit bea7063 "Support more than 2 models", raw size 8.38 kB —
# preserved here as a comment so the file remains valid Python.)
from functools import partial
import gradio as gr
import src.constants as constants
from src.details import (
clear_details,
display_details,
display_loading_message_for_details,
load_details,
update_load_details_component,
update_sample_idx_component,
update_subtasks_component,
update_task_description_component,
)
from src.results import (
clear_results,
clear_results_file,
display_loading_message_for_results,
display_results,
download_results,
fetch_result_paths,
load_results,
plot_results,
sort_result_paths_per_model,
update_load_results_component,
update_tasks_component,
)
# NOTE(review): a __main__ guard was deliberately commented out here —
# presumably so the Space runner executes this module's top level directly;
# confirm before re-adding it. Importing this file triggers the fetch below.
# if __name__ == "__main__":
# Index the available result files per model. This runs at import time and
# presumably performs a network fetch (see src.results.fetch_result_paths) —
# TODO confirm.
result_paths_per_model = sort_result_paths_per_model(fetch_result_paths())
# Pre-bind the path index so UI callbacks only pass the selected model ids.
# NOTE: this intentionally rebinds the imported name `load_results` to the partial.
load_results = partial(load_results, result_paths_per_model=result_paths_per_model)
# UI definition. NOTE: statement order matters — component creation order is
# the on-screen layout, and the event chains at the bottom reference the
# components created here.
with gr.Blocks(fill_height=True, fill_width=True) as demo:
    # Static header / usage notes.
    gr.HTML("<h1 style='text-align: center;'>Compare Results of the πŸ€— Open LLM Leaderboard</h1>")
    gr.HTML("<h3 style='text-align: center;'>Select models to load and compare their results</h3>")
    gr.HTML(
        "<p style='text-align: center; color:orange;'>⚠ This demo is a beta version, and we're actively working on it, so you might find some tiny bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
    )
    gr.Markdown(
        "Compare Results of the πŸ€— [Open LLM Leaderboard](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard). "
        "Check out the [documentation](https://huggingface.co/docs/leaderboards/open_llm_leaderboard/about) πŸ“„ to find explanations on the evaluations used, their configuration parameters and details on the input/outputs for the models."
    )
    # Model selector shared by all three tabs; choices are the keys of the
    # result-path index built at import time.
    with gr.Row():
        model_ids = gr.Dropdown(choices=list(result_paths_per_model.keys()), label="Models", multiselect=True)
    with gr.Row():
        # --- "Results" tab: aggregated scores table + two plots -------------
        with gr.Tab("Results"):
            load_results_btn = gr.Button("Load", interactive=False)  # enabled by the model_ids.input handler below
            clear_results_btn = gr.Button("Clear")
            results_task = gr.Radio(
                ["All"] + list(constants.TASKS.values()),
                label="Tasks",
                info="Evaluation tasks to be displayed",
                value="All",
                visible=False,  # presumably revealed by update_tasks_component after a load — confirm
            )
            results_task_description = gr.Textbox(
                label="Task Description",
                lines=3,
                visible=False,
            )
            hide_std_errors = gr.Checkbox(label="Hide Standard Errors", value=True, info="Options")
            with gr.Row():
                results_plot_1 = gr.Plot(visible=True)
                results_plot_2 = gr.Plot(visible=True)
            results = gr.HTML()  # rendered results table
            # Loaded results kept server-side; its .change event drives re-rendering below.
            results_dataframe = gr.State()
            download_results_btn = gr.Button("Download")
            results_file = gr.File(visible=False)
        # --- "Configs" tab: evaluation-configuration comparison --------------
        with gr.Tab("Configs"):
            load_configs_btn = gr.Button("Load", interactive=False)  # enabled together with load_results_btn
            clear_configs_btn = gr.Button("Clear")
            configs_task = gr.Radio(
                ["All"] + list(constants.TASKS.values()),
                label="Tasks",
                info="Evaluation tasks to be displayed",
                value="All",
                visible=False,
            )
            configs_task_description = gr.Textbox(
                label="Task Description",
                lines=3,
                visible=False,
            )
            show_only_differences = gr.Checkbox(label="Show Only Differences", value=False, info="Options")
            configs = gr.HTML()
        # --- "Details" tab: per-sample inspection of a single (sub)task ------
        with gr.Tab("Details"):
            details_task = gr.Radio(
                list(constants.TASKS.values()),  # no "All" option here, unlike the other tabs
                label="Tasks",
                info="Evaluation tasks to be loaded",
                interactive=True,
            )
            details_task_description = gr.Textbox(
                label="Task Description",
                lines=3,
            )
            with gr.Row():
                # NOTE(review): presumably shown when the chosen task's details
                # require authentication — confirm against update_subtasks_component.
                login_btn = gr.LoginButton(size="sm", visible=False)
                subtask = gr.Radio(
                    choices=None,  # populated by update_subtasks_component (was: constants.SUBTASKS.get(details_task.value))
                    label="Subtasks",
                    info="Evaluation subtasks to be loaded (choose one of the Tasks above)",
                )
            load_details_btn = gr.Button("Load Details", interactive=False)
            clear_details_btn = gr.Button("Clear Details")
            sample_idx = gr.Number(
                label="Sample Index", info="Index of the sample to be displayed", value=0, minimum=0, visible=False
            )
            details_show_only_differences = gr.Checkbox(label="Show Only Differences", value=False, info="Options")
            details = gr.HTML()
            details_dataframe = gr.State()  # loaded details; .change drives the sample view below

    # ---------- Event wiring: Results / Configs ----------
    # Selecting models updates both Load buttons (interactive state).
    gr.on(
        triggers=[model_ids.input],
        fn=update_load_results_component,
        outputs=[load_results_btn, load_configs_btn],
    )
    # Either Load button: show a loading message, load results for the selected
    # models into State, then update the two task radios.
    gr.on(
        triggers=[load_results_btn.click, load_configs_btn.click],
        fn=display_loading_message_for_results,
        outputs=[results, configs],
    ).then(
        fn=load_results,  # the partial bound to result_paths_per_model at import time
        inputs=model_ids,
        outputs=results_dataframe,
    ).then(
        fn=update_tasks_component,
        outputs=[results_task, configs_task],
    )
    # Synchronize the results_task and configs_task radio buttons
    results_task.input(fn=lambda task: task, inputs=results_task, outputs=configs_task)
    configs_task.input(fn=lambda task: task, inputs=configs_task, outputs=results_task)
    # Update both task descriptions from results_task. A configs_task.input
    # syncs results_task (above), whose .change then re-runs this chain, so
    # both descriptions stay in step regardless of which radio was touched.
    results_task.change(
        fn=update_task_description_component,
        inputs=results_task,
        outputs=results_task_description,
    ).then(
        fn=update_task_description_component,
        inputs=results_task,
        outputs=configs_task_description,
    )
    # Display results
    # Re-render the results/configs HTML whenever the data or any display
    # option changes, then refresh the plots, then invalidate any previously
    # generated download file.
    gr.on(
        triggers=[
            results_dataframe.change,
            results_task.change,
            hide_std_errors.change,
            show_only_differences.change,
        ],
        fn=display_results,
        inputs=[results_dataframe, results_task, hide_std_errors, show_only_differences],
        outputs=[results, configs],
    ).then(
        fn=plot_results,
        inputs=[results_dataframe, results_task],
        outputs=[results_plot_1, results_plot_2],
    ).then(
        fn=clear_results_file,
        outputs=results_file,
    )
    # Export the currently rendered results HTML as a downloadable file.
    download_results_btn.click(
        fn=download_results,
        inputs=results,
        outputs=results_file,
    )
    # Either Clear button resets the shared selector, the loaded state, both
    # Load buttons and both task radios, then clears the download file.
    gr.on(
        triggers=[clear_results_btn.click, clear_configs_btn.click],
        fn=clear_results,
        outputs=[
            model_ids,
            results_dataframe,
            load_results_btn,
            load_configs_btn,
            results_task,
            configs_task,
        ],
    ).then(
        fn=clear_results_file,
        outputs=results_file,
    )
    # ---------- Event wiring: Details ----------
    # Picking a task updates its description, then repopulates the subtask
    # radio (and the login button) for that task.
    details_task.change(
        fn=update_task_description_component,
        inputs=details_task,
        outputs=details_task_description,
    ).then(
        fn=update_subtasks_component,
        inputs=details_task,
        outputs=[login_btn, subtask],
    )
    # Any of model/subtask/task selection updates the Load Details button state.
    gr.on(
        triggers=[model_ids.input, subtask.input, details_task.input],
        fn=update_load_details_component,
        inputs=[model_ids, subtask],
        outputs=load_details_btn,
    )
    # Load chain: loading message -> fetch details into State -> update the
    # sample-index selector for the loaded data.
    load_details_btn.click(
        fn=display_loading_message_for_details,
        outputs=details,
    ).then(
        fn=load_details,
        inputs=[model_ids, subtask],
        outputs=details_dataframe,
    ).then(
        fn=update_sample_idx_component,
        inputs=[details_dataframe],
        outputs=sample_idx,
    )
    # Re-render the sample view when the data, the index, or the diff-only
    # option changes.
    gr.on(
        triggers=[
            details_dataframe.change,
            sample_idx.change,
            details_show_only_differences.change,
        ],
        fn=display_details,
        inputs=[details_dataframe, sample_idx, details_show_only_differences],
        outputs=details,
    )
    # Reset the Details tab (and the shared model selector).
    clear_details_btn.click(
        fn=clear_details,
        outputs=[
            model_ids,
            details_dataframe,
            details_task,
            subtask,
            load_details_btn,
            sample_idx,
        ],
    )
# Start the Gradio server; runs unconditionally when this module is executed
# or imported (there is no __main__ guard in this file).
demo.launch()