from src.display.utils import EVAL_COLS, EVAL_TYPES
from src.envs import EVAL_REQUESTS_PATH
from src.populate import get_evaluation_queue_df
from src.submission.submit import add_new_eval
import gradio as gr
def show_submit_page(index: int) -> None:
    """Render the "🚀 Submit!" tab: evaluation-queue tables plus a model-submission form.

    Must be called inside an active ``gr.Blocks``/``gr.Tabs`` context so that
    the components created here attach to the surrounding layout.

    Args:
        index: Position of this tab within the enclosing ``gr.Tabs`` container
            (passed through to ``gr.TabItem``'s ``id``).
    """
    # Snapshot of the evaluation queue read from EVAL_REQUESTS_PATH.
    (
        finished_eval_queue_df,
        running_eval_queue_df,  # NOTE(review): fetched but never displayed below — confirm intentional
        pending_eval_queue_df,
        failed_eval_queue_df,
    ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

    def _queue_accordion(title: str, queue_df) -> None:
        # Shared layout for the collapsible queue tables. The Dataframe
        # registers itself with the enclosing Blocks context, so no
        # reference to it needs to be kept.
        with gr.Accordion(title, open=False):
            with gr.Row():
                gr.components.Dataframe(
                    value=queue_df,
                    headers=EVAL_COLS,
                    datatype=EVAL_TYPES,
                    row_count=5,
                )

    with gr.TabItem("🚀 Submit! ", elem_id="llm-benchmark-tab-table", id=index):
        with gr.Column():
            _queue_accordion(
                f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
                finished_eval_queue_df,
            )
            _queue_accordion(
                f"🔴 Failed Evaluations ({len(failed_eval_queue_df)})",
                failed_eval_queue_df,
            )
            _queue_accordion(
                f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
                pending_eval_queue_df,
            )

        with gr.Row():
            gr.Markdown("# ✉️✨ Submit your model!", elem_classes="markdown-text")
        with gr.Row():
            with gr.Column():
                model_name_textbox = gr.Textbox(label="Huggingface Model")
                link_to_model_blog = gr.Textbox(label="Link to model release blog / technical report")
                gr.Markdown("* The evaluation will be run manually in batches. Please allow up to one week for processing.")
                gr.Markdown("* By default, the model is running using Flash-Attn2. If the model doesn't support this, please contact us via the OpenTyphoon Discord.")
                submit_button = gr.Button("Submit Model")
                submission_result = gr.Markdown()
                # Wire the form: add_new_eval(model, blog_link) -> markdown status.
                submit_button.click(
                    add_new_eval,
                    [
                        model_name_textbox,
                        link_to_model_blog,
                    ],
                    submission_result,
                )
        with gr.Row():
            # NOTE(review): heading with no form beneath it — looks like an
            # unfinished stub; confirm before removing.
            gr.Markdown("# ✉️✨ Submit your task here!", elem_classes="markdown-text")