# IL-TUR-Leaderboard / uploads.py
import os
import json
import datetime
from email.utils import parseaddr
from io import BytesIO
from huggingface_hub import HfApi
import gradio as gr
from eval_utils import get_evaluation_scores

# Constants
LEADERBOARD_PATH = "Exploration-Lab/IL-TUR-Leaderboard"
SUBMISSION_FORMAT = "predictions"
TOKEN = os.environ.get("TOKEN", None)
YEAR_VERSION = "2024"

api = HfApi(token=TOKEN)
# Helper functions for formatting messages
def format_message(msg, color):
    return f"<p style='color: {color}; font-size: 20px; text-align: center;'>{msg}</p>"


def format_error(msg):
    return format_message(msg, "red")


def format_warning(msg):
    return format_message(msg, "orange")


def format_log(msg):
    return format_message(msg, "green")


def model_hyperlink(link, model_name):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline; text-decoration-style: dotted;">{model_name}</a>'
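# Example (illustrative): format_error("Please attach a file.") produces
# "<p style='color: red; font-size: 20px; text-align: center;'>Please attach a file.</p>",
# which the app presumably renders through a Gradio HTML/Markdown component.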
def input_verification(method_name, url, path_to_file, organisation, mail):
    """Verify the submission fields; return a warning message on failure, else None."""
    # Check if any field is empty
    if any(
        field == "" for field in [method_name, url, path_to_file, organisation, mail]
    ):
        return format_warning("Please fill all the fields.")
    # Check if a file is attached
    if path_to_file is None:
        return format_warning("Please attach a file.")
    return None
def add_new_eval(
    method_name: str,
    submitted_by: str,
    url: str,
    path_to_file: str,
    organisation: str,
    mail: str,
):
    """Add a new evaluation to the leaderboard."""
    # Verify the input fields (empty fields, missing file)
    warning = input_verification(method_name, url, path_to_file, organisation, mail)
    if warning is not None:
        return warning

    # Verify email format
    _, parsed_mail = parseaddr(mail)
    if "@" not in parsed_mail:
        return format_warning("Please provide a valid email address.")
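    # Note: parseaddr("Jane <jane@example.com>") returns ("Jane", "jane@example.com"),
    # so the check above only rejects inputs whose parsed address lacks an "@";
    # it is not a full RFC 5322 validation.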
    # Process submission
    if SUBMISSION_FORMAT == "predictions":
        # Read submission and gold data
        with open(path_to_file, "r") as f:
            submission_data = json.load(f)
        with open("submissions/baseline/IL_TUR_eval_gold.json", "r") as f:
            gold_data = json.load(f)

        # Get evaluation scores
        submission = get_evaluation_scores(gold_data, submission_data)

        # Add metadata
        submission["Method"] = method_name
        submission["Submitted By"] = submitted_by
        # submission["Organisation"] = organisation
        # submission["Email"] = mail
        submission["Github Link"] = url
    else:
        # Read submission directly if it's not in predictions format
        with open(path_to_file, "r") as f:
            submission = json.load(f)
    # Update results
    with open("submissions/baseline/results.json", "r") as f:
        results = json.load(f)
    results.append(submission[0])

    # Prepare buffer for upload
    leaderboard_buffer = BytesIO(json.dumps(results).encode())
    leaderboard_buffer.seek(0)

    # Upload to Hugging Face
    api.upload_file(
        repo_id=LEADERBOARD_PATH,
        path_in_repo="submissions/baseline/results.json",
        path_or_fileobj=leaderboard_buffer,
        token=TOKEN,
        repo_type="space",
    )

    return format_log(
        f"Method {method_name} submitted by {organisation} successfully. \n"
        "Please refresh the leaderboard, and wait for the evaluation results."
    )
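

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): one way add_new_eval
# could be wired to a Gradio submission form. Component names and labels are
# assumptions; the Space's own UI is built elsewhere.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    with gr.Blocks() as demo:
        method_name = gr.Textbox(label="Method name")
        submitted_by = gr.Textbox(label="Submitted by")
        url = gr.Textbox(label="GitHub / paper URL")
        organisation = gr.Textbox(label="Organisation")
        mail = gr.Textbox(label="Contact email")
        # type="filepath" hands add_new_eval a path string it can open() directly
        predictions_file = gr.File(label="Predictions JSON", type="filepath")
        status = gr.HTML()  # displays the formatted log/warning/error message
        submit = gr.Button("Submit")
        submit.click(
            fn=add_new_eval,
            inputs=[method_name, submitted_by, url, predictions_file, organisation, mail],
            outputs=status,
        )
    demo.launch()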