import re
from io import StringIO

import pandas as pd
import plotly.graph_objs as go
import streamlit as st

from yall import create_yall

def convert_markdown_table_to_dataframe(md_content):
    """
    Converts a markdown table to a Pandas DataFrame, handling special characters
    and links, extracts Hugging Face URLs, and adds them to a new column.
    """
    # Remove leading and trailing | characters on each line
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)

    # Create a DataFrame from the cleaned content (sep is a regex, hence the raw string)
    df = pd.read_csv(StringIO(cleaned_content), sep=r'\|', engine='python')

    # Drop the markdown separator row (|---|---|...) that follows the header
    df = df.drop(0, axis=0)

    # Strip whitespace from column names
    df.columns = df.columns.str.strip()

    # Extract Hugging Face URLs and add them to a new column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
    df['URL'] = df['Model'].apply(
        lambda x: m.group(2) if (m := re.search(model_link_pattern, x)) else None
    )

    # Keep only the model link text in the Model column
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))

    return df
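
# Illustrative example (hypothetical row, not real leaderboard data): a cell like
#   [foo-7B](https://huggingface.co/user/foo-7B) [📄](https://gist.github.com/...)
# in the 'Model' column yields Model == 'foo-7B' and
# URL == 'https://huggingface.co/user/foo-7B'.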

def create_bar_chart(df, category):
    """Create and display a bar chart for a given category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Turbo')
    ))

    # Update layout for better readability
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20)
    )

    st.plotly_chart(fig, use_container_width=True)
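
# For example, create_bar_chart(df, 'Average') renders a horizontal bar chart of
# the 'Average' scores; main() below calls it once per score column.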

def main():
    st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")
    st.title("🏆 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with [🧐 LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using the [Nous](https://huggingface.co/NousResearch) benchmark suite. It's a collection of my own evaluations.")

    content = create_yall()
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])

    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']

                # Display the leaderboard table, coercing score columns to numbers
                df = convert_markdown_table_to_dataframe(content)
                for col in score_columns:
                    df[col] = pd.to_numeric(df[col].str.strip(), errors='coerce')
                st.dataframe(df, use_container_width=True)

                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])

                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])

                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])

            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")
    # About tab
    with tab2:
        st.markdown('''
### Nous benchmark suite

Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:

* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
* **GPT4All** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`

### Reproducibility

You can easily reproduce these results using [🧐 LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). It uploads the results to GitHub as gists, which this leaderboard then reads back, as in the sketch below. You can find the entire table with links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
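
In this Space, `create_yall()` handles that retrieval; here is a minimal sketch of pulling a gist's markdown via the GitHub API (the single-file assumption about the gist's layout is mine):

```python
import requests

gist_id = "90294929a2dbcb8877f9696f28105fdf"  # the public results gist linked above
response = requests.get(f"https://api.github.com/gists/{gist_id}")
response.raise_for_status()

# A gist can hold several files; take the markdown content of the first one
first_file = next(iter(response.json()["files"].values()))
markdown_table = first_file["content"]
```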
### Clone this space

You can create your own leaderboard from your LLM AutoEval results on GitHub Gist. You just need to clone this space and set two things:

* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
* Create a new secret under Settings > Variables and secrets (name: `github`, value: [your GitHub token](https://github.com/settings/tokens)), or script both steps as sketched after this list.
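
If you prefer to script the duplication rather than use the web UI, something like this sketch with the `huggingface_hub` client should work (the username and both tokens are placeholders, not real values):

```python
from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # your Hugging Face write token (placeholder)

# Duplicate this Space into your own account
api.duplicate_space("mlabonne/Yet_Another_LLM_Leaderboard")

# Attach your GitHub token as the "github" secret that yall.py expects
api.add_space_secret(
    repo_id="your-username/Yet_Another_LLM_Leaderboard",  # placeholder
    key="github",
    value="ghp_...",  # placeholder GitHub token
)
```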
''')

if __name__ == "__main__":
    main()