import gradio as gr
import os
from huggingface_hub import HfApi, snapshot_download
from apscheduler.schedulers.background import BackgroundScheduler
from datasets import load_dataset
from src.utils import load_all_data
from src.md import ABOUT_TEXT, TOP_TEXT
from src.plt import plot_avg_correlation
from src.constants import subset_mapping, length_categories, example_counts
from src.css import custom_css
import numpy as np
api = HfApi()
COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")
evals_repo = "allenai/reward-bench-results"
eval_set_repo = "allenai/reward-bench"
repo_dir_rewardbench = "./evals/rewardbench/"
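# COLLAB_TOKEN is read from the environment (typically a Space secret); it is used to pull the results
# dataset, load the eval set, and restart this Space on a schedule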
def restart_space():
api.restart_space(repo_id="allenai/reward-bench", token=COLLAB_TOKEN)
print("Pulling evaluation results")
repo = snapshot_download(
local_dir=repo_dir_rewardbench,
ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*"],
repo_id=evals_repo,
use_auth_token=COLLAB_TOKEN,
tqdm_class=None,
etag_timeout=30,
repo_type="dataset",
)
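# results are snapshotted once at startup; the BackgroundScheduler at the bottom of this file restarts the
# Space every 3 hours, presumably so that newly uploaded evaluation results are picked up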
def avg_over_rewardbench(dataframe_core, dataframe_prefs):
"""
    Averages over the RewardBench subsets and returns a dataframe with only the aggregated columns.
    We average over 4 core sections (per-prompt weighting) plus the prior test sets:
    1. Chat: the easy chat subsets (alpacaeval-easy, alpacaeval-length, alpacaeval-hard, mt-bench-easy, mt-bench-medium)
    2. Chat Hard: the hard chat subsets (mt-bench-hard, llmbar-natural, llmbar-adver-neighbor, llmbar-adver-GPTInst, llmbar-adver-GPTOut, llmbar-adver-manual)
    3. Safety: the safety subsets (refusals-dangerous, refusals-offensive, xstest-should-refuse, xstest-should-respond, donotanswer)
    4. Reasoning: the code and math subsets (math-prm, hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
    5. Prior Sets (0.5 weight): the prior test sets (anthropic_helpful, anthropic_hhh, shp, summarize)
"""
new_df = dataframe_core.copy()
dataframe_prefs = dataframe_prefs.copy()
    # for each main section (keys of subset_mapping), take the example-count-weighted average of its subsets and store it per model
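    # illustrative example (hypothetical numbers): with subset accuracies [0.90, 0.80] and example counts
    # [100, 50], np.average([0.90, 0.80], weights=[100, 50]) == (0.90*100 + 0.80*50) / 150 ≈ 0.867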
for subset, sub_subsets in subset_mapping.items():
subset_cols = [col for col in new_df.columns if col in sub_subsets]
sub_data = new_df[subset_cols].values # take the relevant column values
sub_counts = [example_counts[s] for s in subset_cols] # take the example counts
new_df[subset] = np.average(sub_data, axis=1, weights=sub_counts) # take the weighted average
# new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)
data_cols = list(subset_mapping.keys())
    keep_columns = ["model", "model_type"] + data_cols
# keep_columns = ["model", "average"] + subsets
new_df = new_df[keep_columns]
# selected average from pref_sets
pref_columns = ["anthropic_helpful", "anthropic_hhh", "shp", "summarize"]
pref_data = dataframe_prefs[pref_columns].values
    # not every model appears in dataframe_prefs, so compute its prior-set average and merge it in by model name below
    dataframe_prefs["Prior Sets (0.5 weight)"] = np.nanmean(pref_data, axis=1)
    # add an empty Prior Sets column to new_df
    new_df["Prior Sets (0.5 weight)"] = np.nan
    # for each row of new_df, copy the value from dataframe_prefs if the model is present there
values = []
for i, row in new_df.iterrows():
model = row["model"]
if model in dataframe_prefs["model"].values:
values.append(dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0])
# new_df.at[i, "Prior Sets (0.5 weight)"] = dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0]
else:
values.append(np.nan)
new_df["Prior Sets (0.5 weight)"] = values
# add total average
data_cols += ["Prior Sets (0.5 weight)"]
final_data = new_df[data_cols].values
masked_data = np.ma.masked_array(final_data, np.isnan(final_data))
weights = [2, 2, 2, 2, 1]
average = np.ma.average(masked_data, axis=1, weights=weights)
new_df["average"] = average.filled(np.nan)
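    # note: masked entries are ignored by np.ma.average, so a model with no Prior Sets score is averaged over
    # the four core sections with weights 2,2,2,2, i.e. an unweighted mean of those sections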
# new_df["average"] = np.nanmean(new_df[data_cols].values, axis=1)
# make average third column
keep_columns = ["model", "model_type", "average"] + data_cols
new_df = new_df[keep_columns]
return new_df
def expand_subsets(dataframe):
# TODO need to modify data/ script to do this
pass
def length_bias_check(dataframe):
"""
Takes the raw rewardbench dataframe and splits the data into new buckets according to length_categories.
    Then, returns the per-bucket averages ("Length Bias", "Neutral", "Terse Bias") for each model.
"""
new_df = dataframe.copy()
existing_subsets = new_df.columns[3:] # model, model_type, average
final_subsets = ["Length Bias", "Neutral", "Terse Bias"]
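    # length_categories maps each subset name to "True", "Neutral", or "False"; below, "True" subsets fill the
    # "Length Bias" bucket and "False" subsets the "Terse Bias" bucket (the label presumably reflects whether
    # the preferred completions in that subset tend to be longer)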
# new data is empty list dict for each final subset
new_data = {s: [] for s in final_subsets}
    # each existing subset is routed to a bucket based on length_categories[subset] ("True", "Neutral", or "False")
for subset in existing_subsets:
subset_data = new_df[subset].values
subset_length = length_categories[subset]
# route to the correct bucket
if subset_length == "True":
new_data["Length Bias"].append(subset_data)
elif subset_length == "Neutral":
new_data["Neutral"].append(subset_data)
elif subset_length == "False":
new_data["Terse Bias"].append(subset_data)
# take average of new_data and add to new_df (removing other columns than model)
for subset in final_subsets:
new_df[subset] = np.nanmean(new_data[subset], axis=0)
keep_columns = ["model"] + final_subsets
new_df = new_df[keep_columns]
# recompute average
# new_df["average"] = np.round(np.nanmean(new_df[final_subsets].values, axis=1), 2)
return new_df
rewardbench_data = load_all_data(repo_dir_rewardbench, subdir="eval-set").sort_values(by='average', ascending=False)
rewardbench_data_length = length_bias_check(rewardbench_data).sort_values(by='Terse Bias', ascending=False)
prefs_data = load_all_data(repo_dir_rewardbench, subdir="pref-sets").sort_values(by='average', ascending=False)
# prefs_data_sub = expand_subsets(prefs_data).sort_values(by='average', ascending=False)
rewardbench_data_avg = avg_over_rewardbench(rewardbench_data, prefs_data).sort_values(by='average', ascending=False)
def prep_df(df):
    # insert a rank column at position 0 (the column name itself is empty)
df.insert(0, '', range(1, 1 + len(df)))
# replace "model" with "Model" and "model_type" with "Model Type" and "average" with "Average"
df = df.rename(columns={"model": "Model", "model_type": "Model Type", "average": "Average"})
# if "Model Type" in columns
if "Model Type" in df.columns:
# get model_types that have generative in them
mask = df["Model Type"].str.contains("generative", case=False, na=False)
# set these values to "Generative"
df.loc[mask, "Model Type"] = "Generative"
return df
# add count column to all dataframes
rewardbench_data = prep_df(rewardbench_data)
rewardbench_data_avg = prep_df(rewardbench_data_avg).rename(columns={"Average": "Score"})
# note: the Score column already weights Prior Sets at 0.5 and each core section at 1 (see avg_over_rewardbench)
rewardbench_data_length = prep_df(rewardbench_data_length)
prefs_data = prep_df(prefs_data)
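# per-column datatypes for gr.Dataframe: a numeric rank column, Model rendered as markdown (so that any links
# render), Model Type as a string, and numbers for the remaining score columns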
col_types_rewardbench = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data.columns) - 3)
col_types_rewardbench_avg = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data_avg.columns) - 3)
cols_rewardbench_data_length = ["markdown"] + ["number"] * (len(rewardbench_data_length.columns) - 1)
col_types_prefs = ["number"] + ["markdown"] + ["number"] * (len(prefs_data.columns) - 1)
# col_types_prefs_sub = ["markdown"] + ["number"] * (len(prefs_data_sub.columns) - 1)
# for showing random samples
eval_set = load_dataset(eval_set_repo, use_auth_token=COLLAB_TOKEN, split="filtered")
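# the "filtered" split is the RewardBench evaluation set itself; the Dataset Viewer tab samples prompts from it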
def random_sample(r: gr.Request, subset):
if subset is None or subset == []:
        sample_index = np.random.randint(0, len(eval_set))  # upper bound is exclusive
sample = eval_set[sample_index]
else: # filter by subsets (can be list)
if isinstance(subset, str):
subset = [subset]
# filter down dataset to only include the subset(s)
eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
        sample_index = np.random.randint(0, len(eval_set_filtered))  # upper bound is exclusive
sample = eval_set_filtered[sample_index]
markdown_text = '\n\n'.join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
return markdown_text
subsets = eval_set.unique("subset")
color_map = {
"Generative": "#7497db",
"Custom Classifier": "#E8ECF2",
"Seq. Classifier": "#ffcd75",
"DPO": "#75809c",
}
def color_model_type_column(df, color_map):
"""
Apply color to the 'Model Type' column of the DataFrame based on a given color mapping.
Parameters:
df (pd.DataFrame): The DataFrame containing the 'Model Type' column.
color_map (dict): A dictionary mapping model types to colors.
Returns:
pd.Styler: The styled DataFrame.
"""
# Function to apply color based on the model type
    def apply_color(val):
        color = color_map.get(val)
        # leave the cell unstyled if the model type has no assigned color
        return f'background-color: {color}' if color else ''
# Format for different columns
format_dict = {col: "{:.1f}" for col in df.columns if col not in ['Average', 'Model', 'Model Type']}
format_dict['Average'] = "{:.2f}"
format_dict[''] = "{:d}"
return df.style.applymap(apply_color, subset=['Model Type']).format(format_dict, na_rep='')
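# regex_table (below) passes its output through this styler when style=True; gr.Dataframe can render a pandas
# Styler, which is how the Model Type background colors and numeric formatting reach the displayed tables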
def regex_table(dataframe, regex, filter_button, style=True):
"""
    Takes a comma-separated list of regexes over model names and returns the matching rows, optionally filtered by model type via filter_button.
"""
# Split regex statement by comma and trim whitespace around regexes
regex_list = [x.strip() for x in regex.split(",")]
# Join the list into a single regex pattern with '|' acting as OR
combined_regex = '|'.join(regex_list)
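    # e.g. a search of "llama, zephyr" (hypothetical names) becomes the pattern "llama|zephyr" and keeps any
    # row whose Model contains either substring (case-insensitive)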
# remove internal ai2 data
dataframe = dataframe[~dataframe["Model"].str.contains("ai2", case=False, na=False)]
    # filter by model type / Prior Sets according to the checkbox selections in filter_button
update_scores = False
    if isinstance(filter_button, (list, str)):
if "Prior Sets" not in filter_button and 'Prior Sets (0.5 weight)' in dataframe.columns:
update_scores = True
if "Seq. Classifiers" not in filter_button:
dataframe = dataframe[~dataframe["Model Type"].str.contains("Seq. Classifier", case=False, na=False)]
if "DPO" not in filter_button:
dataframe = dataframe[~dataframe["Model Type"].str.contains("DPO", case=False, na=False)]
if "Custom Classifiers" not in filter_button:
dataframe = dataframe[~dataframe["Model Type"].str.contains("Custom Classifier", case=False, na=False)]
if "Generative" not in filter_button:
dataframe = dataframe[~dataframe["Model Type"].str.contains("generative", case=False, na=False)]
# Filter the dataframe such that 'model' contains any of the regex patterns
    data = dataframe[dataframe["Model"].str.contains(combined_regex, case=False, na=False)].copy()  # copy so the assignments below do not warn
    # if Prior Sets is deselected, recompute Score as the unweighted mean of the four core sections
    if update_scores:
        data["Score"] = (data["Chat"] + data["Chat Hard"] + data["Safety"] + data["Reasoning"]) / 4
        data["Prior Sets (0.5 weight)"] = np.nan
# sort array by Score column
data = data.sort_values(by='Score', ascending=False)
data.reset_index(drop=True, inplace=True)
# replace column '' with count/rank
data[''] = np.arange(1, 1 + len(data))
# if Score exists, round to 2 decimals
if "Score" in data.columns:
data["Score"] = np.round(np.array(data["Score"].values).astype(float), 2)
if "Average" in data.columns:
data["Average"] = np.round(np.array(data["Average"].values).astype(float), 1)
# round all others to 1 decimal
for col in data.columns:
if col not in ["", "Model", "Model Type", "Score", "Average"]:
            # replace empty strings with np.nan before casting to float
            data[col] = data[col].replace('', np.nan)
data[col] = np.round(np.array(data[col].values).astype(float), 1)
if style:
# apply color
data = color_model_type_column(data, color_map)
return data
total_models = len(regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"], style=False).values)
with gr.Blocks(css=custom_css) as app:
    # tabs: main leaderboard, detailed results, prior test sets, about, and a dataset viewer
with gr.Row():
with gr.Column(scale=6):
gr.Markdown(TOP_TEXT.format(str(total_models)))
with gr.Column(scale=4):
# search = gr.Textbox(label="Model Search (delimit with , )", placeholder="Regex search for a model")
# filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
# img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
gr.Markdown("""
![](file/src/logo.png)
""")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("πŸ† RewardBench Leaderboard"):
with gr.Row():
search_1 = gr.Textbox(label="Model Search (delimit with , )",
placeholder="Model Search (delimit with , )",
show_label=False)
model_types_1 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "Prior Sets"],
value=["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "Prior Sets"],
label="Model Types",
show_label=False,
# info="Which model types to include.",
)
with gr.Row():
# reference data
rewardbench_table_hidden = gr.Dataframe(
rewardbench_data_avg.values,
datatype=col_types_rewardbench_avg,
headers=rewardbench_data_avg.columns.tolist(),
visible=False,
)
rewardbench_table = gr.Dataframe(
regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "Prior Sets"]),
datatype=col_types_rewardbench_avg,
headers=rewardbench_data_avg.columns.tolist(),
elem_id="rewardbench_dataframe_avg",
height=1000,
)
with gr.TabItem("πŸ” RewardBench - Detailed"):
with gr.Row():
search_2 = gr.Textbox(label="Model Search (delimit with , )", show_label=False, placeholder="Model Search (delimit with , )")
model_types_2 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
value=["Seq. Classifiers", "DPO", "Generative", "Custom Classifiers"],
label="Model Types",
show_label=False,
# info="Which model types to include."
)
with gr.Row():
# ref data
rewardbench_table_detailed_hidden = gr.Dataframe(
rewardbench_data.values,
datatype=col_types_rewardbench,
headers=rewardbench_data.columns.tolist(),
visible=False,
)
rewardbench_table_detailed = gr.Dataframe(
regex_table(rewardbench_data.copy(), "", ["Seq. Classifiers", "DPO", "Generative", "Custom Classifiers"]),
datatype=col_types_rewardbench,
headers=rewardbench_data.columns.tolist(),
elem_id="rewardbench_dataframe",
height=1000,
)
# with gr.TabItem("rewardbench Eval Set - Length Bias"):
# with gr.Row():
# # backup
# rewardbench_table_len_hidden = gr.Dataframe(
# rewardbench_data_length.values,
# datatype=cols_rewardbench_data_length,
# headers=rewardbench_data_length.columns.tolist(),
# visible=False,
# )
# rewardbench_table_len = gr.Dataframe(
# regex_table(rewardbench_data_length.copy(), "", False).values,
# datatype=cols_rewardbench_data_length,
# headers=rewardbench_data_length.columns.tolist(),
# elem_id="rewardbench_dataframe_length",
# height=1000,
# )
with gr.TabItem("Prior Test Sets"):
with gr.Row():
search_3 = gr.Textbox(label="Model Search (delimit with , )", show_label=False, placeholder="Model Search (delimit with , )")
model_types_3 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
value=["Seq. Classifiers", "DPO", "Custom Classifiers"],
label="Model Types",
show_label=False,
# info="Which model types to include.",
)
with gr.Row():
PREF_SET_TEXT = """
For more information, see the [dataset](https://huggingface.co/datasets/allenai/pref-test-sets). Only the subsets Anthropic Helpful, Anthropic HHH, Stanford SHP, and OpenAI's Summarize data are used in the leaderboard ranking.
"""
gr.Markdown(PREF_SET_TEXT)
with gr.Row():
# backup
pref_sets_table_hidden = gr.Dataframe(
prefs_data.values,
datatype=col_types_prefs,
headers=prefs_data.columns.tolist(),
visible=False,
)
pref_sets_table = gr.Dataframe(
regex_table(prefs_data.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers"]),
datatype=col_types_prefs,
headers=prefs_data.columns.tolist(),
elem_id="prefs_dataframe",
height=1000,
)
with gr.TabItem("About"):
with gr.Row():
gr.Markdown(ABOUT_TEXT)
with gr.TabItem("Dataset Viewer"):
with gr.Row():
# loads one sample
gr.Markdown("""## Random Dataset Sample Viewer
Warning: the refusals, XSTest, and donotanswer subsets contain sensitive content.""")
subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
button = gr.Button("Show Random Sample")
with gr.Row():
sample_display = gr.Markdown("{sampled data loads here}")
button.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])
# removed plot because not pretty enough
# with gr.TabItem("Model Correlation"):
# with gr.Row():
# plot = plot_avg_correlation(rewardbench_data_avg, prefs_data)
# gr.Plot(plot)
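    # event wiring: the hidden Dataframes above hold the full, unfiltered tables; each search-box or checkbox
    # change re-runs regex_table over that hidden copy and writes the filtered result into the visible table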
search_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
search_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
# search.change(regex_table, inputs=[rewardbench_table_len_hidden, search, filter_button], outputs=rewardbench_table_len)
search_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)
model_types_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
model_types_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
model_types_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)
with gr.Row():
with gr.Accordion("πŸ“š Citation", open=False):
citation_button = gr.Textbox(
value=r"""@misc{RewardBench,
title={RewardBench: Evaluating Reward Models for Language Modeling},
author={Lambert, Nathan and Pyatkin, Valentina and Morrison, Jacob and Miranda, LJ and Lin, Bill Yuchen and Chandu, Khyathi and Dziri, Nouha and Kumar, Sachin and Zick, Tom and Choi, Yejin and Smith, Noah A. and Hajishirzi, Hannaneh},
year={2024},
howpublished={\url{https://huggingface.co/spaces/allenai/reward-bench}},
}""",
lines=7,
label="Copy the following to cite these results.",
elem_id="citation-button",
show_copy_button=True,
)
# TODO: load data when the app starts (currently unused)
# def load_data_on_start():
# data_rewardbench = load_all_data(repo_dir_rewardbench)
# rewardbench_table.update(data_rewardbench)
# data_rewardbench_avg = avg_over_rewardbench(repo_dir_rewardbench)
# rewardbench_table.update(data_rewardbench_avg)
# data_prefs = load_all_data(repo_dir_prefs)
# pref_sets_table.update(data_prefs)
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=10800)  # restart the Space every 3 hours to pull fresh results
scheduler.start()
app.launch(allowed_paths=['src/'])  # previously used .queue() before launch; unclear whether it's still necessary