import gradio as gr
import json
import os
import unicodedata
from datetime import datetime, timezone
from apscheduler.schedulers.background import BackgroundScheduler
import pandas as pd
from huggingface_hub import snapshot_download
from src.display.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
FAQ_TEXT,
TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
BENCHMARK_COLS,
COLS,
EVAL_COLS,
EVAL_TYPES,
NUMERIC_INTERVALS,
TYPES,
AutoEvalColumn,
ModelType,
fields,
WeightType,
Precision
)
from src.envs import (
    API,
    EVAL_REQUESTS_PATH,
    DYNAMIC_INFO_REPO,
    DYNAMIC_INFO_FILE_PATH,
    DYNAMIC_INFO_PATH,
    EVAL_RESULTS_PATH,
    H4_TOKEN,
    IS_PUBLIC,
    QUEUE_REPO,
    REPO_ID,
    RESULTS_REPO,
)
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval
from src.tools.collections import update_collections
from src.tools.plots import (
create_metric_plot_obj,
create_plot_df,
create_scores_df,
)
def restart_space():
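    """Restart this Space via the Hub API (used on snapshot failures and on a schedule)."""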
API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
def init_space():
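    """Download the requests, dynamic-info, and results datasets from the Hub
    (restarting the Space on failure), then build the leaderboard, plot, and
    evaluation-queue dataframes."""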
try:
print(EVAL_REQUESTS_PATH)
snapshot_download(
repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
)
except Exception:
restart_space()
try:
print(DYNAMIC_INFO_PATH)
snapshot_download(
repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
)
except Exception:
restart_space()
try:
print(EVAL_RESULTS_PATH)
snapshot_download(
repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
)
except Exception:
restart_space()
raw_data, original_df = get_leaderboard_df(
results_path=EVAL_RESULTS_PATH,
requests_path=EVAL_REQUESTS_PATH,
dynamic_path=DYNAMIC_INFO_FILE_PATH,
cols=COLS,
benchmark_cols=BENCHMARK_COLS
)
update_collections(original_df.copy())
leaderboard_df = original_df.copy()
plot_df = create_plot_df(create_scores_df(raw_data))
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
return leaderboard_df, original_df, plot_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df
leaderboard_df, original_df, plot_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space()
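# All dataframes are built once at import time, when init_space() runs.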
# Searching and filtering
def update_table(
hidden_df: pd.DataFrame,
columns: list,
type_query: list,
    precision_query: list,
size_query: list,
show_deleted: bool,
show_merges: bool,
show_moe: bool,
show_flagged: bool,
query: str,
):
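    """Re-filter the full hidden dataframe with the current widget state and return the visible table."""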
filtered_df = filter_models(hidden_df, type_query, size_query, precision_query, show_deleted, show_merges, show_moe, show_flagged)
filtered_df = filter_queries(query, filtered_df)
df = select_columns(filtered_df, columns)
return df
def load_query(request: gr.Request):  # triggered only once at startup => read the "query" URL parameter if present
    query = request.query_params.get("query") or ""
    # Return the value twice: once for the search bar and once for a hidden
    # component that triggers a reload only when the value has changed.
    return query, query
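# load_query example (hypothetical URL): opening https://<space-host>/?query=llama
# pre-fills the search bar with "llama" on startup.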
def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
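    """Keep rows whose dummy column (the plain model name) contains `query`, case-insensitively."""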
return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
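    """Keep the never-hidden columns, the user-selected columns (in COLS order), and the hidden dummy column."""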
    always_here_cols = [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
    dummy_col = [AutoEvalColumn.dummy.name]
    # We use COLS to maintain the column ordering.
    filtered_df = df[
        always_here_cols + [c for c in COLS if c in df.columns and c in columns] + dummy_col
    ]
return filtered_df
def filter_queries(query: str, filtered_df: pd.DataFrame):
"""Added by Abishek"""
final_df = []
if query != "":
queries = [q.strip() for q in query.split(";")]
for _q in queries:
_q = _q.strip()
if _q != "":
temp_filtered_df = search_table(filtered_df, _q)
if len(temp_filtered_df) > 0:
final_df.append(temp_filtered_df)
if len(final_df) > 0:
filtered_df = pd.concat(final_df)
filtered_df = filtered_df.drop_duplicates(
subset=[AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name]
)
return filtered_df
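# filter_queries example: query="llama;mistral" keeps rows matching either
# term, then de-duplicates on (model, precision, revision).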
def filter_models(
    df: pd.DataFrame,
    type_query: list,
    size_query: list,
    precision_query: list,
    show_deleted: bool,
    show_merges: bool,
    show_moe: bool,
    show_flagged: bool,
) -> pd.DataFrame:
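    """Apply the type / precision / size checkbox filters and the show_* toggles to the leaderboard."""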
    # Show all models, or only those still present on the Hub.
    if show_deleted:
        filtered_df = df
    else:
        filtered_df = df[df[AutoEvalColumn.still_on_hub.name] == True]
if not show_merges:
filtered_df = filtered_df[filtered_df[AutoEvalColumn.merged.name] == False]
if not show_moe:
filtered_df = filtered_df[filtered_df[AutoEvalColumn.moe.name] == False]
if not show_flagged:
filtered_df = filtered_df[filtered_df[AutoEvalColumn.flagged.name] == False]
    type_emoji = [t[0] for t in type_query]
    filtered_df = filtered_df.loc[filtered_df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)]
    filtered_df = filtered_df.loc[filtered_df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])]
    numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query]))
    params_column = pd.to_numeric(filtered_df[AutoEvalColumn.params.name], errors="coerce")
    # Keep rows whose parameter count falls inside any selected size interval.
    mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
    filtered_df = filtered_df.loc[mask]
return filtered_df
leaderboard_df = filter_models(
df=leaderboard_df,
type_query=[t.to_str(" : ") for t in ModelType],
size_query=list(NUMERIC_INTERVALS.keys()),
precision_query=[i.value.name for i in Precision],
show_deleted=False,
show_merges=False,
show_moe=True,
show_flagged=False
)
def is_valid_unicode(char):
try:
unicodedata.name(char)
return True # Valid Unicode character
except ValueError:
return False # Invalid Unicode character
def remove_invalid_unicode(input_string):
if isinstance(input_string, str):
valid_chars = [char for char in input_string if is_valid_unicode(char)]
return ''.join(valid_chars)
else:
return input_string # Return non-string values as is
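# Example: remove_invalid_unicode("abc\x00def") returns "abcdef", because the
# NUL control character has no Unicode name, so unicodedata.name() raises ValueError.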
dummy1 = gr.Textbox(visible=False)
hidden_leaderboard_table_for_search = gr.components.Dataframe(
headers=COLS,
datatype=TYPES,
visible=False,
line_breaks=False,
interactive=False
)
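# Hidden dataframe used as the Interface's output component; display() fills it with the sanitized leaderboard.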
def display(x, y):
    # x and y mirror the two Interface inputs and are unused. Sanitize every
    # object (string) column of the module-level leaderboard_df so the table
    # serializes cleanly, then return the visible subset of columns.
    for column in leaderboard_df.columns:
        if leaderboard_df[column].dtype == 'object':
            leaderboard_df[column] = leaderboard_df[column].apply(remove_invalid_unicode)
    subset_df = leaderboard_df[COLS]
    return subset_df
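# NOTE: this overrides the INTRODUCTION_TEXT imported from src.display.about.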
INTRODUCTION_TEXT = """
This is a copied space from Open LLM Leaderboard. Instead of displaying
the results as table this space was modified to simply provides a gradio API interface.
Using the following python script below, users can access the full leaderboard data easily.
```python
# Import dependencies
from gradio_client import Client
# Initialize the Gradio client with the API URL
client = Client("https://rodrigomasini-data-only-enterprise-scenarios-leaderboard.hf.space/")
try:
# Perform the API call
response = client.predict("","", api_name='/predict')
    # Check whether the response is directly accessible
if len(response) > 0:
print("Response received!")
headers = response.get('headers', [])
data = response.get('data', [])
print(headers)
    # Uncomment the lines below (and add "import csv" at the top) to save the dataset in CSV format
# Specify the path to your CSV file
#csv_file_path = 'foundational-models-benchmark.csv'
# Open the CSV file for writing
#with open(csv_file_path, mode='w', newline='', encoding='utf-8') as file:
# writer = csv.writer(file)
# Write the headers
# writer.writerow(headers)
# Write the data
# for row in data:
# writer.writerow(row)
#print(f"Results saved to {csv_file_path}")
# If the above line prints a string that looks like JSON, you can parse it with json.loads(response)
# Otherwise, you might need to adjust based on the actual structure of `response`
except Exception as e:
print(f"An error occurred: {e}")
```
"""
interface = gr.Interface(
fn=display,
inputs=[gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text"), dummy1],
outputs=[hidden_leaderboard_table_for_search]
)
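# Restart the Space every 30 minutes (1800 s); init_space() then re-downloads fresh data on startup.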
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
interface.launch()