import gradio as gr
import pandas as pd
import plotly.express as px
from dataclasses import dataclass, field
from typing import List, Dict, Union
import json
import os
from collections import OrderedDict
import re


def load_css(css_file_path):
    """Load CSS from a file."""
    with open(css_file_path, 'r') as f:
        return f.read()
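
# dashboard.css is assumed to sit next to this script; it should define the
# CSS classes referenced throughout (e.g. .card, .summary-card, .metric-row).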

# In the main code:
css = load_css('dashboard.css')


@dataclass
class ScorecardCategory:
    name: str
    questions: List[Dict[str, Union[str, List[str]]]]
    scores: Dict[str, int] = field(default_factory=dict)


def extract_category_number(category_name: str) -> float:
    """Extract the leading category number; unnumbered names sort last."""
    match = re.match(r'^(\d+)\.?\s*.*$', category_name)
    return int(match.group(1)) if match else float('inf')
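
# For example (hypothetical inputs):
#   extract_category_number("2. Cultural Values and Sensitive Content Evaluation") -> 2
#   extract_category_number("Uncategorized") -> inf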


def sort_categories(categories):
    """Sort categories by their numeric prefix."""
    return sorted(categories, key=extract_category_number)


# def load_scorecard_templates(directory):
#     templates = []
#     for filename in os.listdir(directory):
#         if filename.endswith('.json'):
#             with open(os.path.join(directory, filename), 'r') as file:
#                 data = json.load(file)
#                 templates.append(ScorecardCategory(
#                     name=data['name'],
#                     questions=data['questions']
#                 ))
#     return templates


def create_category_summary(category_data):
    """Create a summary section for a category."""
    # Calculate statistics
    total_sections = len(category_data)
    completed_sections = sum(1 for section in category_data.values() if section['status'] == 'Yes')

    # Calculate completion rates and evaluation-type coverage
    total_questions = 0
    completed_questions = 0
    has_human_eval = False
    has_quantitative = False
    has_documentation = False

    for section in category_data.values():
        if section['status'] != 'N/A':
            questions = section.get('questions', {})
            total_questions += len(questions)
            completed_questions += sum(1 for q in questions.values() if q)

            # Check for evaluation types by keyword
            for question in questions.keys():
                if 'human' in question.lower():
                    has_human_eval = True
                if any(term in question.lower() for term in ['quantitative', 'metric', 'benchmark']):
                    has_quantitative = True
                if 'documentation' in question.lower():
                    has_documentation = True

    completion_rate = (completed_questions / total_questions * 100) if total_questions > 0 else 0

    # Create summary HTML
    html = "<div class='summary-card'>"
    html += "<div class='summary-title'>📊 Section Summary</div>"

    # Completion metrics
    html += "<div class='summary-section'>"
    html += "<div class='summary-subtitle'>📈 Completion Metrics</div>"
    html += f"<div class='metric-row'><span class='metric-label'>Overall Completion Rate:</span> <span class='metric-value'>{completion_rate:.1f}%</span></div>"
    html += f"<div class='metric-row'><span class='metric-label'>Sections Completed:</span> <span class='metric-value'>{completed_sections}/{total_sections}</span></div>"
    html += "</div>"

    # Evaluation Coverage
    html += "<div class='summary-section'>"
    html += "<div class='summary-subtitle'>🎯 Evaluation Coverage</div>"
    html += "<div class='coverage-grid'>"
    html += f"<div class='coverage-item {get_coverage_class(has_human_eval)}'>👥 Human Evaluation</div>"
    html += f"<div class='coverage-item {get_coverage_class(has_quantitative)}'>📊 Quantitative Analysis</div>"
    html += f"<div class='coverage-item {get_coverage_class(has_documentation)}'>📚 Documentation</div>"
    html += "</div>"
    html += "</div>"

    # Status Breakdown
    html += "<div class='summary-section'>"
    html += "<div class='summary-subtitle'>📋 Status Breakdown</div>"
    html += create_status_pills(category_data)
    html += "</div>"

    html += "</div>"
    return html


def create_overall_summary(model_data, selected_categories):
    """Create a comprehensive summary of all categories."""
    scores = model_data['scores']

    # Initialize counters
    total_sections = 0
    completed_sections = 0
    na_sections = 0
    total_questions = 0
    completed_questions = 0

    # Track evaluation types across all categories
    evaluation_types = {
        'human': 0,
        'quantitative': 0,
        'documentation': 0,
        'monitoring': 0,
        'transparency': 0
    }

    # Calculate completion rates for categories
    category_completion = {}

    # Process all categories
    for category, category_data in scores.items():
        if category not in selected_categories:
            continue  # Skip unselected categories

        category_questions = 0
        category_completed = 0
        category_na = 0
        total_sections_in_category = len(category_data)
        na_sections_in_category = sum(1 for section in category_data.values() if section['status'] == 'N/A')

        for section in category_data.values():
            total_sections += 1
            if section['status'] == 'Yes':
                completed_sections += 1
            elif section['status'] == 'N/A':
                na_sections += 1
                category_na += 1

            if section['status'] != 'N/A':
                questions = section.get('questions', {})
                section_total = len(questions)
                section_completed = sum(1 for q in questions.values() if q)

                total_questions += section_total
                completed_questions += section_completed
                category_questions += section_total
                category_completed += section_completed

                # Check for evaluation types by keyword
                for question in questions.keys():
                    if 'human' in question.lower():
                        evaluation_types['human'] += 1
                    if any(term in question.lower() for term in ['quantitative', 'metric', 'benchmark']):
                        evaluation_types['quantitative'] += 1
                    if 'documentation' in question.lower():
                        evaluation_types['documentation'] += 1
                    if 'monitoring' in question.lower():
                        evaluation_types['monitoring'] += 1
                    if 'transparency' in question.lower():
                        evaluation_types['transparency'] += 1

        # Store category information
        is_na = na_sections_in_category == total_sections_in_category
        completion_rate = (category_completed / category_questions * 100) if category_questions > 0 and not is_na else 0
        category_completion[category] = {
            'completion_rate': completion_rate,
            'is_na': is_na
        }

    # Create summary HTML
    html = "<div class='card overall-summary-card'>"
    html += "<div class='card-title'>📊 Overall AI System Evaluation Summary</div>"

    # Key metrics section
    html += "<div class='summary-grid'>"

    # Overall completion metrics
    html += "<div class='summary-section'>"
    html += "<div class='summary-subtitle'>📈 Overall Completion</div>"
    completion_rate = (completed_questions / total_questions * 100) if total_questions > 0 else 0
    html += f"<div class='metric-row'><span class='metric-label'>Overall Completion Rate:</span> <span class='metric-value'>{completion_rate:.1f}%</span></div>"
    html += f"<div class='metric-row'><span class='metric-label'>Sections Completed:</span> <span class='metric-value'>{completed_sections}/{total_sections}</span></div>"
    html += f"<div class='metric-row'><span class='metric-label'>Questions Completed:</span> <span class='metric-value'>{completed_questions}/{total_questions}</span></div>"
    html += "</div>"

    # Evaluation coverage
    html += "<div class='summary-section'>"
    html += "<div class='summary-subtitle'>🎯 Evaluation Types Coverage</div>"
    html += "<div class='coverage-grid'>"
    for eval_type, count in evaluation_types.items():
        icon = {
            'human': '👥',
            'quantitative': '📊',
            'documentation': '📚',
            'monitoring': '📡',
            'transparency': '🔍'
        }.get(eval_type, '❓')
        has_coverage = count > 0
        html += f"<div class='coverage-item {get_coverage_class(has_coverage)}'>{icon} {eval_type.title()}</div>"
    html += "</div>"
    html += "</div>"

    html += "</div>"  # End summary-grid

    # Category breakdown
    html += "<div class='summary-section'>"
    html += "<div class='summary-subtitle'>📊 Category Completion Breakdown</div>"
    html += "<div class='category-completion-grid'>"

    # Sort and filter categories
    sorted_categories = [cat for cat in sort_categories(scores.keys()) if cat in selected_categories]

    for category in sorted_categories:
        info = category_completion[category]
        category_name = category.split('. ', 1)[1] if '. ' in category else category

        # Determine display text and style
        if info['is_na']:
            completion_text = "N/A"
            bar_width = "0"
            style_class = "na"
        else:
            completion_text = f"{info['completion_rate']:.1f}%"
            bar_width = f"{info['completion_rate']}"
            style_class = "active"

        html += f"""
        <div class='category-completion-item'>
            <div class='category-name'>{category_name}</div>
            <div class='completion-bar-container {style_class}'>
                <div class='completion-bar' style='width: {bar_width}%;'></div>
                <span class='completion-text'>{completion_text}</span>
            </div>
        </div>
        """

    html += "</div></div>"
    html += "</div>"  # End overall-summary-card
    return html


def get_coverage_class(has_feature):
    """Return a CSS class based on feature presence."""
    return 'covered' if has_feature else 'not-covered'


def create_status_pills(category_data):
    """Create status pill indicators."""
    status_counts = {'Yes': 0, 'No': 0, 'N/A': 0}
    for section in category_data.values():
        status_counts[section['status']] += 1

    html = "<div class='status-pills'>"
    for status, count in status_counts.items():
        # "N/A" becomes the CSS class "na" (a "/" is not valid in a class name)
        css_class = status.lower().replace('/', '')
        html += f"<div class='status-pill {css_class}'>{status}: {count}</div>"
    html += "</div>"
    return html


def get_modality_icon(modality):
    """Return an emoji icon for each modality type."""
    icons = {
        "Text-to-Text": "📝",    # Memo icon for text-to-text
        "Text-to-Image": "🎨",   # Artist palette for text-to-image
        "Image-to-Text": "🔍",   # Magnifying glass for image-to-text
        "Image-to-Image": "🖼️",  # Frame for image-to-image
        "Audio": "🎵",           # Musical note for audio
        "Video": "🎬",           # Clapper board for video
        "Multimodal": "🔄"       # Cycle arrows for multimodal
    }
    return icons.get(modality, "🚫")  # Default icon if modality not found


def create_metadata_card(metadata):
    """Create a formatted HTML card for metadata."""
    html = "<div class='card metadata-card'>"
    html += "<div class='card-title'>AI System Information</div>"
    html += "<div class='metadata-content'>"

    # Handle special formatting for modalities
    modalities = metadata.get("Modalities", [])
    formatted_modalities = ""
    if modalities:
        formatted_modalities = " ".join(
            f"<span class='modality-badge'>{get_modality_icon(m)} {m}</span>"
            for m in modalities
        )

    # Order of metadata display (customize as needed)
    display_order = ["Name", "Provider", "Type", "URL"]

    # Display ordered metadata first
    for key in display_order:
        if key in metadata:
            value = metadata[key]
            if key == "URL":
                html += f"<div class='metadata-row'><span class='metadata-label'>{key}:</span> "
                html += f"<a href='{value}' target='_blank' class='metadata-link'>{value}</a></div>"
            else:
                html += f"<div class='metadata-row'><span class='metadata-label'>{key}:</span> <span class='metadata-value'>{value}</span></div>"

    # Add modalities if present
    if formatted_modalities:
        html += f"<div class='metadata-row'><span class='metadata-label'>Modalities:</span> <div class='modality-container'>{formatted_modalities}</div></div>"

    # Add any remaining metadata not in display_order
    for key, value in metadata.items():
        if key not in display_order and key != "Modalities":
            html += f"<div class='metadata-row'><span class='metadata-label'>{key}:</span> <span class='metadata-value'>{value}</span></div>"

    html += "</div></div>"
    return html


def load_models_from_json(directory):
    """Load every scorecard JSON file in `directory`, keyed and sorted by system name."""
    models = {}
    for filename in os.listdir(directory):
        if filename.endswith('.json'):
            with open(os.path.join(directory, filename), 'r') as file:
                model_data = json.load(file)
                model_name = model_data['metadata']['Name']
                models[model_name] = model_data
    return OrderedDict(sorted(models.items(), key=lambda x: x[0].lower()))
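
# A minimal sketch of the JSON layout this loader expects, inferred from the
# fields accessed in this file (all names and values below are illustrative):
#
# {
#   "metadata": {
#     "Name": "Example System",
#     "Provider": "Example Org",
#     "Type": "Text-to-Text",
#     "URL": "https://example.com",
#     "Modalities": ["Text-to-Text", "Text-to-Image"]
#   },
#   "scores": {
#     "1. Bias, Stereotypes, and Representational Harms Evaluation": {
#       "1.1 Example Section": {
#         "status": "Yes",        # "Yes" | "No" | "N/A"
#         "sources": [{"type": "📄", "detail": "https://example.com/paper", "name": "Paper"}],
#         "questions": {"Is an example question answered?": true}
#       }
#     }
#   }
# }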

# Load templates and models
# scorecard_template = load_scorecard_templates('scorecard_templates')
models = load_models_from_json('model_data')


def create_source_html(sources):
    if not sources:
        return ""

    html = "<div class='sources-list'>"
    for source in sources:
        icon = source.get("type", "")
        detail = source.get("detail", "")
        name = source.get("name", detail)
        html += f"<div class='source-item'>{icon} "
        if detail.startswith("http"):
            html += f"<a href='{detail}' target='_blank'>{name}</a>"
        else:
            html += name
        html += "</div>"
    html += "</div>"
    return html


def create_leaderboard(selected_categories):
    # Display names (with emoji) for each full category name; defined once,
    # outside the per-model loop, since it is also used after the loop
    category_map = {
        '1. Bias, Stereotypes, and Representational Harms Evaluation': '⚖️ Bias and Fairness',
        '2. Cultural Values and Sensitive Content Evaluation': '🌍 Cultural Values',
        '3. Disparate Performance Evaluation': '📊 Disparate Performance',
        '4. Environmental Costs and Carbon Emissions Evaluation': '🌱 Environmental Impact',
        '5. Privacy and Data Protection Evaluation': '🔒 Privacy',
        '6. Financial Costs Evaluation': '💰 Financial Costs',
        '7. Data and Content Moderation Labor Evaluation': '👥 Labor Practices'
    }

    scores = []
    for model, data in models.items():
        total_score = 0
        total_questions = 0
        score_by_category = {}

        # Calculate scores by category
        for category_name, category in data['scores'].items():
            category_score = 0
            category_total = 0
            for section in category.values():
                if section['status'] != 'N/A':
                    questions = section.get('questions', {})
                    category_score += sum(1 for q in questions.values() if q)
                    category_total += len(questions)
            if category_total > 0:
                score_by_category[category_name] = (category_score / category_total) * 100
            total_score += category_score
            total_questions += category_total

        # Calculate overall score
        score_percentage = (total_score / total_questions * 100) if total_questions > 0 else 0

        # Get model type and URL
        model_type = data['metadata'].get('Type', 'Unknown')
        model_url = data['metadata'].get('URL', '')

        # Create model name with an HTML link if a URL exists
        model_display = f'<a href="{model_url}" target="_blank">{model}</a>' if model_url else model

        # Create entry with numerical scores
        model_entry = {
            'AI System': model_display,
            'Type': model_type,
            'Overall Completion Rate': score_percentage
        }

        # Add selected category scores
        for full_cat_name, display_name in category_map.items():
            if full_cat_name in selected_categories:
                model_entry[display_name] = score_by_category.get(full_cat_name, 0)

        scores.append(model_entry)

    # Convert to DataFrame
    df = pd.DataFrame(scores)

    # Sort by Overall Completion Rate, descending
    df = df.sort_values('Overall Completion Rate', ascending=False)

    # Add rank column based on the current sort
    df.insert(0, 'Rank', range(1, len(df) + 1))

    # Format scores with % after sorting
    numeric_columns = ['Overall Completion Rate'] + list(category_map.values())
    for col in df.columns:
        if col in numeric_columns:
            df[col] = df[col].apply(lambda x: f"{x:.1f}%")
    return df
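
# The resulting DataFrame looks like (illustrative):
#   Rank | AI System | Type | Overall Completion Rate | ⚖️ Bias and Fairness | ...
# with all score columns formatted as strings such as "87.5%".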


def create_category_chart(selected_models, selected_categories):
    if not selected_models:
        fig = px.bar(title='Please select at least one model for comparison')
        fig.update_layout(showlegend=True)  # Ensure legend remains visible
        return fig

    # Sort categories before processing
    selected_categories = sort_categories(selected_categories)

    data = []
    for model in selected_models:
        for category in selected_categories:
            if category in models[model]['scores']:
                total_score = 0
                total_questions = 0
                for section in models[model]['scores'][category].values():
                    if section['status'] != 'N/A':
                        questions = section.get('questions', {})
                        total_score += sum(1 for q in questions.values() if q)
                        total_questions += len(questions)
                score_percentage = (total_score / total_questions * 100) if total_questions > 0 else 0
                data.append({
                    'AI System': model,
                    'Category': category,
                    'Completion Rate': score_percentage
                })

    df = pd.DataFrame(data)
    if df.empty:
        fig = px.bar(title='No data available for the selected models and categories')
    else:
        fig = px.bar(df, x='AI System', y='Completion Rate', color='Category',
                     title='AI System Evaluation Completion Rates by Category',
                     labels={'Completion Rate': 'Completion Rate (%)'},
                     category_orders={"Category": selected_categories})
    fig.update_layout(showlegend=True)  # Ensure legend remains visible
    return fig


def update_detailed_scorecard(model, selected_categories):
    if not model:
        return [
            gr.update(value="Please select a model to view details.", visible=True),
            gr.update(visible=False),
            gr.update(visible=False)
        ]

    selected_categories = sort_categories(selected_categories)
    metadata_html = create_metadata_card(models[model]['metadata'])
    overall_summary_html = create_overall_summary(models[model], selected_categories)

    # Combine metadata and overall summary
    combined_header = metadata_html + overall_summary_html

    total_yes = 0
    total_no = 0
    total_na = 0
    has_non_na = False

    # Create category cards
    all_cards_content = "<div class='container'>"
    for category_name in selected_categories:
        if category_name in models[model]['scores']:
            category_data = models[model]['scores'][category_name]
            card_content = f"<div class='card'><div class='card-title'>{category_name}</div>"

            # Add a category-specific summary at the top of each card
            card_content += create_category_summary(category_data)

            # Sort sections within each category; sections without a numeric
            # prefix sort last instead of raising an AttributeError
            def section_sort_key(item):
                match = re.match(r'^(\d+(?:\.\d+)?)', item[0])
                return float(match.group(1)) if match else float('inf')

            sorted_sections = sorted(category_data.items(), key=section_sort_key)

            category_yes = 0
            category_no = 0
            category_na = 0

            for section, details in sorted_sections:
                status = details['status']
                if status != 'N/A':
                    has_non_na = True
                sources = details.get('sources', [])
                questions = details.get('questions', {})

                section_class = "section-na" if status == "N/A" else "section-active"
                status_class = status.lower().replace('/', '')  # "N/A" -> "na"
                status_icon = "✓" if status == "Yes" else "−" if status == "N/A" else "×"

                card_content += f"<div class='section {section_class}'>"
                card_content += f"<div class='section-header'><h3>{section}</h3>"
                card_content += f"<span class='status-badge {status_class}'>{status_icon} {status}</span></div>"

                if sources:
                    card_content += "<div class='sources-list'>"
                    for source in sources:
                        icon = source.get("type", "")
                        detail = source.get("detail", "")
                        name = source.get("name", detail)
                        card_content += f"<div class='source-item'>{icon} "
                        if detail.startswith("http"):
                            card_content += f"<a href='{detail}' target='_blank'>{name}</a>"
                        else:
                            card_content += name
                        card_content += "</div>"
                    card_content += "</div>"

                if questions:
                    yes_count = sum(1 for v in questions.values() if v)
                    total_count = len(questions)

                    card_content += "<details class='question-accordion'>"
                    if status == "N/A":
                        card_content += f"<summary>View {total_count} N/A items</summary>"
                    else:
                        card_content += f"<summary>View details ({yes_count}/{total_count} completed)</summary>"
                    card_content += "<div class='questions'>"

                    for question, is_checked in questions.items():
                        if status == "N/A":
                            style_class = "na"
                            icon = "−"
                            category_na += 1
                            total_na += 1
                        else:
                            if is_checked:
                                style_class = "checked"
                                icon = "✓"
                                category_yes += 1
                                total_yes += 1
                            else:
                                style_class = "unchecked"
                                icon = "✗"
                                category_no += 1
                                total_no += 1
                        card_content += f"<div class='question-item {style_class}'>{icon} {question}</div>"
                    card_content += "</div></details>"
                card_content += "</div>"

            if category_yes + category_no > 0:
                category_score = category_yes / (category_yes + category_no) * 100
                card_content += f"<div class='category-score'>Completion Score Breakdown: {category_score:.2f}% (Yes: {category_yes}, No: {category_no}, N/A: {category_na})</div>"
            elif category_na > 0:
                card_content += f"<div class='category-score'>Completion Score Breakdown: N/A (All {category_na} items not applicable)</div>"

            card_content += "</div>"
            all_cards_content += card_content

    all_cards_content += "</div>"

    # Create total score
    if not has_non_na:
        total_score_md = "<div class='total-score'>No applicable scores (all items N/A)</div>"
    elif total_yes + total_no > 0:
        total_score = total_yes / (total_yes + total_no) * 100
        total_score_md = f"<div class='total-score'>Total Score: {total_score:.2f}% (Yes: {total_yes}, No: {total_no}, N/A: {total_na})</div>"
    else:
        total_score_md = "<div class='total-score'>No applicable scores (all items N/A)</div>"

    return [
        gr.update(value=combined_header, visible=True),
        gr.update(value=all_cards_content, visible=True),
        gr.update(value=total_score_md, visible=True)
    ]


first_model = next(iter(models.values()))
category_choices = list(first_model['scores'].keys())

with gr.Blocks(css=css) as demo:
    gr.Markdown("# AI System Social Impact Dashboard")

    with gr.Row():
        tab_selection = gr.Radio(["Leaderboard", "Category Analysis", "Detailed Scorecard"],
                                 label="Select Tab", value="Leaderboard")

    with gr.Row():
        model_chooser = gr.Dropdown(choices=[""] + list(models.keys()),
                                    label="Select AI System for Details",
                                    value="",
                                    interactive=True, visible=False)
        model_multi_chooser = gr.Dropdown(choices=list(models.keys()),
                                          label="Select AI Systems for Comparison",
                                          value=[],
                                          multiselect=True,
                                          interactive=True,
                                          visible=False,
                                          info="Select one or more AI Systems")

    # Category filter, visible for all tabs
    category_filter = gr.CheckboxGroup(choices=category_choices,
                                       label="Filter Categories",
                                       value=category_choices)

    with gr.Column(visible=True) as leaderboard_tab:
        leaderboard_output = gr.DataFrame(
            value=create_leaderboard(category_choices),  # Initialize with all categories selected
            interactive=False,
            wrap=True,
            datatype=["markdown", "markdown", "markdown"] + ["markdown"] * len(category_choices)  # Render every column as markdown
        )

    with gr.Column(visible=False) as category_analysis_tab:
        # Initialize with an empty plot
        initial_plot = create_category_chart([], category_choices)
        category_chart = gr.Plot(value=initial_plot)

    with gr.Column(visible=False) as detailed_scorecard_tab:
        model_metadata = gr.HTML()
        all_category_cards = gr.HTML()
        total_score = gr.Markdown()

    def update_dashboard(tab, selected_models, selected_model, selected_categories):
        # Default visibility states
        component_states = {
            "leaderboard": False,
            "category_chart": False,
            "detailed_scorecard": False,
            "model_chooser": False,
            "model_multi_chooser": False
        }

        # Initialize outputs with None
        outputs = {
            "leaderboard": None,
            "category_chart": None,
            "model_metadata": None,
            "category_cards": None,
            "total_score": None
        }

        # Update visibility based on the selected tab
        if tab == "Leaderboard":
            component_states["leaderboard"] = True
            outputs["leaderboard"] = create_leaderboard(selected_categories)
        elif tab == "Category Analysis":
            component_states["category_chart"] = True
            component_states["model_multi_chooser"] = True
            if selected_models:  # Only update the chart if models are selected
                outputs["category_chart"] = create_category_chart(selected_models, selected_categories)
        elif tab == "Detailed Scorecard":
            component_states["detailed_scorecard"] = True
            component_states["model_chooser"] = True
            if selected_model:
                scorecard_updates = update_detailed_scorecard(selected_model, selected_categories)
                outputs["model_metadata"] = scorecard_updates[0]
                outputs["category_cards"] = scorecard_updates[1]
                outputs["total_score"] = scorecard_updates[2]

        # Return updates in the correct order
        return [
            gr.update(visible=component_states["leaderboard"]),
            gr.update(visible=component_states["category_chart"]),
            gr.update(visible=component_states["detailed_scorecard"]),
            gr.update(visible=component_states["model_chooser"]),
            gr.update(visible=component_states["model_multi_chooser"]),
            outputs["leaderboard"] if outputs["leaderboard"] is not None else gr.update(),
            outputs["category_chart"] if outputs["category_chart"] is not None else gr.update(),
            outputs["model_metadata"] if outputs["model_metadata"] is not None else gr.update(),
            outputs["category_cards"] if outputs["category_cards"] is not None else gr.update(),
            outputs["total_score"] if outputs["total_score"] is not None else gr.update()
        ]

    # Set up event handlers
    for component in [tab_selection, model_chooser, model_multi_chooser, category_filter]:
        component.change(
            fn=update_dashboard,
            inputs=[tab_selection, model_multi_chooser, model_chooser, category_filter],
            outputs=[leaderboard_tab, category_analysis_tab, detailed_scorecard_tab,
                     model_chooser, model_multi_chooser,
                     leaderboard_output, category_chart, model_metadata,
                     all_category_cards, total_score]
        )

# Launch the app
if __name__ == "__main__":
    demo.launch()
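
# Example: run locally with `python app.py`; Gradio serves the dashboard at
# http://localhost:7860 by default (assuming default launch settings).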