""" Default leaderboards with which we initialize the space. """ leaderboard_to_tags = { "HuggingFaceH4/open_llm_leaderboard": ["submission:automatic", "judge:auto", "test:public", "modality:text", "eval:math"], "bigcode/bigcode-models-leaderboard": ["submission:semiautomatic", "judge:auto", "test:public", "eval:code"], "optimum/llm-perf-leaderboard": ["submission:manual", "judge:auto", "eval:performance"], "lmsys/chatbot-arena-leaderboard": ["judge:humans", "modality:text", "eval:generation"], "llmonitor/benchmarks": ["submission:manual", "judge:humans", "modality:text", "judge:vibe_check"], "mteb/leaderboard": ["submission:semiautomatic", "modality:text", "Embeddings", "modality:artefacts"], "gaia-benchmark/leaderboard": ["submission:automatic", "test:private", "judge:auto", "modality:text", "modality:tools", "modality:text", "modality:image", "modality:video"], "opencompass/opencompass-llm-leaderboard": ["submission:manual", "modality:text", "language:chinese"], "upstage/open-ko-llm-leaderboard": ["submission:automatic", "judge:auto", "test:mix", "modality:text", "language:korean"], "BramVanroy/open_dutch_llm_leaderboard": ["submission:manual", "judge:auto", "modality:text", "language:dutch"], "vectara/leaderboard": ["submission:semiautomatic", "judge:model", "modality:text", "Hallucinations"], "facebook/CyberSecEval": ["submission:closed", "eval:code", "eval:safety"], "mlabonne/Yet_Another_LLM_Leaderboard": ["submission:manual", "modality:text", "judge:auto"], "AI-Secure/llm-trustworthy-leaderboard": ["submission:automatic", "eval:safety", "modality:text"], "AILab-CVC/EvalCrafter": ["submission:closed", "modality:video", "eval:generation"], "mike-ravkine/can-ai-code-results": ["submission:closed", "eval:code"], "echo840/ocrbench-leaderboard": ["submission:closed", "modality:image", "OCR"], "NPHardEval/NPHardEval-leaderboard": ["submission:closed", "modality:text", "eval:math", "test:rolling"], "HaizeLabs/red-teaming-resistance-benchmark": ["submission:manual", "eval:safety", "modality:text"], "devingulliver/subquadratic-llm-leaderboard": ["submission:semiautomatic", "modality:text", "eval:math"], "WildVision/vision-arena": ["modality:image", "modality:text", "judge:humans"], "Vchitect/VBench_Leaderboard": ["submission:semiautomatic", "modality:video", "eval:generation"], "eduagarcia/open_pt_llm_leaderboard": ["modality:text", "language:portuguese"], "FinancialSupport/open_ita_llm_leaderboard": ["modality:text", "language:italian"], "mesolitica/malay-llm-leaderboard": ["modality:text", "language:malay"], "TIGER-Lab/GenAI-Arena": ["modality:image", "eval:generation", "judge:humans", ], "q-future/Q-Bench-Leaderboard": ["modality:image", "judge:auto", "submission:closed"], "OpenGenAI/parti-prompts-leaderboard": ["modality:image", "eval:generation", "judge:humans"], "speakleash/open_pl_llm_leaderboard": ["modality:text", "language:polish"], "malhajar/OpenLLMTurkishLeaderboard": ["modality:text", "language:turkish"], "allenai/WildBench": ["judge:humans", "judge:model", "modality:text", "eval:generation"], "hf-audio/open_asr_leaderboard": ["judge:auto", "modality:audio"], "opencompass/open_vlm_leaderboard": ["judge:auto", "eval:generation", "modality:image"], "livecodebench/benchmarks": ["judge:auto", "eval:code"], "allenai/reward-bench": ["judge:auto", "modality:artefacts", "Models", "modality:text"], "TTS-AGI/TTS-Arena": ["judge:humans", "modality:audio"] }