from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


class Tasks(Enum):
    task0 = Task("tmlu", "acc,none", "TMLU")
    task1 = Task("twllm_eval", "acc,none", "TW Truthful QA")
    task2 = Task("tw_legal", "acc,none", "TW Legal Eval")
    task3 = Task("mmlu", "acc,none", "MMLU")


NUM_FEWSHOT = 0
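

# Illustrative only: a minimal sketch of how the Task entries above could be
# consumed when assembling the results table. The helper name
# `benchmark_to_column` is hypothetical and not part of the leaderboard app.
def benchmark_to_column() -> dict[str, str]:
    """Map each harness benchmark key to its display column name."""
    return {task.value.benchmark: task.value.col_name for task in Tasks}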


TITLE = """<h1 align="center" id="space-title">Open Taiwan LLM Leaderboard</h1>"""

INTRODUCTION_TEXT = """
This leaderboard showcases the performance of large language models (LLMs) on Taiwanese Mandarin language understanding tasks. Models are scored by accuracy on each benchmark, highlighting their strengths and weaknesses in understanding Taiwanese Mandarin text.

這個排行榜展示了大型語言模型 (LLMs) 在各種臺灣繁體中文語言理解任務上的表現。

排行榜在以下考題上評估 LLMs:

1. [TMLU(臺灣中文大規模多任務語言理解)](https://huggingface.co/datasets/miulab/tmlu):衡量模型理解各個領域(國中、高中、大學、國考)的能力。
2. TW Truthful QA:評估模型以臺灣特定的背景來回答問題,測試模型的在地化能力。
3. [TW Legal Eval](https://huggingface.co/datasets/lianghsun/tw-legal-benchmark-v1):使用臺灣律師資格考試的問題,評估模型對臺灣法律術語和概念的理解。
4. [MMLU(英文大規模多任務語言理解)](https://huggingface.co/datasets/cais/mmlu):測試模型在英語中各種任務上的表現。
"""

LLM_BENCHMARKS_TEXT = f"""
The leaderboard evaluates LLMs on the following benchmarks:

1. [TMLU](https://huggingface.co/datasets/miulab/tmlu) (Taiwanese Mandarin Language Understanding): Measures the model's ability to understand Taiwanese Mandarin text across domains ranging from junior-high and high-school subjects to university and national exams.
2. TW Truthful QA: Assesses the model's ability to give truthful, localized answers to questions in Taiwanese Mandarin, with a focus on Taiwan-specific context.
3. [TW Legal Eval](https://huggingface.co/datasets/lianghsun/tw-legal-benchmark-v1): Evaluates the model's understanding of legal terminology and concepts in Taiwanese Mandarin, using questions from the Taiwanese bar exam.
4. [MMLU](https://huggingface.co/datasets/cais/mmlu) (Massive Multitask Language Understanding): Tests the model's performance on a wide range of tasks in English.

To reproduce our results, run the evaluation script at: https://github.com/adamlin120/lm-evaluation-harness/blob/main/run_all.sh
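
As a minimal sketch of what an evaluation run looks like with the harness's Python API (task names here follow this leaderboard's configuration; the exact arguments used in `run_all.sh` may differ, and `your-org/your-model` is a placeholder):

```python
# Illustrative sketch: zero-shot evaluation with the EleutherAI LM Evaluation Harness.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=your-org/your-model",
    tasks=["tmlu", "twllm_eval", "tw_legal", "mmlu"],
    num_fewshot=0,
    batch_size="auto",
)
print(results["results"])
```

The leaderboard columns report the `acc` metric from each task's results.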

該排行榜在以下考題上評估 LLMs:

1. 📚 [TMLU(臺灣中文大規模多任務語言理解)](https://huggingface.co/datasets/miulab/tmlu):衡量模型理解各個領域(國中、高中、大學、國考)的能力。
2. 🇹🇼 TW Truthful QA:評估模型以臺灣特定的背景來回答問題,測試模型的在地化能力。
3. ⚖️ [TW Legal Eval](https://huggingface.co/datasets/lianghsun/tw-legal-benchmark-v1):使用臺灣律師資格考試的問題,評估模型對臺灣法律術語和概念的理解。
4. 🌐📚 [MMLU(英文大規模多任務語言理解)](https://huggingface.co/datasets/cais/mmlu):測試模型在英語中各種任務上的表現。

要重現我們的結果,請按照:https://github.com/adamlin120/lm-evaluation-harness/blob/main/run_all.sh
"""

EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

revision = "main"  # or the specific branch/commit you want evaluated
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It is likely your model has been improperly uploaded.

Note: make sure your model is public!
Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it, stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
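
If your checkpoint is still in PyTorch `.bin` format, here is a minimal sketch of one way to re-save it with safetensors (assuming a standard `transformers` causal LM; `your-org/your-model` is a placeholder):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model id for illustration; replace with your own repository.
model_id = "your-org/your-model"

model = AutoModelForCausalLM.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Re-save locally with safetensors serialization, then upload or push the folder.
model.save_pretrained("your-model-safetensors", safe_serialization=True)
tokenizer.save_pretrained("your-model-safetensors")
```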

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill out your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can run the EleutherAI LM Evaluation Harness on your model locally, following our run_all.sh script without modifications (you can add `--limit` to restrict the number of examples per task).
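
As a rough sketch of a quick local sanity check (using the harness's `simple_evaluate` API; the `limit` value is arbitrary and `your-org/your-model` is a placeholder):

```python
import lm_eval

# Smoke-test a single task on a handful of examples before a full run.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=your-org/your-model",
    tasks=["tmlu"],
    num_fewshot=0,
    limit=10,  # only 10 examples, for debugging
)
print(results["results"])
```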
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{open-tw-llm-leaderboard,
  author       = {Yen{-}Ting Lin and Yun{-}Nung Chen},
  title        = {Open TW-LLM Leaderboard},
  year         = {2024},
  publisher    = {National Taiwan University},
  howpublished = {\url{https://huggingface.co/spaces/yentinglin/open-tw-llm-leaderboard}}
}
@article{DBLP:journals/corr/abs-2403-20180,
  author       = {Po{-}Heng Chen and
                  Sijia Cheng and
                  Wei{-}Lin Chen and
                  Yen{-}Ting Lin and
                  Yun{-}Nung Chen},
  title        = {Measuring Taiwanese Mandarin Language Understanding},
  journal      = {CoRR},
  volume       = {abs/2403.20180},
  year         = {2024},
  url          = {https://doi.org/10.48550/arXiv.2403.20180},
  doi          = {10.48550/ARXIV.2403.20180},
  eprinttype   = {arXiv},
  eprint       = {2403.20180},
  timestamp    = {Wed, 10 Apr 2024 17:37:45 +0200},
  biburl       = {https://dblp.org/rec/journals/corr/abs-2403-20180.bib},
  bibsource    = {dblp computer science bibliography, https://dblp.org}
}
@misc{hendrycks2021measuring,
  title        = {Measuring Massive Multitask Language Understanding},
  author       = {Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
  year         = {2021},
  eprint       = {2009.03300},
  archivePrefix= {arXiv},
  primaryClass = {cs.CY}
}
@software{eval-harness,
  author       = {Gao, Leo and
                  Tow, Jonathan and
                  Biderman, Stella and
                  Black, Sid and
                  DiPofi, Anthony and
                  Foster, Charles and
                  Golding, Laurence and
                  Hsu, Jeffrey and
                  McDonell, Kyle and
                  Muennighoff, Niklas and
                  Phang, Jason and
                  Reynolds, Laria and
                  Tang, Eric and
                  Thite, Anish and
                  Wang, Ben and
                  Wang, Kevin and
                  Zou, Andy},
  title        = {A framework for few-shot language model evaluation},
  month        = sep,
  year         = 2021,
  publisher    = {Zenodo},
  version      = {v0.0.1},
  doi          = {10.5281/zenodo.5371628},
  url          = {https://doi.org/10.5281/zenodo.5371628}
}
"""