references
- app.py +4 -1
- constants.py +1 -1
app.py CHANGED
@@ -54,7 +54,10 @@ def make_clickable_model(model_name, model_info):
         link = model_info[model_name]['hf_name']
     else:
         link = f"https://huggingface.co/{model_info[model_name]['hf_name']}"
-    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_info[model_name]["pretty_name"]}</a>'
+    if model_name.startswith("gpt"):
+        return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted; background-color: lightgray;">{model_info[model_name]["pretty_name"]}</a>'
+    else:
+        return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_info[model_name]["pretty_name"]}</a>'
 
 
 def build_demo(original_df, TYPES):
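For context, here is the full `make_clickable_model` after this change, as a minimal sketch. The branch condition above line 54 is not visible in the hunk, so the `startswith("http")` guard below is an assumption, not necessarily the Space's actual code.

```python
# Minimal sketch of make_clickable_model after this commit. The condition
# guarding line 54 is not shown in the hunk; the startswith("http") check
# below is an assumption.
def make_clickable_model(model_name, model_info):
    if model_info[model_name]['hf_name'].startswith("http"):  # assumed guard
        link = model_info[model_name]['hf_name']  # already a full URL
    else:
        link = f"https://huggingface.co/{model_info[model_name]['hf_name']}"
    # New in this commit: gpt-* entries get a lightgray background so the
    # proprietary reference models stand out in the leaderboard table.
    if model_name.startswith("gpt"):
        return (f'<a target="_blank" href="{link}" style="color: var(--link-text-color); '
                f'text-decoration: underline;text-decoration-style: dotted; '
                f'background-color: lightgray;">{model_info[model_name]["pretty_name"]}</a>')
    else:
        return (f'<a target="_blank" href="{link}" style="color: var(--link-text-color); '
                f'text-decoration: underline;text-decoration-style: dotted;">'
                f'{model_info[model_name]["pretty_name"]}</a>')
```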
constants.py CHANGED
@@ -16,7 +16,7 @@ TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body>
 
 INTRODUCTION_TEXT= """
 # URIAL Bench (Evaluating Base LLMs with URIAL on MT-Bench)
-[🌐 Website](https://allenai.github.io/re-align/index.html) | [💻 GitHub](https://github.com/Re-Align/URIAL) | [📑 Paper](https://arxiv.org/abs/2312.01552) | [🐦
+[🌐 Website](https://allenai.github.io/re-align/index.html) | [💻 GitHub](https://github.com/Re-Align/URIAL) | [📑 Paper](https://arxiv.org/abs/2312.01552) | [🐦 Tweet 1](https://x.com/billyuchenlin/status/1759541978881311125?s=20) | [🐦 Tweet 2](https://x.com/billyuchenlin/status/1762206077566013505?s=20)
 
 > URIAL Bench tests the capacity of base LLMs for alignment without introducing the factors of fine-tuning (learning rate, data, etc.), which are hard to control for fair comparisons.
 Specifically, we use [URIAL](https://github.com/Re-Align/URIAL/tree/main/run_scripts/mt-bench#run-urial-inference) to align a base LLM, and evaluate its performance on MT-Bench.
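INTRODUCTION_TEXT is a Markdown string, so the two new tweet links render as part of the leaderboard header. As a hypothetical sketch only (the actual build_demo layout is not part of this commit), it would typically be mounted in the Gradio app like this:

```python
# Hypothetical sketch of how INTRODUCTION_TEXT could be rendered in the demo;
# the real build_demo layout in app.py is not shown in this commit.
import gradio as gr
from constants import TITLE, INTRODUCTION_TEXT

def build_demo(original_df, TYPES):
    with gr.Blocks() as demo:
        gr.HTML(TITLE)                  # TITLE is raw HTML (see hunk header)
        gr.Markdown(INTRODUCTION_TEXT)  # the Markdown string edited here
        gr.Dataframe(value=original_df, datatype=TYPES)  # leaderboard table
    return demo
```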