Spaces:
Running
Running
update
Browse files
- app.py +62 -34
- uploads.py +1 -1
app.py
CHANGED
@@ -58,6 +58,29 @@ def baseline_load_data(model,version,metrics):
|
|
58 |
|
59 |
return df
|
60 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
61 |
def load_data(model, version, metrics):
|
62 |
baseline_df = baseline_load_data(model, version, metrics)
|
63 |
# now for every file in "versions/{model}-{version}/*.csv"
|
@@ -109,57 +132,62 @@ with demo:
|
|
109 |
with gr.Tabs():
|
110 |
with gr.TabItem("Leaderboard"):
|
111 |
with gr.Row():
|
112 |
-
|
113 |
-
choices=["
|
114 |
-
label="π Select
|
115 |
-
value="
|
116 |
)
|
117 |
-
|
118 |
-
choices=[
|
119 |
-
label="π Select
|
120 |
-
value="
|
121 |
)
|
122 |
-
|
123 |
-
|
124 |
-
label="Select
|
125 |
-
|
126 |
-
value = ["ROUGE", "Truth Ratio", "Prob."],
|
127 |
-
)
|
128 |
-
|
129 |
-
with gr.Row():
|
130 |
-
search_bar = gr.Textbox(
|
131 |
-
placeholder="Search for methods...",
|
132 |
-
show_label=False,
|
133 |
)
|
134 |
|
135 |
leaderboard_table = gr.components.Dataframe(
|
136 |
-
value=load_data("
|
137 |
interactive=True,
|
138 |
visible=True,
|
139 |
)
|
140 |
-
|
141 |
-
|
142 |
change_version,
|
143 |
-
inputs=[model_dropdown
|
144 |
outputs=leaderboard_table
|
145 |
)
|
146 |
-
|
147 |
-
|
148 |
change_version,
|
149 |
-
inputs=[model_dropdown
|
150 |
outputs=leaderboard_table
|
151 |
)
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
inputs=[
|
156 |
outputs=leaderboard_table
|
157 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
158 |
|
159 |
-
|
160 |
-
|
161 |
-
inputs=[
|
162 |
-
outputs=
|
163 |
)
|
164 |
|
165 |
with gr.Accordion("Submit a new model for evaluation"):
|
|
|
58 |
|
59 |
return df
|
60 |
|
61 |
+
def update_dropdowns(setting, dataset, model):
    """Keep the three leaderboard dropdowns mutually consistent.

    Applies the coupling rules between the setting, dataset, and model
    dropdowns: the "memorization" setting pins dataset/model to the
    newsqa pair; the "books" dataset forces the RAG setting (and swaps
    away the newsqa model); the newsqa model forces the memorization
    setting and the news dataset; any other model forces RAG.

    Args:
        setting: currently selected setting ("RAG" or "memorization").
        dataset: currently selected dataset ("news" or "books").
        model: currently selected model name.

    Returns:
        A 3-tuple of ``gr.update(...)`` payloads for the setting,
        dataset, and model dropdowns, in that order.
    """
    # Start from fully unlocked dropdowns; the rules below selectively
    # pin values and disable interaction.
    setting_upd = gr.update(interactive=True)
    dataset_upd = gr.update(interactive=True)
    model_upd = gr.update(interactive=True)

    if setting == "memorization":
        # Memorization results only exist for the newsqa fine-tune on news.
        dataset_upd = gr.update(value="news", interactive=False)
        model_upd = gr.update(value="llama2-7B-chat_newsqa", interactive=False)
    elif dataset == "books":
        # Books data is RAG-only; the newsqa model does not apply here,
        # so fall back to the base chat model.
        setting_upd = gr.update(value="RAG", interactive=False)
        if model == "llama2-7B-chat_newsqa":
            model_upd = gr.update(value="llama2-7B-chat-hf", interactive=True)
    elif model == "llama2-7B-chat_newsqa":
        # The newsqa fine-tune implies memorization on the news dataset.
        setting_upd = gr.update(value="memorization", interactive=False)
        dataset_upd = gr.update(value="news", interactive=False)
    else:
        # Any other model is evaluated under RAG only.
        setting_upd = gr.update(value="RAG", interactive=False)

    return setting_upd, dataset_upd, model_upd
|
82 |
+
|
83 |
+
|
84 |
def load_data(model, version, metrics):
|
85 |
baseline_df = baseline_load_data(model, version, metrics)
|
86 |
# now for every file in "versions/{model}-{version}/*.csv"
|
|
|
132 |
with gr.Tabs():
|
133 |
with gr.TabItem("Leaderboard"):
|
134 |
with gr.Row():
|
135 |
+
setting_dropdown = gr.Dropdown(
|
136 |
+
choices = ["RAG", "memorization"],
|
137 |
+
label="π Select Setting",
|
138 |
+
value="RAG",
|
139 |
)
|
140 |
+
dataset_dropdown = gr.Dropdown(
|
141 |
+
choices = ['news', 'books'],
|
142 |
+
label="π Select Dataset",
|
143 |
+
value="news",
|
144 |
)
|
145 |
+
model_dropdown = gr.Dropdown(
|
146 |
+
choices=["llama2-7B-chat-hf", "llama2-70B-chat-hf", "dbrx-instruct", "llama2-7B-chat_newsqa"],
|
147 |
+
label="π Select Model",
|
148 |
+
value="llama2-7B-chat-hf",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
149 |
)
|
150 |
|
151 |
leaderboard_table = gr.components.Dataframe(
|
152 |
+
value=load_data("RAG", "news", "llama2-7B-chat-hf"),
|
153 |
interactive=True,
|
154 |
visible=True,
|
155 |
)
|
156 |
+
|
157 |
+
setting_dropdown.change(
|
158 |
change_version,
|
159 |
+
inputs=[dataset_dropdown,model_dropdown],
|
160 |
outputs=leaderboard_table
|
161 |
)
|
162 |
+
|
163 |
+
dataset_dropdown.change(
|
164 |
change_version,
|
165 |
+
inputs=[setting_dropdown, model_dropdown],
|
166 |
outputs=leaderboard_table
|
167 |
)
|
168 |
+
|
169 |
+
model_dropdown.change(
|
170 |
+
change_version,
|
171 |
+
inputs=[setting_dropdown, dataset_dropdown],
|
172 |
outputs=leaderboard_table
|
173 |
)
|
174 |
+
|
175 |
+
setting_dropdown.change(
|
176 |
+
update_dropdowns,
|
177 |
+
inputs=[setting_dropdown, dataset_dropdown, model_dropdown],
|
178 |
+
outputs=[setting_dropdown, dataset_dropdown, model_dropdown]
|
179 |
+
)
|
180 |
+
|
181 |
+
dataset_dropdown.change(
|
182 |
+
update_dropdowns,
|
183 |
+
inputs=[setting_dropdown, dataset_dropdown, model_dropdown],
|
184 |
+
outputs=[setting_dropdown, dataset_dropdown, model_dropdown]
|
185 |
+
)
|
186 |
|
187 |
+
model_dropdown.change(
|
188 |
+
update_dropdowns,
|
189 |
+
inputs=[setting_dropdown, dataset_dropdown, model_dropdown],
|
190 |
+
outputs=[setting_dropdown, dataset_dropdown, model_dropdown]
|
191 |
)
|
192 |
|
193 |
with gr.Accordion("Submit a new model for evaluation"):
|
uploads.py
CHANGED
@@ -5,7 +5,7 @@ import datetime
|
|
5 |
import pandas as pd
|
6 |
|
7 |
|
8 |
-
RESULTS_PATH = "
|
9 |
api = HfApi()
|
10 |
TOKEN = os.environ.get("TOKEN", None)
|
11 |
YEAR_VERSION = "2024"
|
|
|
5 |
import pandas as pd
|
6 |
|
7 |
|
8 |
+
RESULTS_PATH = "boyiwei/CoTaEval_leaderboard"
|
9 |
api = HfApi()
|
10 |
TOKEN = os.environ.get("TOKEN", None)
|
11 |
YEAR_VERSION = "2024"
|