Add query based selection

- app.py (+53, -50)
- requirements.txt (+1, -0)
app.py
CHANGED
@@ -20,7 +20,7 @@ DATASETS_PREVIEW_API = os.getenv("DATASETS_PREVIEW_API")
 TASK_TO_ID = {
     "binary_classification": 1,
     "multi_class_classification": 2,
-    "multi_label_classification": 3,
+    # "multi_label_classification": 3, # Not fully supported in AutoTrain
     "entity_extraction": 4,
     "extractive_question_answering": 5,
     "translation": 6,
@@ -31,7 +31,7 @@ TASK_TO_ID = {
 AUTOTRAIN_TASK_TO_HUB_TASK = {
     "binary_classification": "text-classification",
     "multi_class_classification": "text-classification",
-    "multi_label_classification": "text-classification",
+    # "multi_label_classification": "text-classification", # Not fully supported in AutoTrain
     "entity_extraction": "token-classification",
     "extractive_question_answering": "question-answering",
     "translation": "translation",
@@ -39,9 +39,6 @@ AUTOTRAIN_TASK_TO_HUB_TASK = {
     "single_column_regression": 10,
 }
 
-# TODO: remove this hardcoded logic and accept any dataset on the Hub
-# DATASETS_TO_EVALUATE = ["emotion", "conll2003", "imdb", "squad", "xsum", "ncbi_disease", "go_emotions"]
-
 ###########
 ### APP ###
 ###########
@@ -52,13 +49,19 @@ st.markdown(
     you to evaluate any 🤗 Transformers model with a dataset on the Hub. Please
     select the dataset and configuration below. The results of your evaluation
     will be displayed on the public leaderboard
-    [here](https://huggingface.co/spaces/
+    [here](https://huggingface.co/spaces/autoevaluate/leaderboards).
     """
 )
 
 all_datasets = [d.id for d in list_datasets()]
-
-
+query_params = st.experimental_get_query_params()
+default_dataset = all_datasets[0]
+if "dataset" in query_params:
+    if len(query_params["dataset"]) > 0 and query_params["dataset"][0] in all_datasets:
+        default_dataset = query_params["dataset"][0]
+
+selected_dataset = st.selectbox("Select a dataset", all_datasets, index=all_datasets.index(default_dataset))
+st.experimental_set_query_params(**{"dataset": [selected_dataset]})
 
 # TODO: remove this step once we select real datasets
 # Strip out original dataset name
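The hunk above reads the dataset choice from the URL and writes it back after selection. `st.experimental_get_query_params` returns each parameter as a list of strings, which is why the code checks the list length and indexes `[0]`. A minimal sketch of that guard with stand-in values (the dataset names below are illustrative only):

```python
# Stand-ins for what list_datasets() and st.experimental_get_query_params()
# return in the app; the values here are illustrative only.
all_datasets = ["emotion", "conll2003", "imdb"]
query_params = {"dataset": ["emotion"]}  # shape produced by a URL ending in ?dataset=emotion

# Fall back to the first dataset unless the URL names a known one.
default_dataset = all_datasets[0]
if len(query_params.get("dataset", [])) > 0 and query_params["dataset"][0] in all_datasets:
    default_dataset = query_params["dataset"][0]

print(default_dataset)  # -> emotion
```

Because the selection is written back with `st.experimental_set_query_params`, the resulting URL is shareable: loading it restores the same dataset.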
@@ -165,45 +168,45 @@ with st.form(key="form"):
     # else:
     #     st.error("🙈 Oh noes, there was an error submitting your submission!")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # st.write("Creating project!")
+    # payload = {
+    #     "username": AUTOTRAIN_USERNAME,
+    #     "proj_name": "my-eval-project-1",
+    #     "task": TASK_TO_ID[metadata[0]["task_id"]],
+    #     "config": {
+    #         "language": "en",
+    #         "max_models": 5,
+    #         "instance": {
+    #             "provider": "aws",
+    #             "instance_type": "ml.g4dn.4xlarge",
+    #             "max_runtime_seconds": 172800,
+    #             "num_instances": 1,
+    #             "disk_size_gb": 150,
+    #         },
+    #     },
+    # }
+    # json_resp = http_post(
+    #     path="/projects/create", payload=payload, token=HF_TOKEN, domain=AUTOTRAIN_BACKEND_API
+    # ).json()
+    # # print(json_resp)
+
+    # # st.write("Uploading data")
+    # payload = {
+    #     "split": 4,
+    #     "col_mapping": metadata[0]["col_mapping"],
+    #     "load_config": {"max_size_bytes": 0, "shuffle": False},
+    # }
+    # json_resp = http_post(
+    #     path="/projects/522/data/emotion",
+    #     payload=payload,
+    #     token=HF_TOKEN,
+    #     domain=AUTOTRAIN_BACKEND_API,
+    #     params={"type": "dataset", "config_name": "default", "split_name": "train"},
+    # ).json()
+    # print(json_resp)
+
+    # st.write("Training")
+    # json_resp = http_get(
+    #     path="/projects/522/data/start_process", token=HF_TOKEN, domain=AUTOTRAIN_BACKEND_API
+    # ).json()
+    # print(json_resp)
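The commented-out scaffolding above calls `http_post` and `http_get` helpers that are defined elsewhere in `app.py` and do not appear in this diff. Judging only from the call sites, they presumably wrap `requests` roughly as follows (a sketch under that assumption, not the app's actual helpers):

```python
import requests


def http_post(path, payload, token, domain, params=None):
    # Hypothetical wrapper inferred from the call sites in the diff;
    # the real helper in app.py may differ (e.g. in auth or error handling).
    return requests.post(
        url=f"{domain}{path}",
        json=payload,
        headers={"Authorization": f"Bearer {token}"},
        params=params,
    )


def http_get(path, token, domain, params=None):
    # Same assumption as above, for GET requests.
    return requests.get(
        url=f"{domain}{path}",
        headers={"Authorization": f"Bearer {token}"},
        params=params,
    )
```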
requirements.txt
CHANGED
@@ -1,2 +1,3 @@
 huggingface-hub==0.4.0
 python-dotenv
+streamlit==1.2.0
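The `streamlit==1.2.0` pin ensures the experimental query-param API used in `app.py` is available. A quick, hypothetical sanity check for a local environment:

```python
import streamlit as st

# Confirm the pinned version (and the experimental API the app relies on) is present.
assert st.__version__ == "1.2.0"
assert hasattr(st, "experimental_get_query_params")
```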