ZeroCommand committed
Commit 5927800 • 1 Parent(s): b4afb86

add leaderboard arg and inf token
Files changed:
- app_text_classification.py +2 -1
- io_utils.py +10 -3
- text_classification_ui_helpers.py +6 -0
app_text_classification.py CHANGED

@@ -70,6 +70,7 @@ def get_demo(demo):
         run_local = gr.Checkbox(value=True, label="Run in this Space")
         use_inference = read_inference_type(uid) == "hf_inference_api"
         run_inference = gr.Checkbox(value=use_inference, label="Run with Inference API")
+        inference_token = gr.Textbox(value="", label="HF Token for Inference API", visible=False)
 
         with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
             selected = read_scanners(uid)
@@ -105,7 +106,7 @@ def get_demo(demo):
 
     scanners.change(write_scanners, inputs=[scanners, uid_label])
 
-    run_inference.change(write_inference_type, inputs=[run_inference, uid_label])
+    run_inference.change(write_inference_type, inputs=[run_inference, uid_label], outputs=[inference_token])
 
     gr.on(
         triggers=[label.change for label in column_mappings],
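For context, the change above relies on a standard Gradio pattern: a handler wired through outputs= can return gr.update(...) to mutate a component's properties at runtime, which is how the hidden token box can be shown only when the Inference API is selected. A minimal, self-contained sketch of that pattern (component and function names here are illustrative, not taken from this Space):

import gradio as gr

def toggle_token_box(checked):
    # Returning gr.update(...) changes the component listed in outputs=
    # (here: the token textbox) without rebuilding the UI.
    return gr.update(visible=checked)

with gr.Blocks() as demo:
    use_api = gr.Checkbox(label="Run with Inference API")
    token = gr.Textbox(label="HF Token", visible=False)
    # Show or hide the token field whenever the checkbox flips.
    use_api.change(toggle_token_box, inputs=[use_api], outputs=[token])

demo.launch()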
io_utils.py CHANGED

@@ -1,6 +1,6 @@
 import os
 import subprocess
-
+import gradio as gr
 import yaml
 
 import pipe
@@ -28,6 +28,7 @@ def read_scanners(uid):
     with open(get_yaml_path(uid), "r") as f:
         config = yaml.load(f, Loader=yaml.FullLoader)
         scanners = config.get("detectors", [])
+        f.close()
     return scanners
 
 
@@ -39,6 +40,7 @@ def write_scanners(scanners, uid):
         config["detectors"] = scanners
         # save scanners to detectors in yaml
         yaml.dump(config, f, Dumper=Dumper)
+        f.close()
 
 
 # read model_type from yaml file
@@ -47,6 +49,7 @@ def read_inference_type(uid):
     with open(get_yaml_path(uid), "r") as f:
         config = yaml.load(f, Loader=yaml.FullLoader)
         inference_type = config.get("inference_type", "")
+        f.close()
     return inference_type
 
 
@@ -60,6 +63,9 @@ def write_inference_type(use_inference, uid):
             config["inference_type"] = "hf_pipeline"
         # save inference_type to inference_type in yaml
         yaml.dump(config, f, Dumper=Dumper)
+        f.close()
+    return (gr.update(visible=(use_inference == "hf_inference_api")))
+
 
 
 # read column mapping from yaml file
@@ -69,6 +75,7 @@ def read_column_mapping(uid):
         config = yaml.load(f, Loader=yaml.FullLoader)
         if config:
             column_mapping = config.get("column_mapping", dict())
+        f.close()
     return column_mapping
 
 
@@ -85,6 +92,7 @@ def write_column_mapping(mapping, uid):
     with open(get_yaml_path(uid), "w") as f:
         # save column_mapping to column_mapping in yaml
         yaml.dump(config, f, Dumper=Dumper)
+        f.close()
 
 
 # convert column mapping dataframe to json
@@ -107,6 +115,7 @@ def get_logs_file(uid):
 def write_log_to_user_file(id, log):
     with open(f"./tmp/{id}_log", "a") as f:
         f.write(log)
+        f.close()
 
 
 def save_job_to_pipe(id, job, lock):
@@ -120,8 +129,6 @@ def pop_job_from_pipe():
     job_info = pipe.jobs.pop()
     write_log_to_user_file(job_info[0], f"Running job id {job_info[0]}\n")
     command = job_info[1]
-    print(f"Running job id {job_info[0]}")
-    print(f"Running command {command}")
 
     log_file = open(f"./tmp/{job_info[0]}_log", "a")
     subprocess.Popen(
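All of the helpers touched above share the same YAML round-trip: load the config with yaml.load(f, Loader=yaml.FullLoader), read or mutate one key, and serialize it back with yaml.dump. (A with open(...) block already closes the file on exit, so the explicit f.close() calls are redundant but harmless.) A minimal sketch of that round-trip, using a placeholder path and the default dumper in place of the module's get_yaml_path/Dumper:

import yaml

CONFIG_PATH = "config.yaml"  # placeholder; the Space derives a per-uid path

def read_key(key, default=None):
    # Load the whole YAML document, then pull out a single key.
    with open(CONFIG_PATH, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    return (config or {}).get(key, default)

def write_key(key, value):
    # Read-modify-write: deserialize, update one key, dump everything back.
    try:
        with open(CONFIG_PATH, "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader) or {}
    except FileNotFoundError:
        config = {}
    config[key] = value
    with open(CONFIG_PATH, "w") as f:
        yaml.dump(config, f)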
text_classification_ui_helpers.py CHANGED

@@ -188,6 +188,10 @@ def try_submit(m_id, d_id, config, split, local, uid):
         return (gr.update(interactive=True), gr.update(visible=False))
     feature_mapping = all_mappings["features"]
 
+    leaderboard_dataset = None
+    if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
+        leaderboard_dataset = "ZeroCommand/test-giskard-report"
+
     # TODO: Set column mapping for some dataset such as `amazon_polarity`
     if local:
         command = [
@@ -216,6 +220,8 @@ def try_submit(m_id, d_id, config, split, local, uid):
             json.dumps(label_mapping),
             "--scan_config",
             get_yaml_path(uid),
+            "--leaderboard_dataset",
+            leaderboard_dataset,
         ]
 
         eval_str = f"[{m_id}]<{d_id}({config}, {split} set)>"
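The two new list entries above become --leaderboard_dataset <value> on the scanner subprocess's command line. The consumer is not part of this commit; a typical argparse-based receiver might look like the following hypothetical sketch (the flag names mirror the diff, everything else is assumed):

import argparse

parser = argparse.ArgumentParser()
# Mirror the argv entries appended by try_submit above.
parser.add_argument("--scan_config", help="path to the YAML scan config")
parser.add_argument("--leaderboard_dataset", default=None,
                    help="dataset repo to publish the report to; unset outside the main Space")
args = parser.parse_args()

if args.leaderboard_dataset:
    # Only the giskardai/giskard-evaluator Space sets this, per the SPACE_ID check.
    print(f"Report will be pushed to {args.leaderboard_dataset}")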