IlyasMoutawwakil (HF staff) committed
Commit 063cbeb
1 Parent(s): 5468ec9
Files changed (3):
  1. app.py +52 -54
  2. config_store.py +39 -39
  3. requirements.txt +1 -1
app.py CHANGED

```diff
@@ -1,11 +1,13 @@
 import os
 import time
-from huggingface_hub import create_repo, whoami
+import traceback
 import gradio as gr
+from huggingface_hub import create_repo, whoami
+
+
 from config_store import (
     get_process_config,
     get_inference_config,
-    get_onnxruntime_config,
     get_openvino_config,
     get_pytorch_config,
     get_ipex_config,
@@ -13,13 +15,11 @@ from config_store import (
 from optimum_benchmark.launchers.base import Launcher  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
-from optimum_benchmark.backends.onnxruntime.utils import TASKS_TO_ORTMODELS
 from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
 from optimum_benchmark import (
     BenchmarkConfig,
     PyTorchConfig,
     OVConfig,
-    ORTConfig,
     IPEXConfig,
     ProcessConfig,
     InferenceConfig,
@@ -31,15 +31,13 @@ from optimum_benchmark.logging_utils import setup_logging
 DEVICE = "cpu"
 LAUNCHER = "process"
 SCENARIO = "inference"
-BACKENDS = ["onnxruntime", "openvino", "pytorch", "ipex"]
+BACKENDS = ["openvino", "pytorch", "ipex"]
 MODELS = [
-    "hf-internal-testing/tiny-random-bert",
     "google-bert/bert-base-uncased",
     "openai-community/gpt2",
 ]
 TASKS = (
     set(TASKS_TO_OVMODEL.keys())
-    & set(TASKS_TO_ORTMODELS.keys())
     & set(TASKS_TO_IPEXMODEL.keys())
     & set(TASKS_TO_MODEL_LOADERS.keys())
 )
@@ -47,20 +45,19 @@ TASKS = (
 
 def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
     if oauth_token.token is None:
-        return "You must be logged in to use this space"
+        raise gr.Error("Please login to be able to run the benchmark.")
 
+    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
     username = whoami(oauth_token.token)["name"]
-    create_repo(
-        f"{username}/benchmarks",
-        token=oauth_token.token,
-        repo_type="dataset",
-        exist_ok=True,
-    )
+    repo_id = f"{username}/benchmarks"
+    token = oauth_token.token
+
+    create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
+    gr.Info(f'Benchmark will be pushed to "{username}/benchmarks" on the Hub')
 
     configs = {
         "process": {},
         "inference": {},
-        "onnxruntime": {},
         "openvino": {},
         "pytorch": {},
         "ipex": {},
@@ -82,12 +79,6 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
     configs["process"] = ProcessConfig(**configs.pop("process"))
     configs["inference"] = InferenceConfig(**configs.pop("inference"))
 
-    configs["onnxruntime"] = ORTConfig(
-        task=task,
-        model=model,
-        device=DEVICE,
-        **configs["onnxruntime"],
-    )
     configs["openvino"] = OVConfig(
         task=task,
         model=model,
@@ -107,18 +98,15 @@
         **configs["ipex"],
     )
 
-    md_output = (
-        f"<h3>Running benchmark for model {model} on task {task} with {backends}</h3>"
-    )
-
-    yield md_output
+    outputs = {
+        "openvino": "Running benchmark for OpenVINO backend",
+        "pytorch": "Running benchmark for PyTorch backend",
+        "ipex": "Running benchmark for IPEX backend",
+    }
 
-    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
+    yield tuple(outputs[b] for b in BACKENDS)
 
     for backend in backends:
-        md_output += f"<br>🚀 Launching benchmark for {backend}"
-        yield md_output
-
         try:
             benchmark_name = f"{timestamp}/{backend}"
             benchmark_config = BenchmarkConfig(
@@ -128,32 +116,28 @@
                 scenario=configs[SCENARIO],
             )
             benchmark_config.push_to_hub(
-                repo_id=f"{username}/benchmarks",
-                subfolder=benchmark_name,
-                token=oauth_token.token,
+                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
             )
             benchmark_report = Benchmark.launch(benchmark_config)
             benchmark_report.push_to_hub(
-                repo_id=f"{username}/benchmarks",
-                subfolder=benchmark_name,
-                token=oauth_token.token,
+                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
             )
             benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
             benchmark.push_to_hub(
-                repo_id=f"{username}/benchmarks",
-                subfolder=benchmark_name,
-                token=oauth_token.token,
+                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
             )
+            gr.Info(f"Pushed benchmark to {username}/benchmarks/{benchmark_name}")
 
-            md_output += (
-                f"<br>✅ Benchmark for {backend} backend completed successfully"
-            )
-            yield md_output
-        except Exception as e:
-            md_output += (
-                f"<br>❌ Error while running benchmark for {backend} backend: {e}"
-            )
-            yield md_output
+            outputs[backend] = f"\n{benchmark_report.to_markdown_text()}"
+
+            yield tuple(outputs[b] for b in BACKENDS)
+
+        except Exception:
+            gr.Error(f"Error while running benchmark for {backend}")
+
+            outputs[backend] = f"\n{traceback.format_exc()}"
+
+            yield tuple(outputs[b] for b in BACKENDS)
 
 
 def build_demo():
@@ -211,8 +195,6 @@ def build_demo():
             inference_config = get_inference_config()
 
         with gr.Row() as backend_configs:
-            with gr.Accordion(label="OnnxRuntime Config", open=False, visible=True):
-                onnxruntime_config = get_onnxruntime_config()
            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
                 openvino_config = get_openvino_config()
             with gr.Accordion(label="PyTorch Config", open=False, visible=True):
@@ -231,8 +213,21 @@
         with gr.Row():
             button = gr.Button(value="Run Benchmark", variant="primary")
 
-        with gr.Row():
-            md_output = gr.Markdown(label="Output", value="")
+        with gr.Row() as md_output:
+            with gr.Accordion(label="OpenVINO Output", open=True, visible=True):
+                openvino_output = gr.Markdown()
+            with gr.Accordion(label="PyTorch Output", open=True, visible=True):
+                pytorch_output = gr.Markdown()
+            with gr.Accordion(label="IPEX Output", open=True, visible=True):
+                ipex_output = gr.Markdown()
+
+        backends.change(
+            inputs=backends,
+            outputs=md_output.children,
+            fn=lambda values: [
+                gr.update(visible=value in values) for value in BACKENDS
+            ],
+        )
 
         button.click(
             fn=run_benchmark,
@@ -242,12 +237,15 @@ def build_demo():
                 backends,
                 *process_config.values(),
                 *inference_config.values(),
-                *onnxruntime_config.values(),
                 *openvino_config.values(),
                 *pytorch_config.values(),
                 *ipex_config.values(),
             },
-            outputs=[md_output],
+            outputs={
+                openvino_output,
+                pytorch_output,
+                ipex_output,
+            },
             concurrency_limit=1,
         )
 
```
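The app.py change above swaps the single appended-Markdown log for one output per backend: `run_benchmark` is now a generator that yields a tuple with one value per entry in `BACKENDS`, and a `backends.change` handler hides the accordion of any deselected backend. The snippet below is a minimal, self-contained sketch of that Gradio pattern, not the Space's actual code; `fake_run` and the component names are illustrative stand-ins.

```python
# Minimal sketch of the multi-output streaming pattern adopted in app.py:
# a generator yields one value per output component, and a .change handler
# toggles accordion visibility. Names here are illustrative only.
import time

import gradio as gr

BACKENDS = ["openvino", "pytorch", "ipex"]


def fake_run(selected):
    # Start with a placeholder for every output, then fill them in one by one.
    outputs = {b: "waiting..." for b in BACKENDS}
    yield tuple(outputs[b] for b in BACKENDS)
    for backend in selected:
        time.sleep(1)  # stand-in for Benchmark.launch(...)
        outputs[backend] = f"report for {backend}"
        yield tuple(outputs[b] for b in BACKENDS)


with gr.Blocks() as demo:
    backends = gr.CheckboxGroup(choices=BACKENDS, value=BACKENDS, label="backends")

    with gr.Row() as md_output:
        with gr.Accordion(label="OpenVINO Output", open=True):
            openvino_out = gr.Markdown()
        with gr.Accordion(label="PyTorch Output", open=True):
            pytorch_out = gr.Markdown()
        with gr.Accordion(label="IPEX Output", open=True):
            ipex_out = gr.Markdown()

    # Hide the accordion of any backend that gets unchecked, as the commit does
    # via md_output.children.
    backends.change(
        inputs=backends,
        outputs=md_output.children,
        fn=lambda values: [gr.update(visible=b in values) for b in BACKENDS],
    )

    button = gr.Button("Run")
    button.click(
        fn=fake_run,
        inputs=backends,
        outputs=[openvino_out, pytorch_out, ipex_out],
    )

if __name__ == "__main__":
    demo.launch()
```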
 
config_store.py CHANGED

```diff
@@ -16,6 +16,45 @@ def get_process_config():
     }
 
 
+def get_inference_config():
+    return {
+        "inference.warmup_runs": gr.Slider(
+            step=1,
+            value=10,
+            minimum=0,
+            maximum=10,
+            label="inference.warmup_runs",
+            info="Number of warmup runs",
+        ),
+        "inference.duration": gr.Slider(
+            step=1,
+            value=10,
+            minimum=0,
+            maximum=10,
+            label="inference.duration",
+            info="Minimum duration of the benchmark in seconds",
+        ),
+        "inference.iterations": gr.Slider(
+            step=1,
+            value=10,
+            minimum=0,
+            maximum=10,
+            label="inference.iterations",
+            info="Minimum number of iterations of the benchmark",
+        ),
+        "inference.latency": gr.Checkbox(
+            value=True,
+            label="inference.latency",
+            info="Measures the latency of the model",
+        ),
+        "inference.memory": gr.Checkbox(
+            value=False,
+            label="inference.memory",
+            info="Measures the peak memory consumption",
+        ),
+    }
+
+
 def get_pytorch_config():
     return {
         "pytorch.torch_dtype": gr.Dropdown(
@@ -90,42 +129,3 @@ def get_openvino_config():
 
 def get_ipex_config():
     return {}
-
-
-def get_inference_config():
-    return {
-        "inference.warmup_runs": gr.Slider(
-            step=1,
-            value=10,
-            minimum=0,
-            maximum=10,
-            label="inference.warmup_runs",
-            info="Number of warmup runs",
-        ),
-        "inference.duration": gr.Slider(
-            step=1,
-            value=10,
-            minimum=0,
-            maximum=10,
-            label="inference.duration",
-            info="Minimum duration of the benchmark in seconds",
-        ),
-        "inference.iterations": gr.Slider(
-            step=1,
-            value=10,
-            minimum=0,
-            maximum=10,
-            label="inference.iterations",
-            info="Minimum number of iterations of the benchmark",
-        ),
-        "inference.latency": gr.Checkbox(
-            value=True,
-            label="inference.latency",
-            info="Measures the latency of the model",
-        ),
-        "inference.memory": gr.Checkbox(
-            value=False,
-            label="inference.memory",
-            info="Measures the peak memory consumption",
-        ),
-    }
```
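config_store.py only moves `get_inference_config()` up next to `get_process_config()`; each getter still returns Gradio components keyed by dotted names such as `"inference.warmup_runs"`. The diff does not show how those dotted keys become the per-section `configs` dict built in `run_benchmark` (that code is in the unchanged part of app.py), so the sketch below is an assumption about that grouping step, using a hypothetical `group_kwargs` helper.

```python
# Hedged sketch: how dotted component keys like "inference.warmup_runs" could be
# regrouped into the per-section dicts that run_benchmark builds. The real
# parsing lives in unchanged app.py code; this is an illustrative guess.
from typing import Any, Dict


def group_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    configs: Dict[str, Dict[str, Any]] = {
        "process": {},
        "inference": {},
        "openvino": {},
        "pytorch": {},
        "ipex": {},
    }
    for key, value in kwargs.items():
        # "inference.warmup_runs" -> section "inference", option "warmup_runs"
        section, option = key.split(".", maxsplit=1)
        configs[section][option] = value
    return configs


# Example with values collected from the sliders/checkboxes above:
print(group_kwargs({
    "inference.warmup_runs": 10,
    "inference.memory": False,
    "pytorch.torch_dtype": "float32",
}))
# {'process': {}, 'inference': {'warmup_runs': 10, 'memory': False},
#  'openvino': {}, 'pytorch': {'torch_dtype': 'float32'}, 'ipex': {}}
```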
requirements.txt CHANGED

```diff
@@ -1 +1 @@
-optimum-benchmark[openvino,onnxruntime,ipex]@git+https://github.com/huggingface/optimum-benchmark.git
+optimum-benchmark[openvino,onnxruntime,ipex]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
```
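requirements.txt now pins the `markdown-report` branch, which is where the `benchmark_report.to_markdown_text()` call used in app.py comes from. Below is a hedged sketch of running a comparable benchmark locally against that pin; the exact `BenchmarkConfig`/`OVConfig` keyword names are assumed from the diff and from optimum-benchmark's public API rather than confirmed by this commit.

```python
# Hedged sketch: a local equivalent of what the Space does for one backend.
# Assumes the pinned branch is installed, e.g.:
#   pip install "optimum-benchmark[openvino,ipex]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report"
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    OVConfig,
    ProcessConfig,
)

# Field names mirror the config_store.py components and the calls in app.py;
# treat them as assumptions, not a verified signature.
config = BenchmarkConfig(
    name="openvino-bert",
    launcher=ProcessConfig(),
    scenario=InferenceConfig(
        warmup_runs=10, duration=10, iterations=10, latency=True, memory=False
    ),
    backend=OVConfig(
        task="text-classification",
        model="google-bert/bert-base-uncased",
        device="cpu",
    ),
)

report = Benchmark.launch(config)
# Markdown table, as rendered in the Space's per-backend accordions.
print(report.to_markdown_text())
```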