IlyasMoutawwakil committed 778bc8e (1 parent: 063cbeb)

openvino only

Files changed (3):
  1. app.py +21 -20
  2. benchmark.log +0 -41
  3. requirements.txt +1 -1
app.py CHANGED
@@ -10,17 +10,17 @@ from config_store import (
     get_inference_config,
     get_openvino_config,
     get_pytorch_config,
-    get_ipex_config,
+    # get_ipex_config,
 )
 from optimum_benchmark.launchers.base import Launcher  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
-from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
+# from optimum_benchmark.backends.ipex.utils import TASKS_TO_IPEXMODEL
 from optimum_benchmark import (
     BenchmarkConfig,
     PyTorchConfig,
     OVConfig,
-    IPEXConfig,
+    # IPEXConfig,
     ProcessConfig,
     InferenceConfig,
     Benchmark,
@@ -31,21 +31,22 @@ from optimum_benchmark.logging_utils import setup_logging
 DEVICE = "cpu"
 LAUNCHER = "process"
 SCENARIO = "inference"
-BACKENDS = ["openvino", "pytorch", "ipex"]
+BACKENDS = ["openvino", "pytorch"]
 MODELS = [
     "google-bert/bert-base-uncased",
     "openai-community/gpt2",
 ]
 TASKS = (
     set(TASKS_TO_OVMODEL.keys())
-    & set(TASKS_TO_IPEXMODEL.keys())
+    # & set(TASKS_TO_IPEXMODEL.keys())
     & set(TASKS_TO_MODEL_LOADERS.keys())
 )
 
 
 def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
     if oauth_token.token is None:
-        raise gr.Error("Please login to be able to run the benchmark.")
+        gr.Error("Please login to be able to run the benchmark.")
+        return tuple(None for _ in BACKENDS)
 
     timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
     username = whoami(oauth_token.token)["name"]
@@ -60,7 +61,7 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         "inference": {},
         "openvino": {},
         "pytorch": {},
-        "ipex": {},
+        # "ipex": {},
     }
 
     for key, value in kwargs.items():
@@ -91,17 +92,17 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         device=DEVICE,
         **configs["pytorch"],
     )
-    configs["ipex"] = IPEXConfig(
-        task=task,
-        model=model,
-        device=DEVICE,
-        **configs["ipex"],
-    )
+    # configs["ipex"] = IPEXConfig(
+    #     task=task,
+    #     model=model,
+    #     device=DEVICE,
+    #     **configs["ipex"],
+    # )
 
     outputs = {
         "openvino": "Running benchmark for OpenVINO backend",
         "pytorch": "Running benchmark for PyTorch backend",
-        "ipex": "Running benchmark for IPEX backend",
+        # "ipex": "Running benchmark for IPEX backend",
     }
 
     yield tuple(outputs[b] for b in BACKENDS)
@@ -199,8 +200,8 @@ def build_demo():
            openvino_config = get_openvino_config()
         with gr.Accordion(label="PyTorch Config", open=False, visible=True):
             pytorch_config = get_pytorch_config()
-        with gr.Accordion(label="IPEX Config", open=False, visible=True):
-            ipex_config = get_ipex_config()
+        # with gr.Accordion(label="IPEX Config", open=False, visible=True):
+        #     ipex_config = get_ipex_config()
 
         backends.change(
             inputs=backends,
@@ -218,8 +219,8 @@ def build_demo():
            openvino_output = gr.Markdown()
         with gr.Accordion(label="PyTorch Output", open=True, visible=True):
             pytorch_output = gr.Markdown()
-        with gr.Accordion(label="IPEX Output", open=True, visible=True):
-            ipex_output = gr.Markdown()
+        # with gr.Accordion(label="IPEX Output", open=True, visible=True):
+        #     ipex_output = gr.Markdown()
 
         backends.change(
             inputs=backends,
@@ -239,12 +240,12 @@ def build_demo():
                 *inference_config.values(),
                 *openvino_config.values(),
                 *pytorch_config.values(),
-                *ipex_config.values(),
+                # *ipex_config.values(),
             },
             outputs={
                 openvino_output,
                 pytorch_output,
-                ipex_output,
+                # ipex_output,
             },
             concurrency_limit=1,
         )
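For context, the per-backend pattern visible in the hunks above (build a backend Config, then launch) follows optimum-benchmark's documented flow. Below is a minimal standalone sketch of the OpenVINO path that remains after this commit, using only the names app.py imports; the task, model, and run name are illustrative, not taken from the app:

from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    OVConfig,
    ProcessConfig,
)

# launcher: the isolated "process" launcher, matching LAUNCHER = "process"
launcher_config = ProcessConfig()
# scenario: the "inference" scenario, tracking latency (and optionally memory)
scenario_config = InferenceConfig(latency=True, memory=True)
# backend: OpenVINO on CPU, mirroring DEVICE = "cpu"; task/model are illustrative
backend_config = OVConfig(
    task="text-classification",
    model="google-bert/bert-base-uncased",
    device="cpu",
)
benchmark_config = BenchmarkConfig(
    name="openvino_bert",  # illustrative run name
    launcher=launcher_config,
    scenario=scenario_config,
    backend=backend_config,
)
# spawns the benchmark in a separate process and returns the aggregated report
benchmark_report = Benchmark.launch(benchmark_config)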
benchmark.log DELETED
@@ -1,41 +0,0 @@
-[ISOLATED-PROCESS][2024-09-25 18:53:27,439][pytorch][INFO] - Allocating pytorch backend
-[ISOLATED-PROCESS][2024-09-25 18:53:27,439][pytorch][INFO] - + Seeding backend with 42
-[ISOLATED-PROCESS][2024-09-25 18:53:27,440][pytorch][INFO] - + Benchmarking a Transformers model
-[ISOLATED-PROCESS][2024-09-25 18:53:28,549][inference][INFO] - Allocating inference scenario
-[ISOLATED-PROCESS][2024-09-25 18:53:28,549][inference][INFO] - + Creating input generator
-[ISOLATED-PROCESS][2024-09-25 18:53:28,550][inference][INFO] - + Generating Inference inputs
-[ISOLATED-PROCESS][2024-09-25 18:53:28,550][inference][INFO] - + Initializing Inference report
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Preparing input shapes for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Running model loading tracking
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][latency][INFO] - + Tracking latency using CPU performance counter
-[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Loading model for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:28,552][pytorch][INFO] - + Creating backend temporary directory
-[ISOLATED-PROCESS][2024-09-25 18:53:28,553][pytorch][INFO] - + Loading model with pretrained weights
-[ISOLATED-PROCESS][2024-09-25 18:53:28,554][pytorch][INFO] - + Loading Transformers model
-[ISOLATED-PROCESS][2024-09-25 18:53:28,881][pytorch][INFO] - + Enabling eval mode
-[ISOLATED-PROCESS][2024-09-25 18:53:28,883][pytorch][INFO] - + Cleaning up backend temporary directory
-[ISOLATED-PROCESS][2024-09-25 18:53:28,884][inference][INFO] - + Preparing inputs for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:28,885][inference][INFO] - + Warming up backend for Inference
-[ISOLATED-PROCESS][2024-09-25 18:53:29,252][inference][INFO] - + Running Inference latency tracking
-[ISOLATED-PROCESS][2024-09-25 18:53:29,252][latency][INFO] - + Tracking latency using CPU performance counter
-[ISOLATED-PROCESS][2024-09-25 18:53:39,282][latency][INFO] - + load latency:
-[ISOLATED-PROCESS][2024-09-25 18:53:39,282][latency][INFO] - - count: 1
-[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - total: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - mean: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - stdev: 0.000000 s (0.00%)
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p50: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p90: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p95: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p99: 0.331631 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - + forward latency:
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - count: 266
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - total: 10.021863 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - mean: 0.037676 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - stdev: 0.008922 s (23.68%)
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p50: 0.035970 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p90: 0.043994 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p95: 0.046884 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,287][latency][INFO] - - p99: 0.073021 s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,288][latency][INFO] - + forward throughput: 53.083941 samples/s
-[ISOLATED-PROCESS][2024-09-25 18:53:39,296][process][INFO] - + Sending report to main process
-[ISOLATED-PROCESS][2024-09-25 18:53:39,297][process][INFO] - + Exiting isolated process
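As an aside, the figures in the deleted log are internally consistent. A quick check with the values copied from it; the factor of 2 is an assumed per-forward batch size, which the log does not state:

# values copied from the deleted log
forward_count = 266
forward_total_s = 10.021863
forwards_per_second = forward_count / forward_total_s  # ≈ 26.54
# the logged 53.083941 samples/s is ~2x that, consistent with an
# assumed batch of 2 samples per forward pass
print(forwards_per_second * 2)  # ≈ 53.08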
requirements.txt CHANGED
@@ -1 +1 @@
-optimum-benchmark[openvino,onnxruntime,ipex]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
+optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
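Dropping the onnxruntime and ipex extras leaves only the OpenVINO dependency chain to install; the same pin can be reproduced locally with pip install "optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report", still tracking the markdown-report branch.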