IlyasMoutawwakil committed
Commit db435b4
1 Parent(s): 7f9a235
Files changed (4)
  1. README.md +1 -1
  2. app.py +12 -12
  3. requirements.txt +2 -2
  4. run.py +26 -9
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🌍
 colorFrom: purple
 colorTo: indigo
 sdk: gradio
-sdk_version: 3.43.2
+sdk_version: 3.41
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -16,13 +16,18 @@ from configs import (
 
 BACKENDS = ["pytorch", "onnxruntime", "openvino", "neural-compressor"]
 BENCHMARKS = ["inference", "training"]
+DEVICES = ["cpu", "cuda"]
 
 
 with gr.Blocks() as demo:
     # title text
-    gr.HTML("<h1 style='text-align: center'>🤗 Optimum Benchmark UI 🏋️</h1>")
+    gr.HTML("<h1 style='text-align: center'>🤗 Optimum-Benchmark UI 🏋️</h1>")
     # explanation text
-    gr.Markdown("This is a demo space of [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark.git).")
+    gr.Markdown(
+        "This is a demo space of [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark.git):"
+        "<br>A unified multi-backend utility for benchmarking `transformers`, `diffusers`, `peft` and `timm` models with "
+        "Optimum's optimizations & quantization, for inference & training, on different backends & hardwares."
+    )
 
     model = gr.Textbox(
         label="model",
@@ -36,7 +41,7 @@ with gr.Blocks() as demo:
     device = gr.Dropdown(
         value="cpu",
         label="device",
-        choices=["cpu", "cuda"],
+        choices=DEVICES,
     )
     experiment = gr.Textbox(
         label="experiment_name",
@@ -92,8 +97,9 @@ with gr.Blocks() as demo:
     )
 
     button = gr.Button(value="Run Benchmark", variant="primary")
-    with gr.Accordion(label="LOGS", open=True, visible=False):
-        output = gr.HTML()
+    with gr.Accordion(label="", open=True):
+        html_output = gr.HTML()
+        table_output = gr.Dataframe(visible=False)
 
     button.click(
         fn=run_benchmark,
@@ -111,14 +117,8 @@ with gr.Blocks() as demo:
             *inference_config,
             *training_config,
         },
-        outputs=output,
+        outputs=[html_output, button, table_output],
         queue=True,
     )
-    button.click(
-        inputs=[],
-        outputs=output.parent,
-        fn=lambda: gr.update(visible=True),
-    )
-
 
 demo.queue().launch()
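Note on the new wiring: with Gradio 3.41, run_benchmark can be a generator event handler, so each yield updates the HTML log, the button state, and the results table in the order listed in outputs. The snippet below is only an illustrative sketch of that pattern (fake_benchmark and its components are hypothetical, not the space's code), assuming Gradio 3.41's gr.update API:

import time
import gradio as gr

def fake_benchmark():
    # Generator handler: each yield maps, in order, to (html, button, table).
    yield gr.update(value="<pre>benchmark started...</pre>"), gr.update(interactive=False), gr.update(visible=False)
    time.sleep(1)
    # Final yield: re-enable the button and reveal the table with some rows.
    yield gr.update(value="<pre>benchmark finished.</pre>"), gr.update(interactive=True), gr.update(
        visible=True, value=[["latency (s)", 0.1]]
    )

with gr.Blocks() as demo:
    button = gr.Button(value="Run Benchmark", variant="primary")
    html = gr.HTML()
    table = gr.Dataframe(visible=False)
    button.click(fn=fake_benchmark, inputs=[], outputs=[html, button, table], queue=True)

demo.queue().launch()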
requirements.txt CHANGED
@@ -1,3 +1,3 @@
-gradio
-ansi2html
+gradio==3.41
+ansi2html==1.8.0
 optimum-benchmark[onnxruntime,openvino,neural-compressor,diffusers,peft]@git+https://github.com/huggingface/optimum-benchmark.git
run.py CHANGED
@@ -1,6 +1,6 @@
-import pprint
 import subprocess
 import gradio as gr
+import pandas as pd
 from ansi2html import Ansi2HTMLConverter
 
 ansi2html_converter = Ansi2HTMLConverter(inline=True)
@@ -53,7 +53,8 @@ def run_benchmark(kwargs):
         else:
             arguments.append(f"{label}={value}")
 
-    pprint.pprint(arguments)
+    command = " ".join(arguments)
+    yield gr.update(value=command), gr.update(interactive=False), gr.update(visible=False)
 
     # stream subprocess output
     process = subprocess.Popen(
@@ -65,15 +66,31 @@
 
     ansi_text = ""
     for ansi_line in iter(process.stdout.readline, ""):
+        # stream process output to stdout
+        print(ansi_line, end="")
+
+        # skip torch.distributed.nn.jit.instantiator messages
         if "torch.distributed.nn.jit.instantiator" in ansi_line:
             continue
-        # stream process output
-        print(ansi_line, end="")
-        # append line to ansi text
-        ansi_text += ansi_line
+        # if the last message is a download message (contains "Downloading ") then remove it and replace it with a new one
+        if "Downloading " in ansi_text and "Downloading " in ansi_line:
+            ansi_text = ansi_text.split("\n")[:-2]
+            print(ansi_text)
+            ansi_text.append(ansi_line)
+            ansi_text = "\n".join(ansi_text)
+        else:
+            # append line to ansi text
+            ansi_text += ansi_line
         # convert ansi to html
         html_text = ansi2html_converter.convert(ansi_text)
-        # stream html output
-        yield html_text
+        # stream html output to gradio
+        yield gr.update(value=html_text), gr.update(interactive=False), gr.update(visible=False)
 
-    return html_text
+    # read runs/{experiment_name}/{benchmark}_results.csv
+
+    table = pd.read_csv(f"runs/{experiment_name}/{benchmark}_results.csv", index_col=0)
+
+    print(table.to_dict("records"))
+    yield gr.update(value=html_text), gr.update(interactive=True), gr.Dataframe.update(
+        visible=True, value={"headers": list(table.columns), "data": table.values.tolist()}
+    )
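For context, the streaming pattern that run.py builds on can be reproduced in isolation: read a subprocess's merged stdout/stderr line by line, accumulate the ANSI text, and re-convert it to HTML after every line. This is a minimal sketch under the same assumptions (ansi2html installed); stream_ansi_as_html and the echo command are illustrative, not part of the commit:

import subprocess
from ansi2html import Ansi2HTMLConverter

converter = Ansi2HTMLConverter(inline=True)

def stream_ansi_as_html(args):
    # Merge stderr into stdout so a single stream carries all of the process logs.
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
    )
    ansi_text = ""
    for line in iter(process.stdout.readline, ""):
        ansi_text += line
        # Re-convert the accumulated ANSI text to inline-styled HTML on every new line.
        yield converter.convert(ansi_text)
    process.wait()

# Usage: stream the output of a short command and report the growing HTML.
for html in stream_ansi_as_html(["echo", "hello"]):
    print(len(html), "characters of HTML so far")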