import torch
import random
import gradio as gr
from optimum_benchmark.task_utils import (
    TASKS_TO_AUTOMODELS,
    infer_task_from_model_name_or_path,
)
from run import run_benchmark
from config_store import (
    get_training_config,
    get_inference_config,
    get_neural_compressor_config,
    get_onnxruntime_config,
    get_openvino_config,
    get_pytorch_config,
)
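
# only offer CUDA as a target device when torch can actually see a GPU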
cuda_available = torch.cuda.is_available()
BACKENDS = ["pytorch", "onnxruntime", "openvino", "neural-compressor"]
DEVICES = ["cpu", "cuda"] if cuda_available else ["cpu"]
BENCHMARKS = ["inference", "training"]
with gr.Blocks() as demo:
    # add image
    gr.Markdown(
        """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
    )
    # title text
    gr.Markdown("<h1 style='text-align: center'>🤗 Optimum-Benchmark UI 🏋️</h1>")
    # explanation text
    gr.HTML(
        "<h3 style='text-align: center'>"
        "A zero-code Gradio interface for <a href='https://github.com/huggingface/optimum-benchmark.git'>Optimum-Benchmark</a>:<br>"
        "</h3>"
        "<p style='text-align: center'>"
        "A unified multi-backend utility for benchmarking any model on the Hub, including Transformers, Diffusers, PEFT and Timm models, with "
        "Optimum's arsenal of optimizations & quantization schemes, for both inference & training, on multiple backends & hardware."
        "</p>"
    )
    model = gr.Textbox(
        label="model",
        value="distilbert-base-uncased-finetuned-sst-2-english",
        info="Model to run the benchmark on. Press enter to infer the task automatically.",
    )
    task = gr.Dropdown(
        label="task",
        value="text-classification",
        choices=list(TASKS_TO_AUTOMODELS.keys()),
        info="Task to run the benchmark on. Can be inferred automatically by submitting a model.",
    )
    device = gr.Dropdown(
        value="cpu",
        label="device",
        choices=DEVICES,
        info="Device to run the benchmark on. Make sure to duplicate the Space if you want to run on CUDA devices.",
    )
    experiment = gr.Textbox(
        label="experiment_name",
        value=f"awesome-experiment-{random.randint(0, 1000)}",
        info="Name of the experiment. Will be used to create a folder where results are stored.",
    )
    model.submit(fn=infer_task_from_model_name_or_path, inputs=model, outputs=task)
    with gr.Row():
        with gr.Column():
            with gr.Row():
                backend = gr.Dropdown(
                    label="backend",
                    choices=BACKENDS,
                    value=BACKENDS[0],
                    info="Backend to run the benchmark on.",
                )
            with gr.Row() as backend_configs:
                with gr.Accordion(label="PyTorch Config", open=False, visible=True):
                    pytorch_config = get_pytorch_config()
                with gr.Accordion(label="ONNX Runtime Config", open=False, visible=False):
                    onnxruntime_config = get_onnxruntime_config()
                with gr.Accordion(label="OpenVINO Config", open=False, visible=False):
                    openvino_config = get_openvino_config()
                with gr.Accordion(label="Neural Compressor Config", open=False, visible=False):
                    neural_compressor_config = get_neural_compressor_config()
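            # the accordions above are declared in the same order as BACKENDS, so the
            # lambda below can map the selected backend to the visibility of each child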
            # hide backend configs based on backend
            backend.change(
                inputs=backend,
                outputs=backend_configs.children,
                fn=lambda value: [gr.update(visible=value == key) for key in BACKENDS],
            )
        with gr.Column():
            with gr.Row():
                benchmark = gr.Dropdown(
                    label="benchmark",
                    choices=BENCHMARKS,
                    value=BENCHMARKS[0],
                    info="Type of benchmark to run.",
                )
            with gr.Row() as benchmark_configs:
                with gr.Accordion(label="Inference Config", open=False, visible=True):
                    inference_config = get_inference_config()
                with gr.Accordion(label="Training Config", open=False, visible=False):
                    training_config = get_training_config()
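            # same trick as for backends: accordion order must match BENCHMARKS order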
            # hide benchmark configs based on benchmark
            benchmark.change(
                inputs=benchmark,
                outputs=benchmark_configs.children,
                fn=lambda value: [gr.update(visible=value == key) for key in BENCHMARKS],
            )
    baseline = gr.Checkbox(
        value=False,
        label="Compare to Baseline",
        info="If checked, will run two experiments: one with the given configuration, and another with a baseline PyTorch configuration.",
    )
    button = gr.Button(value="Run Benchmark", variant="primary")
    with gr.Accordion(label="", open=True):
        html_output = gr.HTML()
        table_output = gr.Dataframe(visible=False)
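    # passing a set (rather than a list) as `inputs` makes Gradio call run_benchmark with a
    # single dict mapping each component to its value, so component order doesn't matter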
    button.click(
        fn=run_benchmark,
        inputs={
            experiment,
            baseline,
            model,
            task,
            device,
            backend,
            benchmark,
            *pytorch_config,
            *openvino_config,
            *onnxruntime_config,
            *neural_compressor_config,
            *inference_config,
            *training_config,
        },
        outputs=[html_output, button, table_output],
        queue=True,
    )
demo.queue().launch()