{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "12475cb656b1f369db810190724d8affe3522bc1",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.30.1",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1121.169408,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.7336910589999945,
"mean": 4.7336910589999945,
"stdev": 0.0,
"p50": 4.7336910589999945,
"p90": 4.7336910589999945,
"p95": 4.7336910589999945,
"p99": 4.7336910589999945,
"values": [
4.7336910589999945
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.807849121111076e-05,
"ram": 2.845506208455407e-06,
"gpu": 0,
"total": 7.092399741956617e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 974.381056,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 16,
"total": 0.6287286180000251,
"mean": 0.03929553862500157,
"stdev": 0.0007628121669516826,
"p50": 0.039372347500020055,
"p90": 0.03983358499998246,
"p95": 0.04020480399998405,
"p99": 0.04102485760000718,
"values": [
0.039270236999982444,
0.039325650000023415,
0.03970791599999757,
0.03896754100000521,
0.03980405499999051,
0.039863114999974414,
0.039419045000016695,
0.03951648800000385,
0.0394958080000265,
0.03955844600000091,
0.03907073399997785,
0.03924055100000601,
0.04122987100001296,
0.038326753000006875,
0.03803288399998905,
0.03789952400001084
]
},
"throughput": {
"unit": "tokens/s",
"value": 50.89636304736923
},
"energy": {
"unit": "kWh",
"cpu": 1.5106079438034397e-06,
"ram": 6.313100509769062e-08,
"gpu": 0.0,
"total": 1.5737389489011305e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1270858.805011154
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 975.29856,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 16,
"total": 0.3905487639999592,
"mean": 0.02440929774999745,
"stdev": 0.000685287254153744,
"p50": 0.02436128349999933,
"p90": 0.025342640499999902,
"p95": 0.025804146249988946,
"p99": 0.025827993249994564,
"values": [
0.0248910710000132,
0.025833954999995967,
0.02416591499999754,
0.02426400900000658,
0.024668112999989944,
0.024499008000020694,
0.024336693999998715,
0.0243270260000088,
0.024502394999984745,
0.024648847999998225,
0.023942787999999382,
0.025794209999986606,
0.02343244399997957,
0.023388243999988845,
0.024385872999999947,
0.023468170999990434
]
},
"throughput": {
"unit": "tokens/s",
"value": 40.967995484429885
},
"energy": {
"unit": "kWh",
"cpu": 8.944844780919754e-07,
"ram": 3.7383982393635586e-08,
"gpu": 0.0,
"total": 9.318684604856102e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1073112.8291206309
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 16,
"total": 0.3851794739999548,
"mean": 0.024073717124997174,
"stdev": 0.000656511915934345,
"p50": 0.024034392000004345,
"p90": 0.024919198500001016,
"p95": 0.025356143499990935,
"p99": 0.02547519669998053,
"values": [
0.02453185900000676,
0.025504959999977928,
0.023846578000018326,
0.023934983999993165,
0.02433552200000122,
0.024165645000010727,
0.024012377999980572,
0.024013560000014422,
0.024154493999986926,
0.024315284000010706,
0.02362206899999819,
0.02530653799999527,
0.023129176999987067,
0.023083743999990247,
0.02405522399999427,
0.023167457999988983
]
},
"throughput": {
"unit": "tokens/s",
"value": 41.539077443160636
},
"energy": null,
"efficiency": null
}
}
}