{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "5ebc3e9d50d422174abacffe7091e4c9e1a49815",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.0",
"accelerate_commit": null,
"diffusers_version": "0.30.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1120.948224,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.746606008000015,
"mean": 4.746606008000015,
"stdev": 0.0,
"p50": 4.746606008000015,
"p90": 4.746606008000015,
"p95": 4.746606008000015,
"p99": 4.746606008000015,
"values": [
4.746606008000015
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.760749053333276e-05,
"ram": 2.8258072613474767e-06,
"gpu": 0,
"total": 7.043329779468024e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 974.548992,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6256556540000702,
"mean": 0.04171037693333801,
"stdev": 0.0009776887766654106,
"p50": 0.04190900899999406,
"p90": 0.04259780999998384,
"p95": 0.04301120789997981,
"p99": 0.04359156558002155,
"values": [
0.042001421999998456,
0.042444072000023425,
0.04189146500004881,
0.04235569600001554,
0.042700301999957446,
0.04189376100003983,
0.042130903999975544,
0.04190900899999406,
0.04200903600002448,
0.04118911999995589,
0.04373665500003199,
0.040308428000003005,
0.04026586800000587,
0.04066981500000111,
0.04015010099999472
]
},
"throughput": {
"unit": "tokens/s",
"value": 47.94969854135872
},
"energy": {
"unit": "kWh",
"cpu": 1.6133171988889344e-06,
"ram": 6.74237583848416e-08,
"gpu": 0.0,
"total": 1.680740957273776e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1189951.3671899054
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 975.335424,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.3820947809999211,
"mean": 0.025472985399994742,
"stdev": 0.0004732101605278633,
"p50": 0.025562239000009868,
"p90": 0.02606655160000173,
"p95": 0.026281922099997246,
"p99": 0.026317609219995576,
"values": [
0.02626280399999814,
0.025716708000004473,
0.025516092999964712,
0.025772173000007115,
0.026326530999995157,
0.025441481999962434,
0.025562239000009868,
0.02567008099998702,
0.02525009300001102,
0.025647829000035927,
0.025608906000002207,
0.02491128799999842,
0.024826578999977755,
0.024639868999997816,
0.024942105999969044
]
},
"throughput": {
"unit": "tokens/s",
"value": 39.25727527799731
},
"energy": {
"unit": "kWh",
"cpu": 9.637455955555963e-07,
"ram": 4.0277884833904666e-08,
"gpu": 0.0,
"total": 1.004023480389501e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 995992.6431322701
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.3770171239999627,
"mean": 0.025134474933330844,
"stdev": 0.00046663334878078083,
"p50": 0.02521683100002292,
"p90": 0.02571559979999165,
"p95": 0.025932185100003835,
"p99": 0.025979744220029487,
"values": [
0.025906706999990092,
0.025373083999966184,
0.025185002000000623,
0.02542893899999399,
0.0259916340000359,
0.025088780999965365,
0.02521683100002292,
0.02532551499996316,
0.024913883000010628,
0.025281812999992326,
0.025290238999957637,
0.02459578700000975,
0.024470101000019895,
0.02432278500003804,
0.024626022999996167
]
},
"throughput": {
"unit": "tokens/s",
"value": 39.78599125911715
},
"energy": null,
"efficiency": null
}
}
}