{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "778fa3dbf39228b4d5e8f6de2e971dc51a6becb0",
"transformers_version": "4.44.1",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.30.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.8",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1120.096256,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.730805744999998,
"mean": 4.730805744999998,
"stdev": 0.0,
"p50": 4.730805744999998,
"p90": 4.730805744999998,
"p95": 4.730805744999998,
"p99": 4.730805744999998,
"values": [
4.730805744999998
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.719407877777729e-05,
"ram": 2.8085045348813776e-06,
"gpu": 0,
"total": 7.000258331265867e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 973.574144,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6482598809999729,
"mean": 0.043217325399998195,
"stdev": 0.001804969916245532,
"p50": 0.04400004800001511,
"p90": 0.04498259799999573,
"p95": 0.04502728599999841,
"p99": 0.045027622000010294,
"values": [
0.04502770600001327,
0.044804798000001256,
0.04389168500000551,
0.0448903089999817,
0.04402539600002342,
0.045027105999992045,
0.044915836000001264,
0.04416878500001076,
0.04400004800001511,
0.040858846999981324,
0.040387000999999145,
0.04086823399998707,
0.0406872829999827,
0.04385363299999767,
0.04085321399998065
]
},
"throughput": {
"unit": "tokens/s",
"value": 46.2777365671982
},
"energy": {
"unit": "kWh",
"cpu": 1.593157880222192e-06,
"ram": 6.658095551926835e-08,
"gpu": 0.0,
"total": 1.6597388357414604e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1205008.8585813765
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 974.098432,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.3948358519999431,
"mean": 0.02632239013332954,
"stdev": 0.001425223211492719,
"p50": 0.027020867999993925,
"p90": 0.027608359999999266,
"p95": 0.02783656870000186,
"p99": 0.028212624939994838,
"values": [
0.0274584389999859,
0.02727630699999395,
0.027349964999984877,
0.027635110000005625,
0.026918936999976495,
0.028306638999993083,
0.027568234999989727,
0.027506579000004194,
0.027020867999993925,
0.0241827540000088,
0.02458380699999907,
0.02464380800000754,
0.02518160700000749,
0.024836378999992803,
0.024366417999999612
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.99047103757478
},
"energy": {
"unit": "kWh",
"cpu": 9.59169804152822e-07,
"ram": 4.0087422363177634e-08,
"gpu": 0.0,
"total": 9.992572265159989e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1000743.3256065516
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.3887299399999904,
"mean": 0.025915329333332692,
"stdev": 0.0013438876522084175,
"p50": 0.02659673299999099,
"p90": 0.02711392420000607,
"p95": 0.027330685600006177,
"p99": 0.02771640351998883,
"values": [
0.02699183400000038,
0.026793400999991945,
0.026884252000002107,
0.02712405100001547,
0.02645589899998413,
0.027812832999984494,
0.027098733999991964,
0.02703150799999321,
0.02659673299999099,
0.023888862999996263,
0.02429071700001373,
0.02434644999999591,
0.024815791000008858,
0.02454331900000284,
0.024055555000018103
]
},
"throughput": {
"unit": "tokens/s",
"value": 38.58720015237409
},
"energy": null,
"efficiency": null
}
}
}