{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1024-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "8d3ce5d99bc670ee9136c4d65ae8c25ef9d8beae",
"transformers_version": "4.43.3",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.29.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.8",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1121.390592,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 5.047363528000005,
"mean": 5.047363528000005,
"stdev": 0.0,
"p50": 5.047363528000005,
"p90": 5.047363528000005,
"p95": 5.047363528000005,
"p99": 5.047363528000005,
"values": [
5.047363528000005
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 7.065001858605279e-05,
"ram": 2.952989102345782e-06,
"gpu": 0,
"total": 7.360300768839858e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 975.007744,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6600282380000522,
"mean": 0.04400188253333681,
"stdev": 0.0011281233997716032,
"p50": 0.04407160099998464,
"p90": 0.045205096199981656,
"p95": 0.04592763769999237,
"p99": 0.046628124340018076,
"values": [
0.043234446000042226,
0.0468032460000245,
0.04373368900002106,
0.04307861399996682,
0.04407160099998464,
0.04439621699998497,
0.043544233999966764,
0.044684174999986226,
0.04417950100003054,
0.04455948200001103,
0.044182157000022926,
0.043203558000016073,
0.045552376999978605,
0.0428273850000096,
0.04197755600000619
]
},
"throughput": {
"unit": "tokens/s",
"value": 45.45260077190459
},
"energy": {
"unit": "kWh",
"cpu": 1.6954402416800532e-06,
"ram": 7.085504438919088e-08,
"gpu": 0.0,
"total": 1.766295286069244e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1132313.5014705542
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 975.794176,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.40362710599987395,
"mean": 0.02690847373332493,
"stdev": 0.0005595103178323504,
"p50": 0.027019590000008975,
"p90": 0.02740223259997947,
"p95": 0.027424443999973392,
"p99": 0.027457355199978793,
"values": [
0.027406812999970498,
0.02739536199999293,
0.026768049999986943,
0.027019590000008975,
0.027215324999986024,
0.02726252399997975,
0.026982972000041627,
0.026647645000025477,
0.026984864999974434,
0.026919692999967992,
0.027159871999970164,
0.027114978999975392,
0.02599878199998784,
0.027465582999980143,
0.02528505100002576
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.16301451767386
},
"energy": {
"unit": "kWh",
"cpu": 9.702026556079516e-07,
"ram": 4.0547483242107776e-08,
"gpu": 0.0,
"total": 1.0107501388500593e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 989364.1975035593
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.39790653400001474,
"mean": 0.02652710226666765,
"stdev": 0.0005594315636376912,
"p50": 0.02662390000000414,
"p90": 0.027025588999993033,
"p95": 0.027053111400005037,
"p99": 0.027100424679988463,
"values": [
0.02702232499996171,
0.027027765000013915,
0.026399130000015703,
0.026654127000028893,
0.026857806999998957,
0.026895318000015322,
0.02661873099998502,
0.026232850000042163,
0.02662390000000414,
0.0265500219999808,
0.02680280499998844,
0.026559830999985934,
0.025635652999994818,
0.02711225299998432,
0.02491401700001461
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.69729501350547
},
"energy": null,
"efficiency": null
}
}
}