{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.5.1+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 2,
"sequence_length": 16
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.15",
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": "9104793fa9ba932870d9ceb029cd628a1388b11e",
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
},
"print_report": true,
"log_report": true
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1128.972288,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
1.721552040000006
],
"count": 1,
"total": 1.721552040000006,
"mean": 1.721552040000006,
"p50": 1.721552040000006,
"p90": 1.721552040000006,
"p95": 1.721552040000006,
"p99": 1.721552040000006,
"stdev": 0,
"stdev_": 0
},
"throughput": null,
"energy": null,
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 1016.725504,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.11006252300001051,
0.11751415600002701,
0.11319680099995821,
0.11079609900002652,
0.1102337439999701,
0.10874600099998588,
0.11641410300001098
],
"count": 7,
"total": 0.7869634269999892,
"mean": 0.11242334671428418,
"p50": 0.11079609900002652,
"p90": 0.1168541242000174,
"p95": 0.1171841401000222,
"p99": 0.11744815282002605,
"stdev": 0.0031391391388253097,
"stdev_": 2.7922484346629575
},
"throughput": {
"unit": "tokens/s",
"value": 284.6383863782822
},
"energy": {
"unit": "kWh",
"cpu": 4.0603973161111325e-06,
"ram": 1.6969213605655676e-07,
"gpu": 0.0,
"total": 4.230089452167689e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 7564851.845769303
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 1018.69568,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"values": [
0.047193404999973154,
0.04853139299996201,
0.04775638000000981,
0.04997743500001661,
0.0491628080000055,
0.04247845400004735,
0.04265343099996244
],
"count": 7,
"total": 0.3277533059999769,
"mean": 0.04682190085713955,
"p50": 0.04775638000000981,
"p90": 0.049488658800009945,
"p95": 0.049733046900013275,
"p99": 0.04992855738001595,
"stdev": 0.0028186253871583717,
"stdev_": 6.019886710192328
},
"throughput": {
"unit": "tokens/s",
"value": 42.71505349819717
},
"energy": {
"unit": "kWh",
"cpu": 1.6383290894445212e-06,
"ram": 6.846769303490792e-08,
"gpu": 0.0,
"total": 1.7067967824794315e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1171785.6633726703
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"values": [
0.04592370399996071,
0.047344837999958145,
0.04652628300004835,
0.04877996999999823,
0.04790436800004727,
0.041267513000036615,
0.041708228999993935
],
"count": 7,
"total": 0.31945490500004325,
"mean": 0.04563641500000618,
"p50": 0.04652628300004835,
"p90": 0.04825460880002765,
"p95": 0.048517289400012945,
"p99": 0.04872743388000117,
"stdev": 0.0027603469750366906,
"stdev_": 6.048562261159902
},
"throughput": {
"unit": "tokens/s",
"value": 43.824651870654804
},
"energy": null,
"efficiency": null
}
}
}