Commit 0aff802 (verified) by IlyasMoutawwakil: Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub
{
    "config": {
        "name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
        "backend": {
            "name": "pytorch",
            "version": "2.3.0+cpu",
            "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
            "task": "text-generation",
            "library": "transformers",
            "model": "openai-community/gpt2",
            "processor": "openai-community/gpt2",
            "device": "cpu",
            "device_ids": null,
            "seed": 42,
            "inter_op_num_threads": null,
            "intra_op_num_threads": null,
            "model_kwargs": {},
            "processor_kwargs": {},
            "hub_kwargs": {},
            "no_weights": true,
            "device_map": null,
            "torch_dtype": null,
            "eval_mode": true,
            "to_bettertransformer": false,
            "low_cpu_mem_usage": null,
            "attn_implementation": null,
            "cache_implementation": null,
            "autocast_enabled": false,
            "autocast_dtype": null,
            "torch_compile": false,
            "torch_compile_target": "forward",
            "torch_compile_config": {},
            "quantization_scheme": null,
            "quantization_config": {},
            "deepspeed_inference": false,
            "deepspeed_inference_config": {},
            "peft_type": null,
            "peft_config": {}
        },
        "scenario": {
            "name": "inference",
            "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
            "iterations": 1,
            "duration": 1,
            "warmup_runs": 1,
            "input_shapes": {
                "batch_size": 1,
                "num_choices": 2,
                "sequence_length": 2
            },
            "new_tokens": null,
            "latency": true,
            "memory": true,
            "energy": true,
            "forward_kwargs": {},
            "generate_kwargs": {
                "max_new_tokens": 2,
                "min_new_tokens": 2
            },
            "call_kwargs": {
                "num_inference_steps": 2
            }
        },
        "launcher": {
            "name": "process",
            "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
            "device_isolation": false,
            "device_isolation_action": "error",
            "start_method": "spawn"
        },
        "environment": {
            "cpu": " AMD EPYC 7763 64-Core Processor",
            "cpu_count": 4,
            "cpu_ram_mb": 16757.354496,
            "system": "Linux",
            "machine": "x86_64",
            "platform": "Linux-6.5.0-1021-azure-x86_64-with-glibc2.35",
            "processor": "x86_64",
            "python_version": "3.10.14",
            "optimum_benchmark_version": "0.2.1",
            "optimum_benchmark_commit": "0b24af9d7b7751f74b160dfade73ef78e10964d6",
            "transformers_version": "4.40.2",
            "transformers_commit": null,
            "accelerate_version": "0.30.1",
            "accelerate_commit": null,
            "diffusers_version": "0.27.2",
            "diffusers_commit": null,
            "optimum_version": null,
            "optimum_commit": null,
            "timm_version": "1.0.3",
            "timm_commit": null,
            "peft_version": null,
            "peft_commit": null
        }
    },
    "report": {
        "prefill": {
            "memory": {
                "unit": "MB",
                "max_ram": 945.07008,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 13,
                "total": 0.6660168779999651,
                "mean": 0.05123206753845885,
                "stdev": 0.0013537270955456635,
                "p50": 0.051345710999981975,
                "p90": 0.052579589600003376,
                "p95": 0.053123269000002436,
                "p99": 0.05369348500000342,
                "values": [
                    0.05264808900000162,
                    0.053836039000003666,
                    0.05033808999999678,
                    0.0520746570000199,
                    0.05156406399999014,
                    0.04888016900000025,
                    0.051345710999981975,
                    0.052204220999982454,
                    0.050188006000013274,
                    0.04962437299997191,
                    0.05122551499999872,
                    0.049782351999994034,
                    0.05230559200001039
                ]
            },
            "throughput": {
                "unit": "tokens/s",
                "value": 39.03804972342062
            },
            "energy": {
                "unit": "kWh",
                "cpu": 1.7559961996216708e-06,
                "ram": 7.338199189919322e-08,
                "gpu": 0.0,
                "total": 1.829378191520864e-06
            },
            "efficiency": {
                "unit": "tokens/kWh",
                "value": 1093267.6519650037
            }
        },
        "decode": {
            "memory": {
                "unit": "MB",
                "max_ram": 945.07008,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 13,
                "total": 0.4163931050000258,
                "mean": 0.03203023884615583,
                "stdev": 0.0005299569319006806,
                "p50": 0.0321998800000074,
                "p90": 0.03258501220000767,
                "p95": 0.03261922660000778,
                "p99": 0.03264701811999998,
                "values": [
                    0.032391270999994504,
                    0.032653965999998036,
                    0.032371984999997494,
                    0.03254079299998125,
                    0.031109482000005073,
                    0.031729321000000255,
                    0.03259606700001427,
                    0.03239731200000051,
                    0.03109188999999901,
                    0.03193028100000106,
                    0.03136576600002172,
                    0.032015091000005214,
                    0.0321998800000074
                ]
            },
            "throughput": {
                "unit": "tokens/s",
                "value": 31.220497755358355
            },
            "energy": {
                "unit": "kWh",
                "cpu": 9.796366791410338e-07,
                "ram": 4.09472447039755e-08,
                "gpu": 0.0,
                "total": 1.020583923845009e-06
            },
            "efficiency": {
                "unit": "tokens/kWh",
                "value": 979831.2286093436
            }
        },
        "per_token": {
            "memory": null,
            "latency": {
                "unit": "s",
                "count": 12,
                "total": 0.997634010000013,
                "mean": 0.08313616750000108,
                "stdev": 0.001688979378226582,
                "p50": 0.08270934799999452,
                "p90": 0.08467333029999793,
                "p95": 0.08553187815000171,
                "p99": 0.08637099643000454,
                "values": [
                    0.08658077600000524,
                    0.0826527970000086,
                    0.08467368899999883,
                    0.08272227800000564,
                    0.08059725799998319,
                    0.08393735000001357,
                    0.08467010199998981,
                    0.08133652300000449,
                    0.08145419600000992,
                    0.0826964179999834,
                    0.08176083400002199,
                    0.08455178899998828
                ]
            },
            "throughput": {
                "unit": "tokens/s",
                "value": 12.028459214216088
            },
            "energy": null,
            "efficiency": null
        }
    }
}
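
The summary statistics in the "report" section are derived from the raw per-iteration measurements stored under "values". Below is a minimal sketch of reading the file and recomputing them, assuming a local copy of benchmark.json (for example obtained with huggingface_hub); the mapping of prefill throughput to batch_size * sequence_length tokens per call is an inference from the config above, not something stated in the file.

import json

import numpy as np

# Assumption: benchmark.json has been downloaded locally.
with open("benchmark.json") as f:
    benchmark = json.load(f)

prefill = benchmark["report"]["prefill"]
values = np.array(prefill["latency"]["values"])  # 13 prefill latencies, in seconds

# Recompute the summary statistics from the raw measurements.
print("mean:", values.mean())              # ~0.05123 s, matches "mean"
print("p50 :", np.percentile(values, 50))  # matches "p50"
print("p90 :", np.percentile(values, 90))  # matches "p90"

# Prefill throughput appears to be tokens processed per second at the mean
# latency; with batch_size=1 and sequence_length=2, that is 2 tokens per call
# (assumption based on the scenario config).
shapes = benchmark["config"]["scenario"]["input_shapes"]
tokens_per_prefill = shapes["batch_size"] * shapes["sequence_length"]
print("throughput:", tokens_per_prefill / values.mean())  # ~39.04 tokens/s

The same recomputation applies to the "decode" and "per_token" sections; only the raw values and the token count per measurement differ.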
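To rerun a comparable CPU benchmark, the optimum-benchmark Python API can be driven with the same backend, scenario, and launcher settings recorded in the "config" block. This is a sketch based on the class names in the project's README (Benchmark, BenchmarkConfig, PyTorchConfig, InferenceConfig, ProcessConfig); exact import locations, accepted fields, and export helpers may differ between versions, so verify against the installed release (0.2.1 here) before relying on it.

# Sketch only: class names follow the optimum-benchmark README; field names
# mirror the config block above but are not guaranteed to match every version.
from optimum_benchmark import (
    Benchmark,
    BenchmarkConfig,
    InferenceConfig,
    ProcessConfig,
    PyTorchConfig,
)

if __name__ == "__main__":
    backend_config = PyTorchConfig(
        model="openai-community/gpt2",
        device="cpu",
        no_weights=True,  # benchmark with randomly initialized weights, as in the config above
    )
    scenario_config = InferenceConfig(
        latency=True,
        memory=True,
        energy=True,
        input_shapes={"batch_size": 1, "sequence_length": 2},
        generate_kwargs={"max_new_tokens": 2, "min_new_tokens": 2},
    )
    launcher_config = ProcessConfig(device_isolation=False, start_method="spawn")

    benchmark_config = BenchmarkConfig(
        name="cpu_inference_transformers_text-generation_openai-community/gpt2",
        backend=backend_config,
        scenario=scenario_config,
        launcher=launcher_config,
    )
    benchmark_report = Benchmark.launch(benchmark_config)
    benchmark_report.save_json("benchmark.json")  # export helper as described in the README

Note that absolute numbers will differ from this file unless the hardware and software environment match the "environment" block (AMD EPYC 7763, 4 vCPUs, torch 2.3.0+cpu, transformers 4.40.2).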