{ "config": { "name": "cpu_inference_transformers_text-generation_openai-community/gpt2", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-generation", "library": "transformers", "model_type": "gpt2", "model": "openai-community/gpt2", "processor": "openai-community/gpt2", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "inference", "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario", "iterations": 1, "duration": 1, "warmup_runs": 1, "input_shapes": { "batch_size": 2, "sequence_length": 16 }, "new_tokens": null, "memory": true, "latency": true, "energy": true, "forward_kwargs": {}, "generate_kwargs": { "max_new_tokens": 2, "min_new_tokens": 2 }, "call_kwargs": { "num_inference_steps": 2 } }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "31aa6620675bda1ecd6e40a22ecaa03106d279d8", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }, "report": { "load": { "memory": { "unit": "MB", "max_ram": 1141.817344, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.7355103499999984 ], "count": 1, "total": 1.7355103499999984, "mean": 1.7355103499999984, "p50": 1.7355103499999984, "p90": 1.7355103499999984, "p95": 1.7355103499999984, "p99": 1.7355103499999984, "stdev": 0, "stdev_": 0 }, "throughput": null, "energy": null, "efficiency": null }, "prefill": { "memory": { "unit": "MB", "max_ram": 1021.530112, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.14835861399998862, 0.1491299559999959, 0.11902537599999619, 0.10567621199999166, 0.10334839699999065, 0.10311616399999934 ], "count": 6, "total": 0.7286547189999624, "mean": 0.1214424531666604, "p50": 0.11235079399999393, "p90": 0.14874428499999226, "p95": 0.14893712049999408, "p99": 0.14909138889999554, "stdev": 0.02003639144527361, "stdev_": 16.49867152945013 }, "throughput": { "unit": "tokens/s", "value": 263.4992884743946 }, "energy": { 
"unit": "kWh", "cpu": 4.056311898333433e-06, "ram": 1.6952006457567524e-07, "gpu": 0.0, "total": 4.2258319629091086e-06 }, "efficiency": { "unit": "tokens/kWh", "value": 7572473.368763781 } }, "decode": { "memory": { "unit": "MB", "max_ram": 1028.173824, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.04997929300000692, 0.04943147800000247, 0.04488025300000231, 0.04595785900002625, 0.04422805900000526, 0.04419826399998783 ], "count": 6, "total": 0.27867520600003104, "mean": 0.04644586766667184, "p50": 0.04541905600001428, "p90": 0.049705385500004695, "p95": 0.04984233925000581, "p99": 0.049951902250006695, "stdev": 0.0023825288706326836, "stdev_": 5.12968965879028 }, "throughput": { "unit": "tokens/s", "value": 43.06088141906196 }, "energy": { "unit": "kWh", "cpu": 1.6956798683331312e-06, "ram": 7.086599168562022e-08, "gpu": 0.0, "total": 1.766545860018749e-06 }, "efficiency": { "unit": "tokens/kWh", "value": 1132152.8895823702 } }, "per_token": { "memory": null, "latency": { "unit": "s", "values": [ 0.049486089999987826, 0.04890889999998649, 0.04451646300000789, 0.045597475000022314, 0.043872083000024986, 0.04382849199998873 ], "count": 6, "total": 0.27620950300001823, "mean": 0.04603491716666971, "p50": 0.0450569690000151, "p90": 0.04919749499998716, "p95": 0.04934179249998749, "p99": 0.04945723049998776, "stdev": 0.002317095653528907, "stdev_": 5.033343809743043 }, "throughput": { "unit": "tokens/s", "value": 43.445282909035924 }, "energy": null, "efficiency": null } } }