{
    "config": {
        "name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
        "backend": {
            "name": "pytorch",
            "version": "2.4.0+cpu",
            "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
            "task": "text-generation",
            "library": "transformers",
            "model_type": "gpt2",
            "model": "openai-community/gpt2",
            "processor": "openai-community/gpt2",
            "device": "cpu",
            "device_ids": null,
            "seed": 42,
            "inter_op_num_threads": null,
            "intra_op_num_threads": null,
            "model_kwargs": {},
            "processor_kwargs": {},
            "no_weights": true,
            "device_map": null,
            "torch_dtype": null,
            "eval_mode": true,
            "to_bettertransformer": false,
            "low_cpu_mem_usage": null,
            "attn_implementation": null,
            "cache_implementation": null,
            "autocast_enabled": false,
            "autocast_dtype": null,
            "torch_compile": false,
            "torch_compile_target": "forward",
            "torch_compile_config": {},
            "quantization_scheme": null,
            "quantization_config": {},
            "deepspeed_inference": false,
            "deepspeed_inference_config": {},
            "peft_type": null,
            "peft_config": {}
        },
        "scenario": {
            "name": "inference",
            "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
            "iterations": 1,
            "duration": 1,
            "warmup_runs": 1,
            "input_shapes": {
                "batch_size": 1,
                "num_choices": 2,
                "sequence_length": 2
            },
            "new_tokens": null,
            "memory": true,
            "latency": true,
            "energy": true,
            "forward_kwargs": {},
            "generate_kwargs": {
                "max_new_tokens": 2,
                "min_new_tokens": 2
            },
            "call_kwargs": {
                "num_inference_steps": 2
            }
        },
        "launcher": {
            "name": "process",
            "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
            "device_isolation": false,
            "device_isolation_action": "error",
            "numactl": false,
            "numactl_kwargs": {},
            "start_method": "spawn"
        },
        "environment": {
            "cpu": " AMD EPYC 7763 64-Core Processor",
            "cpu_count": 4,
            "cpu_ram_mb": 16757.342208,
            "system": "Linux",
            "machine": "x86_64",
            "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
            "processor": "x86_64",
            "python_version": "3.10.14",
            "optimum_benchmark_version": "0.4.0",
            "optimum_benchmark_commit": "900139739b058e0bc6c42842dca50b13fd103f21",
            "transformers_version": "4.44.0",
            "transformers_commit": null,
            "accelerate_version": "0.33.0",
            "accelerate_commit": null,
            "diffusers_version": "0.30.0",
            "diffusers_commit": null,
            "optimum_version": null,
            "optimum_commit": null,
            "timm_version": "1.0.8",
            "timm_commit": null,
            "peft_version": null,
            "peft_commit": null
        }
    },
    "report": {
        "load": {
            "memory": {
                "unit": "MB",
                "max_ram": 1121.091584,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 1,
                "total": 4.779187384000011,
                "mean": 4.779187384000011,
                "stdev": 0.0,
                "p50": 4.779187384000011,
                "p90": 4.779187384000011,
                "p95": 4.779187384000011,
                "p99": 4.779187384000011,
                "values": [
                    4.779187384000011
                ]
            },
            "throughput": null,
            "energy": {
                "unit": "kWh",
                "cpu": 6.750757510000023e-05,
                "ram": 2.8216453271810148e-06,
                "gpu": 0,
                "total": 7.032922042718125e-05
            },
            "efficiency": null
        },
        "prefill": {
            "memory": {
                "unit": "MB",
                "max_ram": 975.171584,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 15,
                "total": 0.6319781909998596,
                "mean": 0.04213187939999064,
                "stdev": 0.0016364541144321379,
                "p50": 0.041553751999970245,
                "p90": 0.04498161919999575,
                "p95": 0.04589397760001361,
                "p99": 0.04595744352002043,
                "values": [
                    0.04366408099997443,
                    0.04198036499997215,
                    0.041321178999965014,
                    0.04163873999999623,
                    0.041553751999970245,
                    0.0417673889999719,
                    0.04138683099995433,
                    0.04211907600000586,
                    0.04597331000002214,
                    0.04585997800000996,
                    0.04088434599998436,
                    0.04092153400000598,
                    0.04126531399998612,
                    0.040901187000031314,
                    0.0407411090000096
                ]
            },
            "throughput": {
                "unit": "tokens/s",
                "value": 47.46999252068599
            },
            "energy": {
                "unit": "kWh",
                "cpu": 1.5967076657778107e-06,
                "ram": 6.67283608434439e-08,
                "gpu": 0.0,
                "total": 1.6634360266212545e-06
            },
            "efficiency": {
                "unit": "tokens/kWh",
                "value": 1202330.5783886195
            }
        },
        "decode": {
            "memory": {
                "unit": "MB",
                "max_ram": 975.302656,
                "max_global_vram": null,
                "max_process_vram": null,
                "max_reserved": null,
                "max_allocated": null
            },
            "latency": {
                "unit": "s",
                "count": 15,
                "total": 0.39296296899993877,
                "mean": 0.026197531266662584,
                "stdev": 0.0013209150436298558,
                "p50": 0.02577200999996876,
                "p90": 0.0286828657999763,
                "p95": 0.02889151949997313,
                "p99": 0.028899188699977003,
                "values": [
                    0.025824559000000136,
                    0.026104780000025585,
                    0.025549616000034803,
                    0.025383145999967383,
                    0.0260871269999825,
                    0.025356907999992018,
                    0.026030171000002156,
                    0.028901105999977972,
                    0.028376047999984166,
                    0.028887410999971053,
                    0.024709422000000814,
                    0.02571910200003913,
                    0.025173365999989983,
                    0.02508819700000231,
                    0.02577200999996876
                ]
            },
            "throughput": {
                "unit": "tokens/s",
                "value": 38.17153570010394
            },
            "energy": {
                "unit": "kWh",
                "cpu": 9.310908203331984e-07,
                "ram": 3.891354804507477e-08,
                "gpu": 0.0,
                "total": 9.70004368378274e-07
            },
            "efficiency": {
                "unit": "tokens/kWh",
                "value": 1030923.1923067265
            }
        },
        "per_token": {
            "memory": null,
            "latency": {
                "unit": "s",
                "count": 15,
                "total": 0.38737838299999794,
                "mean": 0.025825225533333197,
                "stdev": 0.001281153658841692,
                "p50": 0.025431014999981016,
                "p90": 0.028174532999980784,
                "p95": 0.028418578199989497,
                "p99": 0.02851887644001181,
                "values": [
                    0.02543931100001373,
                    0.02575555999999324,
                    0.02520558600002687,
                    0.02502677299997913,
                    0.025697761999992963,
                    0.025008619999994153,
                    0.025685900000041784,
                    0.028364846999977544,
                    0.027889061999985643,
                    0.028543951000017387,
                    0.024355683999999655,
                    0.02538805599999705,
                    0.024845405999997183,
                    0.02474085000000059,
                    0.025431014999981016
                ]
            },
            "throughput": {
                "unit": "tokens/s",
                "value": 38.721830278278794
            },
            "energy": null,
            "efficiency": null
        }
    }
}