{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "1184dccbee75a8580e1bb5b5a4484cd8c18bf2b0",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.30.1",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1122.680832,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.854445980999969,
"mean": 4.854445980999969,
"stdev": 0.0,
"p50": 4.854445980999969,
"p90": 4.854445980999969,
"p95": 4.854445980999969,
"p99": 4.854445980999969,
"values": [
4.854445980999969
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.803281123333516e-05,
"ram": 2.8436015418870465e-06,
"gpu": 0,
"total": 7.087641277522221e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 975.900672,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6502646009998898,
"mean": 0.04335097339999265,
"stdev": 0.0022802032663954376,
"p50": 0.04433920699995042,
"p90": 0.04570921699998962,
"p95": 0.045892653999987945,
"p99": 0.045937582799977006,
"values": [
0.04538911200000939,
0.0458685849999938,
0.04433920699995042,
0.045470164999983353,
0.0451127329999963,
0.04510473700003104,
0.045948814999974275,
0.045215870999982144,
0.043355172999952174,
0.04092621600000257,
0.03962544999995998,
0.040447275000019545,
0.039778477000027124,
0.04160371000000396,
0.04207907500000374
]
},
"throughput": {
"unit": "tokens/s",
"value": 46.135065562341886
},
"energy": {
"unit": "kWh",
"cpu": 1.585308693111049e-06,
"ram": 6.625317908471332e-08,
"gpu": 0.0,
"total": 1.6515618721957622e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1210974.9163323727
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 976.818176,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.40057570199996917,
"mean": 0.026705046799997946,
"stdev": 0.0016660367056711065,
"p50": 0.027859126000009837,
"p90": 0.02838041700000531,
"p95": 0.02847344879999696,
"p99": 0.02862334175997262,
"values": [
0.02821750099997189,
0.02793257499996571,
0.02806054600000607,
0.027859126000009837,
0.028145144000006894,
0.028660814999966533,
0.02839314900001,
0.028361318999998275,
0.024258344000031684,
0.02450376599995252,
0.02478201900004251,
0.024940505000017765,
0.0247045830000161,
0.025589426999999887,
0.026166882999973495
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.446105505423674
},
"energy": {
"unit": "kWh",
"cpu": 9.315535544584748e-07,
"ram": 3.893205938780352e-08,
"gpu": 0.0,
"total": 9.704856138462787e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1030411.9769861897
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.394335382999941,
"mean": 0.0262890255333294,
"stdev": 0.0015784582251021433,
"p50": 0.027370326000038858,
"p90": 0.027898661000028822,
"p95": 0.027972391300033907,
"p99": 0.02808808786000327,
"values": [
0.027696049999974548,
0.027457710999954088,
0.027536189000045397,
0.027370326000038858,
0.027666503999967063,
0.02811701199999561,
0.027910411000050317,
0.027881035999996584,
0.023948351000001367,
0.024191948999998658,
0.024462959000004503,
0.024620653999988917,
0.02438186599999881,
0.02525733299995636,
0.025837031999969895
]
},
"throughput": {
"unit": "tokens/s",
"value": 38.03868647516788
},
"energy": null,
"efficiency": null
}
}
}