{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.1+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "9ea4940f8d12769fd995efb985434836fb19b72f",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.0",
"accelerate_commit": null,
"diffusers_version": "0.30.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1109.983232,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.925631541999962,
"mean": 4.925631541999962,
"stdev": 0.0,
"p50": 4.925631541999962,
"p90": 4.925631541999962,
"p95": 4.925631541999962,
"p99": 4.925631541999962,
"values": [
4.925631541999962
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.702951791666737e-05,
"ram": 2.801657012968834e-06,
"gpu": 0,
"total": 6.983117492963621e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 963.579904,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6519403540002031,
"mean": 0.04346269026668021,
"stdev": 0.0023260999843744364,
"p50": 0.043894423000040206,
"p90": 0.04629842640001698,
"p95": 0.04667628710000144,
"p99": 0.047056279020009695,
"values": [
0.04473934200001395,
0.046472719999997025,
0.045755620000022645,
0.043894423000040206,
0.04603698600004691,
0.04715127700001176,
0.045042298000055325,
0.04120703400002412,
0.041516633000014735,
0.041888387000028615,
0.04153240199997299,
0.041061313000000155,
0.044926863000000594,
0.04046977699999843,
0.04024527899997565
]
},
"throughput": {
"unit": "tokens/s",
"value": 46.016479599590255
},
"energy": {
"unit": "kWh",
"cpu": 1.6627566488425738e-06,
"ram": 6.948811291108177e-08,
"gpu": 0.0,
"total": 1.7322447617536554e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1154571.25006704
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 964.235264,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.39426829799981533,
"mean": 0.02628455319998769,
"stdev": 0.0012284875161085677,
"p50": 0.02564458800003422,
"p90": 0.028004992599971957,
"p95": 0.028086889599990172,
"p99": 0.0282132379199993,
"values": [
0.028244825000001583,
0.02760356800001773,
0.027130062999958682,
0.02738348599996243,
0.027983676999951967,
0.028019202999985282,
0.025156244999948285,
0.025276890999975876,
0.025637625000001663,
0.02525835599999482,
0.025513783999997486,
0.024951321999992615,
0.0257228140000052,
0.02564458800003422,
0.02474185099998749
]
},
"throughput": {
"unit": "tokens/s",
"value": 38.04515878171627
},
"energy": {
"unit": "kWh",
"cpu": 9.360649715278024e-07,
"ram": 3.911919284766611e-08,
"gpu": 0.0,
"total": 9.751841643754678e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1025447.3324435337
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.3878395209998189,
"mean": 0.02585596806665459,
"stdev": 0.0011693736049780907,
"p50": 0.02527260300001899,
"p90": 0.02750290659996608,
"p95": 0.027574833899967642,
"p99": 0.02768629797997278,
"values": [
0.027714163999974062,
0.02708010999998578,
0.026649905999988732,
0.02691506099995422,
0.027484584999967865,
0.027515120999964893,
0.024764002999972945,
0.024894828000014968,
0.02525051199995687,
0.024880099999961658,
0.025114768000037202,
0.024572423999984494,
0.025350058000014997,
0.02527260300001899,
0.02438127800002121
]
},
"throughput": {
"unit": "tokens/s",
"value": 38.67579034063165
},
"energy": null,
"efficiency": null
}
}
}