{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.1+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": null,
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16766.767104,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.8.0-1014-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.15",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "c48d602cd7a21df41cc07305fa12549ed74a7e13",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.34.2",
"accelerate_commit": null,
"diffusers_version": "0.30.3",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1115.721728,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.746311879000018,
"mean": 4.746311879000018,
"stdev": 0.0,
"p50": 4.746311879000018,
"p90": 4.746311879000018,
"p95": 4.746311879000018,
"p99": 4.746311879000018,
"values": [
4.746311879000018
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.749695532222136e-05,
"ram": 2.82279531288821e-06,
"gpu": 0,
"total": 7.031975063510957e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 970.231808,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 16,
"total": 0.6530532170000072,
"mean": 0.04081582606250045,
"stdev": 0.0008964170890988942,
"p50": 0.0407564564999916,
"p90": 0.041652334500000165,
"p95": 0.0422937680000075,
"p99": 0.04305897200001141,
"values": [
0.04109338600000001,
0.041974933000005876,
0.04085953999998537,
0.04058344400002056,
0.041329735999994455,
0.04116865299999972,
0.04032315800000674,
0.04086462799998003,
0.04103927400001339,
0.04065337299999783,
0.04048534000000359,
0.040621313999992026,
0.043250273000012385,
0.03963013300000284,
0.039884690999997474,
0.03929134099999487
]
},
"throughput": {
"unit": "tokens/s",
"value": 49.00060081933514
},
"energy": {
"unit": "kWh",
"cpu": 1.5558059596153937e-06,
"ram": 6.505735498805779e-08,
"gpu": 0.0,
"total": 1.6208633146034514e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1233910.3377691694
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 970.231808,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 16,
"total": 0.40792948999995815,
"mean": 0.025495593124997384,
"stdev": 0.0018068409448541267,
"p50": 0.024961831499993536,
"p90": 0.026571261499995558,
"p95": 0.02814425899998696,
"p99": 0.031241938999983173,
"values": [
0.03201635899998223,
0.025052695999988828,
0.025002653000001374,
0.02511442200000147,
0.024897246000023188,
0.024868962999988753,
0.0249210099999857,
0.024838797000001023,
0.02521681199999648,
0.025150127999978622,
0.024918205000005855,
0.02628896400000258,
0.024432278000006136,
0.026853558999988536,
0.024092854000002717,
0.024264544000004662
]
},
"throughput": {
"unit": "tokens/s",
"value": 39.22246464701937
},
"energy": {
"unit": "kWh",
"cpu": 9.268403535790723e-07,
"ram": 3.875669731414859e-08,
"gpu": 0.0,
"total": 9.65597050893221e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1035628.6807990504
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 16,
"total": 0.4020955670000319,
"mean": 0.025130972937501994,
"stdev": 0.0017461524868080168,
"p50": 0.024621541000001912,
"p90": 0.026130819000002248,
"p95": 0.027752607499984094,
"p99": 0.030704527099980793,
"values": [
0.03144250699997997,
0.02470983599999954,
0.024674330000010514,
0.02476428800000008,
0.024564124000022503,
0.02453342699999439,
0.02456875199999331,
0.024494384000007585,
0.024846550999995998,
0.024816745999999057,
0.024555608000014217,
0.025738997000019026,
0.024141434999989997,
0.02652264099998547,
0.023801179000003003,
0.02392076200001725
]
},
"throughput": {
"unit": "tokens/s",
"value": 39.7915354286876
},
"energy": null,
"efficiency": null
}
}
}
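
The "config" block above maps onto optimum-benchmark's Python dataclasses for the backend, scenario, and launcher. Below is a rough sketch of how a comparable run could be launched against optimum-benchmark 0.4.0; the class names and keyword arguments are inferred from the config fields above, so treat the exact import path and signatures as assumptions rather than a verified recipe.

from optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig

if __name__ == "__main__":
    # Backend, scenario, and launcher mirror the "config" section of this report.
    backend_config = PyTorchConfig(model="openai-community/gpt2", device="cpu", no_weights=True)
    scenario_config = InferenceConfig(
        memory=True,
        latency=True,
        energy=True,
        input_shapes={"batch_size": 1, "sequence_length": 2},
        generate_kwargs={"max_new_tokens": 2, "min_new_tokens": 2},
    )
    launcher_config = ProcessConfig(start_method="spawn")
    benchmark_config = BenchmarkConfig(
        name="cpu_inference_transformers_text-generation_openai-community/gpt2",
        backend=backend_config,
        scenario=scenario_config,
        launcher=launcher_config,
    )
    # Runs the benchmark in an isolated process and returns the report
    # (load / prefill / decode / per_token sections, as in the JSON above).
    benchmark_report = Benchmark.launch(benchmark_config)
    benchmark_report.save_json("benchmark.json")  # assumed helper for persisting the report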
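
A minimal sketch of how the report itself can be consumed once downloaded; the local file name and the choice of fields printed are just for illustration, everything else follows the JSON structure shown above.

import json

# Path assumed for illustration; this is the file shown above.
with open("benchmark.json") as f:
    benchmark = json.load(f)

backend = benchmark["config"]["backend"]
cpu = benchmark["config"]["environment"]["cpu"].strip()
print(f"{backend['model']} on {backend['device']} ({cpu})")

# Each report phase (load, prefill, decode, per_token) carries memory, latency,
# and energy statistics; prefill and decode also report throughput.
for phase in ("prefill", "decode"):
    latency = benchmark["report"][phase]["latency"]
    throughput = benchmark["report"][phase]["throughput"]
    print(
        f"{phase}: mean latency {latency['mean'] * 1e3:.2f} ms, "
        f"p90 {latency['p90'] * 1e3:.2f} ms, "
        f"{throughput['value']:.1f} {throughput['unit']}"
    )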