{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.4.0",
"optimum_benchmark_commit": "81f067a1f11a4e373792667718ef95ea9a0dea36",
"transformers_version": "4.44.2",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.30.1",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.9",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1122.988032,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.909352719999987,
"mean": 4.909352719999987,
"stdev": 0.0,
"p50": 4.909352719999987,
"p90": 4.909352719999987,
"p95": 4.909352719999987,
"p99": 4.909352719999987,
"values": [
4.909352719999987
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.787612175555483e-05,
"ram": 2.837051747931096e-06,
"gpu": 0,
"total": 7.071317350348592e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 976.347136,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6599179660000232,
"mean": 0.04399453106666821,
"stdev": 0.0019312571613579789,
"p50": 0.043221532000018215,
"p90": 0.04644600000001446,
"p95": 0.04683181250000246,
"p99": 0.04745455209998965,
"values": [
0.04274869399998238,
0.043221532000018215,
0.04287946999997416,
0.04636769700002219,
0.046498202000009314,
0.047610236999986455,
0.04318190700001878,
0.04295870900000409,
0.0457547259999842,
0.04502124900000126,
0.04562274799999955,
0.04349269100001152,
0.04157701500000144,
0.041948041000011926,
0.04103504799999769
]
},
"throughput": {
"unit": "tokens/s",
"value": 45.460195881375576
},
"energy": {
"unit": "kWh",
"cpu": 1.614982408666663e-06,
"ram": 6.749365914313016e-08,
"gpu": 0.0,
"total": 1.682476067809793e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1188724.1894641342
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 977.133568,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.4057758879999369,
"mean": 0.02705172586666246,
"stdev": 0.0014493451593686941,
"p50": 0.027039701000006744,
"p90": 0.029035111000001734,
"p95": 0.029243130399996174,
"p99": 0.029394922879985755,
"values": [
0.026371796999995922,
0.02943287099998315,
0.025706828000011228,
0.0288450580000017,
0.02872604499998488,
0.02608430699999076,
0.027096035999989,
0.02707773199998087,
0.028410522999990917,
0.027039701000006744,
0.029161813000001757,
0.02572016299998836,
0.025509247000002233,
0.025298522000014145,
0.025295244999995248
]
},
"throughput": {
"unit": "tokens/s",
"value": 36.96621816031201
},
"energy": {
"unit": "kWh",
"cpu": 9.194307225833983e-07,
"ram": 3.842477918986546e-08,
"gpu": 0.0,
"total": 9.578555017732634e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1043998.8058206224
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.40021055299999375,
"mean": 0.026680703533332916,
"stdev": 0.0014126665986679255,
"p50": 0.026686417999997047,
"p90": 0.028608582199990452,
"p95": 0.02886667639999132,
"p99": 0.0290331184799993,
"values": [
0.02601599900000906,
0.029074729000001298,
0.02537509600000476,
0.028355188999995562,
0.028185581000002458,
0.02574110300000143,
0.02674707099998841,
0.026701344999992216,
0.02805457499999875,
0.026686417999997047,
0.028777510999987044,
0.025385475000007318,
0.025181692000018074,
0.024969555000012633,
0.024959213999977692
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.48027104122923
},
"energy": null,
"efficiency": null
}
}
}