{
"config": {
"name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.4.0+cpu",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cpu",
"device_ids": null,
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "inference",
"_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
"iterations": 1,
"duration": 1,
"warmup_runs": 1,
"input_shapes": {
"batch_size": 1,
"num_choices": 2,
"sequence_length": 2
},
"new_tokens": null,
"memory": true,
"latency": true,
"energy": true,
"forward_kwargs": {},
"generate_kwargs": {
"max_new_tokens": 2,
"min_new_tokens": 2
},
"call_kwargs": {
"num_inference_steps": 2
}
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": false,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7763 64-Core Processor",
"cpu_count": 4,
"cpu_ram_mb": 16757.342208,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-6.5.0-1024-azure-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"optimum_benchmark_version": "0.3.1",
"optimum_benchmark_commit": "d3c098f7db730a120ce09e9c390325447583d3b3",
"transformers_version": "4.43.2",
"transformers_commit": null,
"accelerate_version": "0.33.0",
"accelerate_commit": null,
"diffusers_version": "0.29.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.7",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
},
"report": {
"load": {
"memory": {
"unit": "MB",
"max_ram": 1120.58368,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 1,
"total": 4.7692232099999785,
"mean": 4.7692232099999785,
"stdev": 0.0,
"p50": 4.7692232099999785,
"p90": 4.7692232099999785,
"p95": 4.7692232099999785,
"p99": 4.7692232099999785,
"values": [
4.7692232099999785
]
},
"throughput": null,
"energy": {
"unit": "kWh",
"cpu": 6.772013770209419e-05,
"ram": 2.8305295998393374e-06,
"gpu": 0,
"total": 7.055066730193353e-05
},
"efficiency": null
},
"prefill": {
"memory": {
"unit": "MB",
"max_ram": 974.594048,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.6539542359999189,
"mean": 0.043596949066661256,
"stdev": 0.002181769598750108,
"p50": 0.04446738400000072,
"p90": 0.04513058000000001,
"p95": 0.045282110100012575,
"p99": 0.04550902602001258,
"values": [
0.04280345799998031,
0.044921682999984114,
0.044694378000002644,
0.042956071999981305,
0.04556575500001259,
0.04439705299998309,
0.043452279000007366,
0.044852292000001626,
0.04516054800001257,
0.04460382899998194,
0.04446738400000072,
0.04508562799998117,
0.044226743999985274,
0.03815867700001263,
0.03860845599999152
]
},
"throughput": {
"unit": "tokens/s",
"value": 45.87476974459681
},
"energy": {
"unit": "kWh",
"cpu": 1.506055675001226e-06,
"ram": 6.294066913247101e-08,
"gpu": 0.0,
"total": 1.568996344133697e-06
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1274700.2295306665
}
},
"decode": {
"memory": {
"unit": "MB",
"max_ram": 975.118336,
"max_global_vram": null,
"max_process_vram": null,
"max_reserved": null,
"max_allocated": null
},
"latency": {
"unit": "s",
"count": 15,
"total": 0.4046859340000424,
"mean": 0.026979062266669494,
"stdev": 0.0017312101920150473,
"p50": 0.027581240000017715,
"p90": 0.028332817200004003,
"p95": 0.028510628000003636,
"p99": 0.028801816799991685,
"values": [
0.027821058000000676,
0.028874613999988696,
0.027324810999999727,
0.02727987700001222,
0.02800162399998385,
0.027849120000013272,
0.027208774000001767,
0.028354634000010037,
0.028123271999987765,
0.027020382000017662,
0.028300091999994947,
0.027581240000017715,
0.023674367000012353,
0.023687449999982846,
0.023584619000018847
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.06578049732371
},
"energy": {
"unit": "kWh",
"cpu": 9.060696569023683e-07,
"ram": 3.7867727032743535e-08,
"gpu": 0.0,
"total": 9.439373839351123e-07
},
"efficiency": {
"unit": "tokens/kWh",
"value": 1059392.304001323
}
},
"per_token": {
"memory": null,
"latency": {
"unit": "s",
"count": 15,
"total": 0.398187312000033,
"mean": 0.026545820800002198,
"stdev": 0.0016697414528331663,
"p50": 0.027117524000004778,
"p90": 0.027860018600000556,
"p95": 0.02804309100000921,
"p99": 0.028346353400009433,
"values": [
0.027336202000014964,
0.02842216900000949,
0.026880320999993046,
0.02678377099999807,
0.0275383290000093,
0.027389642000002823,
0.026732846000015797,
0.027880629000009094,
0.027660277999984828,
0.026586793000006992,
0.027829102999987754,
0.027117524000004778,
0.023365170000005264,
0.02338283199998159,
0.023281703000009202
]
},
"throughput": {
"unit": "tokens/s",
"value": 37.67071312407553
},
"energy": null,
"efficiency": null
}
}
}
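
A minimal sketch of how a report like this might be consumed downstream, assuming the JSON above has been saved locally as benchmark.json (the path and the summarising script are illustrative, not part of optimum-benchmark itself):

import json

# Load the optimum-benchmark output shown above (local path is an assumption).
with open("benchmark.json") as f:
    report = json.load(f)["report"]

# Each stage (load, prefill, decode, per_token) carries a latency summary
# with the same shape: unit, count, total, mean, stdev, percentiles, values.
for stage, metrics in report.items():
    latency = metrics["latency"]
    print(f"{stage}: mean={latency['mean']:.6f} {latency['unit']} over {latency['count']} run(s)")

For this particular file, the loop would report a load mean of about 4.769 s from a single measurement, against prefill, decode, and per_token means in the 0.026-0.044 s range, each aggregated over 15 runs.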