IlyasMoutawwakil (HF staff) committed
Commit: 7d4dcdf
1 Parent(s): f7ad884

Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

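The commit message indicates the file was pushed with the `huggingface_hub` client. As context, here is a minimal sketch of how such a result file can be uploaded programmatically; the `repo_id` and local path are illustrative placeholders, not values taken from this commit:

```python
# Minimal sketch: uploading a benchmark result file with huggingface_hub.
# The repo_id and local path below are illustrative placeholders.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN
api.upload_file(
    path_or_fileobj="benchmark.json",  # local result produced by the benchmark run
    path_in_repo="cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json",
    repo_id="<namespace>/<results-repo>",  # placeholder: target repository
    repo_type="dataset",
    commit_message=(
        "Upload cpu_inference_transformers_text-generation_openai-community/gpt2/"
        "benchmark.json with huggingface_hub"
    ),
)
```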
cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
  "backend": {
  "name": "pytorch",
- "version": "2.4.1+cpu",
+ "version": "2.5.1+cpu",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "task": "text-generation",
  "library": "transformers",
@@ -45,7 +45,6 @@
  "warmup_runs": 1,
  "input_shapes": {
  "batch_size": 1,
- "num_choices": 2,
  "sequence_length": 2
  },
  "new_tokens": null,
@@ -73,23 +72,23 @@
  "environment": {
  "cpu": " AMD EPYC 7763 64-Core Processor",
  "cpu_count": 4,
- "cpu_ram_mb": 16766.783488,
+ "cpu_ram_mb": 16757.342208,
  "system": "Linux",
  "machine": "x86_64",
- "platform": "Linux-6.8.0-1015-azure-x86_64-with-glibc2.39",
+ "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35",
  "processor": "x86_64",
  "python_version": "3.10.15",
  "optimum_benchmark_version": "0.5.0.dev0",
- "optimum_benchmark_commit": "08c9f59440cf4e5a5d6711ec19e8329ab2de652d",
- "transformers_version": "4.45.2",
+ "optimum_benchmark_commit": "6807ba28334c8c98abf72a03d78f86133328d180",
+ "transformers_version": "4.46.3",
  "transformers_commit": null,
- "accelerate_version": "1.0.1",
+ "accelerate_version": "1.1.1",
  "accelerate_commit": null,
- "diffusers_version": "0.30.3",
+ "diffusers_version": "0.31.0",
  "diffusers_commit": null,
  "optimum_version": null,
  "optimum_commit": null,
- "timm_version": "1.0.9",
+ "timm_version": "1.0.11",
  "timm_commit": null,
  "peft_version": null,
  "peft_commit": null
@@ -101,7 +100,7 @@
  "load": {
  "memory": {
  "unit": "MB",
- "max_ram": 1110.257664,
+ "max_ram": 1130.385408,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -110,15 +109,15 @@
  "latency": {
  "unit": "s",
  "values": [
- 1.7425936460000173
+ 1.7419912789999898
  ],
  "count": 1,
- "total": 1.7425936460000173,
- "mean": 1.7425936460000173,
- "p50": 1.7425936460000173,
- "p90": 1.7425936460000173,
- "p95": 1.7425936460000173,
- "p99": 1.7425936460000173,
+ "total": 1.7419912789999898,
+ "mean": 1.7419912789999898,
+ "p50": 1.7419912789999898,
+ "p90": 1.7419912789999898,
+ "p95": 1.7419912789999898,
+ "p99": 1.7419912789999898,
  "stdev": 0,
  "stdev_": 0
  },
@@ -129,7 +128,7 @@
  "prefill": {
  "memory": {
  "unit": "MB",
- "max_ram": 965.029888,
+ "max_ram": 985.68192,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -138,51 +137,51 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.04430770499999426,
- 0.044662174000023924,
- 0.04438492799999949,
- 0.04513291000000663,
- 0.04388426599999207,
- 0.04501013199998738,
- 0.043271607000008316,
- 0.04593856899998627,
- 0.04445945699998788,
- 0.04549320000000989,
- 0.04522682600000394,
- 0.04396325400000478,
- 0.05160017400001493,
- 0.04461852299999691
+ 0.047665391999998974,
+ 0.049731436999991274,
+ 0.04895271399999501,
+ 0.04833657500000754,
+ 0.04391997499999434,
+ 0.043644501000017044,
+ 0.04382332400001587,
+ 0.04319213599995919,
+ 0.04416296799996644,
+ 0.044141969000008885,
+ 0.04314894600003072,
+ 0.04354990399997405,
+ 0.043227575999992496,
+ 0.04241360399998939
  ],
  "count": 14,
- "total": 0.6319537250000167,
- "mean": 0.04513955178571548,
- "p50": 0.04464034850001042,
- "p90": 0.045804958299993356,
- "p95": 0.0479201307499963,
- "p99": 0.0508641653500112,
- "stdev": 0.0019123563914344559,
- "stdev_": 4.236542712060393
+ "total": 0.6299110209999412,
+ "mean": 0.04499364435713866,
+ "p50": 0.043871649500005105,
+ "p90": 0.04876787229999877,
+ "p95": 0.04922526704999371,
+ "p99": 0.04963020300999176,
+ "stdev": 0.0024009023029775032,
+ "stdev_": 5.336092102076141
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 44.307041627136954
+ "value": 44.45072250927105
  },
  "energy": {
  "unit": "kWh",
- "cpu": 1.6903743775462637e-06,
- "ram": 7.068260530580505e-08,
+ "cpu": 1.6854151983796455e-06,
+ "ram": 7.043250781543132e-08,
  "gpu": 0.0,
- "total": 1.7610569828520685e-06
+ "total": 1.7558477061950768e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 1135681.5931991923
+ "value": 1139050.9512547653
  }
  },
  "decode": {
  "memory": {
  "unit": "MB",
- "max_ram": 965.029888,
+ "max_ram": 985.68192,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -191,45 +190,45 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.026748040999990508,
- 0.027266394999998056,
- 0.02669170599997983,
- 0.02635608200000661,
- 0.02731354400000896,
- 0.026307622000018682,
- 0.026464943999997104,
- 0.027463052000001653,
- 0.026757067999994888,
- 0.02786880700000438,
- 0.027078445000000784,
- 0.027976015999996662,
- 0.030265255999978535,
- 0.02609252099998116
+ 0.030765811999970083,
+ 0.030026732999999695,
+ 0.03023100500001874,
+ 0.02618368299999929,
+ 0.026192708999985825,
+ 0.026696138000033898,
+ 0.026371824000023025,
+ 0.026651877000006152,
+ 0.026894829000013942,
+ 0.026896673000010196,
+ 0.027024220999976478,
+ 0.02588880300004348,
+ 0.026431090000016866,
+ 0.025466253999979926
  ],
  "count": 14,
- "total": 0.3806494989999578,
- "mean": 0.027189249928568415,
- "p50": 0.026917756499997836,
- "p90": 0.027943853299998978,
- "p95": 0.028777249999990318,
- "p99": 0.02996765479998089,
- "stdev": 0.0010169676291400635,
- "stdev_": 3.740329842904237
+ "total": 0.3817216510000776,
+ "mean": 0.02726583221429126,
+ "p50": 0.026674007500020025,
+ "p90": 0.030169723400013025,
+ "p95": 0.03041818745000171,
+ "p99": 0.030696287089976407,
+ "stdev": 0.0016615794659438356,
+ "stdev_": 6.093998719294277
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 36.77924189255678
+ "value": 36.67593903390393
  },
  "energy": {
  "unit": "kWh",
- "cpu": 9.907546506018636e-07,
- "ram": 4.143080206427035e-08,
+ "cpu": 1.0257383568054851e-06,
+ "ram": 4.287120110748343e-08,
  "gpu": 0.0,
- "total": 1.0321854526661349e-06
+ "total": 1.068609557912968e-06
  },
  "efficiency": {
  "unit": "tokens/kWh",
- "value": 968818.149313188
+ "value": 935795.4854465602
  }
  },
  "per_token": {
@@ -237,34 +236,34 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.02635161300000277,
- 0.0268744560000016,
- 0.0263060289999828,
- 0.025976455999995096,
- 0.026922485999989476,
- 0.025946228999998766,
- 0.02607662100001562,
- 0.027069519000008313,
- 0.026305487999991328,
- 0.027467559999990954,
- 0.02668399099999874,
- 0.02753880299999878,
- 0.029855863999983967,
- 0.025728733999983433
+ 0.030299631999980647,
+ 0.029594156000030125,
+ 0.029819386999974995,
+ 0.02586457799998243,
+ 0.025855570000032913,
+ 0.02636103300000059,
+ 0.026048771000034776,
+ 0.02631907600004979,
+ 0.02656132700002445,
+ 0.02656453299999839,
+ 0.02669703099996923,
+ 0.02555920800000422,
+ 0.026109781000002386,
+ 0.02515429199996788
  ],
  "count": 14,
- "total": 0.37510384899994165,
- "mean": 0.026793132071424402,
- "p50": 0.026517802000000756,
- "p90": 0.02751743009999643,
- "p95": 0.028349774349993594,
- "p99": 0.029554646069985892,
- "stdev": 0.0010072378701798744,
- "stdev_": 3.7593136461045584
+ "total": 0.3768083750000528,
+ "mean": 0.0269148839285752,
+ "p50": 0.02634005450002519,
+ "p90": 0.029751817699991533,
+ "p95": 0.029987472749976974,
+ "p99": 0.030237200149979913,
+ "stdev": 0.001616638367978842,
+ "stdev_": 6.006484636043616
  },
  "throughput": {
  "unit": "tokens/s",
- "value": 37.3229974507731
+ "value": 37.154163571863386
  },
  "energy": null,
  "efficiency": null