Commit e805516
Parent(s): ba486a1
Upload cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub
cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json
CHANGED
@@ -3,7 +3,7 @@
     "name": "cpu_inference_transformers_text-generation_openai-community/gpt2",
     "backend": {
         "name": "pytorch",
-        "version": "2.3.
+        "version": "2.3.1+cpu",
         "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
         "task": "text-generation",
         "library": "transformers",
@@ -73,23 +73,23 @@
     "environment": {
         "cpu": " AMD EPYC 7763 64-Core Processor",
         "cpu_count": 4,
-        "cpu_ram_mb": 16757.
+        "cpu_ram_mb": 16757.338112,
         "system": "Linux",
         "machine": "x86_64",
-        "platform": "Linux-6.5.0-
+        "platform": "Linux-6.5.0-1022-azure-x86_64-with-glibc2.35",
         "processor": "x86_64",
         "python_version": "3.10.14",
         "optimum_benchmark_version": "0.2.1",
-        "optimum_benchmark_commit": "
+        "optimum_benchmark_commit": "d920fe9626db1e7915f6d3574b5b54b0159cd100",
-        "transformers_version": "4.
+        "transformers_version": "4.42.3",
         "transformers_commit": null,
-        "accelerate_version": "0.
+        "accelerate_version": "0.31.0",
         "accelerate_commit": null,
-        "diffusers_version": "0.
+        "diffusers_version": "0.29.2",
         "diffusers_commit": null,
         "optimum_version": null,
         "optimum_commit": null,
-        "timm_version": "1.0.
+        "timm_version": "1.0.7",
         "timm_commit": null,
         "peft_version": null,
         "peft_commit": null
@@ -99,7 +99,7 @@
     "prefill": {
         "memory": {
             "unit": "MB",
-            "max_ram":
+            "max_ram": 948.219904,
             "max_global_vram": null,
             "max_process_vram": null,
             "max_reserved": null,
@@ -108,49 +108,49 @@
         "latency": {
             "unit": "s",
             "count": 13,
-            "total": 0.
+            "total": 0.6244059869999319,
-            "mean": 0.
+            "mean": 0.04803122976922553,
-            "stdev": 0.
+            "stdev": 0.002659991941552128,
-            "p50": 0.
+            "p50": 0.04875405600000704,
-            "p90": 0.
+            "p90": 0.0509883479999985,
-            "p95": 0.
+            "p95": 0.0511371429999997,
-            "p99": 0.
+            "p99": 0.05124176619999503,
             "values": [
-                0.
+                0.050685386000026256,
-                0.
+                0.05104995700000359,
-                0.
+                0.05074191199997813,
-                0.
+                0.050600597999988395,
-                0.
+                0.049124685999998974,
-                0.
+                0.05126792199999386,
-                0.
+                0.04875405600000704,
-                0.
+                0.04520847499998126,
-                0.
+                0.046204921999986936,
-                0.
+                0.04494297899998401,
-                0.
+                0.047219673999990164,
-                0.
+                0.043867823999988786,
-                0.
+                0.04473759600000449
             ]
         },
         "throughput": {
             "unit": "tokens/s",
-            "value":
+            "value": 41.63957511830013
         },
         "energy": {
             "unit": "kWh",
-            "cpu": 1.
+            "cpu": 1.7411278065851922e-06,
-            "ram": 7.
+            "ram": 7.276344239316366e-08,
             "gpu": 0.0,
-            "total": 1.
+            "total": 1.8138912489783558e-06
         },
         "efficiency": {
             "unit": "tokens/kWh",
-            "value":
+            "value": 1102601.9344469889
         }
     },
     "decode": {
         "memory": {
             "unit": "MB",
-            "max_ram":
+            "max_ram": 948.219904,
             "max_global_vram": null,
             "max_process_vram": null,
             "max_reserved": null,
@@ -159,43 +159,43 @@
         "latency": {
             "unit": "s",
             "count": 13,
-            "total": 0.
+            "total": 0.389256606000032,
-            "mean": 0.
+            "mean": 0.029942815846156306,
-            "stdev": 0.
+            "stdev": 0.0020771328080415974,
-            "p50": 0.
+            "p50": 0.02917561000001001,
-            "p90": 0.
+            "p90": 0.032512111200014716,
-            "p95": 0.
+            "p95": 0.03256633820001298,
-            "p99": 0.
+            "p99": 0.03259535564000089,
             "values": [
-                0.
+                0.03260260999999787,
-                0.
+                0.03081702000000064,
-                0.
+                0.03254215700002305,
-                0.
+                0.03225978099999338,
-                0.
+                0.03239192799998136,
-                0.
+                0.031995488000006844,
-                0.
+                0.028002361999995173,
-                0.
+                0.02917561000001001,
-                0.
+                0.028242559999995365,
-                0.
+                0.027993085000019846,
-                0.
+                0.027426657999995996,
-                0.
+                0.02823492600001032,
-                0.
+                0.027572421000002123
             ]
         },
         "throughput": {
             "unit": "tokens/s",
-            "value":
+            "value": 33.39699262547373
         },
         "energy": {
             "unit": "kWh",
-            "cpu": 1.
+            "cpu": 1.04916032385711e-06,
-            "ram": 4.
+            "ram": 4.3847111288990186e-08,
             "gpu": 0.0,
-            "total": 1.
+            "total": 1.0930074351461e-06
         },
         "efficiency": {
             "unit": "tokens/kWh",
-            "value":
+            "value": 914906.8595917942
         }
     },
     "per_token": {
@@ -203,31 +203,31 @@
         "latency": {
             "unit": "s",
             "count": 12,
-            "total": 0.
+            "total": 0.9307644740000001,
-            "mean": 0.
+            "mean": 0.07756370616666668,
-            "stdev": 0.
+            "stdev": 0.0044240471243253265,
-            "p50": 0.
+            "p50": 0.07580525899999202,
-            "p90": 0.
+            "p90": 0.08325035319999471,
-            "p95": 0.
+            "p95": 0.08329273979999101,
-            "p99": 0.
+            "p99": 0.08329807435998844,
             "values": [
-                0.
+                0.08186743799998908,
-                0.
+                0.0832994079999878,
-                0.
+                0.0829179760000045,
-                0.
+                0.08149807000000919,
-                0.
+                0.08328728399999363,
-                0.
+                0.07693897900000479,
-                0.
+                0.07436066100001426,
-                0.
+                0.07451188199999592,
-                0.
+                0.07296351499999787,
-                0.
+                0.07467153899997925,
-                0.
+                0.07212025300000846,
-                0.
+                0.0723274690000153
             ]
         },
         "throughput": {
             "unit": "tokens/s",
-            "value": 12.
+            "value": 12.892627872258045
         },
         "energy": null,
         "efficiency": null
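Since the commit message says the file was uploaded with huggingface_hub, here is a minimal sketch of how a benchmark.json like this could be fetched back and its headline numbers printed. The repo id is a placeholder (this commit does not name it), the dataset repo type is an assumption, and the nesting of the "prefill"/"decode" sections is located by walking the JSON rather than hard-coding a path, since only fragments of the file appear in the diff above.

# Sketch: fetch this benchmark.json from the Hub and print mean latency and throughput.
# Assumptions: "<namespace>/<repo>" is a placeholder repo id; repo_type="dataset" is assumed.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="<namespace>/<repo>",  # placeholder: the repo this commit belongs to
    repo_type="dataset",           # assumption: benchmark results live in a dataset repo
    filename="cpu_inference_transformers_text-generation_openai-community/gpt2/benchmark.json",
    revision="e805516",            # the commit shown above
)

with open(path) as f:
    report = json.load(f)

def find_section(node, key):
    # Depth-first search for a dict entry named `key` anywhere in the JSON,
    # so the exact nesting does not have to be known in advance.
    if isinstance(node, dict):
        if key in node:
            return node[key]
        for value in node.values():
            found = find_section(value, key)
            if found is not None:
                return found
    return None

for phase in ("prefill", "decode"):
    section = find_section(report, phase)
    if section:
        mean_latency = section["latency"]["mean"]
        throughput = section["throughput"]["value"]
        print(f"{phase}: mean latency {mean_latency:.4f} s, {throughput:.1f} tokens/s")

With the values added in this commit, this would report roughly 0.0480 s / 41.6 tokens/s for prefill and 0.0299 s / 33.4 tokens/s for decode.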