Upload cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub
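As the commit title says, this report was pushed with huggingface_hub. Below is a minimal sketch of such an upload using HfApi.upload_file; the local path and target repo id are hypothetical, since the actual repository is not shown on this page:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from the local Hugging Face credentials
api.upload_file(
    path_or_fileobj="benchmark.json",  # hypothetical local path to the generated report
    path_in_repo="cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json",
    repo_id="org/benchmark-results",  # hypothetical dataset repo collecting the results
    repo_type="dataset",
    commit_message="Upload cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub",
)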
cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json
CHANGED
@@ -3,7 +3,7 @@
 "name": "cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM",
 "backend": {
 "name": "pytorch",
-"version": "2.
+"version": "2.6.0+cpu",
 "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
 "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
 "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
@@ -86,8 +86,8 @@
 "processor": "x86_64",
 "python_version": "3.10.16",
 "optimum_benchmark_version": "0.6.0.dev0",
-"optimum_benchmark_commit": "
-"transformers_version": "4.48.
+"optimum_benchmark_commit": "4eb7a37589fa5efafd23072041135e22808603ce",
+"transformers_version": "4.48.2",
 "transformers_commit": null,
 "accelerate_version": "1.3.0",
 "accelerate_commit": null,
@@ -107,7 +107,7 @@
 "overall": {
 "memory": {
 "unit": "MB",
-"max_ram":
+"max_ram": 532.078592,
 "max_global_vram": null,
 "max_process_vram": null,
 "max_reserved": null,
@@ -116,42 +116,42 @@
 "latency": {
 "unit": "s",
 "values": [
-0.
-0.
-0.
-0.
-0.
+0.023494457999987617,
+0.01259725500000286,
+0.01271216899999672,
+0.01171425199999021,
+0.013879252000009501
 ],
 "count": 5,
-"total": 0.
-"mean": 0.
-"p50": 0.
-"p90": 0.
-"p95": 0.
-"p99": 0.
-"stdev": 0.
-"stdev_":
+"total": 0.07439738599998691,
+"mean": 0.014879477199997382,
+"p50": 0.01271216899999672,
+"p90": 0.01964837559999637,
+"p95": 0.021571416799991992,
+"p99": 0.02310984975998849,
+"stdev": 0.0043621728119273215,
+"stdev_": 29.316707524697772
 },
 "throughput": {
 "unit": "samples/s",
-"value":
+"value": 672.0666234161614
 },
 "energy": {
 "unit": "kWh",
-"cpu": 9.
-"ram": 3.
+"cpu": 9.272332805555865e-06,
+"ram": 3.8730279171386433e-07,
 "gpu": 0,
-"total": 9.
+"total": 9.65963559726973e-06
 },
 "efficiency": {
 "unit": "samples/kWh",
-"value":
+"value": 1035235.7394130346
 }
 },
 "warmup": {
 "memory": {
 "unit": "MB",
-"max_ram":
+"max_ram": 532.078592,
 "max_global_vram": null,
 "max_process_vram": null,
 "max_reserved": null,
@@ -160,22 +160,22 @@
 "latency": {
 "unit": "s",
 "values": [
-0.
-0.
+0.023494457999987617,
+0.01259725500000286
 ],
 "count": 2,
-"total": 0.
-"mean": 0.
-"p50": 0.
-"p90": 0.
-"p95": 0.
-"p99": 0.
-"stdev": 0.
-"stdev_":
+"total": 0.03609171299999048,
+"mean": 0.01804585649999524,
+"p50": 0.01804585649999524,
+"p90": 0.02240473769998914,
+"p95": 0.022949597849988377,
+"p99": 0.023385485969987768,
+"stdev": 0.005448601499992378,
+"stdev_": 30.193088923176443
 },
 "throughput": {
 "unit": "samples/s",
-"value":
+"value": 221.6575311901131
 },
 "energy": null,
 "efficiency": null
@@ -183,7 +183,7 @@
 "train": {
 "memory": {
 "unit": "MB",
-"max_ram":
+"max_ram": 532.078592,
 "max_global_vram": null,
 "max_process_vram": null,
 "max_reserved": null,
@@ -192,23 +192,23 @@
 "latency": {
 "unit": "s",
 "values": [
-0.
-0.
-0.
+0.01271216899999672,
+0.01171425199999021,
+0.013879252000009501
 ],
 "count": 3,
-"total": 0.
-"mean": 0.
-"p50": 0.
-"p90": 0.
-"p95": 0.
-"p99": 0.
-"stdev":
-"stdev_":
+"total": 0.03830567299999643,
+"mean": 0.012768557666665478,
+"p50": 0.01271216899999672,
+"p90": 0.013645835400006944,
+"p95": 0.013762543700008224,
+"p99": 0.013855910340009246,
+"stdev": 0.0008847564679305419,
+"stdev_": 6.929180969596521
 },
 "throughput": {
 "unit": "samples/s",
-"value":
+"value": 469.90428806724464
 },
 "energy": null,
 "efficiency": null
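For reference, the aggregate latency fields appear to be derived directly from the raw "values" lists. A minimal sketch reproducing the statistics of the new "overall" latency block, assuming a population standard deviation and numpy's default linearly interpolated percentiles (both of which match the numbers above); "stdev_" is then the relative standard deviation in percent:

import numpy as np

# Per-iteration latencies from report.overall.latency.values (seconds)
values = np.array([
    0.023494457999987617,
    0.01259725500000286,
    0.01271216899999672,
    0.01171425199999021,
    0.013879252000009501,
])

stats = {
    "count": len(values),
    "total": values.sum(),
    "mean": values.mean(),
    "p50": np.percentile(values, 50),
    "p90": np.percentile(values, 90),
    "p95": np.percentile(values, 95),
    "p99": np.percentile(values, 99),
    "stdev": values.std(),                         # population stdev (ddof=0)
    "stdev_": 100 * values.std() / values.mean(),  # relative stdev, in percent
}
print(stats)  # reproduces the aggregates reported in the "overall" latency block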