IlyasMoutawwakil committed
Commit 601eaa0 (verified)
1 Parent(s): b10a2cf

Upload cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json with huggingface_hub
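As the commit message notes, the report was pushed programmatically with the huggingface_hub client. A minimal sketch of such an upload in Python (the repo id, repo type, and local file path are illustrative assumptions, not values recorded in this commit):

from huggingface_hub import HfApi

api = HfApi()
# Push the benchmark report to the Hub; repo_id, repo_type and the local
# file path are placeholders, not taken from this commit.
api.upload_file(
    path_or_fileobj="benchmark.json",
    path_in_repo="cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json",
    repo_id="<namespace>/<benchmark-repo>",
    repo_type="dataset",  # assumption: benchmark reports are often stored in dataset repos
    commit_message="Upload benchmark.json with huggingface_hub",
)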

cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM/benchmark.json CHANGED
@@ -3,7 +3,7 @@
  "name": "cpu_training_transformers_text-generation_hf-internal-testing/tiny-random-LlamaForCausalLM",
  "backend": {
  "name": "pytorch",
- "version": "2.5.1+cpu",
+ "version": "2.6.0+cpu",
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
  "model": "hf-internal-testing/tiny-random-LlamaForCausalLM",
  "processor": "hf-internal-testing/tiny-random-LlamaForCausalLM",
@@ -86,8 +86,8 @@
  "processor": "x86_64",
  "python_version": "3.10.16",
  "optimum_benchmark_version": "0.6.0.dev0",
- "optimum_benchmark_commit": "92cd2b27febacd047b21430f43cc804cfd37a667",
- "transformers_version": "4.48.1",
+ "optimum_benchmark_commit": "4eb7a37589fa5efafd23072041135e22808603ce",
+ "transformers_version": "4.48.2",
  "transformers_commit": null,
  "accelerate_version": "1.3.0",
  "accelerate_commit": null,
@@ -107,7 +107,7 @@
  "overall": {
  "memory": {
  "unit": "MB",
- "max_ram": 526.811136,
+ "max_ram": 532.078592,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -116,42 +116,42 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.019057135999958064,
- 0.012695155999949748,
- 0.01251563199997463,
- 0.01262870200002908,
- 0.012682792000020982
+ 0.023494457999987617,
+ 0.01259725500000286,
+ 0.01271216899999672,
+ 0.01171425199999021,
+ 0.013879252000009501
  ],
  "count": 5,
- "total": 0.0695794179999325,
- "mean": 0.0139158835999865,
- "p50": 0.012682792000020982,
- "p90": 0.01651234399995474,
- "p95": 0.0177847399999564,
- "p99": 0.01880265679995773,
- "stdev": 0.002571408486499767,
- "stdev_": 18.47822646707293
+ "total": 0.07439738599998691,
+ "mean": 0.014879477199997382,
+ "p50": 0.01271216899999672,
+ "p90": 0.01964837559999637,
+ "p95": 0.021571416799991992,
+ "p99": 0.02310984975998849,
+ "stdev": 0.0043621728119273215,
+ "stdev_": 29.316707524697772
  },
  "throughput": {
  "unit": "samples/s",
- "value": 718.603308812507
+ "value": 672.0666234161614
  },
  "energy": {
  "unit": "kWh",
- "cpu": 9.489859049998334e-06,
- "ram": 3.9652249041777215e-07,
+ "cpu": 9.272332805555865e-06,
+ "ram": 3.8730279171386433e-07,
  "gpu": 0,
- "total": 9.886381540416106e-06
+ "total": 9.65963559726973e-06
  },
  "efficiency": {
  "unit": "samples/kWh",
- "value": 1011492.4210763478
+ "value": 1035235.7394130346
  }
  },
  "warmup": {
  "memory": {
  "unit": "MB",
- "max_ram": 526.811136,
+ "max_ram": 532.078592,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -160,22 +160,22 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.019057135999958064,
- 0.012695155999949748
+ 0.023494457999987617,
+ 0.01259725500000286
  ],
  "count": 2,
- "total": 0.03175229199990781,
- "mean": 0.015876145999953906,
- "p50": 0.015876145999953906,
- "p90": 0.018420937999957233,
- "p95": 0.01873903699995765,
- "p99": 0.01899351619995798,
- "stdev": 0.003180990000004158,
- "stdev_": 20.03628588458051
+ "total": 0.03609171299999048,
+ "mean": 0.01804585649999524,
+ "p50": 0.01804585649999524,
+ "p90": 0.02240473769998914,
+ "p95": 0.022949597849988377,
+ "p99": 0.023385485969987768,
+ "stdev": 0.005448601499992378,
+ "stdev_": 30.193088923176443
  },
  "throughput": {
  "unit": "samples/s",
- "value": 251.95031590233634
+ "value": 221.6575311901131
  },
  "energy": null,
  "efficiency": null
@@ -183,7 +183,7 @@
  "train": {
  "memory": {
  "unit": "MB",
- "max_ram": 526.811136,
+ "max_ram": 532.078592,
  "max_global_vram": null,
  "max_process_vram": null,
  "max_reserved": null,
@@ -192,23 +192,23 @@
  "latency": {
  "unit": "s",
  "values": [
- 0.01251563199997463,
- 0.01262870200002908,
- 0.012682792000020982
+ 0.01271216899999672,
+ 0.01171425199999021,
+ 0.013879252000009501
  ],
  "count": 3,
- "total": 0.03782712600002469,
- "mean": 0.01260904200000823,
- "p50": 0.01262870200002908,
- "p90": 0.0126719740000226,
- "p95": 0.012677383000021792,
- "p99": 0.012681710200021144,
- "stdev": 6.96443493974392e-05,
- "stdev_": 0.5523365644859756
+ "total": 0.03830567299999643,
+ "mean": 0.012768557666665478,
+ "p50": 0.01271216899999672,
+ "p90": 0.013645835400006944,
+ "p95": 0.013762543700008224,
+ "p99": 0.013855910340009246,
+ "stdev": 0.0008847564679305419,
+ "stdev_": 6.929180969596521
  },
  "throughput": {
  "unit": "samples/s",
- "value": 475.8489978854923
+ "value": 469.90428806724464
  },
  "energy": null,
  "efficiency": null