IlyasMoutawwakil HF staff committed on
Commit
0ad3a0c
1 Parent(s): 14a6b3a

Upload cpu_training_transformers_text-generation_openai-community/gpt2/benchmark.json with huggingface_hub

Browse files
cpu_training_transformers_text-generation_openai-community/gpt2/benchmark.json CHANGED
@@ -6,19 +6,17 @@
6
  "version": "2.3.0+cpu",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
9
- "model": "openai-community/gpt2",
10
  "library": "transformers",
 
 
11
  "device": "cpu",
12
  "device_ids": null,
13
  "seed": 42,
14
  "inter_op_num_threads": null,
15
  "intra_op_num_threads": null,
16
- "hub_kwargs": {
17
- "revision": "main",
18
- "force_download": false,
19
- "local_files_only": false,
20
- "trust_remote_code": false
21
- },
22
  "no_weights": true,
23
  "device_map": null,
24
  "torch_dtype": null,
@@ -76,14 +74,14 @@
76
  "environment": {
77
  "cpu": " AMD EPYC 7763 64-Core Processor",
78
  "cpu_count": 4,
79
- "cpu_ram_mb": 16757.338112,
80
  "system": "Linux",
81
  "machine": "x86_64",
82
- "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35",
83
  "processor": "x86_64",
84
  "python_version": "3.10.14",
85
  "optimum_benchmark_version": "0.2.0",
86
- "optimum_benchmark_commit": "3e2eebdc0f80ae6deeb2e1faad3e889ed5a4df2d",
87
  "transformers_version": "4.40.2",
88
  "transformers_commit": null,
89
  "accelerate_version": "0.30.1",
@@ -102,7 +100,7 @@
102
  "overall": {
103
  "memory": {
104
  "unit": "MB",
105
- "max_ram": 2831.413248,
106
  "max_global_vram": null,
107
  "max_process_vram": null,
108
  "max_reserved": null,
@@ -111,24 +109,24 @@
111
  "latency": {
112
  "unit": "s",
113
  "count": 5,
114
- "total": 3.2185455340000146,
115
- "mean": 0.6437091068000029,
116
- "stdev": 0.04212561572489697,
117
- "p50": 0.6246454410000126,
118
- "p90": 0.687323689599998,
119
- "p95": 0.7075025928000002,
120
- "p99": 0.723645715360002,
121
  "values": [
122
- 0.7276814960000024,
123
- 0.6165270080000198,
124
- 0.6267869799999914,
125
- 0.6246454410000126,
126
- 0.6229046089999883
127
  ]
128
  },
129
  "throughput": {
130
  "unit": "samples/s",
131
- "value": 15.534967416744887
132
  },
133
  "energy": null,
134
  "efficiency": null
@@ -136,7 +134,7 @@
136
  "warmup": {
137
  "memory": {
138
  "unit": "MB",
139
- "max_ram": 2831.413248,
140
  "max_global_vram": null,
141
  "max_process_vram": null,
142
  "max_reserved": null,
@@ -145,21 +143,21 @@
145
  "latency": {
146
  "unit": "s",
147
  "count": 2,
148
- "total": 1.3442085040000222,
149
- "mean": 0.6721042520000111,
150
- "stdev": 0.05557724399999131,
151
- "p50": 0.6721042520000111,
152
- "p90": 0.7165660472000042,
153
- "p95": 0.7221237716000033,
154
- "p99": 0.7265699511200026,
155
  "values": [
156
- 0.7276814960000024,
157
- 0.6165270080000198
158
  ]
159
  },
160
  "throughput": {
161
  "unit": "samples/s",
162
- "value": 5.9514576616604025
163
  },
164
  "energy": null,
165
  "efficiency": null
@@ -167,7 +165,7 @@
167
  "train": {
168
  "memory": {
169
  "unit": "MB",
170
- "max_ram": 2831.413248,
171
  "max_global_vram": null,
172
  "max_process_vram": null,
173
  "max_reserved": null,
@@ -176,22 +174,22 @@
176
  "latency": {
177
  "unit": "s",
178
  "count": 3,
179
- "total": 1.8743370299999924,
180
- "mean": 0.6247790099999975,
181
- "stdev": 0.0015877828679711403,
182
- "p50": 0.6246454410000126,
183
- "p90": 0.6263586721999956,
184
- "p95": 0.6265728260999935,
185
- "p99": 0.6267441492199919,
186
  "values": [
187
- 0.6267869799999914,
188
- 0.6246454410000126,
189
- 0.6229046089999883
190
  ]
191
  },
192
  "throughput": {
193
  "unit": "samples/s",
194
- "value": 9.603395607032356
195
  },
196
  "energy": null,
197
  "efficiency": null
 
6
  "version": "2.3.0+cpu",
7
  "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
8
  "task": "text-generation",
 
9
  "library": "transformers",
10
+ "model": "openai-community/gpt2",
11
+ "processor": "openai-community/gpt2",
12
  "device": "cpu",
13
  "device_ids": null,
14
  "seed": 42,
15
  "inter_op_num_threads": null,
16
  "intra_op_num_threads": null,
17
+ "model_kwargs": {},
18
+ "processor_kwargs": {},
19
+ "hub_kwargs": {},
 
 
 
20
  "no_weights": true,
21
  "device_map": null,
22
  "torch_dtype": null,
 
74
  "environment": {
75
  "cpu": " AMD EPYC 7763 64-Core Processor",
76
  "cpu_count": 4,
77
+ "cpu_ram_mb": 16757.354496,
78
  "system": "Linux",
79
  "machine": "x86_64",
80
+ "platform": "Linux-6.5.0-1021-azure-x86_64-with-glibc2.35",
81
  "processor": "x86_64",
82
  "python_version": "3.10.14",
83
  "optimum_benchmark_version": "0.2.0",
84
+ "optimum_benchmark_commit": "6fd377459e287bb09e9383ba2516b1b2a271a562",
85
  "transformers_version": "4.40.2",
86
  "transformers_commit": null,
87
  "accelerate_version": "0.30.1",
 
100
  "overall": {
101
  "memory": {
102
  "unit": "MB",
103
+ "max_ram": 2830.180352,
104
  "max_global_vram": null,
105
  "max_process_vram": null,
106
  "max_reserved": null,
 
109
  "latency": {
110
  "unit": "s",
111
  "count": 5,
112
+ "total": 3.158187349000002,
113
+ "mean": 0.6316374698000005,
114
+ "stdev": 0.04075475755457997,
115
+ "p50": 0.6114178969999955,
116
+ "p90": 0.6769491482000035,
117
+ "p95": 0.6943017116000021,
118
+ "p99": 0.708183762320001,
119
  "values": [
120
+ 0.7116542750000008,
121
+ 0.6006115039999997,
122
+ 0.6248914580000076,
123
+ 0.6096122149999985,
124
+ 0.6114178969999955
125
  ]
126
  },
127
  "throughput": {
128
  "unit": "samples/s",
129
+ "value": 15.831866344418051
130
  },
131
  "energy": null,
132
  "efficiency": null
 
134
  "warmup": {
135
  "memory": {
136
  "unit": "MB",
137
+ "max_ram": 2830.180352,
138
  "max_global_vram": null,
139
  "max_process_vram": null,
140
  "max_reserved": null,
 
143
  "latency": {
144
  "unit": "s",
145
  "count": 2,
146
+ "total": 1.3122657790000005,
147
+ "mean": 0.6561328895000003,
148
+ "stdev": 0.05552138550000052,
149
+ "p50": 0.6561328895000003,
150
+ "p90": 0.7005499979000007,
151
+ "p95": 0.7061021364500008,
152
+ "p99": 0.7105438472900008,
153
  "values": [
154
+ 0.7116542750000008,
155
+ 0.6006115039999997
156
  ]
157
  },
158
  "throughput": {
159
  "unit": "samples/s",
160
+ "value": 6.096326009580409
161
  },
162
  "energy": null,
163
  "efficiency": null
 
165
  "train": {
166
  "memory": {
167
  "unit": "MB",
168
+ "max_ram": 2830.180352,
169
  "max_global_vram": null,
170
  "max_process_vram": null,
171
  "max_reserved": null,
 
174
  "latency": {
175
  "unit": "s",
176
  "count": 3,
177
+ "total": 1.8459215700000016,
178
+ "mean": 0.6153071900000006,
179
+ "stdev": 0.006817074968696739,
180
+ "p50": 0.6114178969999955,
181
+ "p90": 0.6221967458000052,
182
+ "p95": 0.6235441019000063,
183
+ "p99": 0.6246219867800074,
184
  "values": [
185
+ 0.6248914580000076,
186
+ 0.6096122149999985,
187
+ 0.6114178969999955
188
  ]
189
  },
190
  "throughput": {
191
  "unit": "samples/s",
192
+ "value": 9.751226862796768
193
  },
194
  "energy": null,
195
  "efficiency": null