Commit 5651cf4, committed by DeepDream2045
1 Parent(s): cdcefd6

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
  "down_proj",
- "gate_proj",
- "q_proj",
  "o_proj",
- "up_proj",
- "k_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,

  "rank_pattern": {},
  "revision": null,
  "target_modules": [
+ "up_proj",
  "down_proj",
+ "k_proj",
  "o_proj",
+ "gate_proj",
+ "v_proj",
+ "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6f222a1c4c489c9c91998165385fa6a8cca4505ef6b72b83a29f1ef9b4476dbd
  size 500770656

  version https://git-lfs.github.com/spec/v1
+ oid sha256:31a7d5fdd1388a98c30ce2b92bd7ce8c07eddd25429f3aaa19d1ad7a7ff0a1a7
  size 500770656
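The body of adapter_model.safetensors (and of the binary files below) is a Git LFS pointer rather than the data itself: it records the SHA-256 of the stored object and its byte size, which stays at 500,770,656 because only the tensor values changed, not their shapes. A small sketch, assuming the object has been pulled to a local path, that checks a download against the new pointer:

```python
# Verify a locally downloaded LFS object against the pointer shown above.
# The local path is an assumption; the oid and size are copied from the new pointer.
import hashlib
import os

EXPECTED_OID = "31a7d5fdd1388a98c30ce2b92bd7ce8c07eddd25429f3aaa19d1ad7a7ff0a1a7"
EXPECTED_SIZE = 500770656

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

path = "last-checkpoint/adapter_model.safetensors"  # assumed download location
assert os.path.getsize(path) == EXPECTED_SIZE
assert sha256_of(path) == EXPECTED_OID
```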
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:16a7f682e8fe380804a7b69db82778ae81724f76d4bbc7ccc21b9a05753f5bed
  size 1001863522

  version https://git-lfs.github.com/spec/v1
+ oid sha256:f146b42314babbf272c18a6a0a1e229d15716e312c551a8d093fd3ebbd7b1df9
  size 1001863522
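At 1,001,863,522 bytes, optimizer.pt is roughly twice the size of the adapter weights, which is consistent with an Adam-style optimizer keeping two moment buffers per trainable LoRA parameter at the same precision; the training arguments are not part of this diff, so treat that as an inference. A hedged sketch for peeking at the saved state, assuming the file is available locally (the transformers Trainer typically writes it with torch.save):

```python
# Inspect the checkpoint's optimizer state dict (assumed local path).
import torch

state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")

# A torch optimizer state_dict has "state" (per-parameter buffers such as Adam's
# exp_avg / exp_avg_sq) and "param_groups" (lr, weight decay, and similar settings).
print(list(state.keys()))
print(len(state["state"]), "parameter entries")
print(state["param_groups"][0].get("lr"))
```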
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8f32b24a025fd99a4774f254c729e6ea1d6653f5243edf7a461d966c3f9cfc90
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:4bb9bd59dcd9b44e59614c1c8232046e7e9e1caebaf83fb1b0670f88f193cd6d
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:23001c42c640f89983758733dcfdb4e7daf6cce5a119ac7170fe88dbd18dd1b0
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:c8ceb091103656fa4a13353c7f9c5fa1c6f42ee087495b52b0ff54267fb28bc0
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1308bf92867c81ae09e5cb48fdc76fbfd6e32eecfad5332d4defae3a6af04703
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:b4efdf6725885638b7d7ac22ea73d216339a42d524de3a299f6169334b40a2e4
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:676b7abf3456dcac426c8247f8ccdff0071875ec709c89ea4a819a457a82dd55
  size 15024

  version https://git-lfs.github.com/spec/v1
+ oid sha256:9ad43da04f664a4b1afc5e2ec8bc883b432560b2810cd33fcf9c580425140814
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
  size 1064

  version https://git-lfs.github.com/spec/v1
+ oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,386 +1,203 @@
  {
- "best_metric": 0.3135169446468353,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.22148394241417496,
  "eval_steps": 25,
- "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 0.004429678848283499,
- "grad_norm": 90.43601989746094,
  "learning_rate": 5e-05,
- "loss": 186.7866,
  "step": 1
  },
  {
  "epoch": 0.004429678848283499,
- "eval_loss": 11.860960006713867,
- "eval_runtime": 59.9491,
- "eval_samples_per_second": 25.372,
- "eval_steps_per_second": 3.186,
  "step": 1
  },
  {
  "epoch": 0.008859357696566999,
- "grad_norm": 92.87113952636719,
  "learning_rate": 0.0001,
- "loss": 191.192,
  "step": 2
  },
  {
  "epoch": 0.013289036544850499,
- "grad_norm": 95.75019836425781,
  "learning_rate": 9.989294616193017e-05,
- "loss": 186.3358,
  "step": 3
  },
  {
  "epoch": 0.017718715393133997,
- "grad_norm": 131.21568298339844,
  "learning_rate": 9.957224306869053e-05,
- "loss": 169.4684,
  "step": 4
  },
  {
  "epoch": 0.0221483942414175,
- "grad_norm": 133.9580535888672,
  "learning_rate": 9.903926402016153e-05,
- "loss": 140.7173,
  "step": 5
  },
  {
  "epoch": 0.026578073089700997,
- "grad_norm": 115.58403015136719,
  "learning_rate": 9.829629131445342e-05,
- "loss": 116.9708,
  "step": 6
  },
  {
  "epoch": 0.031007751937984496,
- "grad_norm": 114.49787139892578,
  "learning_rate": 9.73465064747553e-05,
- "loss": 95.8174,
  "step": 7
  },
  {
  "epoch": 0.035437430786267994,
- "grad_norm": 112.3747329711914,
  "learning_rate": 9.619397662556435e-05,
- "loss": 76.4804,
  "step": 8
  },
  {
  "epoch": 0.03986710963455149,
- "grad_norm": 134.6016845703125,
  "learning_rate": 9.484363707663442e-05,
- "loss": 52.7218,
  "step": 9
  },
  {
  "epoch": 0.044296788482835,
- "grad_norm": 140.690673828125,
  "learning_rate": 9.330127018922194e-05,
- "loss": 28.0401,
  "step": 10
  },
  {
  "epoch": 0.048726467331118496,
- "grad_norm": 73.04218292236328,
  "learning_rate": 9.157348061512727e-05,
- "loss": 13.6756,
  "step": 11
  },
  {
  "epoch": 0.053156146179401995,
- "grad_norm": 81.19536590576172,
  "learning_rate": 8.966766701456177e-05,
- "loss": 11.1729,
  "step": 12
  },
  {
  "epoch": 0.05758582502768549,
- "grad_norm": 56.66990661621094,
  "learning_rate": 8.759199037394887e-05,
- "loss": 9.8869,
  "step": 13
  },
  {
  "epoch": 0.06201550387596899,
- "grad_norm": 28.945405960083008,
  "learning_rate": 8.535533905932738e-05,
- "loss": 9.031,
  "step": 14
  },
  {
  "epoch": 0.0664451827242525,
- "grad_norm": 36.69380569458008,
  "learning_rate": 8.296729075500344e-05,
- "loss": 8.7398,
  "step": 15
  },
  {
  "epoch": 0.07087486157253599,
- "grad_norm": 22.542613983154297,
  "learning_rate": 8.043807145043604e-05,
- "loss": 8.2923,
  "step": 16
  },
  {
  "epoch": 0.0753045404208195,
- "grad_norm": 76.44745635986328,
  "learning_rate": 7.777851165098012e-05,
- "loss": 9.5592,
  "step": 17
  },
  {
  "epoch": 0.07973421926910298,
- "grad_norm": 60.888092041015625,
  "learning_rate": 7.500000000000001e-05,
- "loss": 7.8758,
  "step": 18
  },
  {
  "epoch": 0.08416389811738649,
- "grad_norm": 66.46916198730469,
  "learning_rate": 7.211443451095007e-05,
- "loss": 6.9583,
  "step": 19
  },
  {
  "epoch": 0.08859357696567,
- "grad_norm": 67.2452163696289,
  "learning_rate": 6.91341716182545e-05,
- "loss": 8.2272,
  "step": 20
  },
  {
  "epoch": 0.09302325581395349,
- "grad_norm": 53.93881607055664,
  "learning_rate": 6.607197326515808e-05,
- "loss": 7.0548,
  "step": 21
  },
  {
  "epoch": 0.09745293466223699,
- "grad_norm": 80.97118377685547,
  "learning_rate": 6.294095225512603e-05,
- "loss": 7.2771,
  "step": 22
  },
  {
  "epoch": 0.10188261351052048,
- "grad_norm": 37.07639694213867,
  "learning_rate": 5.9754516100806423e-05,
- "loss": 6.2439,
  "step": 23
  },
  {
  "epoch": 0.10631229235880399,
- "grad_norm": 53.39252471923828,
  "learning_rate": 5.6526309611002594e-05,
- "loss": 6.0297,
  "step": 24
  },
  {
  "epoch": 0.11074197120708748,
- "grad_norm": 50.96709442138672,
  "learning_rate": 5.327015646150716e-05,
- "loss": 5.7838,
  "step": 25
  },
  {
  "epoch": 0.11074197120708748,
- "eval_loss": 0.4496269226074219,
- "eval_runtime": 59.8952,
- "eval_samples_per_second": 25.394,
- "eval_steps_per_second": 3.189,
  "step": 25
- },
- {
- "epoch": 0.11517165005537099,
- "grad_norm": 43.56239318847656,
- "learning_rate": 5e-05,
- "loss": 8.002,
- "step": 26
- },
- {
- "epoch": 0.11960132890365449,
- "grad_norm": 42.219730377197266,
- "learning_rate": 4.6729843538492847e-05,
- "loss": 7.7278,
- "step": 27
- },
- {
- "epoch": 0.12403100775193798,
- "grad_norm": 51.25581741333008,
- "learning_rate": 4.347369038899744e-05,
- "loss": 7.8858,
- "step": 28
- },
- {
- "epoch": 0.12846068660022147,
- "grad_norm": 28.833730697631836,
- "learning_rate": 4.0245483899193595e-05,
- "loss": 7.4616,
- "step": 29
- },
- {
- "epoch": 0.132890365448505,
- "grad_norm": 36.36983108520508,
- "learning_rate": 3.705904774487396e-05,
- "loss": 5.8613,
- "step": 30
- },
- {
- "epoch": 0.13732004429678848,
- "grad_norm": 24.915283203125,
- "learning_rate": 3.392802673484193e-05,
- "loss": 6.0989,
- "step": 31
- },
- {
- "epoch": 0.14174972314507198,
- "grad_norm": 26.047985076904297,
- "learning_rate": 3.086582838174551e-05,
- "loss": 5.5893,
- "step": 32
- },
- {
- "epoch": 0.1461794019933555,
- "grad_norm": 17.464637756347656,
- "learning_rate": 2.7885565489049946e-05,
- "loss": 5.6094,
- "step": 33
- },
- {
- "epoch": 0.150609080841639,
- "grad_norm": 19.809972763061523,
- "learning_rate": 2.500000000000001e-05,
- "loss": 5.3021,
- "step": 34
- },
- {
- "epoch": 0.15503875968992248,
- "grad_norm": 29.769086837768555,
- "learning_rate": 2.2221488349019903e-05,
- "loss": 5.5138,
- "step": 35
- },
- {
- "epoch": 0.15946843853820597,
- "grad_norm": 25.799795150756836,
- "learning_rate": 1.9561928549563968e-05,
- "loss": 4.979,
- "step": 36
- },
- {
- "epoch": 0.1638981173864895,
- "grad_norm": 25.358388900756836,
- "learning_rate": 1.703270924499656e-05,
- "loss": 3.709,
- "step": 37
- },
- {
- "epoch": 0.16832779623477298,
- "grad_norm": 26.125646591186523,
- "learning_rate": 1.4644660940672627e-05,
- "loss": 4.5807,
- "step": 38
- },
- {
- "epoch": 0.17275747508305647,
- "grad_norm": 20.812122344970703,
- "learning_rate": 1.2408009626051137e-05,
- "loss": 6.1171,
- "step": 39
- },
- {
- "epoch": 0.17718715393134,
- "grad_norm": 20.470781326293945,
- "learning_rate": 1.0332332985438248e-05,
- "loss": 6.5875,
- "step": 40
- },
- {
- "epoch": 0.18161683277962348,
- "grad_norm": 17.11683464050293,
- "learning_rate": 8.426519384872733e-06,
- "loss": 6.203,
- "step": 41
- },
- {
- "epoch": 0.18604651162790697,
- "grad_norm": 15.608457565307617,
- "learning_rate": 6.698729810778065e-06,
- "loss": 4.9995,
- "step": 42
- },
- {
- "epoch": 0.19047619047619047,
- "grad_norm": 14.48198127746582,
- "learning_rate": 5.156362923365588e-06,
- "loss": 5.2074,
- "step": 43
- },
- {
- "epoch": 0.19490586932447398,
- "grad_norm": 17.412363052368164,
- "learning_rate": 3.8060233744356633e-06,
- "loss": 5.2764,
- "step": 44
- },
- {
- "epoch": 0.19933554817275748,
- "grad_norm": 26.537670135498047,
- "learning_rate": 2.653493525244721e-06,
- "loss": 5.5755,
- "step": 45
- },
- {
- "epoch": 0.20376522702104097,
- "grad_norm": 25.966537475585938,
- "learning_rate": 1.70370868554659e-06,
- "loss": 5.1873,
- "step": 46
- },
- {
- "epoch": 0.2081949058693245,
- "grad_norm": 19.112852096557617,
- "learning_rate": 9.607359798384785e-07,
- "loss": 5.2322,
- "step": 47
- },
- {
- "epoch": 0.21262458471760798,
- "grad_norm": 19.956279754638672,
- "learning_rate": 4.277569313094809e-07,
- "loss": 5.3597,
- "step": 48
- },
- {
- "epoch": 0.21705426356589147,
- "grad_norm": 26.704853057861328,
- "learning_rate": 1.0705383806982606e-07,
- "loss": 4.1194,
- "step": 49
- },
- {
- "epoch": 0.22148394241417496,
- "grad_norm": 22.032495498657227,
- "learning_rate": 0.0,
- "loss": 3.1469,
- "step": 50
- },
- {
- "epoch": 0.22148394241417496,
- "eval_loss": 0.3135169446468353,
- "eval_runtime": 59.89,
- "eval_samples_per_second": 25.397,
- "eval_steps_per_second": 3.189,
- "step": 50
  }
  ],
  "logging_steps": 1,
@@ -404,12 +221,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 1.0205684108886016e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
 
  {
+ "best_metric": 0.4165385067462921,
+ "best_model_checkpoint": "miner_id_24/checkpoint-25",
+ "epoch": 0.11074197120708748,
  "eval_steps": 25,
+ "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 0.004429678848283499,
+ "grad_norm": 80.3771743774414,
  "learning_rate": 5e-05,
+ "loss": 186.8504,
  "step": 1
  },
  {
  "epoch": 0.004429678848283499,
+ "eval_loss": 11.864986419677734,
+ "eval_runtime": 126.0276,
+ "eval_samples_per_second": 12.069,
+ "eval_steps_per_second": 1.516,
  "step": 1
  },
  {
  "epoch": 0.008859357696566999,
+ "grad_norm": 83.50208282470703,
  "learning_rate": 0.0001,
+ "loss": 191.1957,
  "step": 2
  },
  {
  "epoch": 0.013289036544850499,
+ "grad_norm": 89.58778381347656,
  "learning_rate": 9.989294616193017e-05,
+ "loss": 186.5678,
  "step": 3
  },
  {
  "epoch": 0.017718715393133997,
+ "grad_norm": 129.3165740966797,
  "learning_rate": 9.957224306869053e-05,
+ "loss": 170.2373,
  "step": 4
  },
  {
  "epoch": 0.0221483942414175,
+ "grad_norm": 139.35647583007812,
  "learning_rate": 9.903926402016153e-05,
+ "loss": 141.4767,
  "step": 5
  },
  {
  "epoch": 0.026578073089700997,
+ "grad_norm": 118.65985107421875,
  "learning_rate": 9.829629131445342e-05,
+ "loss": 117.7925,
  "step": 6
  },
  {
  "epoch": 0.031007751937984496,
+ "grad_norm": 117.44580078125,
  "learning_rate": 9.73465064747553e-05,
+ "loss": 96.8972,
  "step": 7
  },
  {
  "epoch": 0.035437430786267994,
+ "grad_norm": 115.84791564941406,
  "learning_rate": 9.619397662556435e-05,
+ "loss": 77.6476,
  "step": 8
  },
  {
  "epoch": 0.03986710963455149,
+ "grad_norm": 137.33387756347656,
  "learning_rate": 9.484363707663442e-05,
+ "loss": 54.0985,
  "step": 9
  },
  {
  "epoch": 0.044296788482835,
+ "grad_norm": 146.39991760253906,
  "learning_rate": 9.330127018922194e-05,
+ "loss": 29.7652,
  "step": 10
  },
  {
  "epoch": 0.048726467331118496,
+ "grad_norm": 82.59661102294922,
  "learning_rate": 9.157348061512727e-05,
+ "loss": 14.8776,
  "step": 11
  },
  {
  "epoch": 0.053156146179401995,
+ "grad_norm": 104.44115447998047,
  "learning_rate": 8.966766701456177e-05,
+ "loss": 12.1848,
  "step": 12
  },
  {
  "epoch": 0.05758582502768549,
+ "grad_norm": 48.7734260559082,
  "learning_rate": 8.759199037394887e-05,
+ "loss": 9.9194,
  "step": 13
  },
  {
  "epoch": 0.06201550387596899,
+ "grad_norm": 46.114593505859375,
  "learning_rate": 8.535533905932738e-05,
+ "loss": 9.2692,
  "step": 14
  },
  {
  "epoch": 0.0664451827242525,
+ "grad_norm": 31.456167221069336,
  "learning_rate": 8.296729075500344e-05,
+ "loss": 8.4696,
  "step": 15
  },
  {
  "epoch": 0.07087486157253599,
+ "grad_norm": 69.29698944091797,
  "learning_rate": 8.043807145043604e-05,
+ "loss": 9.3606,
  "step": 16
  },
  {
  "epoch": 0.0753045404208195,
+ "grad_norm": 41.587501525878906,
  "learning_rate": 7.777851165098012e-05,
+ "loss": 8.9985,
  "step": 17
  },
  {
  "epoch": 0.07973421926910298,
+ "grad_norm": 32.481868743896484,
  "learning_rate": 7.500000000000001e-05,
+ "loss": 7.2757,
  "step": 18
  },
  {
  "epoch": 0.08416389811738649,
+ "grad_norm": 41.32707214355469,
  "learning_rate": 7.211443451095007e-05,
+ "loss": 6.7443,
  "step": 19
  },
  {
  "epoch": 0.08859357696567,
+ "grad_norm": 40.19483184814453,
  "learning_rate": 6.91341716182545e-05,
+ "loss": 7.1462,
  "step": 20
  },
  {
  "epoch": 0.09302325581395349,
+ "grad_norm": 83.13573455810547,
  "learning_rate": 6.607197326515808e-05,
+ "loss": 7.713,
  "step": 21
  },
  {
  "epoch": 0.09745293466223699,
+ "grad_norm": 55.984676361083984,
  "learning_rate": 6.294095225512603e-05,
+ "loss": 5.5221,
  "step": 22
  },
  {
  "epoch": 0.10188261351052048,
+ "grad_norm": 29.221961975097656,
  "learning_rate": 5.9754516100806423e-05,
+ "loss": 5.4771,
  "step": 23
  },
  {
  "epoch": 0.10631229235880399,
+ "grad_norm": 37.901004791259766,
  "learning_rate": 5.6526309611002594e-05,
+ "loss": 5.3382,
  "step": 24
  },
  {
  "epoch": 0.11074197120708748,
+ "grad_norm": 29.54227066040039,
  "learning_rate": 5.327015646150716e-05,
+ "loss": 4.7856,
  "step": 25
  },
  {
  "epoch": 0.11074197120708748,
+ "eval_loss": 0.4165385067462921,
+ "eval_runtime": 127.2616,
+ "eval_samples_per_second": 11.952,
+ "eval_steps_per_second": 1.501,
  "step": 25
  }
  ],
  "logging_steps": 1,
 
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
+ "should_training_stop": false
  },
  "attributes": {}
  }
  },
+ "total_flos": 5.102842054443008e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:119d39ddbc26862df059f1c46e9579f3f3c2231d69e1cd0cb71bc81e1445a849
  size 6776

  version https://git-lfs.github.com/spec/v1
+ oid sha256:a07cb10832bd38400bef46779443665d988b8ed02bd1ef6fd2f90efabecd39f1
  size 6776
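Taken together, these files form a standard transformers Trainer checkpoint: adapter weights, optimizer and scheduler state, one rng_state_N.pth per process (four here), the trainer state, and the pickled TrainingArguments. That means training can be resumed from this directory rather than restarted. A sketch under assumed setup code, since the base model, datasets, and full TrainingArguments are not part of this commit:

```python
# Hedged sketch: resume from this checkpoint with the transformers Trainer.
# The model and datasets are assumptions; only the checkpoint layout, the
# output_dir prefix, and the recorded train_batch_size come from this commit.
from transformers import Trainer, TrainingArguments

def build_trainer(model, train_dataset, eval_dataset):
    args = TrainingArguments(
        output_dir="miner_id_24",        # matches the best_model_checkpoint prefix
        per_device_train_batch_size=2,   # matches "train_batch_size": 2 above
    )
    return Trainer(model=model, args=args,
                   train_dataset=train_dataset, eval_dataset=eval_dataset)

# trainer = build_trainer(model, train_ds, eval_ds)   # assumed objects
# trainer.train(resume_from_checkpoint="last-checkpoint")
# Resuming reloads optimizer.pt, scheduler.pt, rng_state_*.pth and
# trainer_state.json, so the run continues from global_step 25.
```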