DeepDream2045 committed (verified)
Commit 8fe706d · Parent: f2960ec

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -21,12 +21,12 @@
   "revision": null,
   "target_modules": [
     "k_proj",
-    "gate_proj",
-    "v_proj",
-    "down_proj",
     "q_proj",
     "o_proj",
-    "up_proj"
+    "gate_proj",
+    "down_proj",
+    "up_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ff964722b16dcf6066892ea2b2f5a1e7ed49803f147a3ae76528f150f428920
+oid sha256:60c768093d7e707272b88b8681161101482d91ff27d34707c835688cfb66a484
 size 1521616
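
The adapter weights and the remaining binary files below are stored through Git LFS, so the diff only shows pointer files: the sha256 oid changes while the byte size stays the same (1521616 bytes here). A small sketch of checking a pulled file against the oid in its pointer, using the new hash from this diff:

    # Minimal sketch: verify a locally pulled LFS object against the sha256 oid
    # recorded in its pointer file. Assumes the real blob (not the pointer) is on disk.
    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    expected = "60c768093d7e707272b88b8681161101482d91ff27d34707c835688cfb66a484"
    assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected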
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:be5dfd669994765dea55ab41fe132a2076884bc1bb67db1dd23426529c10b3ac
+oid sha256:6c83365661ca55181717555fd4f031c94bd9d3c0846378f0e848128d7cc2cd0b
 size 3108666
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:39243204b56d431003e6e773c1c2d1cdb50080063a598d8fee14c9a88682118d
+oid sha256:9404daff786212647adae37fe2da75d552625d004cba5f320a9ff699d3d0fb10
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2cf0609556c2eb01e8432f5b060fd30bf661a79eca117a47e8dd6bd7d407f244
+oid sha256:63012b6efda9d84cc4848e8b5ee45ce563a6507cd33a7c7a33274f0f09037e75
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e7f9a7f8ef7e623c3305d1581a2cd933bab8261a9f9a3a65bc6a1b41de4c9766
+oid sha256:0eeb243c384ce8fc44c40f20541b6588764eeedc39cf6878e3a85acf147afd44
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:04c83ecaac631b2612151829ef0e470dbcec9a75dcab8cc86455369697351366
+oid sha256:95e2056053203985537510bbda269726e6ee1a0a97ba52768c8acfddc716b54a
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
+oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,386 +1,203 @@
 {
-  "best_metric": 7.063704490661621,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.12886597938144329,
+  "best_metric": 7.125263214111328,
+  "best_model_checkpoint": "miner_id_24/checkpoint-25",
+  "epoch": 0.06443298969072164,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.002577319587628866,
-      "grad_norm": 8.84896469116211,
+      "grad_norm": 8.932095527648926,
       "learning_rate": 5e-05,
       "loss": 8.769,
       "step": 1
     },
     {
       "epoch": 0.002577319587628866,
-      "eval_loss": 7.704258441925049,
-      "eval_runtime": 13.4583,
-      "eval_samples_per_second": 194.229,
-      "eval_steps_per_second": 24.297,
+      "eval_loss": 7.7042412757873535,
+      "eval_runtime": 27.296,
+      "eval_samples_per_second": 95.765,
+      "eval_steps_per_second": 11.98,
       "step": 1
     },
     {
       "epoch": 0.005154639175257732,
-      "grad_norm": 5.125444412231445,
+      "grad_norm": 5.1865434646606445,
       "learning_rate": 0.0001,
       "loss": 8.594,
       "step": 2
     },
     {
       "epoch": 0.007731958762886598,
-      "grad_norm": 4.254770755767822,
+      "grad_norm": 4.275647163391113,
       "learning_rate": 9.989294616193017e-05,
-      "loss": 8.2293,
+      "loss": 8.2291,
       "step": 3
     },
     {
       "epoch": 0.010309278350515464,
-      "grad_norm": 3.761096239089966,
+      "grad_norm": 3.744438409805298,
       "learning_rate": 9.957224306869053e-05,
-      "loss": 8.3432,
+      "loss": 8.3425,
       "step": 4
     },
     {
       "epoch": 0.01288659793814433,
-      "grad_norm": 2.6418516635894775,
+      "grad_norm": 2.634883403778076,
       "learning_rate": 9.903926402016153e-05,
-      "loss": 7.6389,
+      "loss": 7.6405,
       "step": 5
     },
     {
       "epoch": 0.015463917525773196,
-      "grad_norm": 2.60322642326355,
+      "grad_norm": 2.6033172607421875,
       "learning_rate": 9.829629131445342e-05,
-      "loss": 7.5408,
+      "loss": 7.5428,
       "step": 6
     },
     {
       "epoch": 0.01804123711340206,
-      "grad_norm": 2.4060311317443848,
+      "grad_norm": 2.397871255874634,
       "learning_rate": 9.73465064747553e-05,
-      "loss": 7.5405,
+      "loss": 7.5443,
       "step": 7
     },
     {
       "epoch": 0.020618556701030927,
-      "grad_norm": 2.1694252490997314,
+      "grad_norm": 2.1589975357055664,
       "learning_rate": 9.619397662556435e-05,
-      "loss": 7.2925,
+      "loss": 7.2973,
       "step": 8
     },
     {
       "epoch": 0.023195876288659795,
-      "grad_norm": 1.9732533693313599,
+      "grad_norm": 1.9659868478775024,
       "learning_rate": 9.484363707663442e-05,
-      "loss": 7.2964,
+      "loss": 7.2995,
       "step": 9
     },
     {
       "epoch": 0.02577319587628866,
-      "grad_norm": 1.90111243724823,
+      "grad_norm": 1.8988659381866455,
       "learning_rate": 9.330127018922194e-05,
-      "loss": 6.9977,
+      "loss": 6.9992,
       "step": 10
     },
     {
       "epoch": 0.028350515463917526,
-      "grad_norm": 1.7233860492706299,
+      "grad_norm": 1.7255256175994873,
       "learning_rate": 9.157348061512727e-05,
-      "loss": 7.0945,
+      "loss": 7.0953,
       "step": 11
     },
     {
       "epoch": 0.030927835051546393,
-      "grad_norm": 1.980125069618225,
+      "grad_norm": 1.986493706703186,
       "learning_rate": 8.966766701456177e-05,
-      "loss": 7.056,
+      "loss": 7.0552,
       "step": 12
     },
     {
       "epoch": 0.03350515463917526,
-      "grad_norm": 3.8307547569274902,
+      "grad_norm": 3.806684732437134,
       "learning_rate": 8.759199037394887e-05,
-      "loss": 7.6226,
+      "loss": 7.6275,
       "step": 13
     },
     {
       "epoch": 0.03608247422680412,
-      "grad_norm": 4.206315994262695,
+      "grad_norm": 4.209149360656738,
       "learning_rate": 8.535533905932738e-05,
-      "loss": 7.7023,
+      "loss": 7.7068,
       "step": 14
     },
     {
       "epoch": 0.03865979381443299,
-      "grad_norm": 2.2115254402160645,
+      "grad_norm": 2.2306342124938965,
       "learning_rate": 8.296729075500344e-05,
-      "loss": 7.7303,
+      "loss": 7.7352,
       "step": 15
     },
     {
       "epoch": 0.041237113402061855,
-      "grad_norm": 1.6303458213806152,
+      "grad_norm": 1.6664167642593384,
       "learning_rate": 8.043807145043604e-05,
-      "loss": 7.5333,
+      "loss": 7.5366,
       "step": 16
     },
     {
       "epoch": 0.04381443298969072,
-      "grad_norm": 1.4174168109893799,
+      "grad_norm": 1.4569931030273438,
       "learning_rate": 7.777851165098012e-05,
-      "loss": 7.4892,
+      "loss": 7.4936,
       "step": 17
     },
     {
       "epoch": 0.04639175257731959,
-      "grad_norm": 1.1928077936172485,
+      "grad_norm": 1.2213176488876343,
       "learning_rate": 7.500000000000001e-05,
-      "loss": 7.4843,
+      "loss": 7.4877,
       "step": 18
     },
     {
       "epoch": 0.04896907216494845,
-      "grad_norm": 1.2452458143234253,
+      "grad_norm": 1.2630321979522705,
       "learning_rate": 7.211443451095007e-05,
-      "loss": 7.3061,
+      "loss": 7.3076,
       "step": 19
     },
     {
       "epoch": 0.05154639175257732,
-      "grad_norm": 1.2192394733428955,
+      "grad_norm": 1.1982954740524292,
       "learning_rate": 6.91341716182545e-05,
-      "loss": 7.1825,
+      "loss": 7.1835,
       "step": 20
     },
     {
       "epoch": 0.05412371134020619,
-      "grad_norm": 1.2151198387145996,
+      "grad_norm": 1.179440975189209,
       "learning_rate": 6.607197326515808e-05,
-      "loss": 7.0109,
+      "loss": 7.0129,
       "step": 21
     },
     {
       "epoch": 0.05670103092783505,
-      "grad_norm": 1.2521761655807495,
+      "grad_norm": 1.2165639400482178,
       "learning_rate": 6.294095225512603e-05,
-      "loss": 6.8825,
+      "loss": 6.8845,
       "step": 22
     },
     {
       "epoch": 0.059278350515463915,
-      "grad_norm": 1.183918833732605,
+      "grad_norm": 1.1670974493026733,
       "learning_rate": 5.9754516100806423e-05,
-      "loss": 6.9443,
+      "loss": 6.9454,
       "step": 23
     },
     {
       "epoch": 0.061855670103092786,
-      "grad_norm": 1.1090588569641113,
+      "grad_norm": 1.096975326538086,
       "learning_rate": 5.6526309611002594e-05,
-      "loss": 6.9259,
+      "loss": 6.9278,
       "step": 24
     },
     {
       "epoch": 0.06443298969072164,
-      "grad_norm": 1.5472322702407837,
+      "grad_norm": 1.54706609249115,
       "learning_rate": 5.327015646150716e-05,
-      "loss": 6.5826,
+      "loss": 6.5843,
       "step": 25
     },
     {
       "epoch": 0.06443298969072164,
-      "eval_loss": 7.122404098510742,
-      "eval_runtime": 13.3389,
-      "eval_samples_per_second": 195.968,
-      "eval_steps_per_second": 24.515,
+      "eval_loss": 7.125263214111328,
+      "eval_runtime": 26.0926,
+      "eval_samples_per_second": 100.182,
+      "eval_steps_per_second": 12.532,
       "step": 25
-    },
-    {
-      "epoch": 0.06701030927835051,
-      "grad_norm": 2.843515634536743,
-      "learning_rate": 5e-05,
-      "loss": 7.2493,
-      "step": 26
-    },
-    {
-      "epoch": 0.06958762886597938,
-      "grad_norm": 1.6025389432907104,
-      "learning_rate": 4.6729843538492847e-05,
-      "loss": 7.8308,
-      "step": 27
-    },
-    {
-      "epoch": 0.07216494845360824,
-      "grad_norm": 1.3303579092025757,
-      "learning_rate": 4.347369038899744e-05,
-      "loss": 7.5161,
-      "step": 28
-    },
-    {
-      "epoch": 0.07474226804123711,
-      "grad_norm": 1.0986202955245972,
-      "learning_rate": 4.0245483899193595e-05,
-      "loss": 7.4137,
-      "step": 29
-    },
-    {
-      "epoch": 0.07731958762886598,
-      "grad_norm": 0.9372117519378662,
-      "learning_rate": 3.705904774487396e-05,
-      "loss": 7.3211,
-      "step": 30
-    },
-    {
-      "epoch": 0.07989690721649484,
-      "grad_norm": 1.0359989404678345,
-      "learning_rate": 3.392802673484193e-05,
-      "loss": 7.4251,
-      "step": 31
-    },
-    {
-      "epoch": 0.08247422680412371,
-      "grad_norm": 1.050330400466919,
-      "learning_rate": 3.086582838174551e-05,
-      "loss": 6.9601,
-      "step": 32
-    },
-    {
-      "epoch": 0.08505154639175258,
-      "grad_norm": 1.0149939060211182,
-      "learning_rate": 2.7885565489049946e-05,
-      "loss": 7.0997,
-      "step": 33
-    },
-    {
-      "epoch": 0.08762886597938144,
-      "grad_norm": 0.8797879815101624,
-      "learning_rate": 2.500000000000001e-05,
-      "loss": 6.8525,
-      "step": 34
-    },
-    {
-      "epoch": 0.09020618556701031,
-      "grad_norm": 0.9121543169021606,
-      "learning_rate": 2.2221488349019903e-05,
-      "loss": 6.8513,
-      "step": 35
-    },
-    {
-      "epoch": 0.09278350515463918,
-      "grad_norm": 0.9668096303939819,
-      "learning_rate": 1.9561928549563968e-05,
-      "loss": 6.7363,
-      "step": 36
-    },
-    {
-      "epoch": 0.09536082474226804,
-      "grad_norm": 1.13383150100708,
-      "learning_rate": 1.703270924499656e-05,
-      "loss": 6.6522,
-      "step": 37
-    },
-    {
-      "epoch": 0.0979381443298969,
-      "grad_norm": 1.455060601234436,
-      "learning_rate": 1.4644660940672627e-05,
-      "loss": 7.743,
-      "step": 38
-    },
-    {
-      "epoch": 0.10051546391752578,
-      "grad_norm": 1.4939161539077759,
-      "learning_rate": 1.2408009626051137e-05,
-      "loss": 7.6736,
-      "step": 39
-    },
-    {
-      "epoch": 0.10309278350515463,
-      "grad_norm": 1.1243243217468262,
-      "learning_rate": 1.0332332985438248e-05,
-      "loss": 7.5141,
-      "step": 40
-    },
-    {
-      "epoch": 0.1056701030927835,
-      "grad_norm": 1.0334477424621582,
-      "learning_rate": 8.426519384872733e-06,
-      "loss": 7.4467,
-      "step": 41
-    },
-    {
-      "epoch": 0.10824742268041238,
-      "grad_norm": 0.850965142250061,
-      "learning_rate": 6.698729810778065e-06,
-      "loss": 7.4166,
-      "step": 42
-    },
-    {
-      "epoch": 0.11082474226804123,
-      "grad_norm": 0.9271207451820374,
-      "learning_rate": 5.156362923365588e-06,
-      "loss": 7.2256,
-      "step": 43
-    },
-    {
-      "epoch": 0.1134020618556701,
-      "grad_norm": 0.924728512763977,
-      "learning_rate": 3.8060233744356633e-06,
-      "loss": 7.1742,
-      "step": 44
-    },
-    {
-      "epoch": 0.11597938144329897,
-      "grad_norm": 0.8596798777580261,
-      "learning_rate": 2.653493525244721e-06,
-      "loss": 7.0014,
-      "step": 45
-    },
-    {
-      "epoch": 0.11855670103092783,
-      "grad_norm": 0.9014486074447632,
-      "learning_rate": 1.70370868554659e-06,
-      "loss": 6.9507,
-      "step": 46
-    },
-    {
-      "epoch": 0.1211340206185567,
-      "grad_norm": 0.8988224267959595,
-      "learning_rate": 9.607359798384785e-07,
-      "loss": 6.8728,
-      "step": 47
-    },
-    {
-      "epoch": 0.12371134020618557,
-      "grad_norm": 0.8983946442604065,
-      "learning_rate": 4.277569313094809e-07,
-      "loss": 6.6614,
-      "step": 48
-    },
-    {
-      "epoch": 0.12628865979381443,
-      "grad_norm": 0.9872623085975647,
-      "learning_rate": 1.0705383806982606e-07,
-      "loss": 6.8147,
-      "step": 49
-    },
-    {
-      "epoch": 0.12886597938144329,
-      "grad_norm": 1.2063250541687012,
-      "learning_rate": 0.0,
-      "loss": 6.6433,
-      "step": 50
-    },
-    {
-      "epoch": 0.12886597938144329,
-      "eval_loss": 7.063704490661621,
-      "eval_runtime": 13.226,
-      "eval_samples_per_second": 197.64,
-      "eval_steps_per_second": 24.724,
-      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -404,12 +221,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": true
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos": 239916767772672.0,
+  "total_flos": 119559610171392.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66bf9ec9ddbf12dd719515e13707e33922360dd50872490ee350c666d3a23578
+oid sha256:9aa8a78e217473ef4dde9abd0449db4a3aeeaeba985aec38100e9a44af330002
 size 6776