leixa committed
Commit adbc4ef · verified · 1 parent: c4526dd

Training in progress, step 126, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:121983274dff80eb5f14c755257724700d2d56e115cc300d0316f914d3cb7dee
+oid sha256:aadd79cb0ce90a3800ba84eddb5d6ed311601f6907fd8577721c4a48d11c900a
 size 201892112
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5e2df5660b367f6f036c08934da668f1270a0be7f47fa9b46e2cae168bcf555
+oid sha256:3369db71b47a6b28d8af870490f2fffae251fdfc9b2ec04d614352e035cb1c3a
 size 102864548
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7074cec2061417cedcea0f1edca671f4979b23051157d7011cc5e12489f9fe90
+oid sha256:9e19d88c5ee8945747e0c8f00ae8f30f12218b1bee6b5d59ad749865193f9388
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2df224011d0e75c4a97901f6c1b2930bba4bc3a9aa7c877e6c91e796bec6013f
+oid sha256:e00328545513b592b6bf311a002e087bb048e374276594cf37ee8782d28887fc
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.01760821716801174,
+  "epoch": 0.02641232575201761,
   "eval_steps": 42,
-  "global_step": 84,
+  "global_step": 126,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -227,6 +227,112 @@
       "eval_samples_per_second": 48.68,
       "eval_steps_per_second": 6.089,
       "step": 84
+    },
+    {
+      "epoch": 0.0182370820668693,
+      "grad_norm": 0.3466218411922455,
+      "learning_rate": 9.40297765928369e-05,
+      "loss": 1.181,
+      "step": 87
+    },
+    {
+      "epoch": 0.018865946965726865,
+      "grad_norm": 0.3219515085220337,
+      "learning_rate": 9.356593520616948e-05,
+      "loss": 1.1236,
+      "step": 90
+    },
+    {
+      "epoch": 0.019494811864584426,
+      "grad_norm": 0.35251516103744507,
+      "learning_rate": 9.308597683653975e-05,
+      "loss": 1.1749,
+      "step": 93
+    },
+    {
+      "epoch": 0.020123676763441987,
+      "grad_norm": 0.3366820514202118,
+      "learning_rate": 9.259007904196023e-05,
+      "loss": 1.1411,
+      "step": 96
+    },
+    {
+      "epoch": 0.020752541662299548,
+      "grad_norm": 0.3501774072647095,
+      "learning_rate": 9.207842527714767e-05,
+      "loss": 1.122,
+      "step": 99
+    },
+    {
+      "epoch": 0.021381406561157112,
+      "grad_norm": 0.3260366916656494,
+      "learning_rate": 9.155120482565521e-05,
+      "loss": 1.1032,
+      "step": 102
+    },
+    {
+      "epoch": 0.022010271460014674,
+      "grad_norm": 0.3130464255809784,
+      "learning_rate": 9.10086127298478e-05,
+      "loss": 1.1025,
+      "step": 105
+    },
+    {
+      "epoch": 0.022639136358872235,
+      "grad_norm": 0.3319336473941803,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 1.1279,
+      "step": 108
+    },
+    {
+      "epoch": 0.0232680012577298,
+      "grad_norm": 0.3683924973011017,
+      "learning_rate": 8.987812213377424e-05,
+      "loss": 1.1384,
+      "step": 111
+    },
+    {
+      "epoch": 0.02389686615658736,
+      "grad_norm": 0.3043256998062134,
+      "learning_rate": 8.929064185241213e-05,
+      "loss": 1.106,
+      "step": 114
+    },
+    {
+      "epoch": 0.02452573105544492,
+      "grad_norm": 0.3852984607219696,
+      "learning_rate": 8.868862620982534e-05,
+      "loss": 1.123,
+      "step": 117
+    },
+    {
+      "epoch": 0.025154595954302483,
+      "grad_norm": 0.36246711015701294,
+      "learning_rate": 8.807229791845673e-05,
+      "loss": 1.1248,
+      "step": 120
+    },
+    {
+      "epoch": 0.025783460853160047,
+      "grad_norm": 0.3592873215675354,
+      "learning_rate": 8.744188498563641e-05,
+      "loss": 1.0983,
+      "step": 123
+    },
+    {
+      "epoch": 0.02641232575201761,
+      "grad_norm": 0.3461982309818268,
+      "learning_rate": 8.679762062923175e-05,
+      "loss": 1.1211,
+      "step": 126
+    },
+    {
+      "epoch": 0.02641232575201761,
+      "eval_loss": 1.1246975660324097,
+      "eval_runtime": 165.051,
+      "eval_samples_per_second": 48.682,
+      "eval_steps_per_second": 6.089,
+      "step": 126
     }
   ],
   "logging_steps": 3,
@@ -246,7 +352,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.7918441960767488e+16,
+  "total_flos": 2.687766294115123e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null