leixa committed
Commit 97c1860
Parent: bf2b3a6

Training in progress, step 168, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f5849698230477377bc43d99927ea0b750ff86b3da0cd6f06982ce816ba6dbcd
+ oid sha256:8ac95b09d8525db8031f8dbc1d8e3ed07d7e85a7df4228ef043cdaf3ee203236
  size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c427c14ecb51f7b09cfad05ca6b48fae3ca8b3a89561d714f3b547288cbe22e
+ oid sha256:aa4029985d53a5443c979e69946d3dbe088ec242f46eb7a6202b94cf53f44199
  size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b4c2513b077ee0a9d0d20b1ed96188cfd8d5c30d65f28f3d6dee11f2dc6d8ca8
+ oid sha256:eb35f7933787785ef92acdac24ff917ef72043d42461a8f1e3ee05f375cdf20a
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e00328545513b592b6bf311a002e087bb048e374276594cf37ee8782d28887fc
+ oid sha256:2f516a6e4e8a8eba956b80cb2ea416b9fd98f0dec12d7d9d9a36274d0eef4a63
  size 1064
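
The four binary files above are tracked with Git LFS, so each diff only touches a three-line pointer (`version`, `oid sha256:`, `size`); between steps 126 and 168 only the hashes change while the sizes stay fixed. As a minimal sketch (not part of this commit; the pointer/blob paths and helper names are hypothetical), a downloaded blob could be checked against such a pointer like this:

```python
import hashlib

def read_lfs_pointer(pointer_path):
    """Parse a Git LFS pointer file into a {key: value} dict."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify(pointer_path, blob_path):
    """Check a downloaded blob against the oid/size recorded in the pointer."""
    fields = read_lfs_pointer(pointer_path)
    expected = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix

    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)

    return digest.hexdigest() == expected and size == int(fields["size"])

# Hypothetical paths: pointer as stored in git, blob as fetched via LFS.
print(verify("adapter_model.safetensors.pointer", "adapter_model.safetensors"))
```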
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.41042345276872966,
+   "epoch": 0.5472312703583062,
    "eval_steps": 42,
-   "global_step": 126,
+   "global_step": 168,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -333,6 +333,112 @@
        "eval_samples_per_second": 23.321,
        "eval_steps_per_second": 5.864,
        "step": 126
+     },
+     {
+       "epoch": 0.4201954397394137,
+       "grad_norm": 2.6070010662078857,
+       "learning_rate": 8.613974319136958e-05,
+       "loss": 1.313,
+       "step": 129
+     },
+     {
+       "epoch": 0.42996742671009774,
+       "grad_norm": 2.7287192344665527,
+       "learning_rate": 8.54684960502629e-05,
+       "loss": 1.4142,
+       "step": 132
+     },
+     {
+       "epoch": 0.43973941368078173,
+       "grad_norm": 2.9047019481658936,
+       "learning_rate": 8.478412753017433e-05,
+       "loss": 1.4128,
+       "step": 135
+     },
+     {
+       "epoch": 0.4495114006514658,
+       "grad_norm": 3.378373622894287,
+       "learning_rate": 8.408689080954998e-05,
+       "loss": 1.5116,
+       "step": 138
+     },
+     {
+       "epoch": 0.4592833876221498,
+       "grad_norm": 2.828160285949707,
+       "learning_rate": 8.33770438273574e-05,
+       "loss": 1.3085,
+       "step": 141
+     },
+     {
+       "epoch": 0.46905537459283386,
+       "grad_norm": 2.577202081680298,
+       "learning_rate": 8.265484918766243e-05,
+       "loss": 1.3892,
+       "step": 144
+     },
+     {
+       "epoch": 0.4788273615635179,
+       "grad_norm": 2.659550428390503,
+       "learning_rate": 8.192057406248028e-05,
+       "loss": 1.4546,
+       "step": 147
+     },
+     {
+       "epoch": 0.48859934853420195,
+       "grad_norm": 3.5885229110717773,
+       "learning_rate": 8.117449009293668e-05,
+       "loss": 1.2797,
+       "step": 150
+     },
+     {
+       "epoch": 0.498371335504886,
+       "grad_norm": 2.8732728958129883,
+       "learning_rate": 8.041687328877567e-05,
+       "loss": 1.405,
+       "step": 153
+     },
+     {
+       "epoch": 0.50814332247557,
+       "grad_norm": 2.7093594074249268,
+       "learning_rate": 7.964800392625129e-05,
+       "loss": 1.4728,
+       "step": 156
+     },
+     {
+       "epoch": 0.5179153094462541,
+       "grad_norm": 2.460803747177124,
+       "learning_rate": 7.886816644444098e-05,
+       "loss": 1.5428,
+       "step": 159
+     },
+     {
+       "epoch": 0.5276872964169381,
+       "grad_norm": 2.5754201412200928,
+       "learning_rate": 7.807764934001874e-05,
+       "loss": 1.332,
+       "step": 162
+     },
+     {
+       "epoch": 0.5374592833876222,
+       "grad_norm": 2.630692481994629,
+       "learning_rate": 7.727674506052743e-05,
+       "loss": 1.4519,
+       "step": 165
+     },
+     {
+       "epoch": 0.5472312703583062,
+       "grad_norm": 3.039989948272705,
+       "learning_rate": 7.646574989618938e-05,
+       "loss": 1.4669,
+       "step": 168
+     },
+     {
+       "epoch": 0.5472312703583062,
+       "eval_loss": 1.471885085105896,
+       "eval_runtime": 22.1606,
+       "eval_samples_per_second": 23.33,
+       "eval_steps_per_second": 5.866,
+       "step": 168
      }
    ],
    "logging_steps": 3,
@@ -352,7 +458,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 9.503635546098893e+16,
+   "total_flos": 1.267151406146519e+17,
    "train_batch_size": 4,
    "trial_name": null,
    "trial_params": null