dimasik87 committed
Commit 3efed0c
1 Parent(s): 8b30d82

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1e9746171f832097d6457da6ed9f1a576ac4bbd4dc16cf3b96bf4e60080ea6d9
+oid sha256:5658e6f9f72a104ec79bf247cf48d21ffa113045ecc8e93e97a0a78199528659
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:947c49409410dbea009305e6bea316bcc90666497f85365c37d5f7a627cc9bed
+oid sha256:fdfd20e9731beab18add4f46fe756c7c44388e1c1416c2f9f5b0025e93d526ef
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:927e08927f37ea4af9a349e532e3fe8772a1f852b703e9692faae370d8e4f54e
+oid sha256:fe749a398a2b18d0185683c03f20fcbdfcdfe8a0cbd9b267b7d328c877414214
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a946ba862c9290cf73cbdcd016309ecac1eb5b3695453bc8a42a8a259a1c695
+oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
 size 1064
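
Each binary above is tracked with Git LFS, so only the three-line pointer file changes in this commit (spec version, sha256 oid, byte size); the new checkpoint tensors themselves live in LFS storage. A minimal sketch, in Python with standard-library modules only, of checking a downloaded object against such a pointer; the file names are hypothetical placeholders, not part of this repository:

# Verify a Git LFS pointer (version / oid sha256:<hex> / size <bytes>) against a local file.
import hashlib
from pathlib import Path

def parse_pointer(pointer_path):
    # Each pointer line is "<key> <value>"; collect the keys and values into a dict.
    fields = dict(line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines() if line)
    return fields["oid"].split(":", 1)[1].strip(), int(fields["size"])

def matches_pointer(object_path, pointer_path):
    # Compare both the byte size and the sha256 digest against the pointer.
    expected_oid, expected_size = parse_pointer(pointer_path)
    data = Path(object_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Example: should print True once the real 167832240-byte adapter sits next to its pointer file.
print(matches_pointer("adapter_model.safetensors", "adapter_model.pointer"))
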
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.12920592193808883,
+  "epoch": 0.13458950201884254,
   "eval_steps": 4,
-  "global_step": 48,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -447,6 +447,20 @@
       "eval_samples_per_second": 7.807,
       "eval_steps_per_second": 7.807,
       "step": 48
+    },
+    {
+      "epoch": 0.13189771197846567,
+      "grad_norm": 1.1134506464004517,
+      "learning_rate": 3.0826662668720364e-07,
+      "loss": 0.2964,
+      "step": 49
+    },
+    {
+      "epoch": 0.13458950201884254,
+      "grad_norm": 0.8477309346199036,
+      "learning_rate": 0.0,
+      "loss": 0.2486,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -461,12 +475,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.947454119346176e+16,
+  "total_flos": 2.021642847702221e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null