Nexspear committed on
Commit
c27fc3e
·
verified ·
1 Parent(s): 0714daa

Training in progress, step 294, checkpoint

Browse files
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dd129a1bd9e3d8ee5a1e5db9d958aed101562fb77fbab566965c550cf4cf930d
3
  size 100966336
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:930e0e949e58701597d874e209b7344a4d706b043639fc115daabd8ebbeab0fe
3
  size 100966336
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f8c1d468e59b49e68a6e930bd1f26d86439dccd611e450f66da506396b180023
3
- size 51613348
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:709f78d5e19655cbd94a1c21408c442f8f92f8b4fdef1ad9958db57bcf19cf98
3
+ size 51613668
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f261dee394c5079254fc75fbcc32a19145fa5f61c502d9043aac58db31a15775
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e926e6553b71955bfbf13bceb3502fb7e3ab2b01b876537fd15965365d03680
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:fde5cce913d9c5501edc422466fae8378d9b13dc57f22397ec7fc38f7801fc24
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0316ab07671fffcd24f1d6dedeb5b41d6e5808ab64506af6cae3d569237843af
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.037751395078836,
5
  "eval_steps": 42,
6
- "global_step": 252,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -651,6 +651,112 @@
651
  "eval_samples_per_second": 35.926,
652
  "eval_steps_per_second": 4.493,
653
  "step": 252
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
654
  }
655
  ],
656
  "logging_steps": 3,
@@ -670,7 +776,7 @@
670
  "attributes": {}
671
  }
672
  },
673
- "total_flos": 8.058094740937114e+16,
674
  "train_batch_size": 8,
675
  "trial_name": null,
676
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.044043294258642,
5
  "eval_steps": 42,
6
+ "global_step": 294,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
651
  "eval_samples_per_second": 35.926,
652
  "eval_steps_per_second": 4.493,
653
  "step": 252
654
+ },
655
+ {
656
+ "epoch": 0.038200816448822145,
657
+ "grad_norm": 0.19113826751708984,
658
+ "learning_rate": 2.5e-05,
659
+ "loss": 1.0117,
660
+ "step": 255
661
+ },
662
+ {
663
+ "epoch": 0.03865023781880828,
664
+ "grad_norm": 0.17212168872356415,
665
+ "learning_rate": 2.4519173630307825e-05,
666
+ "loss": 1.136,
667
+ "step": 258
668
+ },
669
+ {
670
+ "epoch": 0.039099659188794426,
671
+ "grad_norm": 0.16413848102092743,
672
+ "learning_rate": 2.403852513974004e-05,
673
+ "loss": 1.0806,
674
+ "step": 261
675
+ },
676
+ {
677
+ "epoch": 0.03954908055878057,
678
+ "grad_norm": 0.15564818680286407,
679
+ "learning_rate": 2.3558232341615643e-05,
680
+ "loss": 1.0233,
681
+ "step": 264
682
+ },
683
+ {
684
+ "epoch": 0.039998501928766715,
685
+ "grad_norm": 0.15014681220054626,
686
+ "learning_rate": 2.3078472917667092e-05,
687
+ "loss": 1.09,
688
+ "step": 267
689
+ },
690
+ {
691
+ "epoch": 0.04044792329875285,
692
+ "grad_norm": 0.1784486323595047,
693
+ "learning_rate": 2.2599424352307957e-05,
694
+ "loss": 1.0867,
695
+ "step": 270
696
+ },
697
+ {
698
+ "epoch": 0.040897344668739,
699
+ "grad_norm": 0.1629609763622284,
700
+ "learning_rate": 2.212126386697352e-05,
701
+ "loss": 1.0916,
702
+ "step": 273
703
+ },
704
+ {
705
+ "epoch": 0.04134676603872514,
706
+ "grad_norm": 0.1595894992351532,
707
+ "learning_rate": 2.164416835455862e-05,
708
+ "loss": 1.0537,
709
+ "step": 276
710
+ },
711
+ {
712
+ "epoch": 0.041796187408711286,
713
+ "grad_norm": 0.16272102296352386,
714
+ "learning_rate": 2.11683143139771e-05,
715
+ "loss": 1.1907,
716
+ "step": 279
717
+ },
718
+ {
719
+ "epoch": 0.04224560877869743,
720
+ "grad_norm": 0.15896858274936676,
721
+ "learning_rate": 2.069387778486703e-05,
722
+ "loss": 1.0492,
723
+ "step": 282
724
+ },
725
+ {
726
+ "epoch": 0.04269503014868357,
727
+ "grad_norm": 0.18164744973182678,
728
+ "learning_rate": 2.02210342824657e-05,
729
+ "loss": 1.064,
730
+ "step": 285
731
+ },
732
+ {
733
+ "epoch": 0.04314445151866971,
734
+ "grad_norm": 0.17921298742294312,
735
+ "learning_rate": 1.9749958732678767e-05,
736
+ "loss": 1.1456,
737
+ "step": 288
738
+ },
739
+ {
740
+ "epoch": 0.043593872888655856,
741
+ "grad_norm": 0.14521931111812592,
742
+ "learning_rate": 1.928082540736737e-05,
743
+ "loss": 1.1073,
744
+ "step": 291
745
+ },
746
+ {
747
+ "epoch": 0.044043294258642,
748
+ "grad_norm": 0.15362174808979034,
749
+ "learning_rate": 1.8813807859877147e-05,
750
+ "loss": 1.0171,
751
+ "step": 294
752
+ },
753
+ {
754
+ "epoch": 0.044043294258642,
755
+ "eval_loss": 1.0923734903335571,
756
+ "eval_runtime": 312.595,
757
+ "eval_samples_per_second": 35.967,
758
+ "eval_steps_per_second": 4.498,
759
+ "step": 294
760
  }
761
  ],
762
  "logging_steps": 3,
 
776
  "attributes": {}
777
  }
778
  },
779
+ "total_flos": 9.381143263366349e+16,
780
  "train_batch_size": 8,
781
  "trial_name": null,
782
  "trial_params": null