leixa committed
Commit dc36405 · verified · 1 Parent(s): 6b8e9f4

Training in progress, step 336, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcc7c1bb49029978b38479c4be7f1ef61c6a2f919bd26fa335b25ed148c05645
+oid sha256:abb56d38a89e7b62a7798d623f95ec7bceb0800b1e0e250ab37ccd5fc69a012a
 size 201892112
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc3812fdb507dc890ad3dfeb461b18c142361e92db1a8d19d6b3dbcdb50cb254
+oid sha256:e53496acb3ee81f29db8e8dd644eb76ba4b47ccf9f2ab343ac4a3ae57576051b
 size 102864868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1fd586e160e28252df6d0804518bcfa9955584c079b4b4506e38e98dfc07096c
+oid sha256:207840526dea4fa9627489a495e4077d47d6217bc1c29fded5fdb4cce6503140
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:318b114b83c26bcba11815378a88e0015bce044b0002c702e0a2627e1a1d1e56
+oid sha256:10affc9ced28dcfaf0d40e3497a97c8e7416bd057324538f99a7e1756fd84408
 size 1064
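
All four binaries above are tracked with Git LFS, so the commit only rewrites the pointer files: the `oid sha256:` and `size` lines describe the blob held in LFS storage. A minimal sketch of checking a downloaded blob against its pointer, assuming hypothetical local paths (the pointer text is exactly what appears in the hunks above):

```python
import hashlib
from pathlib import Path

def lfs_pointer_matches(pointer_path: str, blob_path: str) -> bool:
    """Compare a Git LFS pointer's oid/size against a local blob (illustrative paths)."""
    # Pointer file lines look like "oid sha256:<hex>" and "size <bytes>".
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().strip().splitlines()
    )
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    # Hash the blob in chunks to avoid loading large checkpoints into memory.
    sha = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Hypothetical usage: pointer as stored in git vs. the resolved blob on disk.
# lfs_pointer_matches("last-checkpoint/adapter_model.safetensors",
#                     "/tmp/adapter_model.safetensors")
```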
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.061628760088041086,
+  "epoch": 0.07043286867204696,
   "eval_steps": 42,
-  "global_step": 294,
+  "global_step": 336,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -757,6 +757,112 @@
       "eval_samples_per_second": 48.701,
       "eval_steps_per_second": 6.091,
       "step": 294
+    },
+    {
+      "epoch": 0.06225762498689865,
+      "grad_norm": 0.36433523893356323,
+      "learning_rate": 3.6698157721666246e-05,
+      "loss": 1.0619,
+      "step": 297
+    },
+    {
+      "epoch": 0.06288648988575621,
+      "grad_norm": 0.382252961397171,
+      "learning_rate": 3.5773620668448384e-05,
+      "loss": 1.0544,
+      "step": 300
+    },
+    {
+      "epoch": 0.06351535478461377,
+      "grad_norm": 0.4051714539527893,
+      "learning_rate": 3.48543465876014e-05,
+      "loss": 0.9961,
+      "step": 303
+    },
+    {
+      "epoch": 0.06414421968347134,
+      "grad_norm": 0.34751641750335693,
+      "learning_rate": 3.3940675559617724e-05,
+      "loss": 1.0396,
+      "step": 306
+    },
+    {
+      "epoch": 0.0647730845823289,
+      "grad_norm": 0.38340121507644653,
+      "learning_rate": 3.303294559217063e-05,
+      "loss": 1.0916,
+      "step": 309
+    },
+    {
+      "epoch": 0.06540194948118645,
+      "grad_norm": 0.41020599007606506,
+      "learning_rate": 3.213149249506997e-05,
+      "loss": 1.0738,
+      "step": 312
+    },
+    {
+      "epoch": 0.06603081438004402,
+      "grad_norm": 0.3864920735359192,
+      "learning_rate": 3.12366497560313e-05,
+      "loss": 1.0936,
+      "step": 315
+    },
+    {
+      "epoch": 0.06665967927890158,
+      "grad_norm": 0.35983094573020935,
+      "learning_rate": 3.0348748417303823e-05,
+      "loss": 1.1001,
+      "step": 318
+    },
+    {
+      "epoch": 0.06728854417775915,
+      "grad_norm": 0.39053815603256226,
+      "learning_rate": 2.9468116953203107e-05,
+      "loss": 1.0366,
+      "step": 321
+    },
+    {
+      "epoch": 0.06791740907661671,
+      "grad_norm": 0.3519478142261505,
+      "learning_rate": 2.8595081148593738e-05,
+      "loss": 1.044,
+      "step": 324
+    },
+    {
+      "epoch": 0.06854627397547428,
+      "grad_norm": 0.37353023886680603,
+      "learning_rate": 2.772996397836704e-05,
+      "loss": 1.0449,
+      "step": 327
+    },
+    {
+      "epoch": 0.06917513887433183,
+      "grad_norm": 0.357658714056015,
+      "learning_rate": 2.687308548795825e-05,
+      "loss": 1.0519,
+      "step": 330
+    },
+    {
+      "epoch": 0.06980400377318939,
+      "grad_norm": 0.40195825695991516,
+      "learning_rate": 2.6024762674947313e-05,
+      "loss": 1.0915,
+      "step": 333
+    },
+    {
+      "epoch": 0.07043286867204696,
+      "grad_norm": 0.3929975628852844,
+      "learning_rate": 2.5185309371787513e-05,
+      "loss": 1.0375,
+      "step": 336
+    },
+    {
+      "epoch": 0.07043286867204696,
+      "eval_loss": 1.0703336000442505,
+      "eval_runtime": 165.0152,
+      "eval_samples_per_second": 48.692,
+      "eval_steps_per_second": 6.09,
+      "step": 336
     }
   ],
   "logging_steps": 3,
@@ -776,7 +882,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.271454686268621e+16,
+  "total_flos": 7.167376784306995e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null