ardaspear committed
Commit 75ccaae (verified)
1 Parent(s): 00f9924

Training in progress, step 340, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:071f7490848f8db3ad947d28a481e886b6fde2c39706ecda5845c4bc262c6edc
+oid sha256:b35927f06b80d26e3132911d1799d75251e7e6f71fcf0af72ef64fef8170870a
 size 72396376
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5406715c6c25e67ddc39831f5deb857fe1d7b8610da87d62b53c698479ed0c0
+oid sha256:dc56f5223c6ddbd7f71b1177c33bd87cfc42ca847c24b1ed458640671a0fbb09
 size 37134740
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:686cb4067140cd81e5c35c9c423deb1a7cd2b969cc1b59549029227a11d67047
+oid sha256:e070c1a19e2f7fe8d8783d14cf6c4980c084d0861be459da7f60717ee83ba20f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:542eeb761eff9bd2c88163850a5018d7ed947bdab57ea917e6e376b6cb0c0259
+oid sha256:e6999f9aad8d44fbf7db1d80d56ad86630abb8e28a7187e80ed24f8546462146
 size 1064
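
The four binary checkpoint files above are stored through Git LFS, so the diff only touches their pointer files (spec version, sha256 oid, byte size). Below is a minimal sketch of how one might verify a locally downloaded blob against such a pointer; the file paths are hypothetical and only the standard library is used.

import hashlib

def parse_lfs_pointer(pointer_path):
    # Read a Git LFS pointer file ("key value" per line) into a dict.
    fields = {}
    with open(pointer_path, "r") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_checkpoint(pointer_path, blob_path):
    # Compare the downloaded blob against the pointer's sha256 oid and size.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    actual_size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            actual_size += len(chunk)

    return digest.hexdigest() == expected_oid and actual_size == expected_size

# Hypothetical usage: pointer text saved locally vs. the resolved binary file.
# print(verify_checkpoint("adapter_model.safetensors.pointer",
#                         "last-checkpoint/adapter_model.safetensors"))
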
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08700597099800966,
+  "epoch": 0.09667330110889963,
   "eval_steps": 34,
-  "global_step": 306,
+  "global_step": 340,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -801,6 +801,91 @@
       "eval_samples_per_second": 35.255,
       "eval_steps_per_second": 4.41,
       "step": 306
+    },
+    {
+      "epoch": 0.08785897071367643,
+      "grad_norm": 0.5052304863929749,
+      "learning_rate": 6.421379363065142e-06,
+      "loss": 0.3779,
+      "step": 309
+    },
+    {
+      "epoch": 0.08871197042934319,
+      "grad_norm": 0.5623005628585815,
+      "learning_rate": 6.022586521156715e-06,
+      "loss": 0.5177,
+      "step": 312
+    },
+    {
+      "epoch": 0.08956497014500996,
+      "grad_norm": 0.615669846534729,
+      "learning_rate": 5.634875954308638e-06,
+      "loss": 0.3902,
+      "step": 315
+    },
+    {
+      "epoch": 0.09041796986067671,
+      "grad_norm": 0.5363687872886658,
+      "learning_rate": 5.258474074573877e-06,
+      "loss": 0.3245,
+      "step": 318
+    },
+    {
+      "epoch": 0.09127096957634348,
+      "grad_norm": 0.45147329568862915,
+      "learning_rate": 4.893600690050579e-06,
+      "loss": 0.4372,
+      "step": 321
+    },
+    {
+      "epoch": 0.09212396929201024,
+      "grad_norm": 0.42790791392326355,
+      "learning_rate": 4.540468876520323e-06,
+      "loss": 0.4359,
+      "step": 324
+    },
+    {
+      "epoch": 0.09297696900767699,
+      "grad_norm": 0.512692928314209,
+      "learning_rate": 4.199284853017896e-06,
+      "loss": 0.41,
+      "step": 327
+    },
+    {
+      "epoch": 0.09382996872334376,
+      "grad_norm": 0.5399787425994873,
+      "learning_rate": 3.8702478614051355e-06,
+      "loss": 0.4455,
+      "step": 330
+    },
+    {
+      "epoch": 0.09468296843901051,
+      "grad_norm": 0.5106746554374695,
+      "learning_rate": 3.5535500500193357e-06,
+      "loss": 0.4301,
+      "step": 333
+    },
+    {
+      "epoch": 0.09553596815467728,
+      "grad_norm": 0.6958709955215454,
+      "learning_rate": 3.249376361464021e-06,
+      "loss": 0.3602,
+      "step": 336
+    },
+    {
+      "epoch": 0.09638896787034404,
+      "grad_norm": 0.5049157738685608,
+      "learning_rate": 2.957904424607652e-06,
+      "loss": 0.35,
+      "step": 339
+    },
+    {
+      "epoch": 0.09667330110889963,
+      "eval_loss": 0.4159228503704071,
+      "eval_runtime": 167.9747,
+      "eval_samples_per_second": 35.267,
+      "eval_steps_per_second": 4.411,
+      "step": 340
     }
   ],
   "logging_steps": 3,
@@ -820,7 +905,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.799200538833715e+16,
+  "total_flos": 1.0888000598704128e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null