leixa committed
Commit 17ae89f
1 Parent(s): 3a870e3

Training in progress, step 336, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:addcb636d016f33a42f56c949361dc651112b44b1f3c17f4ea5d642ec2a29994
+oid sha256:acd89bab327e82c9d70eeec005795d508bf6adaacfc92ddd8f20f724469a6234
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cce5eb24b6d45b8f5d21cb933ee820f15283aae75fecf2daee4b387af774d9b7
+oid sha256:bbbe25b88e42f5ce81a38f5cf504c4dcaa7e05557dd4f368b68f622afa912480
 size 341314644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a042cb67dc4a87fef40f9f59dff7d1a08bd46e0ddcbbd5cc8542c9d96b350b9a
+oid sha256:cd03e0f639b7da9ed1de154fce4cfdfc9d7d7afd7ed92a06fd4ed6e19dbfb56b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:318b114b83c26bcba11815378a88e0015bce044b0002c702e0a2627e1a1d1e56
+oid sha256:10affc9ced28dcfaf0d40e3497a97c8e7416bd057324538f99a7e1756fd84408
 size 1064
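The four files above are stored as Git LFS pointers: each pointer records only the `version`, the `oid sha256:` digest, and the `size` of the real binary. A minimal sketch of checking a downloaded checkpoint file against its pointer follows; the file paths are placeholders, not paths taken from this commit.

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_payload(pointer_path: str, payload_path: str) -> bool:
    """Check a downloaded binary against the oid/size recorded in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_sha = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(payload_path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_sha)

# Hypothetical paths: the pointer as stored in the repo vs. the resolved binary.
print(verify_payload("adapter_model.pointer", "adapter_model.safetensors"))
```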
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.9576547231270358,
+  "epoch": 1.0944625407166124,
   "eval_steps": 42,
-  "global_step": 294,
+  "global_step": 336,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -757,6 +757,112 @@
       "eval_samples_per_second": 23.325,
       "eval_steps_per_second": 5.865,
       "step": 294
+    },
+    {
+      "epoch": 0.9674267100977199,
+      "grad_norm": 2.2964820861816406,
+      "learning_rate": 3.6698157721666246e-05,
+      "loss": 1.2606,
+      "step": 297
+    },
+    {
+      "epoch": 0.9771986970684039,
+      "grad_norm": 2.9193806648254395,
+      "learning_rate": 3.5773620668448384e-05,
+      "loss": 1.2477,
+      "step": 300
+    },
+    {
+      "epoch": 0.9869706840390879,
+      "grad_norm": 2.5898802280426025,
+      "learning_rate": 3.48543465876014e-05,
+      "loss": 1.2954,
+      "step": 303
+    },
+    {
+      "epoch": 0.996742671009772,
+      "grad_norm": 2.4339256286621094,
+      "learning_rate": 3.3940675559617724e-05,
+      "loss": 1.3758,
+      "step": 306
+    },
+    {
+      "epoch": 1.006514657980456,
+      "grad_norm": 2.0160953998565674,
+      "learning_rate": 3.303294559217063e-05,
+      "loss": 1.0917,
+      "step": 309
+    },
+    {
+      "epoch": 1.01628664495114,
+      "grad_norm": 2.0552120208740234,
+      "learning_rate": 3.213149249506997e-05,
+      "loss": 0.8775,
+      "step": 312
+    },
+    {
+      "epoch": 1.0260586319218241,
+      "grad_norm": 2.050333261489868,
+      "learning_rate": 3.12366497560313e-05,
+      "loss": 0.8579,
+      "step": 315
+    },
+    {
+      "epoch": 1.0358306188925082,
+      "grad_norm": 2.006868362426758,
+      "learning_rate": 3.0348748417303823e-05,
+      "loss": 0.8009,
+      "step": 318
+    },
+    {
+      "epoch": 1.0456026058631922,
+      "grad_norm": 2.201401710510254,
+      "learning_rate": 2.9468116953203107e-05,
+      "loss": 0.8379,
+      "step": 321
+    },
+    {
+      "epoch": 1.0553745928338762,
+      "grad_norm": 2.284052848815918,
+      "learning_rate": 2.8595081148593738e-05,
+      "loss": 0.7961,
+      "step": 324
+    },
+    {
+      "epoch": 1.0651465798045603,
+      "grad_norm": 2.104940414428711,
+      "learning_rate": 2.772996397836704e-05,
+      "loss": 0.8973,
+      "step": 327
+    },
+    {
+      "epoch": 1.0749185667752443,
+      "grad_norm": 2.4074976444244385,
+      "learning_rate": 2.687308548795825e-05,
+      "loss": 0.8669,
+      "step": 330
+    },
+    {
+      "epoch": 1.0846905537459284,
+      "grad_norm": 2.3930447101593018,
+      "learning_rate": 2.6024762674947313e-05,
+      "loss": 0.7428,
+      "step": 333
+    },
+    {
+      "epoch": 1.0944625407166124,
+      "grad_norm": 2.6202473640441895,
+      "learning_rate": 2.5185309371787513e-05,
+      "loss": 0.7865,
+      "step": 336
+    },
+    {
+      "epoch": 1.0944625407166124,
+      "eval_loss": 1.344246745109558,
+      "eval_runtime": 22.1704,
+      "eval_samples_per_second": 23.319,
+      "eval_steps_per_second": 5.864,
+      "step": 336
     }
   ],
   "logging_steps": 3,
@@ -776,7 +882,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.2175149607564083e+17,
+  "total_flos": 2.53383140179968e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null