leixa committed
Commit: b62f265
1 Parent(s): ea0a5f8

Training in progress, step 310, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0cf25f76e8662756856fd1a097be6ff3870d9f310686bf41f15fe00f6e14dc4
+oid sha256:80811b878fbdd5efdd4f08e7b0bd3a4c9633c207c497739ecac63cd5a9eca669
 size 639691872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:742095389fc358571b77e198008ebd180a04e2384f21dd14cc648803058e1cfe
+oid sha256:e73ae7c4dc344be3442d36391cb4ba52cfd5d97e4bb25bc22948ccdfd1f87bdf
 size 325340244
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28e1effb06033458f08c521267ddbc73b4a5a3e148e528b2cfd2ce1d0d17a805
+oid sha256:876d99865a3847c750dc39ca4a3b8622be8f667a3f24db9221e48742faf0c0fe
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:de2e7670b3561000eee216684d0727bea9800d1c3f3b2422105732155595c43d
+oid sha256:68293b71e7e6e68b301413d0698f3727535763528fb8f34f0fe9f273150e3e61
 size 1064
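The four pointer diffs above are plain Git LFS pointer files: `oid sha256:` holds the SHA-256 digest of the tracked binary and `size` its byte count. As a minimal sketch (not part of this commit; the file paths are placeholders), a locally downloaded checkpoint file could be checked against its pointer like this:

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into a dict of its key/value lines."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_against_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check that blob_path matches the oid and size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# Hypothetical local paths; adjust to wherever the files actually live.
# print(verify_against_pointer("adapter_model.safetensors.pointer",
#                              "last-checkpoint/adapter_model.safetensors"))
```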
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.26369168356998,
+  "epoch": 2.5152129817444218,
   "eval_steps": 31,
-  "global_step": 279,
+  "global_step": 310,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -738,6 +738,84 @@
       "eval_samples_per_second": 15.481,
       "eval_steps_per_second": 1.935,
       "step": 279
+    },
+    {
+      "epoch": 2.288032454361055,
+      "grad_norm": 5.454842567443848,
+      "learning_rate": 1.4033009983067452e-05,
+      "loss": 1.0822,
+      "step": 282
+    },
+    {
+      "epoch": 2.31237322515213,
+      "grad_norm": 5.592954635620117,
+      "learning_rate": 1.3136133159493802e-05,
+      "loss": 0.9626,
+      "step": 285
+    },
+    {
+      "epoch": 2.336713995943205,
+      "grad_norm": 5.249399662017822,
+      "learning_rate": 1.22645209888614e-05,
+      "loss": 0.8161,
+      "step": 288
+    },
+    {
+      "epoch": 2.36105476673428,
+      "grad_norm": 5.653781890869141,
+      "learning_rate": 1.1418770830614013e-05,
+      "loss": 0.9639,
+      "step": 291
+    },
+    {
+      "epoch": 2.385395537525355,
+      "grad_norm": 5.4389119148254395,
+      "learning_rate": 1.0599462319663905e-05,
+      "loss": 0.868,
+      "step": 294
+    },
+    {
+      "epoch": 2.40973630831643,
+      "grad_norm": 5.680983543395996,
+      "learning_rate": 9.807156969139136e-06,
+      "loss": 1.1558,
+      "step": 297
+    },
+    {
+      "epoch": 2.4340770791075053,
+      "grad_norm": 8.884838104248047,
+      "learning_rate": 9.042397785550405e-06,
+      "loss": 0.9113,
+      "step": 300
+    },
+    {
+      "epoch": 2.4584178498985803,
+      "grad_norm": 6.362790107727051,
+      "learning_rate": 8.305708896641594e-06,
+      "loss": 1.0533,
+      "step": 303
+    },
+    {
+      "epoch": 2.4827586206896552,
+      "grad_norm": 6.722659111022949,
+      "learning_rate": 7.597595192178702e-06,
+      "loss": 0.945,
+      "step": 306
+    },
+    {
+      "epoch": 2.5070993914807302,
+      "grad_norm": 4.19068717956543,
+      "learning_rate": 6.918541977923709e-06,
+      "loss": 0.8369,
+      "step": 309
+    },
+    {
+      "epoch": 2.5152129817444218,
+      "eval_loss": 0.5427043437957764,
+      "eval_runtime": 13.4361,
+      "eval_samples_per_second": 15.481,
+      "eval_steps_per_second": 1.935,
+      "step": 310
     }
   ],
   "logging_steps": 3,
@@ -757,7 +835,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.706262291176489e+17,
+  "total_flos": 4.118715981197476e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null