leixa committed
Commit fe41a25
1 Parent(s): 3d13646

Training in progress, step 294, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:586fd04ed04d760d37f53f41e09542570cc554ebc66067372543dd3f2a963511
+oid sha256:addcb636d016f33a42f56c949361dc651112b44b1f3c17f4ea5d642ec2a29994
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28b2ceac6018dae6fdb4d9ef0f5c8fb480f407b18c678ebe83cbb5b4ab5cb5b7
-size 341314196
+oid sha256:cce5eb24b6d45b8f5d21cb933ee820f15283aae75fecf2daee4b387af774d9b7
+size 341314644
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29b9fe7e09a65746829ba365c53904d26ac9041437bdc46c4d9bdaea8de869a7
+oid sha256:a042cb67dc4a87fef40f9f59dff7d1a08bd46e0ddcbbd5cc8542c9d96b350b9a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ee19ddad9c4c375a1de2d74fb4c1cf5e15d36c1ed47a2cb80f7cb0fbacb3b29e
+oid sha256:318b114b83c26bcba11815378a88e0015bce044b0002c702e0a2627e1a1d1e56
 size 1064
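
Each of the binaries above is tracked with Git LFS, so the commit only changes the pointer file: the sha256 oid and byte size that identify the blob in LFS storage. A minimal verification sketch, assuming the files have already been pulled locally; the path and expected values below are copied from the adapter pointer in this commit, everything else is illustrative:

```python
# Sketch: check a downloaded checkpoint file against its Git LFS pointer.
# The expected oid/size are taken from the adapter_model.safetensors pointer
# in this commit; the local path is an assumption about where the repo lives.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

ckpt = Path("last-checkpoint/adapter_model.safetensors")
expected_oid = "addcb636d016f33a42f56c949361dc651112b44b1f3c17f4ea5d642ec2a29994"
expected_size = 671149168

assert ckpt.stat().st_size == expected_size, "size does not match LFS pointer"
assert sha256_of(ckpt) == expected_oid, "sha256 does not match LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")
```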
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.8208469055374593,
+  "epoch": 0.9576547231270358,
   "eval_steps": 42,
-  "global_step": 252,
+  "global_step": 294,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -651,6 +651,112 @@
       "eval_samples_per_second": 23.313,
       "eval_steps_per_second": 5.862,
       "step": 252
+    },
+    {
+      "epoch": 0.8306188925081434,
+      "grad_norm": 2.505326509475708,
+      "learning_rate": 5e-05,
+      "loss": 1.3629,
+      "step": 255
+    },
+    {
+      "epoch": 0.8403908794788274,
+      "grad_norm": 2.5464847087860107,
+      "learning_rate": 4.903834726061565e-05,
+      "loss": 1.2965,
+      "step": 258
+    },
+    {
+      "epoch": 0.8501628664495114,
+      "grad_norm": 2.423649787902832,
+      "learning_rate": 4.807705027948008e-05,
+      "loss": 1.1622,
+      "step": 261
+    },
+    {
+      "epoch": 0.8599348534201955,
+      "grad_norm": 2.3888437747955322,
+      "learning_rate": 4.711646468323129e-05,
+      "loss": 1.2045,
+      "step": 264
+    },
+    {
+      "epoch": 0.8697068403908795,
+      "grad_norm": 2.202131509780884,
+      "learning_rate": 4.6156945835334184e-05,
+      "loss": 1.2004,
+      "step": 267
+    },
+    {
+      "epoch": 0.8794788273615635,
+      "grad_norm": 2.20468807220459,
+      "learning_rate": 4.5198848704615914e-05,
+      "loss": 1.2365,
+      "step": 270
+    },
+    {
+      "epoch": 0.8892508143322475,
+      "grad_norm": 2.211899995803833,
+      "learning_rate": 4.424252773394704e-05,
+      "loss": 1.3499,
+      "step": 273
+    },
+    {
+      "epoch": 0.8990228013029316,
+      "grad_norm": 2.4418487548828125,
+      "learning_rate": 4.328833670911724e-05,
+      "loss": 1.2556,
+      "step": 276
+    },
+    {
+      "epoch": 0.9087947882736156,
+      "grad_norm": 2.3675637245178223,
+      "learning_rate": 4.23366286279542e-05,
+      "loss": 1.1791,
+      "step": 279
+    },
+    {
+      "epoch": 0.9185667752442996,
+      "grad_norm": 2.2540576457977295,
+      "learning_rate": 4.138775556973406e-05,
+      "loss": 1.135,
+      "step": 282
+    },
+    {
+      "epoch": 0.9283387622149837,
+      "grad_norm": 2.869304656982422,
+      "learning_rate": 4.04420685649314e-05,
+      "loss": 1.3404,
+      "step": 285
+    },
+    {
+      "epoch": 0.9381107491856677,
+      "grad_norm": 2.2384979724884033,
+      "learning_rate": 3.9499917465357534e-05,
+      "loss": 1.184,
+      "step": 288
+    },
+    {
+      "epoch": 0.9478827361563518,
+      "grad_norm": 2.6150434017181396,
+      "learning_rate": 3.856165081473474e-05,
+      "loss": 1.398,
+      "step": 291
+    },
+    {
+      "epoch": 0.9576547231270358,
+      "grad_norm": 2.1370980739593506,
+      "learning_rate": 3.762761571975429e-05,
+      "loss": 1.0922,
+      "step": 294
+    },
+    {
+      "epoch": 0.9576547231270358,
+      "eval_loss": 1.2961455583572388,
+      "eval_runtime": 22.1652,
+      "eval_samples_per_second": 23.325,
+      "eval_steps_per_second": 5.865,
+      "step": 294
     }
   ],
   "logging_steps": 3,
@@ -670,7 +776,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.9007271092197786e+17,
+  "total_flos": 2.2175149607564083e+17,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null