ardaspear committed on
Commit ff933da · verified · 1 Parent(s): aabb27b

Training in progress, step 306, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3c7e6d9652442fa642063424b204e43fa7e18c77cf6990911220075103af0998
+ oid sha256:071f7490848f8db3ad947d28a481e886b6fde2c39706ecda5845c4bc262c6edc
  size 72396376
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b739aced26e02b72f0f4376d4b8e542febe656c5c4972b6b6ed9a4685b22ac00
+ oid sha256:a5406715c6c25e67ddc39831f5deb857fe1d7b8610da87d62b53c698479ed0c0
  size 37134740
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ecc873a6b8b4dda97e1cfd6b2b58261bd5e2dd33f35d84c167b41efe0afbcd31
+ oid sha256:686cb4067140cd81e5c35c9c423deb1a7cd2b969cc1b59549029227a11d67047
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3520b6e9bfde48b403dd6f4096e526132e910f4d92bd802fb2e831d46f8ad41f
+ oid sha256:542eeb761eff9bd2c88163850a5018d7ed947bdab57ea917e6e376b6cb0c0259
  size 1064
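
The four binary files above (adapter weights, optimizer, RNG state, scheduler) are stored as Git LFS pointers, so only the sha256 `oid` and the `size` appear in their diffs. As a minimal illustrative sketch (not part of this commit; the file paths and helper names are hypothetical), such a pointer can be parsed and a downloaded blob checked against it like this:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields
    (version, oid, size), matching the format shown in the diffs above."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that a downloaded blob matches the pointer's sha256 oid and size."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[-1]   # strip the "sha256:" prefix
    data = Path(blob_path).read_bytes()
    return (hashlib.sha256(data).hexdigest() == expected_oid
            and len(data) == int(fields["size"]))
```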
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.07733864088711971,
+ "epoch": 0.08700597099800966,
  "eval_steps": 34,
- "global_step": 272,
+ "global_step": 306,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -709,6 +709,98 @@
  "eval_samples_per_second": 35.254,
  "eval_steps_per_second": 4.41,
  "step": 272
+ },
+ {
+ "epoch": 0.07762297412567529,
+ "grad_norm": 0.6064507961273193,
+ "learning_rate": 1.1980489393370938e-05,
+ "loss": 0.4708,
+ "step": 273
+ },
+ {
+ "epoch": 0.07847597384134206,
+ "grad_norm": 0.7535691261291504,
+ "learning_rate": 1.1468581814301717e-05,
+ "loss": 0.426,
+ "step": 276
+ },
+ {
+ "epoch": 0.07932897355700881,
+ "grad_norm": 0.5106021165847778,
+ "learning_rate": 1.096457620240298e-05,
+ "loss": 0.457,
+ "step": 279
+ },
+ {
+ "epoch": 0.08018197327267558,
+ "grad_norm": 0.7610855102539062,
+ "learning_rate": 1.0468766882759094e-05,
+ "loss": 0.3867,
+ "step": 282
+ },
+ {
+ "epoch": 0.08103497298834234,
+ "grad_norm": 0.57286137342453,
+ "learning_rate": 9.981443394050525e-06,
+ "loss": 0.4744,
+ "step": 285
+ },
+ {
+ "epoch": 0.0818879727040091,
+ "grad_norm": 0.6350270509719849,
+ "learning_rate": 9.502890319471491e-06,
+ "loss": 0.4382,
+ "step": 288
+ },
+ {
+ "epoch": 0.08274097241967586,
+ "grad_norm": 0.5878217816352844,
+ "learning_rate": 9.033387120541306e-06,
+ "loss": 0.4471,
+ "step": 291
+ },
+ {
+ "epoch": 0.08359397213534261,
+ "grad_norm": 0.4584065079689026,
+ "learning_rate": 8.573207973906735e-06,
+ "loss": 0.4223,
+ "step": 294
+ },
+ {
+ "epoch": 0.08444697185100938,
+ "grad_norm": 0.514761745929718,
+ "learning_rate": 8.1226216112306e-06,
+ "loss": 0.4439,
+ "step": 297
+ },
+ {
+ "epoch": 0.08529997156667614,
+ "grad_norm": 0.4604704678058624,
+ "learning_rate": 7.681891162260015e-06,
+ "loss": 0.4769,
+ "step": 300
+ },
+ {
+ "epoch": 0.08615297128234291,
+ "grad_norm": 0.5754848718643188,
+ "learning_rate": 7.251274001166044e-06,
+ "loss": 0.4719,
+ "step": 303
+ },
+ {
+ "epoch": 0.08700597099800966,
+ "grad_norm": 0.6474661827087402,
+ "learning_rate": 6.831021596244424e-06,
+ "loss": 0.3843,
+ "step": 306
+ },
+ {
+ "epoch": 0.08700597099800966,
+ "eval_loss": 0.4252224862575531,
+ "eval_runtime": 168.0333,
+ "eval_samples_per_second": 35.255,
+ "eval_steps_per_second": 4.41,
+ "step": 306
  }
  ],
  "logging_steps": 3,
@@ -728,7 +820,7 @@
  "attributes": {}
  }
  },
- "total_flos": 8.710400478963302e+16,
+ "total_flos": 9.799200538833715e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null