RoyJoy committed
Commit: c7e5cfd
Parent: f5d3ded

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5609fc529a27d9029b91cbc1b6497a9f6481f3a1c180872608d28578468175b1
+oid sha256:03c4489852ce3f749c9d8e603e867176bba85bcb212dba0dc425a6ffed14990f
 size 323014168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:797695502e8e2360d028bb82c1f225c0abe7e4cb6deeb4f5e1c5dda05381c7a6
+oid sha256:b08a0b8daac4a5d1df32b47a81d859d72b5807ce4fdb7ad924757a5ca784bb86
 size 646253418
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:03c1430c9d12a1d23e8e302b7d18d4a4a9c4a6d9b8a704442e85759e1c8af6c1
+oid sha256:14248276d72f4dd98c9849d8d54a4a93183633b55757a76e949b9c60ec0872ea
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8947b26818f2b48cdab60ac49e6850ac5a45983ddbc2e966334b654b665b5c81
+oid sha256:f5dd4aef7f4fbae7c037e914c7735463bbaa5c3acfaeb1ac32b922a442a8cc90
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c02a3b615456816eafc7b2eae64c142b795e81886f3f5a6ae469c9925735b4b2
+oid sha256:379f3d49970108e5b914aca890b8b88dd6aa26961d9f0c9a59d96989acdb034f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5086913c80ff6b019645ad90ab852cd887c7fcab5d8728794f65402e55991b67
+oid sha256:c0a46fa0dbae053bdcec3e3a5d9494dcbf84811793f22e6af38007809a18de18
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:344fc18df9c84f214bc47e69d1bbde70d66cf3ec6caeac9cd529dfb5814c4b9a
+oid sha256:051dee7dfbeecb34b46e8409ffafec324501f465585234624669bc8c9e863ae4
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.6713114976882935,
-  "best_model_checkpoint": "miner_id_24/checkpoint-75",
-  "epoch": 0.9149828440716736,
+  "best_metric": 1.6244957447052002,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 1.2222645825390774,
   "eval_steps": 25,
-  "global_step": 75,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -564,6 +564,189 @@
       "eval_samples_per_second": 35.12,
       "eval_steps_per_second": 9.131,
       "step": 75
+    },
+    {
+      "epoch": 0.9271826153259627,
+      "grad_norm": 1.0527911186218262,
+      "learning_rate": 2.3444344707738015e-05,
+      "loss": 2.1647,
+      "step": 76
+    },
+    {
+      "epoch": 0.9393823865802516,
+      "grad_norm": 1.0965566635131836,
+      "learning_rate": 2.2400865784401e-05,
+      "loss": 1.8203,
+      "step": 77
+    },
+    {
+      "epoch": 0.9515821578345406,
+      "grad_norm": 1.1224842071533203,
+      "learning_rate": 2.1393033535713093e-05,
+      "loss": 1.8365,
+      "step": 78
+    },
+    {
+      "epoch": 0.9637819290888295,
+      "grad_norm": 1.083632469177246,
+      "learning_rate": 2.0421950011441354e-05,
+      "loss": 1.5104,
+      "step": 79
+    },
+    {
+      "epoch": 0.9759817003431186,
+      "grad_norm": 1.1941848993301392,
+      "learning_rate": 1.9488677077162295e-05,
+      "loss": 1.4217,
+      "step": 80
+    },
+    {
+      "epoch": 0.9881814715974075,
+      "grad_norm": 1.4089711904525757,
+      "learning_rate": 1.8594235253127375e-05,
+      "loss": 1.4428,
+      "step": 81
+    },
+    {
+      "epoch": 1.0026686999618757,
+      "grad_norm": 1.9810051918029785,
+      "learning_rate": 1.77396025983391e-05,
+      "loss": 1.5342,
+      "step": 82
+    },
+    {
+      "epoch": 1.0148684712161646,
+      "grad_norm": 0.940060555934906,
+      "learning_rate": 1.6925713641057904e-05,
+      "loss": 1.7435,
+      "step": 83
+    },
+    {
+      "epoch": 1.0270682424704536,
+      "grad_norm": 1.0035717487335205,
+      "learning_rate": 1.6153458356909176e-05,
+      "loss": 1.9253,
+      "step": 84
+    },
+    {
+      "epoch": 1.0392680137247428,
+      "grad_norm": 1.069605827331543,
+      "learning_rate": 1.5423681195707997e-05,
+      "loss": 1.7862,
+      "step": 85
+    },
+    {
+      "epoch": 1.0514677849790317,
+      "grad_norm": 1.121212124824524,
+      "learning_rate": 1.4737180158065644e-05,
+      "loss": 1.505,
+      "step": 86
+    },
+    {
+      "epoch": 1.0636675562333207,
+      "grad_norm": 1.131169080734253,
+      "learning_rate": 1.4094705922787687e-05,
+      "loss": 1.6766,
+      "step": 87
+    },
+    {
+      "epoch": 1.0758673274876096,
+      "grad_norm": 1.1634432077407837,
+      "learning_rate": 1.3496961026017687e-05,
+      "loss": 1.4677,
+      "step": 88
+    },
+    {
+      "epoch": 1.0880670987418986,
+      "grad_norm": 1.1550370454788208,
+      "learning_rate": 1.2944599093024267e-05,
+      "loss": 1.4571,
+      "step": 89
+    },
+    {
+      "epoch": 1.1002668699961875,
+      "grad_norm": 1.2418510913848877,
+      "learning_rate": 1.2438224123471442e-05,
+      "loss": 1.3787,
+      "step": 90
+    },
+    {
+      "epoch": 1.1124666412504765,
+      "grad_norm": 1.244449496269226,
+      "learning_rate": 1.1978389830953907e-05,
+      "loss": 1.3382,
+      "step": 91
+    },
+    {
+      "epoch": 1.1246664125047656,
+      "grad_norm": 1.207255482673645,
+      "learning_rate": 1.1565599037519316e-05,
+      "loss": 1.1813,
+      "step": 92
+    },
+    {
+      "epoch": 1.1368661837590546,
+      "grad_norm": 1.613624930381775,
+      "learning_rate": 1.1200303123839742e-05,
+      "loss": 1.3322,
+      "step": 93
+    },
+    {
+      "epoch": 1.1490659550133435,
+      "grad_norm": 1.5924932956695557,
+      "learning_rate": 1.088290153563358e-05,
+      "loss": 0.7849,
+      "step": 94
+    },
+    {
+      "epoch": 1.1612657262676325,
+      "grad_norm": 1.3866314888000488,
+      "learning_rate": 1.0613741346877497e-05,
+      "loss": 2.1158,
+      "step": 95
+    },
+    {
+      "epoch": 1.1734654975219214,
+      "grad_norm": 1.0334395170211792,
+      "learning_rate": 1.0393116880286118e-05,
+      "loss": 1.7684,
+      "step": 96
+    },
+    {
+      "epoch": 1.1856652687762104,
+      "grad_norm": 1.1649991273880005,
+      "learning_rate": 1.0221269385474488e-05,
+      "loss": 1.8588,
+      "step": 97
+    },
+    {
+      "epoch": 1.1978650400304995,
+      "grad_norm": 1.1444116830825806,
+      "learning_rate": 1.0098386775155147e-05,
+      "loss": 1.7781,
+      "step": 98
+    },
+    {
+      "epoch": 1.2100648112847885,
+      "grad_norm": 1.1716587543487549,
+      "learning_rate": 1.0024603419658329e-05,
+      "loss": 1.5944,
+      "step": 99
+    },
+    {
+      "epoch": 1.2222645825390774,
+      "grad_norm": 1.1668322086334229,
+      "learning_rate": 1e-05,
+      "loss": 1.5552,
+      "step": 100
+    },
+    {
+      "epoch": 1.2222645825390774,
+      "eval_loss": 1.6244957447052002,
+      "eval_runtime": 1.4265,
+      "eval_samples_per_second": 35.051,
+      "eval_steps_per_second": 9.113,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -587,12 +770,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 8.436086715973632e+17,
+  "total_flos": 1.1248115621298176e+18,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null