dada22231 committed
Commit 92c0f3b
1 Parent(s): c372c6e

Training in progress, step 95, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b9e6029b8e20ffea46f0a63f305f8a19b4b8d4c222505c00dd1756a2c05e327
+oid sha256:719fcb5c9db18237bd1dc22a5c30e434288dbf398d2e0596edeace2933d22427
 size 237402
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c5ac99cd1c2daa03a4004e768b1475426edc7b4b4d2c5f4eaebb8ae60fbdc45c
+oid sha256:0eb5b2c377a3c356f094f68d42270bee4cc35d70c7af5b5d48e4f8c53670c64b
 size 222294
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9cd119cfb8d5f79126ee024c0402e15266cdde51590656c9bfff6d4c1906f5f2
+oid sha256:d1198b041f02130d25891e82e3bfff2efce248c2f44a5246eec91fc96c7a38f9
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bcdfaa06a050154a02b7db40937ce5ed5fadd7e1cdcda101cef6fda76cd4aa3b
+oid sha256:9021567b3ca7affc6bb82b67aa17648ec2ce8c44a9b4c1f5f01d2dacf89c9b13
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1dce42a6c12395909ff288b85ec6528b48ce766f999f9454df6e9b03a18ef1d
+oid sha256:6d3b5b6efd5f70e44b3ddc68874b355dd85d3e12fa484eab5e8800b612717e68
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05d6bba347f596d22ebf95e8935e06faf3787342cd6c72879bba6c7d46f04e1a
+oid sha256:5b6783182f0ea3ea6ac751b0318d93d4953b4ecbb0e620c4e3ce3ecebfdcde32
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5b53655d80c3ade692dacae57cafa4aff84c325b5cb8d0fba89d01b50d41566
+oid sha256:1fd9769bcb04586b7ead175b588f12a0bbbeba72d5f308b868ba8f36b77aa801
 size 1064
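
Each change above is a Git LFS pointer update: only the `oid sha256:` line differs between the old and new pointer, while `version` and `size` stay the same. As a minimal sketch (with hypothetical local paths, not part of this commit), one way to check that a downloaded checkpoint file matches the pointer recorded here:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: Path) -> dict:
    # A pointer file is plain "key value" lines: version, oid sha256:<hex>, size.
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(data_path: Path, pointer_path: Path) -> bool:
    # Compare the real file's byte size and SHA-256 digest against the pointer.
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    blob = data_path.read_bytes()
    return len(blob) == expected_size and hashlib.sha256(blob).hexdigest() == expected_oid

# Hypothetical usage:
# matches_pointer(Path("last-checkpoint/optimizer.pt"), Path("optimizer.pt.pointer"))
```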
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 6.879824638366699,
   "best_model_checkpoint": "miner_id_24/checkpoint-75",
-  "epoch": 0.03702104029123218,
+  "epoch": 0.04689331770222743,
   "eval_steps": 25,
-  "global_step": 75,
+  "global_step": 95,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -564,6 +564,146 @@
       "eval_samples_per_second": 663.996,
       "eval_steps_per_second": 172.639,
       "step": 75
+    },
+    {
+      "epoch": 0.03751465416178194,
+      "grad_norm": 0.12949536740779877,
+      "learning_rate": 1.91437699862843e-05,
+      "loss": 6.8888,
+      "step": 76
+    },
+    {
+      "epoch": 0.03800826803233171,
+      "grad_norm": 0.122923344373703,
+      "learning_rate": 1.8236354814530112e-05,
+      "loss": 6.8853,
+      "step": 77
+    },
+    {
+      "epoch": 0.03850188190288147,
+      "grad_norm": 0.1295005977153778,
+      "learning_rate": 1.7371804408538024e-05,
+      "loss": 6.8858,
+      "step": 78
+    },
+    {
+      "epoch": 0.038995495773431234,
+      "grad_norm": 0.11691726744174957,
+      "learning_rate": 1.6551126795408016e-05,
+      "loss": 6.8864,
+      "step": 79
+    },
+    {
+      "epoch": 0.039489109643980995,
+      "grad_norm": 0.1165173351764679,
+      "learning_rate": 1.577527884852619e-05,
+      "loss": 6.8828,
+      "step": 80
+    },
+    {
+      "epoch": 0.039982723514530756,
+      "grad_norm": 0.1302141696214676,
+      "learning_rate": 1.5045165171893116e-05,
+      "loss": 6.8843,
+      "step": 81
+    },
+    {
+      "epoch": 0.040476337385080524,
+      "grad_norm": 0.12200380116701126,
+      "learning_rate": 1.4361637045396029e-05,
+      "loss": 6.8813,
+      "step": 82
+    },
+    {
+      "epoch": 0.040969951255630285,
+      "grad_norm": 0.12973135709762573,
+      "learning_rate": 1.3725491432254624e-05,
+      "loss": 6.8809,
+      "step": 83
+    },
+    {
+      "epoch": 0.041463565126180046,
+      "grad_norm": 0.12173114717006683,
+      "learning_rate": 1.313747004979751e-05,
+      "loss": 6.8837,
+      "step": 84
+    },
+    {
+      "epoch": 0.04195717899672981,
+      "grad_norm": 0.10662299394607544,
+      "learning_rate": 1.2598258504653081e-05,
+      "loss": 6.8864,
+      "step": 85
+    },
+    {
+      "epoch": 0.04245079286727957,
+      "grad_norm": 0.11969508230686188,
+      "learning_rate": 1.2108485493362765e-05,
+      "loss": 6.8813,
+      "step": 86
+    },
+    {
+      "epoch": 0.04294440673782933,
+      "grad_norm": 0.11755706369876862,
+      "learning_rate": 1.1668722069349041e-05,
+      "loss": 6.8826,
+      "step": 87
+    },
+    {
+      "epoch": 0.0434380206083791,
+      "grad_norm": 0.11743596196174622,
+      "learning_rate": 1.1279480977092635e-05,
+      "loss": 6.8882,
+      "step": 88
+    },
+    {
+      "epoch": 0.04393163447892886,
+      "grad_norm": 0.11842235922813416,
+      "learning_rate": 1.094121605429547e-05,
+      "loss": 6.8884,
+      "step": 89
+    },
+    {
+      "epoch": 0.04442524834947862,
+      "grad_norm": 0.1109689399600029,
+      "learning_rate": 1.0654321702726141e-05,
+      "loss": 6.8836,
+      "step": 90
+    },
+    {
+      "epoch": 0.04491886222002838,
+      "grad_norm": 0.10875685513019562,
+      "learning_rate": 1.0419132428365116e-05,
+      "loss": 6.8867,
+      "step": 91
+    },
+    {
+      "epoch": 0.04541247609057814,
+      "grad_norm": 0.11175169795751572,
+      "learning_rate": 1.0235922451385733e-05,
+      "loss": 6.8833,
+      "step": 92
+    },
+    {
+      "epoch": 0.04590608996112791,
+      "grad_norm": 0.10757743567228317,
+      "learning_rate": 1.0104905386425733e-05,
+      "loss": 6.8826,
+      "step": 93
+    },
+    {
+      "epoch": 0.04639970383167767,
+      "grad_norm": 0.10695550590753555,
+      "learning_rate": 1.002623399352217e-05,
+      "loss": 6.8832,
+      "step": 94
+    },
+    {
+      "epoch": 0.04689331770222743,
+      "grad_norm": 0.1099054366350174,
+      "learning_rate": 1e-05,
+      "loss": 6.8866,
+      "step": 95
     }
   ],
   "logging_steps": 1,
@@ -587,12 +727,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 8558936064000.0,
+  "total_flos": 10841319014400.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null