Commit fd01793 · verified · committed by 0x1202 · 1 parent: 05dc4a8

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f22522263c8238dfcb7d1f942124c111f5893613753ab429e87ad928ced9d2f
+oid sha256:1e4f3b58c24f87f6560869f0a57471a3fb26df7400ca06792c4e17e1c1a36ec2
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eaa6485cfe1cdfdfca5b5fe6d1919de7e5a056eb7b33f77c35f369330be67058
+oid sha256:802142b852ce2beb520e38beb1cb34c6dfe69c4fb8af1c7c85369dfafcf706e7
 size 320194002
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e7774b924a2282d72542d9919611905ca854797f016aff695cc6031e95bdbf8
+oid sha256:bc8afd77857ea80d425b1c16cfed4cb483ac5a5abd07b72260c215d556e738be
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27056a0998d22ff91c008d94a9050c2a20dda9fd1206f04e807d59afc14e5421
+oid sha256:52e7e615ef85192a77dfa418d0a1415bde484c32aceb04c8295b02411d9b9920
 size 1064
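
Each of the binaries above is tracked with Git LFS, so the diff only touches the pointer files: a `version` line, the `oid sha256` of the stored blob, and its `size` in bytes. Since the oid is simply the SHA-256 of the file contents, a pulled checkpoint can be checked against its pointer. Below is a minimal sketch; the path and expected values are taken from the adapter_model.safetensors pointer in this commit, everything else is illustrative and assumes the file has been fetched with `git lfs pull`.

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large checkpoints never sit in memory at once."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected values copied from the LFS pointer committed here.
expected_oid = "1e4f3b58c24f87f6560869f0a57471a3fb26df7400ca06792c4e17e1c1a36ec2"
expected_size = 159967880

path = Path("last-checkpoint/adapter_model.safetensors")
actual_size = path.stat().st_size
assert actual_size == expected_size, f"size {actual_size} != {expected_size}: maybe still an LFS pointer?"
assert sha256_of(path) == expected_oid, "sha256 mismatch: corrupted or partial download"
print("adapter_model.safetensors matches its LFS pointer")
```

If the size check fails with a file of only a few hundred bytes, the working tree most likely still holds the un-smudged pointer rather than the actual weights.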
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.7010467052459717,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.6920415224913494,
+  "epoch": 1.3840830449826989,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -381,6 +381,372 @@
       "eval_samples_per_second": 9.509,
       "eval_steps_per_second": 1.331,
       "step": 50
+    },
+    {
+      "epoch": 0.7058823529411765,
+      "grad_norm": 9.266826629638672,
+      "learning_rate": 0.00023674985042519795,
+      "loss": 3.4074,
+      "step": 51
+    },
+    {
+      "epoch": 0.7197231833910035,
+      "grad_norm": 2.7682924270629883,
+      "learning_rate": 0.00023387893552061199,
+      "loss": 2.6934,
+      "step": 52
+    },
+    {
+      "epoch": 0.7335640138408305,
+      "grad_norm": 1.135008454322815,
+      "learning_rate": 0.00023096259869272693,
+      "loss": 2.7218,
+      "step": 53
+    },
+    {
+      "epoch": 0.7474048442906575,
+      "grad_norm": 3.1696879863739014,
+      "learning_rate": 0.00022800241918938228,
+      "loss": 2.8763,
+      "step": 54
+    },
+    {
+      "epoch": 0.7612456747404844,
+      "grad_norm": 3.2597336769104004,
+      "learning_rate": 0.000225,
+      "loss": 2.794,
+      "step": 55
+    },
+    {
+      "epoch": 0.7750865051903114,
+      "grad_norm": 2.9772768020629883,
+      "learning_rate": 0.00022195696698753693,
+      "loss": 2.9002,
+      "step": 56
+    },
+    {
+      "epoch": 0.7889273356401384,
+      "grad_norm": 2.6768057346343994,
+      "learning_rate": 0.00021887496800805173,
+      "loss": 2.8117,
+      "step": 57
+    },
+    {
+      "epoch": 0.8027681660899654,
+      "grad_norm": 1.5049817562103271,
+      "learning_rate": 0.0002157556720183616,
+      "loss": 2.8173,
+      "step": 58
+    },
+    {
+      "epoch": 0.8166089965397924,
+      "grad_norm": 1.5916688442230225,
+      "learning_rate": 0.00021260076817227266,
+      "loss": 2.7637,
+      "step": 59
+    },
+    {
+      "epoch": 0.8304498269896193,
+      "grad_norm": 2.124816656112671,
+      "learning_rate": 0.0002094119649058735,
+      "loss": 2.7946,
+      "step": 60
+    },
+    {
+      "epoch": 0.8442906574394463,
+      "grad_norm": 1.6769332885742188,
+      "learning_rate": 0.0002061909890123868,
+      "loss": 2.7702,
+      "step": 61
+    },
+    {
+      "epoch": 0.8581314878892734,
+      "grad_norm": 3.2984721660614014,
+      "learning_rate": 0.0002029395847070803,
+      "loss": 2.7117,
+      "step": 62
+    },
+    {
+      "epoch": 0.8719723183391004,
+      "grad_norm": 1.5877281427383423,
+      "learning_rate": 0.0001996595126827437,
+      "loss": 2.705,
+      "step": 63
+    },
+    {
+      "epoch": 0.8858131487889274,
+      "grad_norm": 3.0824265480041504,
+      "learning_rate": 0.0001963525491562421,
+      "loss": 2.8567,
+      "step": 64
+    },
+    {
+      "epoch": 0.8996539792387543,
+      "grad_norm": 3.194683313369751,
+      "learning_rate": 0.00019302048490666353,
+      "loss": 2.8681,
+      "step": 65
+    },
+    {
+      "epoch": 0.9134948096885813,
+      "grad_norm": 2.6802637577056885,
+      "learning_rate": 0.00018966512430558034,
+      "loss": 2.8496,
+      "step": 66
+    },
+    {
+      "epoch": 0.9273356401384083,
+      "grad_norm": 3.094783067703247,
+      "learning_rate": 0.00018628828433995013,
+      "loss": 2.8264,
+      "step": 67
+    },
+    {
+      "epoch": 0.9411764705882353,
+      "grad_norm": 1.2324931621551514,
+      "learning_rate": 0.00018289179362818546,
+      "loss": 2.7473,
+      "step": 68
+    },
+    {
+      "epoch": 0.9550173010380623,
+      "grad_norm": 2.415250778198242,
+      "learning_rate": 0.0001794774914299245,
+      "loss": 2.835,
+      "step": 69
+    },
+    {
+      "epoch": 0.9688581314878892,
+      "grad_norm": 2.9529473781585693,
+      "learning_rate": 0.00017604722665003956,
+      "loss": 2.958,
+      "step": 70
+    },
+    {
+      "epoch": 0.9826989619377162,
+      "grad_norm": 1.7587758302688599,
+      "learning_rate": 0.00017260285683742246,
+      "loss": 2.8396,
+      "step": 71
+    },
+    {
+      "epoch": 0.9965397923875432,
+      "grad_norm": 1.809816598892212,
+      "learning_rate": 0.00016914624717908922,
+      "loss": 2.8112,
+      "step": 72
+    },
+    {
+      "epoch": 1.0103806228373702,
+      "grad_norm": 1.430889368057251,
+      "learning_rate": 0.000165679269490148,
+      "loss": 2.7052,
+      "step": 73
+    },
+    {
+      "epoch": 1.0242214532871972,
+      "grad_norm": 2.520479440689087,
+      "learning_rate": 0.00016220380120017872,
+      "loss": 2.7617,
+      "step": 74
+    },
+    {
+      "epoch": 1.0380622837370241,
+      "grad_norm": 1.8224382400512695,
+      "learning_rate": 0.00015872172433657134,
+      "loss": 2.7747,
+      "step": 75
+    },
+    {
+      "epoch": 1.0380622837370241,
+      "eval_loss": 0.6992730498313904,
+      "eval_runtime": 5.2527,
+      "eval_samples_per_second": 9.519,
+      "eval_steps_per_second": 1.333,
+      "step": 75
+    },
+    {
+      "epoch": 1.0519031141868511,
+      "grad_norm": 1.3448646068572998,
+      "learning_rate": 0.00015523492450537517,
+      "loss": 2.763,
+      "step": 76
+    },
+    {
+      "epoch": 1.065743944636678,
+      "grad_norm": 1.085045576095581,
+      "learning_rate": 0.00015174528987020957,
+      "loss": 2.7388,
+      "step": 77
+    },
+    {
+      "epoch": 1.0795847750865053,
+      "grad_norm": 1.328552007675171,
+      "learning_rate": 0.00014825471012979045,
+      "loss": 2.7668,
+      "step": 78
+    },
+    {
+      "epoch": 1.0934256055363323,
+      "grad_norm": 1.4537187814712524,
+      "learning_rate": 0.0001447650754946249,
+      "loss": 2.7073,
+      "step": 79
+    },
+    {
+      "epoch": 1.1072664359861593,
+      "grad_norm": 1.3335843086242676,
+      "learning_rate": 0.00014127827566342863,
+      "loss": 2.796,
+      "step": 80
+    },
+    {
+      "epoch": 1.1211072664359862,
+      "grad_norm": 2.351191997528076,
+      "learning_rate": 0.00013779619879982126,
+      "loss": 2.8494,
+      "step": 81
+    },
+    {
+      "epoch": 1.1349480968858132,
+      "grad_norm": 2.260059118270874,
+      "learning_rate": 0.000134320730509852,
+      "loss": 2.7993,
+      "step": 82
+    },
+    {
+      "epoch": 1.1487889273356402,
+      "grad_norm": 2.7046008110046387,
+      "learning_rate": 0.00013085375282091078,
+      "loss": 2.837,
+      "step": 83
+    },
+    {
+      "epoch": 1.1626297577854672,
+      "grad_norm": 1.4147647619247437,
+      "learning_rate": 0.0001273971431625775,
+      "loss": 2.7192,
+      "step": 84
+    },
+    {
+      "epoch": 1.1764705882352942,
+      "grad_norm": 2.916611909866333,
+      "learning_rate": 0.00012395277334996044,
+      "loss": 2.8989,
+      "step": 85
+    },
+    {
+      "epoch": 1.1903114186851211,
+      "grad_norm": 3.9687960147857666,
+      "learning_rate": 0.00012052250857007545,
+      "loss": 2.728,
+      "step": 86
+    },
+    {
+      "epoch": 1.2041522491349481,
+      "grad_norm": 1.5941438674926758,
+      "learning_rate": 0.00011710820637181447,
+      "loss": 2.7109,
+      "step": 87
+    },
+    {
+      "epoch": 1.217993079584775,
+      "grad_norm": 1.5909277200698853,
+      "learning_rate": 0.00011371171566004985,
+      "loss": 2.78,
+      "step": 88
+    },
+    {
+      "epoch": 1.231833910034602,
+      "grad_norm": 3.141655206680298,
+      "learning_rate": 0.0001103348756944197,
+      "loss": 2.7588,
+      "step": 89
+    },
+    {
+      "epoch": 1.245674740484429,
+      "grad_norm": 2.018080234527588,
+      "learning_rate": 0.0001069795150933365,
+      "loss": 2.7771,
+      "step": 90
+    },
+    {
+      "epoch": 1.259515570934256,
+      "grad_norm": 1.5090923309326172,
+      "learning_rate": 0.0001036474508437579,
+      "loss": 2.7333,
+      "step": 91
+    },
+    {
+      "epoch": 1.273356401384083,
+      "grad_norm": 1.451968789100647,
+      "learning_rate": 0.0001003404873172563,
+      "loss": 2.7345,
+      "step": 92
+    },
+    {
+      "epoch": 1.28719723183391,
+      "grad_norm": 2.1932549476623535,
+      "learning_rate": 9.706041529291968e-05,
+      "loss": 2.7071,
+      "step": 93
+    },
+    {
+      "epoch": 1.301038062283737,
+      "grad_norm": 2.9638872146606445,
+      "learning_rate": 9.380901098761319e-05,
+      "loss": 2.8978,
+      "step": 94
+    },
+    {
+      "epoch": 1.314878892733564,
+      "grad_norm": 2.9430599212646484,
+      "learning_rate": 9.058803509412646e-05,
+      "loss": 2.8598,
+      "step": 95
+    },
+    {
+      "epoch": 1.328719723183391,
+      "grad_norm": 2.1553397178649902,
+      "learning_rate": 8.739923182772731e-05,
+      "loss": 2.6598,
+      "step": 96
+    },
+    {
+      "epoch": 1.342560553633218,
+      "grad_norm": 1.4002549648284912,
+      "learning_rate": 8.424432798163836e-05,
+      "loss": 2.7373,
+      "step": 97
+    },
+    {
+      "epoch": 1.356401384083045,
+      "grad_norm": 2.587266206741333,
+      "learning_rate": 8.112503199194821e-05,
+      "loss": 2.785,
+      "step": 98
+    },
+    {
+      "epoch": 1.370242214532872,
+      "grad_norm": 1.669758915901184,
+      "learning_rate": 7.804303301246311e-05,
+      "loss": 2.6785,
+      "step": 99
+    },
+    {
+      "epoch": 1.3840830449826989,
+      "grad_norm": 3.042837381362915,
+      "learning_rate": 7.500000000000002e-05,
+      "loss": 2.858,
+      "step": 100
+    },
+    {
+      "epoch": 1.3840830449826989,
+      "eval_loss": 0.7048866748809814,
+      "eval_runtime": 5.2514,
+      "eval_samples_per_second": 9.521,
+      "eval_steps_per_second": 1.333,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -395,7 +761,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
       }
     },
     "TrainerControl": {
@@ -404,12 +770,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 6.94299339522048e+16,
+  "total_flos": 1.3877308048696934e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null