0x1202 committed on
Commit 428e5d5 · verified · 1 Parent(s): 083f9c2

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c54f95b44c35d8bd62ce121fbeef645cbf15b201cb75b5f40891efa299a97fc3
+ oid sha256:43c3946a5a89d36dfa1fc36228b54d7c970649172043020b6e528b995b6b00fe
  size 45118424
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b7175fd5911f5a67bb3b9739142788a233ed262ccd927a7ea46a574c63b0ee50
+ oid sha256:b5301d5597f3da64893b3d9e2e609104f4243ffff9c9302a9bdd9c47057329e6
  size 90365754
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e52aff640d5a3d66075cc76aa8f8de7123ebcf80ff659b5fdbc74059972e971e
+ oid sha256:819e8f8d7c618e04879ebe16dcdcf92dc0610755b13a10ec192932587c41e3d1
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3cf5a6b72aec24b43630a95f66252ceddc36547934caef83f8781b0cbc7b4cc
+ oid sha256:df88ad9d29a5b994fc668c3ab662b1d4e6baa321c3f5068caf8ff1c21c6e351d
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.03938543424010277,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.91324200913242,
+ "best_metric": 0.004337400663644075,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 1.8310502283105023,
  "eval_steps": 25,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -381,6 +381,372 @@
  "eval_samples_per_second": 12.562,
  "eval_steps_per_second": 1.759,
  "step": 50
+ },
+ {
+ "epoch": 0.9315068493150684,
+ "grad_norm": 0.7113423347473145,
+ "learning_rate": 0.00019184866590588439,
+ "loss": 0.0168,
+ "step": 51
+ },
+ {
+ "epoch": 0.9497716894977168,
+ "grad_norm": 0.3350503742694855,
+ "learning_rate": 0.00018730348307472824,
+ "loss": 0.0141,
+ "step": 52
+ },
+ {
+ "epoch": 0.9680365296803652,
+ "grad_norm": 0.6726917624473572,
+ "learning_rate": 0.0001827214862094814,
+ "loss": 0.0454,
+ "step": 53
+ },
+ {
+ "epoch": 0.9863013698630136,
+ "grad_norm": 0.31374573707580566,
+ "learning_rate": 0.0001781071971878587,
+ "loss": 0.0138,
+ "step": 54
+ },
+ {
+ "epoch": 1.009132420091324,
+ "grad_norm": 0.6686992049217224,
+ "learning_rate": 0.00017346516975603462,
+ "loss": 0.032,
+ "step": 55
+ },
+ {
+ "epoch": 1.0273972602739727,
+ "grad_norm": 0.4239481985569,
+ "learning_rate": 0.00016879998503464561,
+ "loss": 0.0243,
+ "step": 56
+ },
+ {
+ "epoch": 1.045662100456621,
+ "grad_norm": 0.2905505895614624,
+ "learning_rate": 0.00016411624699777717,
+ "loss": 0.0159,
+ "step": 57
+ },
+ {
+ "epoch": 1.0639269406392695,
+ "grad_norm": 2.1741881370544434,
+ "learning_rate": 0.000159418577929397,
+ "loss": 0.0303,
+ "step": 58
+ },
+ {
+ "epoch": 1.0821917808219177,
+ "grad_norm": 0.6694914102554321,
+ "learning_rate": 0.00015471161386171922,
+ "loss": 0.0176,
+ "step": 59
+ },
+ {
+ "epoch": 1.1004566210045663,
+ "grad_norm": 0.4211752414703369,
+ "learning_rate": 0.00015,
+ "loss": 0.0112,
+ "step": 60
+ },
+ {
+ "epoch": 1.1187214611872145,
+ "grad_norm": 1.0037997961044312,
+ "learning_rate": 0.00014528838613828076,
+ "loss": 0.0202,
+ "step": 61
+ },
+ {
+ "epoch": 1.1369863013698631,
+ "grad_norm": 0.20127694308757782,
+ "learning_rate": 0.000140581422070603,
+ "loss": 0.0052,
+ "step": 62
+ },
+ {
+ "epoch": 1.1552511415525113,
+ "grad_norm": 0.32008397579193115,
+ "learning_rate": 0.00013588375300222283,
+ "loss": 0.0052,
+ "step": 63
+ },
+ {
+ "epoch": 1.17351598173516,
+ "grad_norm": 0.1412021666765213,
+ "learning_rate": 0.00013120001496535433,
+ "loss": 0.0028,
+ "step": 64
+ },
+ {
+ "epoch": 1.191780821917808,
+ "grad_norm": 0.8715978860855103,
+ "learning_rate": 0.00012653483024396533,
+ "loss": 0.0081,
+ "step": 65
+ },
+ {
+ "epoch": 1.2100456621004567,
+ "grad_norm": 0.14639712870121002,
+ "learning_rate": 0.00012189280281214126,
+ "loss": 0.0029,
+ "step": 66
+ },
+ {
+ "epoch": 1.228310502283105,
+ "grad_norm": 0.020685512572526932,
+ "learning_rate": 0.00011727851379051865,
+ "loss": 0.001,
+ "step": 67
+ },
+ {
+ "epoch": 1.2465753424657535,
+ "grad_norm": 0.3620803952217102,
+ "learning_rate": 0.0001126965169252718,
+ "loss": 0.0172,
+ "step": 68
+ },
+ {
+ "epoch": 1.2648401826484017,
+ "grad_norm": 0.3493403494358063,
+ "learning_rate": 0.00010815133409411562,
+ "loss": 0.0116,
+ "step": 69
+ },
+ {
+ "epoch": 1.2831050228310503,
+ "grad_norm": 0.19864076375961304,
+ "learning_rate": 0.0001036474508437579,
+ "loss": 0.0067,
+ "step": 70
+ },
+ {
+ "epoch": 1.3013698630136985,
+ "grad_norm": 0.4525580406188965,
+ "learning_rate": 9.918931196320629e-05,
+ "loss": 0.0112,
+ "step": 71
+ },
+ {
+ "epoch": 1.3196347031963471,
+ "grad_norm": 0.36928990483283997,
+ "learning_rate": 9.47813170972983e-05,
+ "loss": 0.0154,
+ "step": 72
+ },
+ {
+ "epoch": 1.3378995433789953,
+ "grad_norm": 0.3303503394126892,
+ "learning_rate": 9.042781640478291e-05,
+ "loss": 0.0088,
+ "step": 73
+ },
+ {
+ "epoch": 1.356164383561644,
+ "grad_norm": 0.1237182691693306,
+ "learning_rate": 8.613310626523909e-05,
+ "loss": 0.0046,
+ "step": 74
+ },
+ {
+ "epoch": 1.374429223744292,
+ "grad_norm": 0.10325484722852707,
+ "learning_rate": 8.190142503906798e-05,
+ "loss": 0.0028,
+ "step": 75
+ },
+ {
+ "epoch": 1.374429223744292,
+ "eval_loss": 0.006142734084278345,
+ "eval_runtime": 3.4169,
+ "eval_samples_per_second": 14.633,
+ "eval_steps_per_second": 2.049,
+ "step": 75
+ },
+ {
+ "epoch": 1.3926940639269407,
+ "grad_norm": 0.2936699688434601,
+ "learning_rate": 7.773694888474267e-05,
+ "loss": 0.0047,
+ "step": 76
+ },
+ {
+ "epoch": 1.410958904109589,
+ "grad_norm": 0.04170661419630051,
+ "learning_rate": 7.364378763744429e-05,
+ "loss": 0.0015,
+ "step": 77
+ },
+ {
+ "epoch": 1.4292237442922375,
+ "grad_norm": 0.04328185319900513,
+ "learning_rate": 6.962598075315046e-05,
+ "loss": 0.0011,
+ "step": 78
+ },
+ {
+ "epoch": 1.4474885844748857,
+ "grad_norm": 0.11742109060287476,
+ "learning_rate": 6.568749332218044e-05,
+ "loss": 0.0015,
+ "step": 79
+ },
+ {
+ "epoch": 1.4657534246575343,
+ "grad_norm": 0.029690410941839218,
+ "learning_rate": 6.183221215612904e-05,
+ "loss": 0.0009,
+ "step": 80
+ },
+ {
+ "epoch": 1.4840182648401825,
+ "grad_norm": 0.5141183733940125,
+ "learning_rate": 5.806394195205356e-05,
+ "loss": 0.0079,
+ "step": 81
+ },
+ {
+ "epoch": 1.5022831050228311,
+ "grad_norm": 0.0738927498459816,
+ "learning_rate": 5.4386401537696536e-05,
+ "loss": 0.0036,
+ "step": 82
+ },
+ {
+ "epoch": 1.5205479452054793,
+ "grad_norm": 0.3400103747844696,
+ "learning_rate": 5.080322020145224e-05,
+ "loss": 0.0093,
+ "step": 83
+ },
+ {
+ "epoch": 1.538812785388128,
+ "grad_norm": 0.16344979405403137,
+ "learning_rate": 4.7317934110696685e-05,
+ "loss": 0.005,
+ "step": 84
+ },
+ {
+ "epoch": 1.5570776255707761,
+ "grad_norm": 0.17904306948184967,
+ "learning_rate": 4.3933982822017876e-05,
+ "loss": 0.0033,
+ "step": 85
+ },
+ {
+ "epoch": 1.5753424657534247,
+ "grad_norm": 0.0942436158657074,
+ "learning_rate": 4.06547058867883e-05,
+ "loss": 0.0069,
+ "step": 86
+ },
+ {
+ "epoch": 1.593607305936073,
+ "grad_norm": 0.144642174243927,
+ "learning_rate": 3.7483339555431055e-05,
+ "loss": 0.0046,
+ "step": 87
+ },
+ {
+ "epoch": 1.6118721461187215,
+ "grad_norm": 0.27809712290763855,
+ "learning_rate": 3.442301358363163e-05,
+ "loss": 0.003,
+ "step": 88
+ },
+ {
+ "epoch": 1.6301369863013697,
+ "grad_norm": 0.021354489028453827,
+ "learning_rate": 3.1476748143646435e-05,
+ "loss": 0.0012,
+ "step": 89
+ },
+ {
+ "epoch": 1.6484018264840183,
+ "grad_norm": 0.019089965149760246,
+ "learning_rate": 2.8647450843757897e-05,
+ "loss": 0.001,
+ "step": 90
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 0.0716283917427063,
+ "learning_rate": 2.5937913858815708e-05,
+ "loss": 0.0012,
+ "step": 91
+ },
+ {
+ "epoch": 1.6849315068493151,
+ "grad_norm": 0.012859423644840717,
+ "learning_rate": 2.335081117469777e-05,
+ "loss": 0.0007,
+ "step": 92
+ },
+ {
+ "epoch": 1.7031963470319633,
+ "grad_norm": 0.07900480180978775,
+ "learning_rate": 2.0888695949408468e-05,
+ "loss": 0.0011,
+ "step": 93
+ },
+ {
+ "epoch": 1.721461187214612,
+ "grad_norm": 0.06209683418273926,
+ "learning_rate": 1.8553997993420495e-05,
+ "loss": 0.0023,
+ "step": 94
+ },
+ {
+ "epoch": 1.7397260273972601,
+ "grad_norm": 0.08779767155647278,
+ "learning_rate": 1.634902137174483e-05,
+ "loss": 0.0038,
+ "step": 95
+ },
+ {
+ "epoch": 1.7579908675799087,
+ "grad_norm": 0.10400469601154327,
+ "learning_rate": 1.4275942130097096e-05,
+ "loss": 0.0042,
+ "step": 96
+ },
+ {
+ "epoch": 1.776255707762557,
+ "grad_norm": 0.06363116204738617,
+ "learning_rate": 1.2336806147402828e-05,
+ "loss": 0.0019,
+ "step": 97
+ },
+ {
+ "epoch": 1.7945205479452055,
+ "grad_norm": 0.08082360029220581,
+ "learning_rate": 1.0533527116762296e-05,
+ "loss": 0.0018,
+ "step": 98
+ },
+ {
+ "epoch": 1.8127853881278537,
+ "grad_norm": 0.060259874910116196,
+ "learning_rate": 8.867884656866181e-06,
+ "loss": 0.0024,
+ "step": 99
+ },
+ {
+ "epoch": 1.8310502283105023,
+ "grad_norm": 0.08836295455694199,
+ "learning_rate": 7.34152255572697e-06,
+ "loss": 0.0023,
+ "step": 100
+ },
+ {
+ "epoch": 1.8310502283105023,
+ "eval_loss": 0.004337400663644075,
+ "eval_runtime": 3.4882,
+ "eval_samples_per_second": 14.334,
+ "eval_steps_per_second": 2.007,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -409,7 +775,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.310961668456448e+16,
+ "total_flos": 2.637346415365325e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null