RoyJoy committed
Commit 8dba39f · verified · 1 Parent(s): f00b38c

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c44cab8fc17dc655c00b744b81624a330c5b31ec72fb7959c0e31093be71c8df
+oid sha256:8360975599d9ee4a4ee411c8f1cc2b0c35825bf8d393ec9c864f89971ffde2ae
 size 13587864
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a7937f8d0b0522568ba8dfeb64b31e36201465e3c7f20a06891b288ecb9f156
+oid sha256:49eecbd237b68ad70c577c8ff7e819b7e92e76ef96dd18fb804e48c36028f643
 size 27273018
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75fa3135b33d7aeb44f8e4124bad9a0ee98c95b0c419b1acd1aa54202c67d28e
+oid sha256:4539c524f63404b02bfa58cff36e62ce8ae7b9c028d47aa10d725df6f040629c
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9aa7879bf8691ab078fb4963204a8d87c49979ccea0d999a5e6fa161eef24dce
+oid sha256:cd125cb9ecad77cadeadabb22b22642cf7e9de35acf800977a895980f38e3788
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:52271eaee836640a3d36f1efdda22e3ec319c97364205f4661e4e3c95a947b6c
+oid sha256:4c3b86c562fbca6ea65842f86630024b1d2de4305e7c472c99265bcc81bc860e
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cd45e697edfc2bd92e6b96095f7e5f7494e1956faacd386039c966942997664
+oid sha256:8ff0435b6c2a8a9add06442eacd037ea4c488f1cd755e7f1185edf5dac8a311c
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:701bb5e634c7b895c484576107f4685e06cbe998965f059274fc80a23bf7f467
+oid sha256:af0b17754e8379d35b8568fcdfcceb7a1b2d560c454ce4dd5fdb665e8ee0830c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.9253315925598145,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.3813155386081983,
+  "best_metric": 1.7645323276519775,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 0.7626310772163966,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -381,6 +381,372 @@
       "eval_samples_per_second": 179.063,
       "eval_steps_per_second": 46.557,
       "step": 50
+    },
+    {
+      "epoch": 0.38894184938036225,
+      "grad_norm": 1.3231308460235596,
+      "learning_rate": 0.00028079336235003674,
+      "loss": 1.8672,
+      "step": 51
+    },
+    {
+      "epoch": 0.3965681601525262,
+      "grad_norm": 0.3953985869884491,
+      "learning_rate": 0.0002799329510692108,
+      "loss": 1.7821,
+      "step": 52
+    },
+    {
+      "epoch": 0.4041944709246902,
+      "grad_norm": 0.34664496779441833,
+      "learning_rate": 0.0002790552313087104,
+      "loss": 1.8357,
+      "step": 53
+    },
+    {
+      "epoch": 0.41182078169685415,
+      "grad_norm": 0.32160669565200806,
+      "learning_rate": 0.0002781603352499031,
+      "loss": 1.8114,
+      "step": 54
+    },
+    {
+      "epoch": 0.4194470924690181,
+      "grad_norm": 0.35647809505462646,
+      "learning_rate": 0.0002772483976608436,
+      "loss": 1.9044,
+      "step": 55
+    },
+    {
+      "epoch": 0.4270734032411821,
+      "grad_norm": 0.5951141715049744,
+      "learning_rate": 0.0002763195558759784,
+      "loss": 1.8211,
+      "step": 56
+    },
+    {
+      "epoch": 0.43469971401334606,
+      "grad_norm": 0.5895670652389526,
+      "learning_rate": 0.00027537394977546377,
+      "loss": 1.8736,
+      "step": 57
+    },
+    {
+      "epoch": 0.44232602478551003,
+      "grad_norm": 0.3808022439479828,
+      "learning_rate": 0.00027441172176410027,
+      "loss": 1.8487,
+      "step": 58
+    },
+    {
+      "epoch": 0.449952335557674,
+      "grad_norm": 0.3616071343421936,
+      "learning_rate": 0.000273433016749887,
+      "loss": 1.86,
+      "step": 59
+    },
+    {
+      "epoch": 0.45757864632983797,
+      "grad_norm": 0.34098148345947266,
+      "learning_rate": 0.00027243798212219926,
+      "loss": 1.8849,
+      "step": 60
+    },
+    {
+      "epoch": 0.4652049571020019,
+      "grad_norm": 0.3677496910095215,
+      "learning_rate": 0.0002714267677295918,
+      "loss": 1.9066,
+      "step": 61
+    },
+    {
+      "epoch": 0.47283126787416585,
+      "grad_norm": 0.42355066537857056,
+      "learning_rate": 0.0002703995258572327,
+      "loss": 1.8943,
+      "step": 62
+    },
+    {
+      "epoch": 0.4804575786463298,
+      "grad_norm": 0.8111021518707275,
+      "learning_rate": 0.0002693564112039695,
+      "loss": 1.8816,
+      "step": 63
+    },
+    {
+      "epoch": 0.4880838894184938,
+      "grad_norm": 0.5244035720825195,
+      "learning_rate": 0.00026829758085903196,
+      "loss": 1.7596,
+      "step": 64
+    },
+    {
+      "epoch": 0.49571020019065776,
+      "grad_norm": 0.49128928780555725,
+      "learning_rate": 0.0002672231942783754,
+      "loss": 1.7882,
+      "step": 65
+    },
+    {
+      "epoch": 0.5033365109628217,
+      "grad_norm": 0.35969385504722595,
+      "learning_rate": 0.000266133413260667,
+      "loss": 1.7607,
+      "step": 66
+    },
+    {
+      "epoch": 0.5109628217349858,
+      "grad_norm": 0.30560654401779175,
+      "learning_rate": 0.0002650284019229195,
+      "loss": 1.7378,
+      "step": 67
+    },
+    {
+      "epoch": 0.5185891325071497,
+      "grad_norm": 0.2821063697338104,
+      "learning_rate": 0.0002639083266757757,
+      "loss": 1.7452,
+      "step": 68
+    },
+    {
+      "epoch": 0.5262154432793136,
+      "grad_norm": 0.30434897541999817,
+      "learning_rate": 0.000262773356198448,
+      "loss": 1.757,
+      "step": 69
+    },
+    {
+      "epoch": 0.5338417540514776,
+      "grad_norm": 0.35866251587867737,
+      "learning_rate": 0.0002616236614133155,
+      "loss": 1.8456,
+      "step": 70
+    },
+    {
+      "epoch": 0.5414680648236415,
+      "grad_norm": 0.36527636647224426,
+      "learning_rate": 0.0002604594154601839,
+      "loss": 1.7636,
+      "step": 71
+    },
+    {
+      "epoch": 0.5490943755958055,
+      "grad_norm": 0.4293094575405121,
+      "learning_rate": 0.00025928079367021134,
+      "loss": 1.7983,
+      "step": 72
+    },
+    {
+      "epoch": 0.5567206863679695,
+      "grad_norm": 0.4558382034301758,
+      "learning_rate": 0.000258087973539504,
+      "loss": 1.8167,
+      "step": 73
+    },
+    {
+      "epoch": 0.5643469971401335,
+      "grad_norm": 0.46237555146217346,
+      "learning_rate": 0.00025688113470238616,
+      "loss": 1.8516,
+      "step": 74
+    },
+    {
+      "epoch": 0.5719733079122974,
+      "grad_norm": 0.5830234885215759,
+      "learning_rate": 0.00025566045890434747,
+      "loss": 1.8979,
+      "step": 75
+    },
+    {
+      "epoch": 0.5719733079122974,
+      "eval_loss": 1.8215560913085938,
+      "eval_runtime": 0.2804,
+      "eval_samples_per_second": 178.348,
+      "eval_steps_per_second": 46.371,
+      "step": 75
+    },
+    {
+      "epoch": 0.5795996186844614,
+      "grad_norm": 0.4658236503601074,
+      "learning_rate": 0.00025442612997467315,
+      "loss": 1.7275,
+      "step": 76
+    },
+    {
+      "epoch": 0.5872259294566253,
+      "grad_norm": 0.45749977231025696,
+      "learning_rate": 0.0002531783337987598,
+      "loss": 1.7482,
+      "step": 77
+    },
+    {
+      "epoch": 0.5948522402287894,
+      "grad_norm": 0.4462372660636902,
+      "learning_rate": 0.0002519172582901218,
+      "loss": 1.7561,
+      "step": 78
+    },
+    {
+      "epoch": 0.6024785510009533,
+      "grad_norm": 0.38078993558883667,
+      "learning_rate": 0.00025064309336209214,
+      "loss": 1.7398,
+      "step": 79
+    },
+    {
+      "epoch": 0.6101048617731173,
+      "grad_norm": 0.3360918164253235,
+      "learning_rate": 0.00024935603089922215,
+      "loss": 1.7546,
+      "step": 80
+    },
+    {
+      "epoch": 0.6177311725452812,
+      "grad_norm": 0.30906444787979126,
+      "learning_rate": 0.0002480562647283846,
+      "loss": 1.7487,
+      "step": 81
+    },
+    {
+      "epoch": 0.6253574833174452,
+      "grad_norm": 0.36905303597450256,
+      "learning_rate": 0.00024674399058958394,
+      "loss": 1.7589,
+      "step": 82
+    },
+    {
+      "epoch": 0.6329837940896091,
+      "grad_norm": 0.3760243058204651,
+      "learning_rate": 0.0002454194061064785,
+      "loss": 1.7732,
+      "step": 83
+    },
+    {
+      "epoch": 0.6406101048617732,
+      "grad_norm": 0.41075772047042847,
+      "learning_rate": 0.0002440827107566192,
+      "loss": 1.7812,
+      "step": 84
+    },
+    {
+      "epoch": 0.6482364156339371,
+      "grad_norm": 0.41905277967453003,
+      "learning_rate": 0.00024273410584140913,
+      "loss": 1.7692,
+      "step": 85
+    },
+    {
+      "epoch": 0.655862726406101,
+      "grad_norm": 0.41411152482032776,
+      "learning_rate": 0.00024137379445578774,
+      "loss": 1.8508,
+      "step": 86
+    },
+    {
+      "epoch": 0.663489037178265,
+      "grad_norm": 0.47408804297447205,
+      "learning_rate": 0.0002400019814576463,
+      "loss": 1.847,
+      "step": 87
+    },
+    {
+      "epoch": 0.6711153479504289,
+      "grad_norm": 0.4235435724258423,
+      "learning_rate": 0.00023861887343697624,
+      "loss": 1.8122,
+      "step": 88
+    },
+    {
+      "epoch": 0.678741658722593,
+      "grad_norm": 0.3859807252883911,
+      "learning_rate": 0.00023722467868475812,
+      "loss": 1.6975,
+      "step": 89
+    },
+    {
+      "epoch": 0.6863679694947569,
+      "grad_norm": 0.3468429148197174,
+      "learning_rate": 0.0002358196071615933,
+      "loss": 1.694,
+      "step": 90
+    },
+    {
+      "epoch": 0.6939942802669209,
+      "grad_norm": 0.3533165156841278,
+      "learning_rate": 0.00023440387046608487,
+      "loss": 1.6882,
+      "step": 91
+    },
+    {
+      "epoch": 0.7016205910390848,
+      "grad_norm": 0.3266445994377136,
+      "learning_rate": 0.00023297768180297187,
+      "loss": 1.6909,
+      "step": 92
+    },
+    {
+      "epoch": 0.7092469018112488,
+      "grad_norm": 0.3379365801811218,
+      "learning_rate": 0.00023154125595102083,
+      "loss": 1.7055,
+      "step": 93
+    },
+    {
+      "epoch": 0.7168732125834127,
+      "grad_norm": 0.32845547795295715,
+      "learning_rate": 0.00023009480923068157,
+      "loss": 1.7529,
+      "step": 94
+    },
+    {
+      "epoch": 0.7244995233555768,
+      "grad_norm": 0.3393206000328064,
+      "learning_rate": 0.00022863855947150968,
+      "loss": 1.7702,
+      "step": 95
+    },
+    {
+      "epoch": 0.7321258341277407,
+      "grad_norm": 0.3422520160675049,
+      "learning_rate": 0.0002271727259793624,
+      "loss": 1.7063,
+      "step": 96
+    },
+    {
+      "epoch": 0.7397521448999047,
+      "grad_norm": 0.378302663564682,
+      "learning_rate": 0.0002256975295033719,
+      "loss": 1.7602,
+      "step": 97
+    },
+    {
+      "epoch": 0.7473784556720686,
+      "grad_norm": 0.41780129075050354,
+      "learning_rate": 0.0002242131922027012,
+      "loss": 1.8039,
+      "step": 98
+    },
+    {
+      "epoch": 0.7550047664442326,
+      "grad_norm": 0.44365987181663513,
+      "learning_rate": 0.00022271993761308807,
+      "loss": 1.7738,
+      "step": 99
+    },
+    {
+      "epoch": 0.7626310772163966,
+      "grad_norm": 0.5752303600311279,
+      "learning_rate": 0.00022121799061318104,
+      "loss": 1.9044,
+      "step": 100
+    },
+    {
+      "epoch": 0.7626310772163966,
+      "eval_loss": 1.7645323276519775,
+      "eval_runtime": 0.28,
+      "eval_samples_per_second": 178.583,
+      "eval_steps_per_second": 46.431,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -409,7 +775,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.22140137734144e+16,
+  "total_flos": 4.44280275468288e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null