diaenra committed on
Commit e301bd6
1 Parent(s): 96ce45c

Training in progress, step 100, checkpoint

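All of the changed files below sit under last-checkpoint/, the layout the transformers Trainer uses when it pushes its most recent checkpoint to the Hub. As a hedged sketch only (logging_steps=1, eval_steps=50, and train_batch_size=2 come from the trainer_state.json shown below; everything else, including the hub settings, is an assumption about how this commit was produced), a configuration along these lines would yield commits of this shape at every save step:

```python
from transformers import TrainingArguments

# Hedged sketch: values mirror what trainer_state.json reports
# (logging_steps=1, eval_steps=50, train_batch_size=2). The hub
# settings are assumptions about how this checkpoint push happened,
# not facts taken from the commit itself.
args = TrainingArguments(
    output_dir="miner_id_24",
    per_device_train_batch_size=2,
    logging_steps=1,
    evaluation_strategy="steps",
    eval_steps=50,
    save_strategy="steps",
    save_steps=50,
    load_best_model_at_end=True,
    push_to_hub=True,
    hub_strategy="checkpoint",  # pushes the latest checkpoint into last-checkpoint/
)
```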
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66d3d3ee67e131a2ad07a12e7f41a7bd55c78ff361880af8eb7c687c1297a684
+ oid sha256:7abe2422a0995616bf17d085c9084821d9ef6028586d94c0f2756ec0b713fbf8
  size 50624
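Each of the binary files in this commit is tracked as a Git LFS pointer, so the diff only swaps the sha256 oid while the declared size stays the same. A minimal sketch for checking a downloaded blob against its pointer (the file paths are illustrative, not part of the commit):

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a Git LFS pointer's oid/size fields with the blob on disk."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size
```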
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cddb4435ae641fb922d75092cc02ea07aed5b3f10c88fe825c1d664ff3b5c380
+ oid sha256:82ffcfbea78f3e6a85dc8887d2a9cc3ea352d2e9baed6ee77ad3e59a43ed37e6
  size 118090
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6c8a9a5c60432d947e5ee71e8ed56ddf7215842b602093d59593c5433f0453b7
+ oid sha256:1532fcda08409d1eee43e4ac1a56b777d52d2c75477bbd2f451ae7c444979d2a
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9567f2f3182e832808fd621212e0dd5e8f1a88bd24ddda3ea0d289496073738c
+ oid sha256:3a60c7d771c1fd156acee762fba03c724cb41829a3f71df370ecd1d20b134982
  size 1064
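Besides the adapter weights, the checkpoint carries the optimizer state, the learning-rate scheduler state, and an RNG snapshot, which is what lets training resume exactly where it stopped. A hedged sketch for inspecting those files with plain PyTorch (paths assume the last-checkpoint/ layout above; in practice the Trainer restores them itself via resume_from_checkpoint):

```python
import torch

# Sketch only: load the checkpoint sidecar files for inspection.
# weights_only=False because these are trusted local pickles that
# contain more than bare tensors (param groups, RNG snapshots).
optimizer_state = torch.load("last-checkpoint/optimizer.pt",
                             map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt",
                             map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth",
                       map_location="cpu", weights_only=False)

print(optimizer_state.keys())   # typically 'state' and 'param_groups'
print(scheduler_state)          # e.g. last_epoch, base_lrs, ...
print(rng_state.keys())         # python/numpy/cpu (and cuda) RNG states
```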
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 11.756505966186523,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.2518891687657431,
+ "best_metric": 11.747422218322754,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.5037783375314862,
  "eval_steps": 50,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -373,6 +373,364 @@
  "eval_samples_per_second": 60.803,
  "eval_steps_per_second": 30.401,
  "step": 50
+ },
+ {
+ "epoch": 0.25692695214105793,
+ "grad_norm": 0.12329468876123428,
+ "learning_rate": 0.00010495837546732224,
+ "loss": 11.7644,
+ "step": 51
+ },
+ {
+ "epoch": 0.2619647355163728,
+ "grad_norm": 0.19282935559749603,
+ "learning_rate": 0.00010165339447663587,
+ "loss": 11.7626,
+ "step": 52
+ },
+ {
+ "epoch": 0.26700251889168763,
+ "grad_norm": 0.13012297451496124,
+ "learning_rate": 9.834660552336415e-05,
+ "loss": 11.7701,
+ "step": 53
+ },
+ {
+ "epoch": 0.27204030226700254,
+ "grad_norm": 0.1542385220527649,
+ "learning_rate": 9.504162453267777e-05,
+ "loss": 11.748,
+ "step": 54
+ },
+ {
+ "epoch": 0.2770780856423174,
+ "grad_norm": 0.13558229804039001,
+ "learning_rate": 9.174206545276677e-05,
+ "loss": 11.7566,
+ "step": 55
+ },
+ {
+ "epoch": 0.28211586901763225,
+ "grad_norm": 0.1677481085062027,
+ "learning_rate": 8.845153630304139e-05,
+ "loss": 11.7563,
+ "step": 56
+ },
+ {
+ "epoch": 0.2871536523929471,
+ "grad_norm": 0.20597466826438904,
+ "learning_rate": 8.517363522881579e-05,
+ "loss": 11.7526,
+ "step": 57
+ },
+ {
+ "epoch": 0.29219143576826195,
+ "grad_norm": 0.17000000178813934,
+ "learning_rate": 8.191194656678904e-05,
+ "loss": 11.7474,
+ "step": 58
+ },
+ {
+ "epoch": 0.2972292191435768,
+ "grad_norm": 0.2849511206150055,
+ "learning_rate": 7.867003692562534e-05,
+ "loss": 11.7591,
+ "step": 59
+ },
+ {
+ "epoch": 0.3022670025188917,
+ "grad_norm": 0.19527281820774078,
+ "learning_rate": 7.54514512859201e-05,
+ "loss": 11.7456,
+ "step": 60
+ },
+ {
+ "epoch": 0.30730478589420657,
+ "grad_norm": 0.25237226486206055,
+ "learning_rate": 7.225970912381556e-05,
+ "loss": 11.7602,
+ "step": 61
+ },
+ {
+ "epoch": 0.3123425692695214,
+ "grad_norm": 0.24200652539730072,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 11.7528,
+ "step": 62
+ },
+ {
+ "epoch": 0.31738035264483627,
+ "grad_norm": 0.19608649611473083,
+ "learning_rate": 6.59706825558357e-05,
+ "loss": 11.7477,
+ "step": 63
+ },
+ {
+ "epoch": 0.3224181360201511,
+ "grad_norm": 0.2160205841064453,
+ "learning_rate": 6.28802751081779e-05,
+ "loss": 11.7498,
+ "step": 64
+ },
+ {
+ "epoch": 0.327455919395466,
+ "grad_norm": 0.3006030321121216,
+ "learning_rate": 5.983045753470308e-05,
+ "loss": 11.7526,
+ "step": 65
+ },
+ {
+ "epoch": 0.33249370277078083,
+ "grad_norm": 0.2874875068664551,
+ "learning_rate": 5.6824564766150726e-05,
+ "loss": 11.7516,
+ "step": 66
+ },
+ {
+ "epoch": 0.33753148614609574,
+ "grad_norm": 0.34401634335517883,
+ "learning_rate": 5.386588370213124e-05,
+ "loss": 11.7539,
+ "step": 67
+ },
+ {
+ "epoch": 0.3425692695214106,
+ "grad_norm": 0.33066168427467346,
+ "learning_rate": 5.095764961694922e-05,
+ "loss": 11.752,
+ "step": 68
+ },
+ {
+ "epoch": 0.34760705289672544,
+ "grad_norm": 0.29201266169548035,
+ "learning_rate": 4.810304262187852e-05,
+ "loss": 11.743,
+ "step": 69
+ },
+ {
+ "epoch": 0.3526448362720403,
+ "grad_norm": 0.34607893228530884,
+ "learning_rate": 4.530518418775733e-05,
+ "loss": 11.7538,
+ "step": 70
+ },
+ {
+ "epoch": 0.35768261964735515,
+ "grad_norm": 0.2541449964046478,
+ "learning_rate": 4.256713373170564e-05,
+ "loss": 11.7628,
+ "step": 71
+ },
+ {
+ "epoch": 0.36272040302267,
+ "grad_norm": 0.29604771733283997,
+ "learning_rate": 3.9891885271697496e-05,
+ "loss": 11.7578,
+ "step": 72
+ },
+ {
+ "epoch": 0.3677581863979849,
+ "grad_norm": 0.3188517093658447,
+ "learning_rate": 3.7282364152646297e-05,
+ "loss": 11.7621,
+ "step": 73
+ },
+ {
+ "epoch": 0.37279596977329976,
+ "grad_norm": 0.24045149981975555,
+ "learning_rate": 3.4741423847583134e-05,
+ "loss": 11.7587,
+ "step": 74
+ },
+ {
+ "epoch": 0.3778337531486146,
+ "grad_norm": 0.27738526463508606,
+ "learning_rate": 3.227184283742591e-05,
+ "loss": 11.7587,
+ "step": 75
+ },
+ {
+ "epoch": 0.38287153652392947,
+ "grad_norm": 0.20490190386772156,
+ "learning_rate": 2.9876321572751144e-05,
+ "loss": 11.736,
+ "step": 76
+ },
+ {
+ "epoch": 0.3879093198992443,
+ "grad_norm": 0.34930336475372314,
+ "learning_rate": 2.7557479520891104e-05,
+ "loss": 11.7466,
+ "step": 77
+ },
+ {
+ "epoch": 0.3929471032745592,
+ "grad_norm": 0.3548698425292969,
+ "learning_rate": 2.5317852301584643e-05,
+ "loss": 11.748,
+ "step": 78
+ },
+ {
+ "epoch": 0.3979848866498741,
+ "grad_norm": 0.22995570302009583,
+ "learning_rate": 2.315988891431412e-05,
+ "loss": 11.7384,
+ "step": 79
+ },
+ {
+ "epoch": 0.40302267002518893,
+ "grad_norm": 0.39114826917648315,
+ "learning_rate": 2.1085949060360654e-05,
+ "loss": 11.7427,
+ "step": 80
+ },
+ {
+ "epoch": 0.4080604534005038,
+ "grad_norm": 0.3221098482608795,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 11.7438,
+ "step": 81
+ },
+ {
+ "epoch": 0.41309823677581864,
+ "grad_norm": 0.5230813026428223,
+ "learning_rate": 1.7199116885197995e-05,
+ "loss": 11.7491,
+ "step": 82
+ },
+ {
+ "epoch": 0.4181360201511335,
+ "grad_norm": 0.5868245363235474,
+ "learning_rate": 1.5390474757906446e-05,
+ "loss": 11.7492,
+ "step": 83
+ },
+ {
+ "epoch": 0.42317380352644834,
+ "grad_norm": 0.7091190218925476,
+ "learning_rate": 1.3674351904242611e-05,
+ "loss": 11.7395,
+ "step": 84
+ },
+ {
+ "epoch": 0.4282115869017632,
+ "grad_norm": 0.34904906153678894,
+ "learning_rate": 1.2052624879351104e-05,
+ "loss": 11.7469,
+ "step": 85
+ },
+ {
+ "epoch": 0.4332493702770781,
+ "grad_norm": 0.497840017080307,
+ "learning_rate": 1.0527067017923654e-05,
+ "loss": 11.7541,
+ "step": 86
+ },
+ {
+ "epoch": 0.43828715365239296,
+ "grad_norm": 0.6534344553947449,
+ "learning_rate": 9.09934649508375e-06,
+ "loss": 11.7602,
+ "step": 87
+ },
+ {
+ "epoch": 0.4433249370277078,
+ "grad_norm": 0.6424288749694824,
+ "learning_rate": 7.771024502261526e-06,
+ "loss": 11.7518,
+ "step": 88
+ },
+ {
+ "epoch": 0.44836272040302266,
+ "grad_norm": 0.6290802955627441,
+ "learning_rate": 6.543553540053926e-06,
+ "loss": 11.7363,
+ "step": 89
+ },
+ {
+ "epoch": 0.4534005037783375,
+ "grad_norm": 0.6020405292510986,
+ "learning_rate": 5.418275829936537e-06,
+ "loss": 11.7478,
+ "step": 90
+ },
+ {
+ "epoch": 0.45843828715365237,
+ "grad_norm": 0.5933429002761841,
+ "learning_rate": 4.3964218465642355e-06,
+ "loss": 11.7431,
+ "step": 91
+ },
+ {
+ "epoch": 0.4634760705289673,
+ "grad_norm": 0.7595274448394775,
+ "learning_rate": 3.4791089722651436e-06,
+ "loss": 11.746,
+ "step": 92
+ },
+ {
+ "epoch": 0.46851385390428213,
+ "grad_norm": 0.7738293409347534,
+ "learning_rate": 2.667340275199426e-06,
+ "loss": 11.7478,
+ "step": 93
+ },
+ {
+ "epoch": 0.473551637279597,
+ "grad_norm": 0.6566546559333801,
+ "learning_rate": 1.9620034125190644e-06,
+ "loss": 11.7331,
+ "step": 94
+ },
+ {
+ "epoch": 0.47858942065491183,
+ "grad_norm": 0.790342390537262,
+ "learning_rate": 1.3638696597277679e-06,
+ "loss": 11.7377,
+ "step": 95
+ },
+ {
+ "epoch": 0.4836272040302267,
+ "grad_norm": 0.7581157684326172,
+ "learning_rate": 8.735930673024806e-07,
+ "loss": 11.7515,
+ "step": 96
+ },
+ {
+ "epoch": 0.48866498740554154,
+ "grad_norm": 0.8844296932220459,
+ "learning_rate": 4.917097454988584e-07,
+ "loss": 11.715,
+ "step": 97
+ },
+ {
+ "epoch": 0.49370277078085645,
+ "grad_norm": 0.9089491963386536,
+ "learning_rate": 2.1863727812254653e-07,
+ "loss": 11.7192,
+ "step": 98
+ },
+ {
+ "epoch": 0.4987405541561713,
+ "grad_norm": 0.14338494837284088,
+ "learning_rate": 5.467426590739511e-08,
+ "loss": 11.7708,
+ "step": 99
+ },
+ {
+ "epoch": 0.5037783375314862,
+ "grad_norm": 0.16467101871967316,
+ "learning_rate": 0.0,
+ "loss": 11.7612,
+ "step": 100
+ },
+ {
+ "epoch": 0.5037783375314862,
+ "eval_loss": 11.747422218322754,
+ "eval_runtime": 1.3803,
+ "eval_samples_per_second": 60.857,
+ "eval_steps_per_second": 30.429,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -396,12 +754,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 20370004377600.0,
+ "total_flos": 40740008755200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null