Nexspear committed · verified
Commit db40b40 · 1 Parent(s): 0411044

Training in progress, step 252, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:971e54de63920eed6700963a65c28021c96f5ce4caf96cc351d0df00fb6d803c
+oid sha256:dd129a1bd9e3d8ee5a1e5db9d958aed101562fb77fbab566965c550cf4cf930d
 size 100966336
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54c1dc870cb0fc047cba8029354bda92e92efcf53a26b597de339883b64b86e3
+oid sha256:f8c1d468e59b49e68a6e930bd1f26d86439dccd611e450f66da506396b180023
 size 51613348
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:792d8588f6659622a3919c79527db3d98a5bae8354f3a9892e08b902b222082b
+oid sha256:f261dee394c5079254fc75fbcc32a19145fa5f61c502d9043aac58db31a15775
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d485fc873443ef7ad597c6f4a82e93694f8fe8522b8ffecf4d60075246020043
+oid sha256:fde5cce913d9c5501edc422466fae8378d9b13dc57f22397ec7fc38f7801fc24
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03145949589903,
+  "epoch": 0.037751395078836,
   "eval_steps": 42,
-  "global_step": 210,
+  "global_step": 252,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -545,6 +545,112 @@
       "eval_samples_per_second": 35.956,
       "eval_steps_per_second": 4.497,
       "step": 210
+    },
+    {
+      "epoch": 0.031908917269016145,
+      "grad_norm": 0.15568239986896515,
+      "learning_rate": 3.165092113916688e-05,
+      "loss": 1.12,
+      "step": 213
+    },
+    {
+      "epoch": 0.03235833863900228,
+      "grad_norm": 0.16250278055667877,
+      "learning_rate": 3.118619214012286e-05,
+      "loss": 1.0651,
+      "step": 216
+    },
+    {
+      "epoch": 0.032807760008988426,
+      "grad_norm": 0.16043943166732788,
+      "learning_rate": 3.071917459263264e-05,
+      "loss": 1.1254,
+      "step": 219
+    },
+    {
+      "epoch": 0.03325718137897457,
+      "grad_norm": 0.19213679432868958,
+      "learning_rate": 3.0250041267321232e-05,
+      "loss": 1.1082,
+      "step": 222
+    },
+    {
+      "epoch": 0.033706602748960715,
+      "grad_norm": 0.17149952054023743,
+      "learning_rate": 2.9778965717534313e-05,
+      "loss": 1.0345,
+      "step": 225
+    },
+    {
+      "epoch": 0.03415602411894685,
+      "grad_norm": 0.19161058962345123,
+      "learning_rate": 2.9306122215132976e-05,
+      "loss": 1.2631,
+      "step": 228
+    },
+    {
+      "epoch": 0.034605445488933,
+      "grad_norm": 0.12983594834804535,
+      "learning_rate": 2.8831685686022897e-05,
+      "loss": 1.0125,
+      "step": 231
+    },
+    {
+      "epoch": 0.03505486685891914,
+      "grad_norm": 0.1923818439245224,
+      "learning_rate": 2.8355831645441388e-05,
+      "loss": 1.1152,
+      "step": 234
+    },
+    {
+      "epoch": 0.035504288228905286,
+      "grad_norm": 0.17539535462856293,
+      "learning_rate": 2.787873613302649e-05,
+      "loss": 1.0698,
+      "step": 237
+    },
+    {
+      "epoch": 0.03595370959889143,
+      "grad_norm": 0.20096057653427124,
+      "learning_rate": 2.7400575647692046e-05,
+      "loss": 1.0742,
+      "step": 240
+    },
+    {
+      "epoch": 0.03640313096887757,
+      "grad_norm": 0.1583949774503708,
+      "learning_rate": 2.692152708233292e-05,
+      "loss": 1.0255,
+      "step": 243
+    },
+    {
+      "epoch": 0.03685255233886371,
+      "grad_norm": 0.13673090934753418,
+      "learning_rate": 2.6441767658384366e-05,
+      "loss": 1.1167,
+      "step": 246
+    },
+    {
+      "epoch": 0.037301973708849856,
+      "grad_norm": 0.15132947266101837,
+      "learning_rate": 2.596147486025996e-05,
+      "loss": 1.053,
+      "step": 249
+    },
+    {
+      "epoch": 0.037751395078836,
+      "grad_norm": 0.1649513989686966,
+      "learning_rate": 2.5480826369692178e-05,
+      "loss": 1.1041,
+      "step": 252
+    },
+    {
+      "epoch": 0.037751395078836,
+      "eval_loss": 1.0942788124084473,
+      "eval_runtime": 312.947,
+      "eval_samples_per_second": 35.926,
+      "eval_steps_per_second": 4.493,
+      "step": 252
     }
   ],
   "logging_steps": 3,
@@ -564,7 +670,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.724628513606861e+16,
+  "total_flos": 8.058094740937114e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null