ardaspear committed · verified
Commit 864a2de · 1 Parent(s): 8be5586

Training in progress, step 238, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b11ec8052c5552f688ed9d416868cd53ddb56b5d87ead5a1ac28143162119f3
+oid sha256:94712826a7f2c0b7756c1d1754ed196d80b21d0e57551e1ca4f6dcd8cb62a183
 size 72396376
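
Only the pointer changes in the diff above; the new adapter weights themselves live on the LFS backend. Once the blob has been pulled locally (e.g. `git lfs pull`), one quick way to inspect it is to load it with `safetensors` and tally the tensors. A minimal sketch, assuming the file sits at the path shown:

```python
from safetensors.torch import load_file

# Assumes the LFS blob has actually been downloaded (git lfs pull), not just the pointer.
tensors = load_file("last-checkpoint/adapter_model.safetensors")

n_params = sum(t.numel() for t in tensors.values())
n_bytes = sum(t.numel() * t.element_size() for t in tensors.values())
print(f"{len(tensors)} tensors, {n_params:,} parameters, {n_bytes:,} payload bytes")
# The pointer's "size 72396376" also counts the safetensors header,
# so it is slightly larger than the raw tensor payload.
```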
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c07cb456027b1e8133d5349eb14073d4e3f3071ed39e24a44c43b361c9a7e331
+oid sha256:c760d568f970c9090c6b12152f53f090933d3e37f77d860d804763cad359c610
 size 37134420
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8513bc15602cc9778e1a951f5ade81824ac1d664cd10d25d7f7f817465e8501d
+oid sha256:914b1fa34e1d3c8a9975e1e5a238a3740456137dacecfe916285c613ce13f0db
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f9839d107756d9c8815de9164f2ebf92c05b3536704a349ca5892084df7663e
+oid sha256:bbcef9424696e41c7961bd91f0570d39d59ef33af28ed19a0eb9e4f50ed1b09a
 size 1064
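
Each of the four files above is a Git LFS pointer (a `version` line, an `oid sha256:` line, and a `size` line), so the only reviewable change per file is the new object hash. A minimal sketch for checking a downloaded blob against its pointer, assuming hypothetical local paths for both files:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    """Split the 'key value' lines of an LFS pointer file into a dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(pointer_path: Path, blob_path: Path) -> bool:
    """True if blob_path has the sha256 oid and byte size recorded in the pointer."""
    fields = parse_lfs_pointer(pointer_path.read_text())
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    digest = hashlib.sha256(blob_path.read_bytes()).hexdigest()
    return digest == expected_oid and blob_path.stat().st_size == expected_size

# Hypothetical paths: a saved copy of the pointer text next to the downloaded blob.
print(matches_pointer(Path("optimizer.pt.pointer"), Path("last-checkpoint/optimizer.pt")))
```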
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.05800398066533978,
+  "epoch": 0.06767131077622975,
   "eval_steps": 34,
-  "global_step": 204,
+  "global_step": 238,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -539,6 +539,91 @@
       "eval_samples_per_second": 35.272,
       "eval_steps_per_second": 4.412,
       "step": 204
+    },
+    {
+      "epoch": 0.05885698038100654,
+      "grad_norm": 0.47230827808380127,
+      "learning_rate": 2.459724913431772e-05,
+      "loss": 0.5152,
+      "step": 207
+    },
+    {
+      "epoch": 0.059709980096673304,
+      "grad_norm": 0.5665333867073059,
+      "learning_rate": 2.399335149726463e-05,
+      "loss": 0.5736,
+      "step": 210
+    },
+    {
+      "epoch": 0.06056297981234006,
+      "grad_norm": 0.47652244567871094,
+      "learning_rate": 2.3390041714589514e-05,
+      "loss": 0.5006,
+      "step": 213
+    },
+    {
+      "epoch": 0.06141597952800682,
+      "grad_norm": 0.7172293663024902,
+      "learning_rate": 2.2787672102216042e-05,
+      "loss": 0.4619,
+      "step": 216
+    },
+    {
+      "epoch": 0.06226897924367358,
+      "grad_norm": 0.5251888036727905,
+      "learning_rate": 2.2186594427034864e-05,
+      "loss": 0.4852,
+      "step": 219
+    },
+    {
+      "epoch": 0.06312197895934035,
+      "grad_norm": 0.45427563786506653,
+      "learning_rate": 2.1587159701481716e-05,
+      "loss": 0.4936,
+      "step": 222
+    },
+    {
+      "epoch": 0.06397497867500711,
+      "grad_norm": 0.49813351035118103,
+      "learning_rate": 2.098971797855599e-05,
+      "loss": 0.5062,
+      "step": 225
+    },
+    {
+      "epoch": 0.06482797839067388,
+      "grad_norm": 0.510427713394165,
+      "learning_rate": 2.0394618147399713e-05,
+      "loss": 0.4742,
+      "step": 228
+    },
+    {
+      "epoch": 0.06568097810634063,
+      "grad_norm": 0.5860615372657776,
+      "learning_rate": 1.980220772955602e-05,
+      "loss": 0.5472,
+      "step": 231
+    },
+    {
+      "epoch": 0.06653397782200739,
+      "grad_norm": 0.45956236124038696,
+      "learning_rate": 1.921283267602643e-05,
+      "loss": 0.5065,
+      "step": 234
+    },
+    {
+      "epoch": 0.06738697753767416,
+      "grad_norm": 0.46069616079330444,
+      "learning_rate": 1.8626837165245165e-05,
+      "loss": 0.4309,
+      "step": 237
+    },
+    {
+      "epoch": 0.06767131077622975,
+      "eval_loss": 0.4809924364089966,
+      "eval_runtime": 167.8669,
+      "eval_samples_per_second": 35.29,
+      "eval_steps_per_second": 4.414,
+      "step": 238
     }
   ],
   "logging_steps": 3,
@@ -558,7 +643,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.532800359222477e+16,
+  "total_flos": 7.62160041909289e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null