leixa committed on
Commit ebd298e · verified · 1 Parent(s): a9f3e2c

Training in progress, step 217, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53edbf1c300db97f7cf757d113c0f32b09d47e4c871522e2a7fdffb0b7294fa5
+oid sha256:4a069365120e85adb14a02be1d6d1bcb7f57bd56552e43dae1279161a73ba747
 size 639691872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c77201b64afa737acfe0f1a12b83c348c32769eda957cf5d48833dae0c011405
+oid sha256:038ad53f8bbc1f098ba079bddbf6a3f8098ff367610e685c65b90cc790e33514
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c7954fa5892170562c4c3a384fdeff71992c4c66cf9afa61a87dcba009edcf0
+oid sha256:8da25d38ebcf1dfe8351fa92f64b9a44da3c87afd204df29f93fba4ad49762fa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f7a7e20609bcc2282e67bffd32305d513fe0d2d1256b3de4e851bbc232952907
+oid sha256:a89729c98649b6d89eebc77276357ac5f2594b10fb1a49cf61a28a5fa2b4c3d3
 size 1064
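
The four files above (adapter_model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt) are Git LFS pointer stubs, so only the sha256 oid changes between checkpoints while the sizes stay constant. Below is a minimal sketch for checking a locally downloaded adapter against the new pointer digest; the expected digest is taken from this diff, while the local path and helper name are assumptions for illustration:

    import hashlib

    def lfs_sha256(path, chunk_size=1 << 20):
        """Compute the sha256 digest of a file, streamed in chunks."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # Expected oid from the updated adapter_model.safetensors pointer above.
    expected = "4a069365120e85adb14a02be1d6d1bcb7f57bd56552e43dae1279161a73ba747"
    # Local path is an assumption: wherever this repo was cloned or downloaded.
    actual = lfs_sha256("last-checkpoint/adapter_model.safetensors")
    print("match" if actual == expected else f"mismatch: {actual}")

The same check applies to the other three files with their respective oids.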
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.5091277890466532,
+  "epoch": 1.7606490872210954,
   "eval_steps": 31,
-  "global_step": 186,
+  "global_step": 217,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -497,6 +497,84 @@
       "eval_samples_per_second": 15.494,
       "eval_steps_per_second": 1.937,
       "step": 186
+    },
+    {
+      "epoch": 1.5334685598377282,
+      "grad_norm": 15.1577730178833,
+      "learning_rate": 5.04363267749187e-05,
+      "loss": 1.6155,
+      "step": 189
+    },
+    {
+      "epoch": 1.5578093306288032,
+      "grad_norm": 4.470703125,
+      "learning_rate": 4.912737967813583e-05,
+      "loss": 1.6756,
+      "step": 192
+    },
+    {
+      "epoch": 1.5821501014198782,
+      "grad_norm": 5.094776630401611,
+      "learning_rate": 4.781903063173321e-05,
+      "loss": 1.5824,
+      "step": 195
+    },
+    {
+      "epoch": 1.6064908722109532,
+      "grad_norm": 3.8165762424468994,
+      "learning_rate": 4.6512176312793736e-05,
+      "loss": 1.5824,
+      "step": 198
+    },
+    {
+      "epoch": 1.6308316430020284,
+      "grad_norm": 5.532138347625732,
+      "learning_rate": 4.52077123739888e-05,
+      "loss": 1.744,
+      "step": 201
+    },
+    {
+      "epoch": 1.6551724137931034,
+      "grad_norm": 4.430169582366943,
+      "learning_rate": 4.390653282974264e-05,
+      "loss": 1.6626,
+      "step": 204
+    },
+    {
+      "epoch": 1.6795131845841786,
+      "grad_norm": 3.9386327266693115,
+      "learning_rate": 4.260952944351947e-05,
+      "loss": 1.4812,
+      "step": 207
+    },
+    {
+      "epoch": 1.7038539553752536,
+      "grad_norm": 4.511838436126709,
+      "learning_rate": 4.131759111665349e-05,
+      "loss": 1.7028,
+      "step": 210
+    },
+    {
+      "epoch": 1.7281947261663286,
+      "grad_norm": 4.205485820770264,
+      "learning_rate": 4.003160327914015e-05,
+      "loss": 1.761,
+      "step": 213
+    },
+    {
+      "epoch": 1.7525354969574036,
+      "grad_norm": 4.336887836456299,
+      "learning_rate": 3.875244728280676e-05,
+      "loss": 1.5719,
+      "step": 216
+    },
+    {
+      "epoch": 1.7606490872210954,
+      "eval_loss": 0.49112430214881897,
+      "eval_runtime": 13.4322,
+      "eval_samples_per_second": 15.485,
+      "eval_steps_per_second": 1.936,
+      "step": 217
     }
   ],
   "logging_steps": 3,
@@ -516,7 +594,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.4718116806197248e+17,
+  "total_flos": 2.884265370640712e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null