leixa committed on
Commit 7f3aed1
1 Parent(s): 9a1e8cd

Training in progress, step 173, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1af31d62fe7961d7981cc90313d8f2b921e6bc728f41b25e05b784db15723ebb
+oid sha256:eae2757e83b9aeaeed247f86747314384a73a443319ad6b3434d372d0306e0a9
 size 1001465824
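
Only the LFS pointer's oid changes for each checkpoint file; the payload size stays the same. As a minimal sketch (assuming the LFS object has already been pulled to last-checkpoint/adapter_model.safetensors locally), the new pointer can be checked against the downloaded bytes like this:

    # Minimal sketch: verify a pulled LFS file against the pointer's sha256.
    # The path and expected digest below are taken from this commit's diff.
    import hashlib

    expected = "eae2757e83b9aeaeed247f86747314384a73a443319ad6b3434d372d0306e0a9"
    path = "last-checkpoint/adapter_model.safetensors"

    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks so the ~1 GB adapter file is not read into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    print("match" if h.hexdigest() == expected else "mismatch")

The same check applies to the other pointer files below; only the expected digest and path change.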
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:da4651a70e13d122e85d8dc112c706bff35297761fdc3aa64ff81ba9a879282c
+oid sha256:046a0cf1b275f01298af197a0ea8c1b133b20f129c8cc0fba9d6d416f23eec48
 size 509176980
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:380ec0b049b37b314671979349104a52b81fba5d4ab1fa2da5437554df1c19ce
+oid sha256:ed7ef44e0be5ae6f671b8552794c084c69f5daa1ee2258881e51a57db189afdc
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:76f9fa65284e4b8f8e69031f6dc478e117b8d1a8c08ea32a69c55e7f794584df
+oid sha256:13cea8c13b7ff143621afe3809a9208b72f31395569d02b28f3b5848f77c0a7e
 size 1064
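
optimizer.pt, scheduler.pt and rng_state.pth are the auxiliary states the Trainer needs to resume exactly where step 173 left off. A rough sketch for inspecting them locally, assuming PyTorch is installed and the LFS files have been pulled into last-checkpoint/:

    # Rough sketch: peek at the auxiliary checkpoint files with PyTorch.
    # weights_only=False is needed on newer PyTorch for these pickled states;
    # only do this for checkpoints you trust.
    import torch

    optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
    scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
    rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

    # Optimizer state is a dict with per-parameter "state" and "param_groups".
    print(list(optimizer_state.keys()))
    print(list(scheduler_state.keys()))
    print(type(rng_state))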
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.865800865800866,
+  "epoch": 3.0086580086580086,
   "eval_steps": 15,
-  "global_step": 165,
+  "global_step": 173,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -488,6 +488,20 @@
       "eval_samples_per_second": 2.232,
       "eval_steps_per_second": 0.299,
       "step": 165
+    },
+    {
+      "epoch": 2.9177489177489178,
+      "grad_norm": 1.2679266929626465,
+      "learning_rate": 2.319895532739369e-07,
+      "loss": 1.6958,
+      "step": 168
+    },
+    {
+      "epoch": 2.9696969696969697,
+      "grad_norm": 1.2044878005981445,
+      "learning_rate": 3.7142468185014104e-08,
+      "loss": 1.697,
+      "step": 171
     }
   ],
   "logging_steps": 3,
@@ -502,12 +516,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 8.481582471551386e+17,
+  "total_flos": 8.887307373379584e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
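
With this commit the checkpoint records global_step 173 and should_training_stop set to true, so the run has reached its final step. A hedged sketch for inspecting the last logged entries straight from trainer_state.json (the last-checkpoint/ path mirrors this repo's layout; adjust it if you clone elsewhere):

    # Minimal sketch: read the checkpoint's trainer_state.json and print the
    # last few log_history entries (step, loss, learning rate).
    import json

    with open("last-checkpoint/trainer_state.json") as f:
        state = json.load(f)

    print("epoch:", state["epoch"], "global_step:", state["global_step"])
    for entry in state["log_history"][-3:]:
        print(entry.get("step"), entry.get("loss"), entry.get("learning_rate"))

Resuming is not needed here since training has stopped, but if it were, the transformers Trainer accepts this directory via trainer.train(resume_from_checkpoint="last-checkpoint").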