Femboyuwu2000 committed on
Commit c648498
1 Parent(s): deb1abf

Training in progress, step 5620, checkpoint

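For context, commits with this message and a `last-checkpoint/` folder are what the `transformers` `Trainer` pushes when checkpointing to the Hub is enabled. A minimal sketch of arguments consistent with the state in this checkpoint, assuming a standard `Trainer` setup (the actual training script, model, and dataset are not part of this commit):

```python
# Sketch only: the values below mirror fields visible in trainer_state.json;
# output_dir, model, and dataset are placeholders, not taken from this repo.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    num_train_epochs=2,             # matches "num_train_epochs": 2
    per_device_train_batch_size=8,  # matches "train_batch_size": 8
    logging_steps=20,               # matches "logging_steps": 20
    save_steps=20,                  # matches "save_steps": 20 (checkpoint every 20 steps)
    push_to_hub=True,
    hub_strategy="checkpoint",      # keeps only the latest checkpoint under last-checkpoint/
)
# A Trainer built with these arguments, e.g.
#   Trainer(model=model, args=args, train_dataset=train_dataset).train()
# commits "Training in progress, step N, checkpoint" every save_steps steps.
```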
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5fd06c9f2222239e592302c7e30afefc517d5cb1a680437c4ab7fea503265580
+oid sha256:551f265b0824afe04aca693b6dea37fd5b135be5b8dccb283b485991e4dad3cf
 size 13982248
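Each of the binaries in `last-checkpoint/` is stored through Git LFS, so the diffs here only swap the pointer file: `oid` is the SHA-256 of the new file contents and `size` is its byte count. A minimal sketch of how such a pointer is derived (the path below is illustrative):

```python
# Recompute a Git LFS pointer for a local file (illustrative; any path works).
import hashlib
import os

def lfs_pointer(path: str) -> str:
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    return (
        "version https://git-lfs.github.com/spec/v1\n"
        f"oid sha256:{sha.hexdigest()}\n"
        f"size {os.path.getsize(path)}\n"
    )

# Example: regenerate the pointer text shown above for the adapter weights.
print(lfs_pointer("last-checkpoint/adapter_model.safetensors"))
```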
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:499ab8ad010c3a6f278b1cd553e39c4fc0711e8fcf3b7a6ccdda457fd792691a
+oid sha256:d57e143f333860daa20b388b6223f6ddf41f78b6f0260df5e7f84606866d22b2
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:118a9b4f162443ffba8ddef62d094103298e7cce32dfd348189ba8ded99c2d30
+oid sha256:346e59e0611ec165950f793dc7bab0e7711af8e5091b452f221d2a71732c2269
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:067ea48f98f205db606896f3d7b13ff5b4456f48e2572b42a96efcfda3855838
+oid sha256:29d2f36af1b7bb2b6336bf3a3f577dcb5c5a5a76d17401f059ff27c28f1ca2cc
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.448,
+  "epoch": 0.4496,
   "eval_steps": 500,
-  "global_step": 5600,
+  "global_step": 5620,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1967,6 +1967,13 @@
       "learning_rate": 2.5345856659171567e-05,
       "loss": 3.4033,
       "step": 5600
+    },
+    {
+      "epoch": 0.45,
+      "grad_norm": 34.580299377441406,
+      "learning_rate": 2.5310626050032873e-05,
+      "loss": 3.5598,
+      "step": 5620
     }
   ],
   "logging_steps": 20,
@@ -1974,7 +1981,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.3255983512322048e+16,
+  "total_flos": 1.3302567908179968e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null