Femboyuwu2000 committed on
Commit 324db61
1 Parent(s): 6440380

Training in progress, step 560, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7042f9ed08ad702b4807e2082f62b7c77edad4356283c5cfef4c2ac8b3b06034
+ oid sha256:796907df6dad182b13ce60cc0ccb07c112a82034aaa5441dd30b61e18d46838c
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ea2f7979011099fe7dff43f9cdaa7c8c71ecebd10fcfa3e69aea88e5fe4630df
+ oid sha256:d4690d06bae8821a7b6f9abbbdeddc59340b43f3b519fcd1343c54658a8e61c5
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cad5804f88e56590ede594cde634ed7edf004b94b62834acab77d105f3c4294b
+ oid sha256:5c6995b24c4f5e8f78c1ffb24f2e17f0e7ac641fd12550a93bad04b92d9a530b
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8954cfae79acdcb584a4c2168efc0e60ef154434502bd57627e490c360dd8a90
+ oid sha256:fa82337c72273104e0d7cc27c48a05e78f4a8c0defe4d7528f9fd4baa01b222e
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.0432,
+   "epoch": 0.0448,
    "eval_steps": 500,
-   "global_step": 540,
+   "global_step": 560,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -196,6 +196,13 @@
      "learning_rate": 2.7000000000000002e-05,
      "loss": 3.9166,
      "step": 540
+     },
+     {
+      "epoch": 0.04,
+      "grad_norm": 23.113840103149414,
+      "learning_rate": 2.8e-05,
+      "loss": 3.8248,
+      "step": 560
      }
    ],
    "logging_steps": 20,
@@ -203,7 +210,7 @@
    "num_input_tokens_seen": 0,
    "num_train_epochs": 2,
    "save_steps": 20,
-   "total_flos": 1299081332097024.0,
+   "total_flos": 1347371635408896.0,
    "train_batch_size": 8,
    "trial_name": null,
    "trial_params": null