Femboyuwu2000 committed
Commit 92c3a30 (verified) · 1 Parent(s): efc87cd

Training in progress, step 6520, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98e08bf1a33a98f5ad95a313e7a7f611270dd303d602c0511a878d938534fca8
+oid sha256:ab44c0650fc35117528c103709aeefeda1ba5a0c1b201a042c443fced883ac06
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4631ab9889a8ec8ca19b9d63d135f464285d7b10f4a57df37d5602c15eb4157e
+oid sha256:b8f8827e725054a7d3aa47b66a00e417f27bf58195e5d53d80134e98a97b930d
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:327156111fc189e7e1e06944c3644c0b0e602db7096e3ae9ec3b005b4a71556b
+oid sha256:966b1d0c91c0efe2171d6c761e8fb3dd8dd23b43cb7c345758210f8c5726e8a5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:559bbd1bba34568007cb524880da5ab92a31da308e4db32d6963a8a220ceec7d
+oid sha256:9b9c2686d4c99776f5194741a1ee501ae1a6b12f42eb0cf041ec1c8a8c483c5a
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.52,
+  "epoch": 0.5216,
   "eval_steps": 500,
-  "global_step": 6500,
+  "global_step": 6520,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2282,6 +2282,13 @@
       "learning_rate": 2.3658830835585294e-05,
       "loss": 3.5093,
       "step": 6500
+    },
+    {
+      "epoch": 0.52,
+      "grad_norm": 37.4576530456543,
+      "learning_rate": 2.361911574288736e-05,
+      "loss": 3.5952,
+      "step": 6520
     }
   ],
   "logging_steps": 20,
@@ -2289,7 +2296,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5342734805368832e+16,
+  "total_flos": 1.539204209197056e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null