Femboyuwu2000 committed on
Commit 561676c
1 Parent(s): c005757

Training in progress, step 6620, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ae6463dbdc2d91e9b1322891346f3833afe4775061c6f0d3b48157d6dc75931b
+ oid sha256:cf37fb9d99a5a966a288465c2366c3d30e92057587f61bde6eb015a0741f5df4
  size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:15999c7ecd152d668237b91758fdf2cc3d249213db421bac4f67f330f90f0aed
+ oid sha256:86a468f66a69a897ea8cb002c953ac648196c88c5fe7343f089f868b782961b7
  size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c054359dfd0c186601f9d93f0acef8011e2b1aa8a1492ec2b57ae90a1bc5794b
+ oid sha256:121f9d4684e576b3166761513a3b4b3158536830adb54aef4bcf454539fb4e2a
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9cc67bbe12a73a6303ede190a9fcdf4c085df79ad7d2b182ce6b8dfd5dcdfecd
+ oid sha256:8204e0c4867e8d38c506caa9e5d81b9a9b7021fd5bfa716504e4e01e9745e60f
  size 1064
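
Each of the four binary files above is stored through Git LFS, so the diff only swaps the sha256 oid in the pointer while the recorded byte size stays the same. A minimal sketch of how one might check a downloaded copy of a checkpoint file against its pointer's oid and size (the local path and the helper name are assumptions for illustration, not part of this repository):

import hashlib
import os

def verify_lfs_pointer(local_path, expected_oid, expected_size):
    # Compare the on-disk file against the oid/size recorded in the LFS pointer.
    if os.path.getsize(local_path) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# Values taken from the adapter_model.safetensors pointer after this commit.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",  # assumed local path
    "cf37fb9d99a5a966a288465c2366c3d30e92057587f61bde6eb015a0741f5df4",
    13982248,
))
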
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.528,
+ "epoch": 0.5296,
  "eval_steps": 500,
- "global_step": 6600,
+ "global_step": 6620,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2317,6 +2317,13 @@
  "learning_rate": 2.3459355447763596e-05,
  "loss": 3.4875,
  "step": 6600
+ },
+ {
+ "epoch": 0.53,
+ "grad_norm": 28.182842254638672,
+ "learning_rate": 2.341919248766422e-05,
+ "loss": 3.5085,
+ "step": 6620
  }
  ],
  "logging_steps": 20,
@@ -2324,7 +2331,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 20,
- "total_flos": 1.5577756363063296e+16,
+ "total_flos": 1.5626866814189568e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null