Femboyuwu2000 committed on
Commit 4e11a86
1 Parent(s): 6a1b362

Training in progress, step 7220, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d33ad5ad7297a3474565ac786f8f9d70a33eb5627d08f5a697c138ffc15d88d1
+oid sha256:04106cea74c9b7217608ed5335434aec139928b980a1d98f13c1d65ad9a2757e
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6cbe5a6df51d7cf574700e32447f7c59e1f86ee74b6f42f6b5250d0259029ffa
+oid sha256:44c17ba484ea8b4f6a5dc4fb01470e7b5de3b2280c1d5dfa43045e816b3035ab
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ae43d9a0a1751b368da41b4d38a9e6b8912af0859b5bf0f0514e58b2fa49faf
+oid sha256:515ac92b399b536a4a0990d851348426453ad5a34a8114a85806dc0f2120c04c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c777f1142b68b8b3ec3d325314bfa50ddccfacdeb4ee5ac4c1090831572e47da
+oid sha256:612c3aa776f0b1615bfed5f7770ad478e4281d699f7ba3dca66036c0a3ba855a
 size 1064
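Each of the four checkpoint files above is tracked with Git LFS, so the commit only rewrites the small pointer file: the `oid sha256:` line is the hash of the actual payload and `size` is its byte count, which is why the sizes stay fixed while the hashes change. Below is a minimal sketch (Python, with hypothetical local paths; it assumes the real payloads have already been fetched, e.g. via `git lfs pull`) of checking a downloaded file against its pointer:

```python
import hashlib
from pathlib import Path


def read_lfs_pointer(path: Path) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_payload(pointer_path: Path, payload_path: Path) -> bool:
    """Check a local payload against the oid/size recorded in its pointer."""
    pointer = read_lfs_pointer(pointer_path)
    expected_oid = pointer["oid"].removeprefix("sha256:")
    expected_size = int(pointer["size"])
    data = payload_path.read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid


# Hypothetical usage: pointer file saved aside vs. the fetched weights.
# verify_payload(Path("adapter_model.safetensors.pointer"),
#                Path("last-checkpoint/adapter_model.safetensors"))
```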
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.576,
+  "epoch": 0.5776,
   "eval_steps": 500,
-  "global_step": 7200,
+  "global_step": 7220,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2527,6 +2527,13 @@
       "learning_rate": 2.2217789167362078e-05,
       "loss": 3.4405,
       "step": 7200
+    },
+    {
+      "epoch": 0.58,
+      "grad_norm": 34.66562271118164,
+      "learning_rate": 2.217516409129699e-05,
+      "loss": 3.5408,
+      "step": 7220
     }
   ],
   "logging_steps": 20,
@@ -2534,7 +2541,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.7005830543409152e+16,
+  "total_flos": 1.7055072218185728e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null