Femboyuwu2000 committed
Commit 2221184
1 Parent(s): 26e7ac6

Training in progress, step 620, checkpoint

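This is one of the periodic checkpoint pushes the training job makes every 20 optimizer steps (see "save_steps": 20 in the trainer_state.json diff below). A minimal sketch of resuming the run from this checkpoint, assuming the repository is cloned locally and the model, dataset and remaining TrainingArguments are rebuilt exactly as in the original run; none of that setup is part of this commit, and only the commented values come from the state file:

    # Sketch only: everything except the values taken from trainer_state.json
    # (batch size, epochs, logging/save cadence, starting LR) is an assumption.
    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="out",
        per_device_train_batch_size=8,  # "train_batch_size": 8
        num_train_epochs=2,             # "num_train_epochs": 2
        logging_steps=20,               # "logging_steps": 20
        save_steps=20,                  # "save_steps": 20
        learning_rate=3e-5,             # starting LR in the logged history
    )

    # With a Trainer built from these arguments plus the original model and data:
    #   trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
    #   trainer.train(resume_from_checkpoint="last-checkpoint")
    # resume_from_checkpoint reloads optimizer.pt, scheduler.pt, rng_state.pth and
    # trainer_state.json, so training continues from global step 620.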
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cc4a41d5d22b7d0d85455aa32db9e7c97b9166ea007173ae0f8b60ba7996b322
+oid sha256:c92ea041d259fd0f1f97d3d99cc8dcb4a35f0729c4f9cfe1e03af37c28316782
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37a60897eab1d7588d260a33b2f1fa0c1b58f193663e391249437e08e9459931
+oid sha256:78a98df909e786b9af2ba1de67522448ec74261759633c6997092bdf4beaab7f
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9ab3296562ebe49bf8535f9527f415326e1c9c49a2be5613326da9318c84575
+oid sha256:9c65cd55e2fff96c97e67ca068ed8c714e7c66984dea1333f2e03593c396075d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b87b8c463789ee95efb85f337d13d204d6d893266f8733a46591a2aa7fddc131
+oid sha256:e1ce72457ecdb61a4ff660064d82592f9b16269351060cb691068fea59fdedc2
 size 1064
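
All four files above are stored through Git LFS, so each diff only touches the small pointer file: the oid sha256 line moves to the hash of the new checkpoint blob while the byte size stays the same. A minimal sketch for checking a downloaded blob against such a pointer (file paths are illustrative, not part of this commit):

    # Verify that a local file matches a Git LFS pointer (version / oid / size).
    # Only the pointer format comes from the diffs above; the paths are examples.
    import hashlib
    from pathlib import Path

    def read_pointer(pointer_path: str) -> dict:
        """Parse the 'key value' lines of a Git LFS pointer file into a dict."""
        fields = {}
        for line in Path(pointer_path).read_text().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    def matches_pointer(pointer_path: str, blob_path: str) -> bool:
        """True if the blob's sha256 digest and byte size match the pointer."""
        fields = read_pointer(pointer_path)
        expected_oid = fields["oid"].removeprefix("sha256:")
        expected_size = int(fields["size"])
        data = Path(blob_path).read_bytes()
        return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

    # Example (hypothetical paths):
    # matches_pointer("adapter_model.safetensors.pointer", "adapter_model.safetensors")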
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.048,
+  "epoch": 0.0496,
   "eval_steps": 500,
-  "global_step": 600,
+  "global_step": 620,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -217,6 +217,13 @@
       "learning_rate": 3e-05,
       "loss": 3.726,
       "step": 600
+    },
+    {
+      "epoch": 0.05,
+      "grad_norm": 49.25693893432617,
+      "learning_rate": 2.999992132854894e-05,
+      "loss": 3.8692,
+      "step": 620
     }
   ],
   "logging_steps": 20,
@@ -224,7 +231,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1434832198336512.0,
+  "total_flos": 1478300032499712.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null