Femboyuwu2000 committed
Commit: 561a4b6
1 Parent(s): 068b3d6

Training in progress, step 3800, checkpoint

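Every file in this commit sits under last-checkpoint/, which is the layout a 🤗 Transformers `Trainer` produces when it pushes its latest checkpoint to the Hub during training. As a rough, hedged sketch only: the numeric arguments below are read off trainer_state.json further down in this diff (save_steps=20, logging_steps=20, num_train_epochs=2, train_batch_size=8), while the base model, tokenizer, and toy dataset are placeholders that are not part of this repository.

```python
# Hedged sketch (not the author's actual script): a Trainer setup that would
# emit periodic "Training in progress, step N, checkpoint" commits like this one.
# Only the numeric hyperparameters come from trainer_state.json in this diff;
# model, tokenizer, and dataset below are placeholders.
from datasets import Dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

model_name = "gpt2"  # placeholder base model, not taken from this commit
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_name)

# Tiny placeholder corpus so the sketch runs end to end.
dataset = Dataset.from_dict({"text": ["hello world"] * 64}).map(
    lambda ex: tokenizer(ex["text"], truncation=True, max_length=32),
    remove_columns=["text"],
)

args = TrainingArguments(
    output_dir="outputs",
    num_train_epochs=2,              # "num_train_epochs": 2
    per_device_train_batch_size=8,   # "train_batch_size": 8
    logging_steps=20,                # "logging_steps": 20
    save_steps=20,                   # "save_steps": 20
    push_to_hub=True,                # requires being logged in to the Hub
    hub_strategy="checkpoint",       # keeps overwriting a single last-checkpoint/ folder
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```

With `hub_strategy="checkpoint"` the Trainer re-pushes one `last-checkpoint/` folder on every save, which would explain why this commit only swaps the LFS pointers of the files under that prefix.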
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4223f69fa5c4d343cdb0daf20def262d26468340b68b3012ba5b5bf860add746
+oid sha256:36ac2a87ab309a324156770cff23fbcfbbf432e170a53864d6322a1b4b0f07fa
 size 13982248
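Each CHANGED block above and below is just a Git LFS pointer swap: the `size` stays the same while `oid sha256:` advances to the new blob. A minimal sketch, assuming the repository has been cloned and `git lfs pull` has replaced the pointer with the real binary, for checking that the local adapter file matches the new pointer recorded in this commit:

```python
# Hedged sketch: verify a pulled LFS blob against the oid in its pointer file.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so large checkpoints don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of("last-checkpoint/adapter_model.safetensors")
# New oid recorded in this commit for adapter_model.safetensors:
expected = "36ac2a87ab309a324156770cff23fbcfbbf432e170a53864d6322a1b4b0f07fa"
print(digest == expected)
```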
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07336ce007d26d9a03fa7d6fb0fb6a9b44ea05300d930f065fe4546aa4d1e107
+oid sha256:f4e71a65761b91f23557705c80c31bf4dc741e188e8b8dd3cb91d9e1d04d0829
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0b38fda0416b95acf561519c239d9553c03304b18c4ad47455ccbf998847d400
+oid sha256:ae7fdc9e832b9048f8d780c7ee755c1b7ac708ee22fb1d6c77d7ce7699702566
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eab6ea1f9b7df74951725865c85704c6f9d44947e8a6d8f259ab0d7413d273be
+oid sha256:a0e3d8cec0e23166a3a5218cd6c00728334e53ee9906c6ef1df2da53651ea7a3
 size 1064
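adapter_model.safetensors carries the trained adapter weights (its ~14 MB size suggests a PEFT/LoRA adapter rather than full model weights), while optimizer.pt, scheduler.pt, and rng_state.pth hold the extra state needed for exact resumption: optimizer moments, LR-scheduler position, and RNG generator states. A hedged sketch for peeking at them locally with plain PyTorch; the key names in the comments reflect the usual Trainer layout and are not guaranteed by this diff:

```python
# Hedged sketch: inspect the resume state saved next to the adapter weights.
# Paths are the ones in this commit; weights_only=False because these are
# trusted local files that contain non-tensor objects (e.g. RNG states).
import torch

optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(optimizer_state.keys())  # usually dict_keys(['state', 'param_groups'])
print(scheduler_state)         # LR scheduler state_dict (e.g. last_epoch, _step_count)
print(rng_state.keys())        # usually python / numpy / cpu / cuda generator states
```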
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3024,
+  "epoch": 0.304,
   "eval_steps": 500,
-  "global_step": 3780,
+  "global_step": 3800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1330,6 +1330,13 @@
       "learning_rate": 2.805467086984027e-05,
       "loss": 3.4905,
       "step": 3780
+    },
+    {
+      "epoch": 0.3,
+      "grad_norm": 41.776817321777344,
+      "learning_rate": 2.803067604777227e-05,
+      "loss": 3.5289,
+      "step": 3800
     }
   ],
   "logging_steps": 20,
@@ -1337,7 +1344,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8928030689820672.0,
+  "total_flos": 8969070886453248.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null