Femboyuwu2000 committed
Commit: ef783b9
Parent: 6e39a22

Training in progress, step 480, checkpoint

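The commit message and the last-checkpoint/ folder layout look like an automatic push from transformers.Trainer during training (a save every 20 steps, per trainer_state.json below). A minimal sketch of arguments consistent with this layout; the model/dataset setup is omitted because it is not recorded in this diff, and "outputs" is a hypothetical local directory:

from transformers import TrainingArguments

# Batch size, epochs, logging and save cadence are read from trainer_state.json in this commit.
args = TrainingArguments(
    output_dir="outputs",
    per_device_train_batch_size=8,
    num_train_epochs=2,
    logging_steps=20,
    save_steps=20,
    push_to_hub=True,
    hub_strategy="checkpoint",  # uploads the latest checkpoint to a last-checkpoint/ folder on the Hub
)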
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:73a88f4bf7a1b6dddff39e8af97c009fd73b38526759b2a26f6ec08d9b4d183f
+oid sha256:afc3626697f9b8b21110b8ca8aa05f989017e7e572a2ad1c791d8f8de7672958
 size 13982248
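At about 14 MB, adapter_model.safetensors is almost certainly a PEFT adapter (e.g. LoRA) rather than full model weights. A minimal loading sketch, assuming the checkpoint folder also contains its adapter_config.json and that the base model id is known; "base-model-id" is a placeholder, since the base model is not recorded in this diff:

from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder id: the actual base model is not recorded in this commit.
base = AutoModelForCausalLM.from_pretrained("base-model-id")

# Attach the adapter weights saved in last-checkpoint/adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "last-checkpoint")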
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c1621a369dc7e4bdf318b29e9869c1af3b08eee35502e0d46b34b86f42bdd0c
+oid sha256:aceadab4673de599001bacd0931786bce2dbbe16d8d14e7b7a5e857b3b4c7b63
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a38b8c0c37b77f3baeb6ec8974f233500a56911027cd6d2217e87b0f9c9028ed
+oid sha256:e53efd56a8066289cd7aca225d88c44b543c66b52f0755a9972f2ffab46cccc5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5641c2652ebf9c036c42249768cbb64151dbdde17367e15dd5a080285743b021
+oid sha256:a125a87639fe4cd06b007808fbdba612724f438a0ebfc69a057f80762c8382d7
 size 1064
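Each of the four files above is tracked through Git LFS, so the repository only stores a three-line pointer (spec version, sha256 oid, byte size); the oid is the SHA-256 of the actual blob. A small sketch for verifying a downloaded file against its pointer, using the new scheduler.pt digest from this commit:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256, the value a Git LFS pointer stores as 'oid'."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "a125a87639fe4cd06b007808fbdba612724f438a0ebfc69a057f80762c8382d7"  # oid from the pointer above
assert sha256_of("last-checkpoint/scheduler.pt") == expected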
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0368,
+  "epoch": 0.0384,
   "eval_steps": 500,
-  "global_step": 460,
+  "global_step": 480,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -168,6 +168,13 @@
       "learning_rate": 2.3000000000000003e-05,
       "loss": 3.8172,
       "step": 460
+    },
+    {
+      "epoch": 0.04,
+      "grad_norm": 26.000978469848633,
+      "learning_rate": 2.4e-05,
+      "loss": 3.8934,
+      "step": 480
     }
   ],
   "logging_steps": 20,
@@ -175,7 +182,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1091157458190336.0,
+  "total_flos": 1144237424738304.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null