dimasik87 committed on
Commit 6371f8a
1 Parent(s): 4d61756

Training in progress, step 24, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8373c5e4c8ca9133fbe3ff268192eb46706f3a841ef92d81816a0577e1744f3
+oid sha256:55e64cd9429c3e3e5761c51f818f35c75ecd1232057c81228740dea4aa744c7d
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0a134857514fb9dfe5e2f59d16b28f578292d717fe3548b42257a0cb8f318fcf
+oid sha256:2cb57304bbdb393557583e9ea391f4cfe629785a56d8b7f467320faeb6a4decb
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5607b27ad36e8b8b6a123386b38e0716be53aa03eb9581fdcae093ba333e0d68
+oid sha256:b63c8755f60283d61d8f5e9b05b9fc3d8c48b7abe96f514359bc54f46be9cceb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
+oid sha256:470670603e8fdc5330cdb9a9152c4fd9c3d8c5a74dd26bffbbb0d869d097eafa
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.05383580080753701,
+  "epoch": 0.06460296096904442,
   "eval_steps": 4,
-  "global_step": 20,
+  "global_step": 24,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -195,6 +195,42 @@
       "eval_samples_per_second": 7.667,
       "eval_steps_per_second": 7.667,
       "step": 20
+    },
+    {
+      "epoch": 0.05652759084791386,
+      "grad_norm": 1.8083579540252686,
+      "learning_rate": 0.00016494480483301836,
+      "loss": 0.3447,
+      "step": 21
+    },
+    {
+      "epoch": 0.059219380888290714,
+      "grad_norm": 1.2060580253601074,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 0.1974,
+      "step": 22
+    },
+    {
+      "epoch": 0.06191117092866756,
+      "grad_norm": 0.7741371989250183,
+      "learning_rate": 0.0001522498564715949,
+      "loss": 0.1914,
+      "step": 23
+    },
+    {
+      "epoch": 0.06460296096904442,
+      "grad_norm": 1.2901431322097778,
+      "learning_rate": 0.00014539904997395468,
+      "loss": 0.2147,
+      "step": 24
+    },
+    {
+      "epoch": 0.06460296096904442,
+      "eval_loss": 0.28900590538978577,
+      "eval_runtime": 10.1131,
+      "eval_samples_per_second": 7.812,
+      "eval_steps_per_second": 7.812,
+      "step": 24
     }
   ],
   "logging_steps": 1,
@@ -214,7 +250,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8068024208719872.0,
+  "total_flos": 9830006507175936.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null