leixa committed
Commit 9573c6e
1 Parent(s): cbe18dc

Training in progress, step 126, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88a479e925ede0c3da3821d664013f8c545b67f4c0210547884d9612a19eef4f
+oid sha256:f5849698230477377bc43d99927ea0b750ff86b3da0cd6f06982ce816ba6dbcd
 size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:27a7e3cd7b75a685cbd36c0f64b79a56218ed9625b7edaae67dc35cc2696380b
+oid sha256:7c427c14ecb51f7b09cfad05ca6b48fae3ca8b3a89561d714f3b547288cbe22e
 size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab838fbe570b6d415c10780e3a0e3a3e25daa76549fd9a5954f6f1727ad17437
+oid sha256:b4c2513b077ee0a9d0d20b1ed96188cfd8d5c30d65f28f3d6dee11f2dc6d8ca8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2df224011d0e75c4a97901f6c1b2930bba4bc3a9aa7c877e6c91e796bec6013f
+oid sha256:e00328545513b592b6bf311a002e087bb048e374276594cf37ee8782d28887fc
 size 1064
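Each of the four pointer diffs above swaps only the sha256 oid; the byte sizes are unchanged. A quick way to confirm a locally pulled LFS object matches the new pointer is to hash the file yourself. A minimal sketch (assuming the LFS objects have already been fetched; the path and expected digest are taken from the adapter_model.safetensors pointer above):

import hashlib

# Minimal sketch: verify a pulled LFS object against the oid in its pointer file.
# Path and digest correspond to the new adapter_model.safetensors pointer above;
# swap them for the other checkpoint files as needed.
EXPECTED_OID = "f5849698230477377bc43d99927ea0b750ff86b3da0cd6f06982ce816ba6dbcd"
PATH = "last-checkpoint/adapter_model.safetensors"

sha = hashlib.sha256()
with open(PATH, "rb") as f:
    # Hash in 1 MiB chunks so large checkpoint files are not read into memory at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("match" if sha.hexdigest() == EXPECTED_OID else "mismatch")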
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2736156351791531,
+  "epoch": 0.41042345276872966,
   "eval_steps": 42,
-  "global_step": 84,
+  "global_step": 126,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -227,6 +227,112 @@
       "eval_samples_per_second": 23.321,
       "eval_steps_per_second": 5.864,
       "step": 84
+    },
+    {
+      "epoch": 0.28338762214983715,
+      "grad_norm": 2.819925308227539,
+      "learning_rate": 9.40297765928369e-05,
+      "loss": 1.5062,
+      "step": 87
+    },
+    {
+      "epoch": 0.2931596091205212,
+      "grad_norm": 2.894965410232544,
+      "learning_rate": 9.356593520616948e-05,
+      "loss": 1.5876,
+      "step": 90
+    },
+    {
+      "epoch": 0.30293159609120524,
+      "grad_norm": 2.7666544914245605,
+      "learning_rate": 9.308597683653975e-05,
+      "loss": 1.4585,
+      "step": 93
+    },
+    {
+      "epoch": 0.3127035830618892,
+      "grad_norm": 2.8919670581817627,
+      "learning_rate": 9.259007904196023e-05,
+      "loss": 1.5846,
+      "step": 96
+    },
+    {
+      "epoch": 0.32247557003257327,
+      "grad_norm": 2.660968542098999,
+      "learning_rate": 9.207842527714767e-05,
+      "loss": 1.5343,
+      "step": 99
+    },
+    {
+      "epoch": 0.3322475570032573,
+      "grad_norm": 2.652714967727661,
+      "learning_rate": 9.155120482565521e-05,
+      "loss": 1.591,
+      "step": 102
+    },
+    {
+      "epoch": 0.34201954397394135,
+      "grad_norm": 2.7949202060699463,
+      "learning_rate": 9.10086127298478e-05,
+      "loss": 1.4614,
+      "step": 105
+    },
+    {
+      "epoch": 0.3517915309446254,
+      "grad_norm": 2.527437686920166,
+      "learning_rate": 9.045084971874738e-05,
+      "loss": 1.5641,
+      "step": 108
+    },
+    {
+      "epoch": 0.36156351791530944,
+      "grad_norm": 2.352187395095825,
+      "learning_rate": 8.987812213377424e-05,
+      "loss": 1.4186,
+      "step": 111
+    },
+    {
+      "epoch": 0.3713355048859935,
+      "grad_norm": 2.577479124069214,
+      "learning_rate": 8.929064185241213e-05,
+      "loss": 1.4607,
+      "step": 114
+    },
+    {
+      "epoch": 0.3811074918566775,
+      "grad_norm": 2.7002623081207275,
+      "learning_rate": 8.868862620982534e-05,
+      "loss": 1.6182,
+      "step": 117
+    },
+    {
+      "epoch": 0.39087947882736157,
+      "grad_norm": 2.521603584289551,
+      "learning_rate": 8.807229791845673e-05,
+      "loss": 1.4524,
+      "step": 120
+    },
+    {
+      "epoch": 0.4006514657980456,
+      "grad_norm": 2.723996877670288,
+      "learning_rate": 8.744188498563641e-05,
+      "loss": 1.2303,
+      "step": 123
+    },
+    {
+      "epoch": 0.41042345276872966,
+      "grad_norm": 2.888556957244873,
+      "learning_rate": 8.679762062923175e-05,
+      "loss": 1.376,
+      "step": 126
+    },
+    {
+      "epoch": 0.41042345276872966,
+      "eval_loss": 1.5374845266342163,
+      "eval_runtime": 22.1691,
+      "eval_samples_per_second": 23.321,
+      "eval_steps_per_second": 5.864,
+      "step": 126
     }
   ],
   "logging_steps": 3,
@@ -246,7 +352,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.335757030732595e+16,
+  "total_flos": 9.503635546098893e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null