vdos committed
Commit bd7cef1
1 Parent(s): aa6ab4c

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f12fe7776df5d3c3505abba7b31e63bacd898d7aa1abf191f7fe6328fc83d0c5
+oid sha256:f3720f048f0e0d5bb3af893066bfb1914d0c131628fb657807ad6b9376801e38
 size 319876032
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dbc7592851645f5ba6bb0b8128cd9247746d7c1dbe1da7d9f129df405146d994
+oid sha256:eea8146b9a45a50d4677f46f81c71cf2760d4f610bc2f82c664d27de4c3719eb
 size 640009682
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:928213ab2e09763e116d1bfb478aad8f4139c963ed129d921a2501493e30efdd
+oid sha256:19ba59754b31e5e79014ce130754a8a3fdf9772677ff18499997328c0d9c7146
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:004d7c9b66ee0132acc47b6d5e9cb18d0a333840a025bac015ed1d6ed63c4eb2
+oid sha256:a7de3ad2b13e4a7ba5f0a10459abaecc1f8b8fd8b60df9729954c4f5b657c917
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8dd4edda31bf9d44bcea92ce7553db96d7effb20ed51068a52ee0d69285b7d6f
+oid sha256:e36a52e22bbc9b8b07d332783d70b5d67be7c5d641e448758d33c9fb54f4a57d
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6656893e56520b745793e71047f05cb9279d9ef3478b134f3967730170f391bc
+oid sha256:4d38ebc3e63761f676d20556f4357162d5180abd6204d3d2f4a79245e233103a
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
+oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
- "best_metric": 0.13296709954738617,
- "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 0.33783783783783783,
+ "best_metric": 0.10377126187086105,
+ "best_model_checkpoint": "miner_id_24/checkpoint-50",
+ "epoch": 0.6756756756756757,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,189 @@
  "eval_samples_per_second": 34.094,
  "eval_steps_per_second": 4.304,
  "step": 25
+ },
+ {
+ "epoch": 0.35135135135135137,
+ "grad_norm": 1.1895753145217896,
+ "learning_rate": 5e-05,
+ "loss": 0.1106,
+ "step": 26
+ },
+ {
+ "epoch": 0.36486486486486486,
+ "grad_norm": 2.3610384464263916,
+ "learning_rate": 4.6729843538492847e-05,
+ "loss": 0.2791,
+ "step": 27
+ },
+ {
+ "epoch": 0.3783783783783784,
+ "grad_norm": 1.643008828163147,
+ "learning_rate": 4.347369038899744e-05,
+ "loss": 0.1546,
+ "step": 28
+ },
+ {
+ "epoch": 0.3918918918918919,
+ "grad_norm": 1.6177732944488525,
+ "learning_rate": 4.0245483899193595e-05,
+ "loss": 0.1978,
+ "step": 29
+ },
+ {
+ "epoch": 0.40540540540540543,
+ "grad_norm": 1.8108652830123901,
+ "learning_rate": 3.705904774487396e-05,
+ "loss": 0.2273,
+ "step": 30
+ },
+ {
+ "epoch": 0.4189189189189189,
+ "grad_norm": 2.1313583850860596,
+ "learning_rate": 3.392802673484193e-05,
+ "loss": 0.1894,
+ "step": 31
+ },
+ {
+ "epoch": 0.43243243243243246,
+ "grad_norm": 1.4777275323867798,
+ "learning_rate": 3.086582838174551e-05,
+ "loss": 0.232,
+ "step": 32
+ },
+ {
+ "epoch": 0.44594594594594594,
+ "grad_norm": 1.6066914796829224,
+ "learning_rate": 2.7885565489049946e-05,
+ "loss": 0.2551,
+ "step": 33
+ },
+ {
+ "epoch": 0.4594594594594595,
+ "grad_norm": 1.397235631942749,
+ "learning_rate": 2.500000000000001e-05,
+ "loss": 0.253,
+ "step": 34
+ },
+ {
+ "epoch": 0.47297297297297297,
+ "grad_norm": 1.329254150390625,
+ "learning_rate": 2.2221488349019903e-05,
+ "loss": 0.2383,
+ "step": 35
+ },
+ {
+ "epoch": 0.4864864864864865,
+ "grad_norm": 1.3167423009872437,
+ "learning_rate": 1.9561928549563968e-05,
+ "loss": 0.1335,
+ "step": 36
+ },
+ {
+ "epoch": 0.5,
+ "grad_norm": 1.0337791442871094,
+ "learning_rate": 1.703270924499656e-05,
+ "loss": 0.1781,
+ "step": 37
+ },
+ {
+ "epoch": 0.5135135135135135,
+ "grad_norm": 0.6120166778564453,
+ "learning_rate": 1.4644660940672627e-05,
+ "loss": 0.1018,
+ "step": 38
+ },
+ {
+ "epoch": 0.527027027027027,
+ "grad_norm": 0.6781937479972839,
+ "learning_rate": 1.2408009626051137e-05,
+ "loss": 0.1015,
+ "step": 39
+ },
+ {
+ "epoch": 0.5405405405405406,
+ "grad_norm": 0.6627649068832397,
+ "learning_rate": 1.0332332985438248e-05,
+ "loss": 0.0898,
+ "step": 40
+ },
+ {
+ "epoch": 0.5540540540540541,
+ "grad_norm": 0.9930323362350464,
+ "learning_rate": 8.426519384872733e-06,
+ "loss": 0.073,
+ "step": 41
+ },
+ {
+ "epoch": 0.5675675675675675,
+ "grad_norm": 0.8100405931472778,
+ "learning_rate": 6.698729810778065e-06,
+ "loss": 0.1591,
+ "step": 42
+ },
+ {
+ "epoch": 0.581081081081081,
+ "grad_norm": 0.8315913677215576,
+ "learning_rate": 5.156362923365588e-06,
+ "loss": 0.1893,
+ "step": 43
+ },
+ {
+ "epoch": 0.5945945945945946,
+ "grad_norm": 1.6323856115341187,
+ "learning_rate": 3.8060233744356633e-06,
+ "loss": 0.21,
+ "step": 44
+ },
+ {
+ "epoch": 0.6081081081081081,
+ "grad_norm": 1.213604211807251,
+ "learning_rate": 2.653493525244721e-06,
+ "loss": 0.1902,
+ "step": 45
+ },
+ {
+ "epoch": 0.6216216216216216,
+ "grad_norm": 1.0373070240020752,
+ "learning_rate": 1.70370868554659e-06,
+ "loss": 0.1713,
+ "step": 46
+ },
+ {
+ "epoch": 0.6351351351351351,
+ "grad_norm": 0.6015317440032959,
+ "learning_rate": 9.607359798384785e-07,
+ "loss": 0.1038,
+ "step": 47
+ },
+ {
+ "epoch": 0.6486486486486487,
+ "grad_norm": 1.1619991064071655,
+ "learning_rate": 4.277569313094809e-07,
+ "loss": 0.1853,
+ "step": 48
+ },
+ {
+ "epoch": 0.6621621621621622,
+ "grad_norm": 0.6225360035896301,
+ "learning_rate": 1.0705383806982606e-07,
+ "loss": 0.1095,
+ "step": 49
+ },
+ {
+ "epoch": 0.6756756756756757,
+ "grad_norm": 0.8265971541404724,
+ "learning_rate": 0.0,
+ "loss": 0.1351,
+ "step": 50
+ },
+ {
+ "epoch": 0.6756756756756757,
+ "eval_loss": 0.10377126187086105,
+ "eval_runtime": 14.6286,
+ "eval_samples_per_second": 34.111,
+ "eval_steps_per_second": 4.307,
+ "step": 50
  }
 ],
 "logging_steps": 1,
@@ -221,12 +404,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
 },
 "attributes": {}
 }
 },
- "total_flos": 2.66009604784128e+17,
+ "total_flos": 5.32019209568256e+17,
 "train_batch_size": 2,
 "trial_name": null,
 "trial_params": null