vdos committed on
Commit 9dd0d02
1 Parent(s): fbac286

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1ecb59cd3dcc8e9b08abd098f0ce4842cf2cad3509e1f3c3c5ec645880279806
+ oid sha256:305172a4233209b332785586535bf2c3d8aca8bc72d082903a8db84c77f208e6
  size 432223744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6bceef687e00eb26ad6a325b5da3c7da1c4940b45e035cadfa9cae20209244e0
+ oid sha256:78214a0d1c775972f9a80c311a64590edceadfb735370dcdcd5755a2b473c2cb
  size 864785974
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c3d3b7a4a0b65f1ef2a79121dcc36f0f46eec4e9c1f11e716bee10a5e99ef92f
+ oid sha256:4ecdfeb4ea872cf0b22fba2a4a5b49827313d1db0a60ec289a0450ef8689f90b
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d621dd152a81ceefcc6d5bc8b659af40b059d8f66901d5617810de558ea00786
+ oid sha256:72c9642b537a002260f157de61a35183ce8c31d54ef4a119842de582faf6851b
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a72dbdd06a160ef49790154c5ffb1bdabd6bc430d3425ac68abaddefc26f687a
+ oid sha256:c9623f940cc98fa5ce48c7c451ba6138228d9924f883248f3ad6342bbfa49453
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ab0db3accd686bcb39bcef79ea6b326c5ddeba4f6221bc32e79fe54bd5ade06
+ oid sha256:e71c64ff5dc134b563f75857c691dc2d3e69683266ad3b7566169a083142d621
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
+ oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.7876332402229309,
- "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 0.7326007326007326,
+ "best_metric": 0.7660866975784302,
+ "best_model_checkpoint": "miner_id_24/checkpoint-50",
+ "epoch": 1.4652014652014653,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,189 @@
  "eval_samples_per_second": 19.179,
  "eval_steps_per_second": 2.418,
  "step": 25
+ },
+ {
+ "epoch": 0.7619047619047619,
+ "grad_norm": 0.5740500092506409,
+ "learning_rate": 5e-05,
+ "loss": 0.7998,
+ "step": 26
+ },
+ {
+ "epoch": 0.7912087912087912,
+ "grad_norm": 1.062048316001892,
+ "learning_rate": 4.6729843538492847e-05,
+ "loss": 0.7508,
+ "step": 27
+ },
+ {
+ "epoch": 0.8205128205128205,
+ "grad_norm": 0.8686541318893433,
+ "learning_rate": 4.347369038899744e-05,
+ "loss": 0.7552,
+ "step": 28
+ },
+ {
+ "epoch": 0.8498168498168498,
+ "grad_norm": 0.5461609363555908,
+ "learning_rate": 4.0245483899193595e-05,
+ "loss": 0.7701,
+ "step": 29
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 0.20379406213760376,
+ "learning_rate": 3.705904774487396e-05,
+ "loss": 0.7829,
+ "step": 30
+ },
+ {
+ "epoch": 0.9084249084249084,
+ "grad_norm": 0.48669975996017456,
+ "learning_rate": 3.392802673484193e-05,
+ "loss": 0.8228,
+ "step": 31
+ },
+ {
+ "epoch": 0.9377289377289377,
+ "grad_norm": 0.4675156772136688,
+ "learning_rate": 3.086582838174551e-05,
+ "loss": 0.807,
+ "step": 32
+ },
+ {
+ "epoch": 0.967032967032967,
+ "grad_norm": 1.1531270742416382,
+ "learning_rate": 2.7885565489049946e-05,
+ "loss": 0.885,
+ "step": 33
+ },
+ {
+ "epoch": 0.9963369963369964,
+ "grad_norm": 0.7078146934509277,
+ "learning_rate": 2.500000000000001e-05,
+ "loss": 0.8386,
+ "step": 34
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 1.082878828048706,
+ "learning_rate": 2.2221488349019903e-05,
+ "loss": 1.441,
+ "step": 35
+ },
+ {
+ "epoch": 1.054945054945055,
+ "grad_norm": 0.5584947466850281,
+ "learning_rate": 1.9561928549563968e-05,
+ "loss": 0.7131,
+ "step": 36
+ },
+ {
+ "epoch": 1.0842490842490842,
+ "grad_norm": 0.623293399810791,
+ "learning_rate": 1.703270924499656e-05,
+ "loss": 0.7405,
+ "step": 37
+ },
+ {
+ "epoch": 1.1135531135531136,
+ "grad_norm": 0.5018717646598816,
+ "learning_rate": 1.4644660940672627e-05,
+ "loss": 0.7195,
+ "step": 38
+ },
+ {
+ "epoch": 1.1428571428571428,
+ "grad_norm": 0.893699586391449,
+ "learning_rate": 1.2408009626051137e-05,
+ "loss": 0.8149,
+ "step": 39
+ },
+ {
+ "epoch": 1.1721611721611722,
+ "grad_norm": 0.6624011397361755,
+ "learning_rate": 1.0332332985438248e-05,
+ "loss": 0.81,
+ "step": 40
+ },
+ {
+ "epoch": 1.2014652014652014,
+ "grad_norm": 0.39665865898132324,
+ "learning_rate": 8.426519384872733e-06,
+ "loss": 0.7523,
+ "step": 41
+ },
+ {
+ "epoch": 1.2307692307692308,
+ "grad_norm": 0.5487731695175171,
+ "learning_rate": 6.698729810778065e-06,
+ "loss": 0.8137,
+ "step": 42
+ },
+ {
+ "epoch": 1.26007326007326,
+ "grad_norm": 0.5722079873085022,
+ "learning_rate": 5.156362923365588e-06,
+ "loss": 0.7874,
+ "step": 43
+ },
+ {
+ "epoch": 1.2893772893772895,
+ "grad_norm": 0.6342135071754456,
+ "learning_rate": 3.8060233744356633e-06,
+ "loss": 0.7224,
+ "step": 44
+ },
+ {
+ "epoch": 1.3186813186813187,
+ "grad_norm": 0.5970407128334045,
+ "learning_rate": 2.653493525244721e-06,
+ "loss": 0.7422,
+ "step": 45
+ },
+ {
+ "epoch": 1.347985347985348,
+ "grad_norm": 0.600911557674408,
+ "learning_rate": 1.70370868554659e-06,
+ "loss": 0.7389,
+ "step": 46
+ },
+ {
+ "epoch": 1.3772893772893773,
+ "grad_norm": 0.5775716304779053,
+ "learning_rate": 9.607359798384785e-07,
+ "loss": 0.7585,
+ "step": 47
+ },
+ {
+ "epoch": 1.4065934065934065,
+ "grad_norm": 0.5375389456748962,
+ "learning_rate": 4.277569313094809e-07,
+ "loss": 0.7847,
+ "step": 48
+ },
+ {
+ "epoch": 1.435897435897436,
+ "grad_norm": 0.4871210753917694,
+ "learning_rate": 1.0705383806982606e-07,
+ "loss": 0.798,
+ "step": 49
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "grad_norm": 0.7523501515388489,
+ "learning_rate": 0.0,
+ "loss": 0.7955,
+ "step": 50
+ },
+ {
+ "epoch": 1.4652014652014653,
+ "eval_loss": 0.7660866975784302,
+ "eval_runtime": 11.9906,
+ "eval_samples_per_second": 19.182,
+ "eval_steps_per_second": 2.419,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -221,12 +404,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 3.315690861756416e+17,
+ "total_flos": 6.631381723512832e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null