fats-fme committed on
Commit 4978cc0 · verified · 1 Parent(s): feb87cf

Training in progress, step 45, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f801add4c0d2e08fe2b55e70f53bea6aef21a176c0657a3e820c530d88b29ca
+oid sha256:4e4b409903b918c6df7d1cf23cdf0ef713e7f8f395ecf3d8b59468d0342b1e21
 size 90365754
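
These checkpoint tensors are tracked with Git LFS, so the commit only rewrites the three-line pointer file (spec version, sha256 object id, byte size); the 90 MB optimizer payload itself lives in LFS storage. A minimal sketch of reading such a pointer, assuming the repository was cloned without running git lfs pull so the pointer text is still on disk:

from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its 'version', 'oid', and 'size' fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# Hypothetical local path; with the new pointer above this would return
# {'version': 'https://git-lfs.github.com/spec/v1',
#  'oid': 'sha256:4e4b409903b918c6df7d1cf23cdf0ef713e7f8f395ecf3d8b59468d0342b1e21',
#  'size': '90365754'}
print(read_lfs_pointer("last-checkpoint/optimizer.pt"))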
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:72a607a89f29f579bb7e3b03d01051406f784e1383419ed5b710137e9d12493e
+oid sha256:1dc4928675a55c735fd8b40848204aeb479d0a3c4cb847cd8b779245fffeab2c
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3ec16933bc3b62330056ce3e53e0d229a8971f7e3969b0b72440ffdd637e6216
+oid sha256:8bec9259f8861eb32b9f9ceb9f92b307a160be2011844e232460a52b5467663d
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4a73e1ff9beffc13aa54f4adf4df9ed4ad8819cc503c53ddfd100ef74e91d520
+oid sha256:cd1931e4016bf28b07346e79413c71e240f12f43909f7431de607b5c05407707
 size 1064
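
The trainer_state.json diff below extends the training log from step 30 to step 45. Every new training entry records "grad_norm": NaN with "loss": 0.0, and the step-45 evaluation records "eval_loss": NaN, which typically points to a numerical problem in the run. A minimal sketch for flagging such entries when inspecting a downloaded checkpoint, assuming only the standard trainer_state.json layout shown below (Python's json module parses the literal NaN as float('nan')):

import json
import math

# Hypothetical local path to the checkpoint's trainer state.
STATE_PATH = "last-checkpoint/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)  # NaN literals become float('nan')

for entry in state["log_history"]:
    # Collect any logged fields that are NaN for this step.
    bad = [k for k in ("grad_norm", "loss", "eval_loss")
           if isinstance(entry.get(k), float) and math.isnan(entry[k])]
    if bad:
        print(f"step {entry['step']}: NaN in {', '.join(bad)}")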
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5095541401273885,
+  "epoch": 0.7643312101910829,
   "eval_steps": 15,
-  "global_step": 30,
+  "global_step": 45,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -241,6 +241,119 @@
       "eval_samples_per_second": 23.727,
       "eval_steps_per_second": 5.932,
       "step": 30
+    },
+    {
+      "epoch": 0.5265392781316348,
+      "grad_norm": NaN,
+      "learning_rate": 6.2e-05,
+      "loss": 0.0,
+      "step": 31
+    },
+    {
+      "epoch": 0.5435244161358811,
+      "grad_norm": NaN,
+      "learning_rate": 6.400000000000001e-05,
+      "loss": 0.0,
+      "step": 32
+    },
+    {
+      "epoch": 0.5605095541401274,
+      "grad_norm": NaN,
+      "learning_rate": 6.6e-05,
+      "loss": 0.0,
+      "step": 33
+    },
+    {
+      "epoch": 0.5774946921443737,
+      "grad_norm": NaN,
+      "learning_rate": 6.800000000000001e-05,
+      "loss": 0.0,
+      "step": 34
+    },
+    {
+      "epoch": 0.5944798301486199,
+      "grad_norm": NaN,
+      "learning_rate": 7e-05,
+      "loss": 0.0,
+      "step": 35
+    },
+    {
+      "epoch": 0.6114649681528662,
+      "grad_norm": NaN,
+      "learning_rate": 7.2e-05,
+      "loss": 0.0,
+      "step": 36
+    },
+    {
+      "epoch": 0.6284501061571125,
+      "grad_norm": NaN,
+      "learning_rate": 7.4e-05,
+      "loss": 0.0,
+      "step": 37
+    },
+    {
+      "epoch": 0.6454352441613588,
+      "grad_norm": NaN,
+      "learning_rate": 7.6e-05,
+      "loss": 0.0,
+      "step": 38
+    },
+    {
+      "epoch": 0.6624203821656051,
+      "grad_norm": NaN,
+      "learning_rate": 7.800000000000001e-05,
+      "loss": 0.0,
+      "step": 39
+    },
+    {
+      "epoch": 0.6794055201698513,
+      "grad_norm": NaN,
+      "learning_rate": 8e-05,
+      "loss": 0.0,
+      "step": 40
+    },
+    {
+      "epoch": 0.6963906581740976,
+      "grad_norm": NaN,
+      "learning_rate": 8.2e-05,
+      "loss": 0.0,
+      "step": 41
+    },
+    {
+      "epoch": 0.7133757961783439,
+      "grad_norm": NaN,
+      "learning_rate": 8.4e-05,
+      "loss": 0.0,
+      "step": 42
+    },
+    {
+      "epoch": 0.7303609341825902,
+      "grad_norm": NaN,
+      "learning_rate": 8.6e-05,
+      "loss": 0.0,
+      "step": 43
+    },
+    {
+      "epoch": 0.7473460721868365,
+      "grad_norm": NaN,
+      "learning_rate": 8.800000000000001e-05,
+      "loss": 0.0,
+      "step": 44
+    },
+    {
+      "epoch": 0.7643312101910829,
+      "grad_norm": NaN,
+      "learning_rate": 9e-05,
+      "loss": 0.0,
+      "step": 45
+    },
+    {
+      "epoch": 0.7643312101910829,
+      "eval_loss": NaN,
+      "eval_runtime": 4.2395,
+      "eval_samples_per_second": 23.588,
+      "eval_steps_per_second": 5.897,
+      "step": 45
     }
   ],
   "logging_steps": 1,
@@ -260,7 +373,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.161267083476992e+16,
+  "total_flos": 1.741900625215488e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null