dimasik87 committed on
Commit 48ce416
1 Parent(s): 3b1c1e3

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c6d05a09290e08374ba471884c0ee6e1ac03bb5db58669cae1a410f49cb1fdd2
+ oid sha256:0f07f53588e51f38743190f04977460e90472f441f86f11d0316279112c49bd4
  size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:77b861e65db7e13b9bb0b6916a9106be085e299f76d798c6908b3c36ddf04aae
+ oid sha256:85882e52a84d3d037db3444bd0614a90f8a932e802ac241fb01e3c623663aaee
  size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2e309f5ce24036f3e6dfbcef1917f6169844aee863fe7def3368b41adabbc042
+ oid sha256:125e06bc6886fe16739325d4827ddfdb2a79ab0b603a75168c11ddc9e965ab31
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7c792918044964431737f4cb39f3769dbfd230048b1125ac69a6439eb6c8534b
+ oid sha256:e69e2b49ea642509f0c688c16fb190b7cf27dac0a18903a5e2d1467d0343d8b8
  size 1064
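The four files above are stored through Git LFS, so each diff only swaps the `oid sha256:` line of the pointer while the `size` stays the same. As a rough, illustrative sketch (not part of this commit), a pointer can be checked against a locally downloaded blob like this; the file name `adapter_model.safetensors` is only an example:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    """Compare an LFS pointer's oid and size with a file on disk."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Hypothetical usage: the pointer text as committed, the blob downloaded separately.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:0f07f53588e51f38743190f04977460e90472f441f86f11d0316279112c49bd4
size 83945296"""
print(verify_lfs_pointer(pointer, "adapter_model.safetensors"))
```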
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0006775483758953696,
+ "epoch": 0.000846935469869212,
  "eval_steps": 5,
- "global_step": 40,
+ "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -359,6 +359,92 @@
  "eval_samples_per_second": 9.692,
  "eval_steps_per_second": 4.846,
  "step": 40
+ },
+ {
+ "epoch": 0.0006944870852927538,
+ "grad_norm": 17.339757919311523,
+ "learning_rate": 2.3959403439996907e-05,
+ "loss": 14.7313,
+ "step": 41
+ },
+ {
+ "epoch": 0.000711425794690138,
+ "grad_norm": 15.278409957885742,
+ "learning_rate": 1.9098300562505266e-05,
+ "loss": 14.637,
+ "step": 42
+ },
+ {
+ "epoch": 0.0007283645040875223,
+ "grad_norm": 15.347859382629395,
+ "learning_rate": 1.4735983564590783e-05,
+ "loss": 16.3984,
+ "step": 43
+ },
+ {
+ "epoch": 0.0007453032134849065,
+ "grad_norm": 15.566729545593262,
+ "learning_rate": 1.0899347581163221e-05,
+ "loss": 15.9272,
+ "step": 44
+ },
+ {
+ "epoch": 0.0007622419228822908,
+ "grad_norm": 15.312088012695312,
+ "learning_rate": 7.612046748871327e-06,
+ "loss": 14.5249,
+ "step": 45
+ },
+ {
+ "epoch": 0.0007622419228822908,
+ "eval_loss": 2.008061170578003,
+ "eval_runtime": 5125.393,
+ "eval_samples_per_second": 9.7,
+ "eval_steps_per_second": 4.85,
+ "step": 45
+ },
+ {
+ "epoch": 0.000779180632279675,
+ "grad_norm": 18.034000396728516,
+ "learning_rate": 4.8943483704846475e-06,
+ "loss": 16.604,
+ "step": 46
+ },
+ {
+ "epoch": 0.0007961193416770592,
+ "grad_norm": 25.57406997680664,
+ "learning_rate": 2.7630079602323442e-06,
+ "loss": 19.8734,
+ "step": 47
+ },
+ {
+ "epoch": 0.0008130580510744435,
+ "grad_norm": 15.575130462646484,
+ "learning_rate": 1.231165940486234e-06,
+ "loss": 15.871,
+ "step": 48
+ },
+ {
+ "epoch": 0.0008299967604718277,
+ "grad_norm": 21.707000732421875,
+ "learning_rate": 3.0826662668720364e-07,
+ "loss": 16.6486,
+ "step": 49
+ },
+ {
+ "epoch": 0.000846935469869212,
+ "grad_norm": 19.565614700317383,
+ "learning_rate": 0.0,
+ "loss": 14.055,
+ "step": 50
+ },
+ {
+ "epoch": 0.000846935469869212,
+ "eval_loss": 2.0067672729492188,
+ "eval_runtime": 5122.5703,
+ "eval_samples_per_second": 9.705,
+ "eval_steps_per_second": 4.853,
+ "step": 50
  }
  ],
  "logging_steps": 1,
@@ -373,12 +459,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 5.608543315034112e+16,
+ "total_flos": 7.01067914379264e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null