leixa committed
Commit b1735b0 · verified · 1 Parent(s): 46aab24

Training in progress, step 147, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e861928e1b2ca38a4a6746866d7d8f7b7ca001e137c5b880cdc7ca389cae56a0
+oid sha256:6151dcd4134499202adb9f850a26aa2c5af6fa3bec8e91e35b43aa56a097fe2c
 size 191968
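
The 192 kB adapter_model.safetensors holds the (presumably PEFT-style) adapter weights saved at this checkpoint; only its LFS pointer changes in the diff. As a hedged aside, not part of this repository, the tensors it stores can be listed with the safetensors package, assuming PyTorch is installed:

from safetensors import safe_open

# List every tensor stored in the adapter file, with shape and dtype.
with safe_open("last-checkpoint/adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        t = f.get_tensor(name)
        print(f"{name}: {tuple(t.shape)} {t.dtype}")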
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d926fce7eee99bf36a4de34f618c75332ae83e4fbcc18a94351c99ca4b7722e1
+oid sha256:9ed8312b126cb5e54ec81601cd004752b3b5deebd4565b60b914c76669a94370
 size 253144
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b5043a9d527c42b332561475083f4469c87b7663281bbf8ae1e7c09b5adbc61a
+oid sha256:ed026b8d344c5d0a011031e05610698192a36ffdb63930c6e505b4062d507b67
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f9350fe25e75c3aec8e0f08fabcc3ea69e0fa51f62eea810c9a733906e4363f8
+oid sha256:e3a17ab167da966fed3c0386891062c971357d34456fa7825f3daecc1ee9f578
 size 1064
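
All four files above are stored with Git LFS, so the diff only touches their pointer files: the sha256 oid changes with each new checkpoint while the byte size stays the same. Below is a minimal sketch, not part of this repo, for checking that a downloaded file matches its pointer, using the scheduler.pt values from this commit as the example:

import hashlib
from pathlib import Path

def lfs_digest(path: Path, chunk_size: int = 1 << 20) -> str:
    """Compute the sha256 digest that Git LFS records as the pointer's oid."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Values copied from the scheduler.pt pointer in this commit.
path = Path("last-checkpoint/scheduler.pt")
expected_oid = "e3a17ab167da966fed3c0386891062c971357d34456fa7825f3daecc1ee9f578"
expected_size = 1064

assert path.stat().st_size == expected_size, "size does not match the LFS pointer"
assert lfs_digest(path) == expected_oid, "sha256 does not match the LFS pointer"
print("checkpoint file matches its LFS pointer")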
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 1.525679758308157,
+  "epoch": 1.7794561933534743,
   "eval_steps": 21,
-  "global_step": 126,
+  "global_step": 147,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -357,6 +357,63 @@
       "eval_samples_per_second": 527.718,
       "eval_steps_per_second": 67.849,
       "step": 126
+    },
+    {
+      "epoch": 1.5619335347432024,
+      "grad_norm": 0.2541723847389221,
+      "learning_rate": 5.032861611257783e-05,
+      "loss": 10.2813,
+      "step": 129
+    },
+    {
+      "epoch": 1.5981873111782479,
+      "grad_norm": 0.18722322583198547,
+      "learning_rate": 4.835720332151907e-05,
+      "loss": 10.0301,
+      "step": 132
+    },
+    {
+      "epoch": 1.634441087613293,
+      "grad_norm": 0.2005719244480133,
+      "learning_rate": 4.6388344842726264e-05,
+      "loss": 9.9704,
+      "step": 135
+    },
+    {
+      "epoch": 1.6706948640483383,
+      "grad_norm": 0.22246921062469482,
+      "learning_rate": 4.4425101967610674e-05,
+      "loss": 10.3317,
+      "step": 138
+    },
+    {
+      "epoch": 1.7069486404833838,
+      "grad_norm": 0.16641516983509064,
+      "learning_rate": 4.247052725612852e-05,
+      "loss": 10.1891,
+      "step": 141
+    },
+    {
+      "epoch": 1.743202416918429,
+      "grad_norm": 0.19296815991401672,
+      "learning_rate": 4.052765979048986e-05,
+      "loss": 10.3081,
+      "step": 144
+    },
+    {
+      "epoch": 1.7794561933534743,
+      "grad_norm": 0.30453190207481384,
+      "learning_rate": 3.859952044982329e-05,
+      "loss": 10.2634,
+      "step": 147
+    },
+    {
+      "epoch": 1.7794561933534743,
+      "eval_loss": 10.188948631286621,
+      "eval_runtime": 0.2641,
+      "eval_samples_per_second": 530.026,
+      "eval_steps_per_second": 68.146,
+      "step": 147
     }
   ],
   "logging_steps": 3,
@@ -376,7 +433,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 14055383236608.0,
+  "total_flos": 16397947109376.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null