leixa committed on
Commit
d85dd93
1 Parent(s): 3785cbf

Training in progress, step 210, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8ac95b09d8525db8031f8dbc1d8e3ed07d7e85a7df4228ef043cdaf3ee203236
+ oid sha256:cd409d2cda44386875e2237cbc14387320bb7b2b55cde3c8ddd45a9c2bb03e9a
  size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aa4029985d53a5443c979e69946d3dbe088ec242f46eb7a6202b94cf53f44199
+ oid sha256:4c8061481dc5236ca996649ca5cc63afee81ef7df75bdd47d238714a9ba6b4f2
  size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:eb35f7933787785ef92acdac24ff917ef72043d42461a8f1e3ee05f375cdf20a
+ oid sha256:46bf71438827f27fea3a358fd83e1761732ffba0ae573ca96fc80e490196bc32
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2f516a6e4e8a8eba956b80cb2ea416b9fd98f0dec12d7d9d9a36274d0eef4a63
+ oid sha256:4bd95a1db0e917ddf11b12a343f06e907fcec4b81104002e2471b4778587b465
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5472312703583062,
+ "epoch": 0.6840390879478827,
  "eval_steps": 42,
- "global_step": 168,
+ "global_step": 210,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -439,6 +439,112 @@
  "eval_samples_per_second": 23.33,
  "eval_steps_per_second": 5.866,
  "step": 168
+ },
+ {
+ "epoch": 0.5570032573289903,
+ "grad_norm": 2.361525297164917,
+ "learning_rate": 7.564496387029532e-05,
+ "loss": 1.3612,
+ "step": 171
+ },
+ {
+ "epoch": 0.5667752442996743,
+ "grad_norm": 2.969438076019287,
+ "learning_rate": 7.481469062821252e-05,
+ "loss": 1.3524,
+ "step": 174
+ },
+ {
+ "epoch": 0.5765472312703583,
+ "grad_norm": 2.654615879058838,
+ "learning_rate": 7.39752373250527e-05,
+ "loss": 1.3684,
+ "step": 177
+ },
+ {
+ "epoch": 0.5863192182410424,
+ "grad_norm": 2.500892162322998,
+ "learning_rate": 7.312691451204178e-05,
+ "loss": 1.2688,
+ "step": 180
+ },
+ {
+ "epoch": 0.5960912052117264,
+ "grad_norm": 2.7902750968933105,
+ "learning_rate": 7.227003602163295e-05,
+ "loss": 1.213,
+ "step": 183
+ },
+ {
+ "epoch": 0.6058631921824105,
+ "grad_norm": 2.477710008621216,
+ "learning_rate": 7.14049188514063e-05,
+ "loss": 1.4632,
+ "step": 186
+ },
+ {
+ "epoch": 0.6156351791530945,
+ "grad_norm": 2.8017044067382812,
+ "learning_rate": 7.05318830467969e-05,
+ "loss": 1.4655,
+ "step": 189
+ },
+ {
+ "epoch": 0.6254071661237784,
+ "grad_norm": 2.2910585403442383,
+ "learning_rate": 6.965125158269619e-05,
+ "loss": 1.3622,
+ "step": 192
+ },
+ {
+ "epoch": 0.6351791530944625,
+ "grad_norm": 2.3485262393951416,
+ "learning_rate": 6.876335024396872e-05,
+ "loss": 1.3191,
+ "step": 195
+ },
+ {
+ "epoch": 0.6449511400651465,
+ "grad_norm": 2.221445083618164,
+ "learning_rate": 6.786850750493006e-05,
+ "loss": 1.2862,
+ "step": 198
+ },
+ {
+ "epoch": 0.6547231270358306,
+ "grad_norm": 2.2322473526000977,
+ "learning_rate": 6.696705440782938e-05,
+ "loss": 1.2873,
+ "step": 201
+ },
+ {
+ "epoch": 0.6644951140065146,
+ "grad_norm": 2.134915828704834,
+ "learning_rate": 6.605932444038229e-05,
+ "loss": 1.2457,
+ "step": 204
+ },
+ {
+ "epoch": 0.6742671009771987,
+ "grad_norm": 2.2600533962249756,
+ "learning_rate": 6.514565341239861e-05,
+ "loss": 1.3544,
+ "step": 207
+ },
+ {
+ "epoch": 0.6840390879478827,
+ "grad_norm": 2.104802370071411,
+ "learning_rate": 6.422637933155162e-05,
+ "loss": 1.2662,
+ "step": 210
+ },
+ {
+ "epoch": 0.6840390879478827,
+ "eval_loss": 1.3924102783203125,
+ "eval_runtime": 22.1429,
+ "eval_samples_per_second": 23.348,
+ "eval_steps_per_second": 5.871,
+ "step": 210
  }
  ],
  "logging_steps": 3,
@@ -458,7 +564,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.267151406146519e+17,
+ "total_flos": 1.5839392576831488e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null