RodrigoSalazar-U committed
Commit 021f49d
1 Parent(s): 95444c4

Training in progress, step 7500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f0f779b104df13d2ef3ff238b794639a37aa0d0cd2cee827abc5589fa06152b0
+oid sha256:f5868ba587c3a824bc63fc899fdaeb6194618d48e020066ab2d114a4922121e1
 size 4785762744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8923f264cdf21bc1f05bbec9aea66273c2a073b140852d3ef36be0f3c2033b55
+oid sha256:1a7d4f1caf511e7340b15cbbb3b6dc53b6d076d036358b828129c1ff3e45f7d3
 size 3497859804
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acc2d65074a2fcc8399dba6d3c0a62d0568496e8b5831e17319a7dcd95d56dc4
+oid sha256:56ada7a9076e894e38e63b2e547d8bfe564d09412403a53a9fd4f37e79358b8d
 size 14308
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:41d9009edd34a178308b9e0582a661bb8622860498c029764cf1c8060b2e3cee
+oid sha256:16a4fdf7915d781b2a36593733c87afb8e5ce6e3d04384acc5dc1d5c1959abad
 size 1064
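Note: the three-line entries above are Git LFS pointer files, where `oid sha256:` is the SHA-256 digest of the actual binary payload and `size` is its length in bytes. A minimal sketch of checking a locally downloaded file against the new adapter pointer (the local path simply mirrors the repository layout and is an assumption):

```python
# Sketch: verify a downloaded LFS object against the sha256 oid and size
# recorded in its pointer file. Path and values taken from this commit's
# adapter_model.safetensors pointer; the local layout is assumed.
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and SHA-256 digest match the pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

ok = verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "f5868ba587c3a824bc63fc899fdaeb6194618d48e020066ab2d114a4922121e1",
    4785762744,
)
print("checksum matches" if ok else "checksum mismatch")
```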
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 3.8642009384487994,
+  "epoch": 4.140215291195142,
   "eval_steps": 500,
-  "global_step": 7000,
+  "global_step": 7500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -497,6 +497,41 @@
       "learning_rate": 1.4887421946651437e-05,
       "loss": 0.1058,
       "step": 7000
+    },
+    {
+      "epoch": 3.919403808998068,
+      "grad_norm": 0.451486200094223,
+      "learning_rate": 1.354154177409932e-05,
+      "loss": 0.1057,
+      "step": 7100
+    },
+    {
+      "epoch": 3.9746066795473363,
+      "grad_norm": 0.4829927384853363,
+      "learning_rate": 1.224984117967648e-05,
+      "loss": 0.1034,
+      "step": 7200
+    },
+    {
+      "epoch": 4.029809550096605,
+      "grad_norm": 0.29708752036094666,
+      "learning_rate": 1.1014239712279945e-05,
+      "loss": 0.0977,
+      "step": 7300
+    },
+    {
+      "epoch": 4.0850124206458736,
+      "grad_norm": 0.4131404459476471,
+      "learning_rate": 9.83657355395079e-06,
+      "loss": 0.09,
+      "step": 7400
+    },
+    {
+      "epoch": 4.140215291195142,
+      "grad_norm": 0.3377688229084015,
+      "learning_rate": 8.718592791191299e-06,
+      "loss": 0.0907,
+      "step": 7500
     }
   ],
   "logging_steps": 100,
@@ -516,7 +551,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.711467098590806e+18,
+  "total_flos": 3.9786029952435487e+18,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null