Femboyuwu2000 committed on
Commit 779c4bf
1 Parent(s): 224589a

Training in progress, step 3900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cde6f153e1e8b9a9e7c00323ae4d7db7386328f2d24b06facfc13a8ee245b3fe
+oid sha256:0391a43e44101ba1c8646a6094f6b65cec4318f22c74e9c2ae40b34185f7cc5d
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:124b7ab2647d2a34f2d76c3fe8f556de61ab2239e56a049ecc9fdd7887cdf35e
+oid sha256:09e8ffb13099b6ec8a1019bcdb942d10f9971ad077f0de806881fa51c83496d4
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9cbc3f7eb6570c006a685ab90bed6095506dc3296d40e802ec891bd2b2edc3fe
+oid sha256:81e66deaa7a6347bec6d2635166d3c92ba13107fc8c837f587e530f8f9f6413f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:073260aeda648df98a231950123c1b6238009c89906d5834aa58374e8953701f
+oid sha256:17f09240eb3a2df53c686d502e0925c2a9bbb6edcd515f95977492c20f85fc70
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.3104,
+  "epoch": 0.312,
   "eval_steps": 500,
-  "global_step": 3880,
+  "global_step": 3900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1365,6 +1365,13 @@
       "learning_rate": 2.793333244167681e-05,
       "loss": 3.5345,
       "step": 3880
+    },
+    {
+      "epoch": 0.31,
+      "grad_norm": 29.29547119140625,
+      "learning_rate": 2.790865673705888e-05,
+      "loss": 3.4588,
+      "step": 3900
     }
   ],
   "logging_steps": 20,
@@ -1372,7 +1379,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 9159181149831168.0,
+  "total_flos": 9202648983994368.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null