Femboyuwu2000 committed on
Commit
00d7a83
1 Parent(s): 6411802

Training in progress, step 3620, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42d62c631ca13cfab3a8d4e18d1338c8812ac7251234fda355f962f76c871066
+oid sha256:62bf22c6eeae733bf8b6cecf81b15e2b37848eb3b634eababf70fb6e6cccfec6
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e5a7aeac9850dfd4db801131fdbab98330fcd65542d55a2e5dc82fa5741fbbe3
+oid sha256:895f7436cc35982b7d3d069fb09940e4f049b1cd096fe6488c076caa50d97d4f
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dc3d6dc000d3ba51393607c0971658acfb92d6dc5b372d9d0679847e9b97f57f
+oid sha256:fedadc5ef75b1128f404f29cb98838b62424119c20db612c5cd0c4e91792a88a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ed823fd53aa11b151a56285d111464eb705a2816abfaf8a7d50c1ca9441a3e0d
+oid sha256:d232e6df33e1c271d4fc7f5958443fe46136e9ed27fdf469d7b0e41f315c0be2
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.288,
+  "epoch": 0.2896,
   "eval_steps": 500,
-  "global_step": 3600,
+  "global_step": 3620,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1267,6 +1267,13 @@
       "learning_rate": 2.8264432360950355e-05,
       "loss": 3.5626,
       "step": 3600
+    },
+    {
+      "epoch": 0.29,
+      "grad_norm": 27.4322509765625,
+      "learning_rate": 2.8241678862842374e-05,
+      "loss": 3.4831,
+      "step": 3620
     }
   ],
   "logging_steps": 20,
@@ -1274,7 +1281,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8519236213211136.0,
+  "total_flos": 8562835271024640.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null