Nexspear committed (verified) · Commit 6c95d7a · Parent(s): 7cbae87

Training in progress, step 63, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:418cf71d40f31601a0fb7898371e7cbd777ab4dac5d7db6b21568f304cb32419
+oid sha256:e7fd699c55557ed55070429a00f0683c6e4782303df524e0788ed06fef3cf8c0
 size 167832240
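
The adapter_model.safetensors pointer above is presumably a PEFT-style adapter weight file tracked through Git LFS; between checkpoints only its sha256 oid changes, while the byte size stays constant. A minimal sketch of loading such an adapter, assuming a PEFT checkpoint layout; the base model name below is a placeholder, since the base model is not recorded in this diff:

# Sketch: load the checkpoint's adapter weights with PEFT (hypothetical base model name).
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_NAME")  # placeholder, not in this commit
model = PeftModel.from_pretrained(base, "last-checkpoint")      # reads adapter_model.safetensors
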
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:330a3ce1fc5554bc4e11b0e93ff306fa93402f571796182bca7cabd783009e61
+oid sha256:ae78be996d1f5d59a47198d4c0588e6633e2a4e293e3706e977b2391ef5bd4ac
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87951bd1d28cfc789427636c05788fc21db0e7e40be21589b4278413c44c66bd
+oid sha256:49644ad8f5452e53758ca52013eb3650a3941bcec6083d72c46c935a677f7d88
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec430a8fba90f7f39f74e916eb32712c363a0fd20bb4904251fce0eb82f2b9cf
+oid sha256:922634a168fad3088c2a461ec82359f2941891b1472f492b835996e27c3cba9d
 size 1064
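
Each pointer above follows the Git LFS pointer format: a version line, the sha256 oid of the object's content, and its size in bytes. A quick integrity check of a pulled file against its pointer, as a sketch (the path and expected values here are taken from the scheduler.pt diff above):

# Sketch: verify a resolved LFS object against the sha256 oid and size from its pointer.
import hashlib, os

path = "last-checkpoint/scheduler.pt"
expected_oid = "922634a168fad3088c2a461ec82359f2941891b1472f492b835996e27c3cba9d"
expected_size = 1064

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest == expected_oid, "sha256 mismatch"
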
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0045955491255691245,
+  "epoch": 0.005361473979830646,
   "eval_steps": 9,
-  "global_step": 54,
+  "global_step": 63,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -189,6 +189,35 @@
       "eval_samples_per_second": 13.845,
       "eval_steps_per_second": 1.731,
       "step": 54
+    },
+    {
+      "epoch": 0.004850857410322965,
+      "grad_norm": 2.7015180587768555,
+      "learning_rate": 2.3256088156396868e-05,
+      "loss": 4.4799,
+      "step": 57
+    },
+    {
+      "epoch": 0.005106165695076805,
+      "grad_norm": 2.699486017227173,
+      "learning_rate": 2.0658795558326743e-05,
+      "loss": 4.4337,
+      "step": 60
+    },
+    {
+      "epoch": 0.005361473979830646,
+      "grad_norm": 2.7420105934143066,
+      "learning_rate": 1.8109066104575023e-05,
+      "loss": 4.4129,
+      "step": 63
+    },
+    {
+      "epoch": 0.005361473979830646,
+      "eval_loss": 1.1171752214431763,
+      "eval_runtime": 1430.265,
+      "eval_samples_per_second": 13.837,
+      "eval_steps_per_second": 1.73,
+      "step": 63
     }
   ],
   "logging_steps": 3,
@@ -208,7 +237,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.59380728306729e+16,
+  "total_flos": 8.859441830245171e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null