leixa committed · verified
Commit 86437c9 · 1 Parent(s): 9fab689

Training in progress, step 78, checkpoint
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:146a208d3aa4db0b6797a0b23846578e21f54342fc2a9b6f797541c735af524f
+oid sha256:8d34e1b1858956956b2a4bc7d70576542147952fd58b8e3f1c7df21e7eecd1f0
 size 150486964
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:559fbbc77ee7bda4b9cb7dfb44f1f251410a918168a2aea24cbfbb3cb8bec867
+oid sha256:5ffb5552c62229403427efd349092fc35e09bedff55d8fb73712fa390452f3c7
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb438ab9499d5178d553290d304fad6bd105cd4f59f8f03d9657a249b7bd5f14
+oid sha256:a4d6e5f76805b36e4d76ee2a3b48ba3bedb1c2bda79be4f5c70f809dd0d57438
 size 1064
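
The three binary files above are tracked as Git LFS pointers: the repository stores only a `version` line, a `sha256` object id, and the byte size, while the tensors themselves live in LFS storage. A minimal sketch of how the updated `optimizer.pt` blob could be checked against its pointer after download (the local path is an assumption about where the checkpoint was pulled):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the updated pointer above; the local path is an assumption.
expected = "8d34e1b1858956956b2a4bc7d70576542147952fd58b8e3f1c7df21e7eecd1f0"
local = Path("last-checkpoint/optimizer.pt")

if sha256_of(local) == expected:
    print("optimizer.pt matches the committed LFS pointer")
else:
    print("hash mismatch: the local file does not correspond to this commit")
```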
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.6632653061224489,
+  "epoch": 0.7959183673469388,
   "eval_steps": 13,
-  "global_step": 65,
+  "global_step": 78,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -202,6 +202,49 @@
       "eval_samples_per_second": 36.476,
       "eval_steps_per_second": 4.642,
       "step": 65
+    },
+    {
+      "epoch": 0.673469387755102,
+      "grad_norm": NaN,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 0.0,
+      "step": 66
+    },
+    {
+      "epoch": 0.7040816326530612,
+      "grad_norm": NaN,
+      "learning_rate": 6.22170203068947e-05,
+      "loss": 0.0,
+      "step": 69
+    },
+    {
+      "epoch": 0.7346938775510204,
+      "grad_norm": NaN,
+      "learning_rate": 5.8927844739931834e-05,
+      "loss": 0.0,
+      "step": 72
+    },
+    {
+      "epoch": 0.7653061224489796,
+      "grad_norm": NaN,
+      "learning_rate": 5.559822380516539e-05,
+      "loss": 0.0,
+      "step": 75
+    },
+    {
+      "epoch": 0.7959183673469388,
+      "grad_norm": NaN,
+      "learning_rate": 5.2243241517525754e-05,
+      "loss": 0.0,
+      "step": 78
+    },
+    {
+      "epoch": 0.7959183673469388,
+      "eval_loss": NaN,
+      "eval_runtime": 4.5322,
+      "eval_samples_per_second": 36.406,
+      "eval_steps_per_second": 4.634,
+      "step": 78
     }
   ],
   "logging_steps": 3,
@@ -221,7 +264,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.76894074945536e+16,
+  "total_flos": 2.122728899346432e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
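
The new log entries (steps 66 through 78) all report `grad_norm: NaN` with a loss of 0.0, and the step-78 evaluation reports `eval_loss: NaN`, which usually means the run has diverged. A minimal sketch for flagging such entries when inspecting a downloaded checkpoint (the local path is an assumption; `log_history` is the standard key in a Trainer's `trainer_state.json`):

```python
import json
import math
from pathlib import Path

# Path is an assumption about where the checkpoint was downloaded.
state_path = Path("last-checkpoint/trainer_state.json")

# The Trainer writes bare NaN literals; Python's json module parses them to float("nan").
state = json.loads(state_path.read_text())

for entry in state["log_history"]:
    nan_keys = [k for k, v in entry.items()
                if isinstance(v, float) and math.isnan(v)]
    if nan_keys:
        print(f"step {entry.get('step')}: NaN in {', '.join(nan_keys)}")
```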