leixa committed
Commit 1bee715
Parent: 62d3bc7

Training in progress, step 78, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b004125126c5ffcd7f5a52278582e0bbca469d3c94482cf7c834c9174f9bb268
+oid sha256:0696594f83839f848a9fb64316cd8f0fa1678b6a6ebd76756d0417be0a738d2d
 size 639691872
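
Each of these CHANGED blocks rewrites a Git LFS pointer file, not the binary itself: the pointer records the LFS spec version, the SHA-256 oid of the stored object, and its size in bytes. Here only the oid changes; the adapter stays 639,691,872 bytes. A minimal sketch (not part of this repository) of checking a locally pulled copy of the new adapter against its pointer; the path is an assumption:

import hashlib

# Assumed local path; requires the LFS object to have been pulled (git lfs pull).
path = "last-checkpoint/adapter_model.safetensors"

# Expected values copied from the new pointer file in this commit.
expected_oid = "0696594f83839f848a9fb64316cd8f0fa1678b6a6ebd76756d0417be0a738d2d"
expected_size = 639691872

digest = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    # Hash in 1 MiB chunks so the ~640 MB adapter never sits in memory at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
        size += len(chunk)

print("oid matches: ", digest.hexdigest() == expected_oid)
print("size matches:", size == expected_size)
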
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58626b946af51b020c1453719a4944beb19c24f35d12db6a55983a848644b8a4
+oid sha256:f433fdbdda0f9a302c42618afafd1488d146199d9b0e84b79c28cab70c4b869a
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d52029764bce6b207ece38b8ac6ef3a9c9a1677d93ff032b9d0c8abcf44917a
+oid sha256:d0bfe203dcc9b3b030e44f5295d53d381de790a5365f0913e245991089bf10fb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb438ab9499d5178d553290d304fad6bd105cd4f59f8f03d9657a249b7bd5f14
+oid sha256:a4d6e5f76805b36e4d76ee2a3b48ba3bedb1c2bda79be4f5c70f809dd0d57438
 size 1064
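
The three smaller files updated above hold training state rather than weights: optimizer.pt (optimizer state), scheduler.pt (learning-rate schedule state), and rng_state.pth (random-number-generator states). Together with the adapter weights they are what allow training to resume exactly at step 78 rather than restart. A hedged sketch of inspecting them locally once the LFS objects have been pulled; the paths are assumptions and the key names are only what such files typically contain:

import torch

ckpt_dir = "last-checkpoint"  # assumed local clone with LFS objects pulled

# weights_only=False is needed on recent PyTorch because these files can hold
# plain Python/NumPy objects, not just tensors; only use it on checkpoints you trust.
optimizer_state = torch.load(f"{ckpt_dir}/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load(f"{ckpt_dir}/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load(f"{ckpt_dir}/rng_state.pth", map_location="cpu", weights_only=False)

print(sorted(optimizer_state))  # typically "state" and "param_groups"
print(sorted(scheduler_state))  # scheduler internals such as "last_epoch"
print(sorted(rng_state))        # per-source RNG states (python / numpy / cpu / cuda)
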
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.01800055386319579,
+  "epoch": 0.02160066463583495,
   "eval_steps": 13,
-  "global_step": 65,
+  "global_step": 78,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -202,6 +202,49 @@
       "eval_samples_per_second": 13.193,
       "eval_steps_per_second": 1.651,
       "step": 65
+    },
+    {
+      "epoch": 0.01827748546109111,
+      "grad_norm": 26.79857063293457,
+      "learning_rate": 6.545084971874738e-05,
+      "loss": 1.5632,
+      "step": 66
+    },
+    {
+      "epoch": 0.01910828025477707,
+      "grad_norm": 1.9956955909729004,
+      "learning_rate": 6.22170203068947e-05,
+      "loss": 1.4358,
+      "step": 69
+    },
+    {
+      "epoch": 0.01993907504846303,
+      "grad_norm": 4.290536403656006,
+      "learning_rate": 5.8927844739931834e-05,
+      "loss": 1.3875,
+      "step": 72
+    },
+    {
+      "epoch": 0.02076986984214899,
+      "grad_norm": 2.6607825756073,
+      "learning_rate": 5.559822380516539e-05,
+      "loss": 1.5102,
+      "step": 75
+    },
+    {
+      "epoch": 0.02160066463583495,
+      "grad_norm": 3.075331687927246,
+      "learning_rate": 5.2243241517525754e-05,
+      "loss": 1.3868,
+      "step": 78
+    },
+    {
+      "epoch": 0.02160066463583495,
+      "eval_loss": 0.3655809164047241,
+      "eval_runtime": 461.183,
+      "eval_samples_per_second": 13.188,
+      "eval_steps_per_second": 1.65,
+      "step": 78
     }
   ],
   "logging_steps": 3,
@@ -221,7 +264,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0477654222307328e+17,
+  "total_flos": 1.2672972249838387e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null