leixa committed
Commit 855d4d1 · verified · 1 parent: ef4d404

Training in progress, step 105, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1a166fd1375d531101b7fe544fadf6ca75cb91e0259b33cdd526e64d4a2ffc1b
+ oid sha256:266cd94871718a986e4ce23c183790fc4b0de8cc9aa2104fde09808eb0885d75
  size 1001465824
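
`adapter_model.safetensors` is tracked with Git LFS, so the hunk above only swaps the pointer file (oid and size); the roughly 1 GB of adapter weights live in LFS storage. As a minimal sketch only, assuming this is a causal-LM PEFT/LoRA adapter whose `adapter_config.json` sits next to it in `last-checkpoint/`, and with a placeholder base-model name (the base model is not identified in this commit), it could be loaded like so:

```python
# Minimal sketch, not from this repo: load the checkpointed adapter for inference.
# "BASE_MODEL_NAME" is a placeholder; the base model is not named in this commit.
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_NAME")
# Reads last-checkpoint/adapter_model.safetensors (plus its adapter_config.json,
# assumed to be present) and wraps the base model with the trained adapter.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()
```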
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:daac49d5d7c56d84fb7f9ba7c9dee83610088e5c7a7ae5db279cfd5b1e0f0781
+ oid sha256:1e33311b799b7918ecff9dc9f8b578379d48f6bda91cf68adc1a95d45266093b
  size 509176980
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2c595568d5a7bb7b9b57941fb29c461e3dc17e26eb6b784f3922a9394cd85bee
+ oid sha256:83b302bd10368c09cb19ba400e1fa8ceca162eab977031b0bc94e411bbd47746
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:31c70338869ffec5aa6e537de1ea64302e45bfe61a54ec17491a7c787e89c12b
+ oid sha256:9042f02fbfca7c1dbbfe6d148e2a1de0ab7c9345d455fd2ba76f5d757c8ebcc0
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 1.5627705627705628,
+ "epoch": 1.8225108225108224,
  "eval_steps": 15,
- "global_step": 90,
+ "global_step": 105,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -273,6 +273,49 @@
  "eval_samples_per_second": 4.437,
  "eval_steps_per_second": 0.595,
  "step": 90
+ },
+ {
+ "epoch": 1.6147186147186146,
+ "grad_norm": 0.9706316590309143,
+ "learning_rate": 4.855468326228638e-05,
+ "loss": 2.0843,
+ "step": 93
+ },
+ {
+ "epoch": 1.6666666666666665,
+ "grad_norm": 1.0962471961975098,
+ "learning_rate": 4.566888047586507e-05,
+ "loss": 2.0797,
+ "step": 96
+ },
+ {
+ "epoch": 1.7186147186147185,
+ "grad_norm": 0.9773306846618652,
+ "learning_rate": 4.27975536108268e-05,
+ "loss": 2.1115,
+ "step": 99
+ },
+ {
+ "epoch": 1.7705627705627704,
+ "grad_norm": 0.9265680909156799,
+ "learning_rate": 3.9950299516607766e-05,
+ "loss": 2.1697,
+ "step": 102
+ },
+ {
+ "epoch": 1.8225108225108224,
+ "grad_norm": 1.1050467491149902,
+ "learning_rate": 3.713663458410779e-05,
+ "loss": 2.157,
+ "step": 105
+ },
+ {
+ "epoch": 1.8225108225108224,
+ "eval_loss": 0.6672056317329407,
+ "eval_runtime": 21.8494,
+ "eval_samples_per_second": 4.439,
+ "eval_steps_per_second": 0.595,
+ "step": 105
  }
  ],
  "logging_steps": 3,
@@ -292,7 +335,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4.636856020893696e+17,
+ "total_flos": 5.403225279902515e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null