dimasik87 committed
Commit 69fd4d1
Parent: 4ec9a65

Training in progress, step 32, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7215f9f15c094cf3cdeb1e5537709a89bef70adc369dc292e35882f21cc46725
+oid sha256:128525095c9c4aa2360441c5622c7962547ed143337bfac52c83e3c618dc4e84
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ed5fd588ee748ab7c105ed8409ef61fdb4fcfc08394d7fd879127026767aac6
+oid sha256:dfbc3e8f45768fc4da85d1bfec94316cf2bc55ecb2cfaf6f30ff35e6567ace72
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6e18c671f1c508102bc524438e5e499b73e4276946e8f3dfc5062547fd2a74b0
+oid sha256:fd8ec78ff15268c9da253ecb4b146058c0df0371aefad94e4b756060686ce26d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff994fffd2fb6fe21545e6fbc55baa2a1474438a89b2d40605678f7de701427c
+oid sha256:06c69db2db6de56f38ba12b474a491d20087e27dc2893a95d6ac7716476ca645
 size 1064
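
The four binary checkpoint files above (adapter weights, optimizer state, RNG state, LR scheduler) are tracked with Git LFS, so the commit only rewrites their pointer files: the `oid sha256:` line changes to the hash of the new payload while the `size` stays identical, consistent with the tensors keeping their shapes. A minimal sketch for checking that a locally downloaded file matches its pointer; the local path is an assumption, not part of this commit:

```python
import hashlib
import os

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return the sha256 digest Git LFS records as the pointer oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of the adapter file from this checkpoint.
path = "last-checkpoint/adapter_model.safetensors"
print("oid sha256:" + lfs_oid(path))   # expect ...618dc4e84 for this commit
print("size", os.path.getsize(path))   # expect 167832240
```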
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07537012113055182,
+  "epoch": 0.08613728129205922,
   "eval_steps": 4,
-  "global_step": 28,
+  "global_step": 32,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -267,6 +267,42 @@
       "eval_samples_per_second": 7.82,
       "eval_steps_per_second": 7.82,
       "step": 28
+    },
+    {
+      "epoch": 0.07806191117092867,
+      "grad_norm": 0.8260064721107483,
+      "learning_rate": 0.0001078459095727845,
+      "loss": 0.2503,
+      "step": 29
+    },
+    {
+      "epoch": 0.08075370121130551,
+      "grad_norm": 1.2920253276824951,
+      "learning_rate": 0.0001,
+      "loss": 0.3839,
+      "step": 30
+    },
+    {
+      "epoch": 0.08344549125168237,
+      "grad_norm": 1.1818400621414185,
+      "learning_rate": 9.215409042721552e-05,
+      "loss": 0.2586,
+      "step": 31
+    },
+    {
+      "epoch": 0.08613728129205922,
+      "grad_norm": 1.3729400634765625,
+      "learning_rate": 8.435655349597689e-05,
+      "loss": 0.4248,
+      "step": 32
+    },
+    {
+      "epoch": 0.08613728129205922,
+      "eval_loss": 0.27306219935417175,
+      "eval_runtime": 10.1319,
+      "eval_samples_per_second": 7.797,
+      "eval_steps_per_second": 7.797,
+      "step": 32
     }
   ],
   "logging_steps": 1,
@@ -286,7 +322,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.1591988805632e+16,
+  "total_flos": 1.3168499283197952e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null