leixa committed on
Commit 0f68f7b
1 Parent(s): c4cbfce

Training in progress, step 117, checkpoint

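To reproduce exactly this checkpoint state, the commit hash above can be pinned when downloading from the Hub. A minimal sketch with huggingface_hub, assuming a hypothetical repo id (the actual repository name is not part of this diff):

from huggingface_hub import snapshot_download

# Hypothetical repo id; only the revision (commit 0f68f7b) comes from this commit page.
local_dir = snapshot_download(
    repo_id="leixa/your-model-repo",       # assumption: replace with the real repository
    revision="0f68f7b",                    # pin the "step 117" checkpoint commit
    allow_patterns=["last-checkpoint/*"],  # fetch only the checkpoint files listed below
)
print(local_dir)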
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d15e4509d78b338702add67e1a0342d20cea1e62d70828144cdd82d7e225f61c
+oid sha256:df9b95bc471cc3fea848c45abf3ec644b179a80655080267108d364c7410a07b
 size 639691872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a134a8407784819add1857aa0a3a9ee8c24a75e636b885a04bed46b9e8d507c
+oid sha256:9bd86cd5aaff0df3defda957eee1d5e98476a0bed9dff47a67adde018538ba72
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:11f555bc649def2d5f222c82b81fe7588d9710c829bc5246c9b7c7e93cf60c40
+oid sha256:a9dfcfa634c3d602fbb32d8617cc1f5299a65b972221d7081152805a0ea9f6b3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c89b61481eb2980a04190d12a0165f95c9fc0a4db20ad8cc3d895835eac42f68
+oid sha256:49a9f08177ba433a82ec0fa599b737a453ff55e83dd9d31c6fddaf5bc4b0a0b3
 size 1064
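Each of the four files above is stored as a Git LFS pointer: the repository tracks only the spec version, the sha256 oid, and the byte size, while the payload lives in LFS storage. A small verification sketch, assuming the checkpoint has been pulled locally (the path and expected oid below are taken from the adapter_model.safetensors pointer in this commit):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream in chunks so the ~640 MB adapter never needs to fit in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid from the new adapter_model.safetensors pointer above.
expected = "df9b95bc471cc3fea848c45abf3ec644b179a80655080267108d364c7410a07b"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
assert actual == expected, f"oid mismatch: {actual}"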
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.028800886181113265,
+  "epoch": 0.032400996953752426,
   "eval_steps": 13,
-  "global_step": 104,
+  "global_step": 117,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -317,6 +317,49 @@
       "eval_samples_per_second": 13.199,
       "eval_steps_per_second": 1.651,
       "step": 104
+    },
+    {
+      "epoch": 0.029077817779008586,
+      "grad_norm": 2.075512647628784,
+      "learning_rate": 2.3398396174233178e-05,
+      "loss": 1.4585,
+      "step": 105
+    },
+    {
+      "epoch": 0.029908612572694546,
+      "grad_norm": 2.6266424655914307,
+      "learning_rate": 2.061073738537635e-05,
+      "loss": 1.4344,
+      "step": 108
+    },
+    {
+      "epoch": 0.030739407366380506,
+      "grad_norm": 1.5184435844421387,
+      "learning_rate": 1.7956219300748793e-05,
+      "loss": 1.387,
+      "step": 111
+    },
+    {
+      "epoch": 0.031570202160066466,
+      "grad_norm": 2.3101930618286133,
+      "learning_rate": 1.544686755065677e-05,
+      "loss": 1.3772,
+      "step": 114
+    },
+    {
+      "epoch": 0.032400996953752426,
+      "grad_norm": 2.4926693439483643,
+      "learning_rate": 1.3094050125632972e-05,
+      "loss": 1.339,
+      "step": 117
+    },
+    {
+      "epoch": 0.032400996953752426,
+      "eval_loss": 0.3452422320842743,
+      "eval_runtime": 460.5584,
+      "eval_samples_per_second": 13.206,
+      "eval_steps_per_second": 1.652,
+      "step": 117
     }
   ],
   "logging_steps": 3,
@@ -336,7 +379,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.689729633311785e+17,
+  "total_flos": 1.9026089571935846e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null