dimasik87 committed
Commit 86ab861
1 Parent(s): 2436a7d

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:15e78fa49fb00271ad118e01dad1e6674dfce4334a69cbe9e3cf1f3e76ece4a4
+oid sha256:f23294f8b89cf6925e8d59776d5a66dd2d4c6da2ed4d25863a2dd3fe47b031c5
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6e12cf4a8339ad6e6ccc2e4a2b586e7d84e101d5a489f8144ee3b6d4413b9990
+oid sha256:5904f1110d6059c1226c28dec7c4e85eeb3cc24651b9e84f3a948758f7ec1fe9
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5daf23a304609686c8d20bb6fce1b79761fd95eb6abbd30d197b1a5200625adb
+oid sha256:ce9f59fd5c03d0708261ef0e7af3053978f1a8a6c1d8fb67d92ba701dee6b3ec
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0aa0f75a1f3e346be25756b578158b09a68943f0b9f1cfe29f97939687f864ef
+oid sha256:7c792918044964431737f4cb39f3769dbfd230048b1125ac69a6439eb6c8534b
 size 1064
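
All four files above are tracked with Git LFS, so each diff only touches the three-line pointer file: the spec version, the sha256 oid of the blob, and its size in bytes (the oids change while the sizes stay constant). As a hedged illustration of how those pointer fields are consumed, here is a minimal Python sketch that checks a locally downloaded blob against such a pointer; the file paths in the usage comment are placeholders, not paths taken from this repository.

import hashlib
from pathlib import Path

def read_lfs_pointer(pointer_file):
    # Parse the three-line Git LFS pointer (version, oid, size) into a dict.
    fields = {}
    for line in Path(pointer_file).read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return fields

def verify(local_file, pointer_file):
    # Compare the local file's byte size and sha256 digest with the pointer's size/oid.
    pointer = read_lfs_pointer(pointer_file)
    expected_oid = pointer["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(pointer["size"])
    data = Path(local_file).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical usage (placeholder paths):
# print(verify("adapter_model.safetensors", "adapter_model.safetensors.pointer"))

In practice git lfs fsck or the Hub's download tooling performs this check; the sketch only shows what the oid and size lines mean.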
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09690444145356662,
+  "epoch": 0.10767160161507403,
   "eval_steps": 4,
-  "global_step": 36,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -339,6 +339,42 @@
       "eval_samples_per_second": 7.749,
       "eval_steps_per_second": 7.749,
       "step": 36
+    },
+    {
+      "epoch": 0.09959623149394348,
+      "grad_norm": 1.368626356124878,
+      "learning_rate": 4.7750143528405126e-05,
+      "loss": 0.4373,
+      "step": 37
+    },
+    {
+      "epoch": 0.10228802153432032,
+      "grad_norm": 0.7500585913658142,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 0.2919,
+      "step": 38
+    },
+    {
+      "epoch": 0.10497981157469717,
+      "grad_norm": 0.903423011302948,
+      "learning_rate": 3.5055195166981645e-05,
+      "loss": 0.3673,
+      "step": 39
+    },
+    {
+      "epoch": 0.10767160161507403,
+      "grad_norm": 0.7334548830986023,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 0.2465,
+      "step": 40
+    },
+    {
+      "epoch": 0.10767160161507403,
+      "eval_loss": 0.27042919397354126,
+      "eval_runtime": 10.1181,
+      "eval_samples_per_second": 7.808,
+      "eval_steps_per_second": 7.808,
+      "step": 40
     }
   ],
   "logging_steps": 1,
@@ -358,7 +394,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.4745009760763904e+16,
+  "total_flos": 1.62287843278848e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null