dimasik87 committed on
Commit 71e16e7
1 Parent(s): d21e365

Training in progress, step 48, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:384ed821673440e0c55d6d023a0358c53debfa6ec642c549b2ca0d3a82345d68
+oid sha256:f967a6e912caa29dde25ca3288c033c52ad0f53515a483f7c8fa809fc65a3760
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b3e5353dd622bb5d97a3055c8b897729d053cfa74bade3e5c0db01e28a8e2f16
+oid sha256:92adf1fefefc8daad7c13159f188bf22733a5d0c335079f978687ad653fa5327
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c027f738a4dfd52c6913eec1699a4b97c1baecedcd1dcbe2a0d7d12892180c4
+oid sha256:da87e4c065bb443c099cfa52ab3a5635819290010328feda7944776129428914
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7ed6431293c03d1e193e9501dc8ed4f4955bf079fba7f2c1c309a3efed166e50
+oid sha256:6a946ba862c9290cf73cbdcd016309ecac1eb5b3695453bc8a42a8a259a1c695
 size 1064
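The four files above are stored as Git LFS pointers, so only the `oid sha256:` digest and the `size` appear in the diff. A minimal sketch of how the digest could be checked locally, assuming the real files have been pulled into `last-checkpoint/` (path taken from the diff headers above):

```python
import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# The LFS oid is the SHA-256 of the file contents, so a checkout that matches
# this commit should print
# f967a6e912caa29dde25ca3288c033c52ad0f53515a483f7c8fa809fc65a3760
# for the new adapter weights.
print(sha256_of_file("last-checkpoint/adapter_model.safetensors"))
```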
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.013174638820270979,
+  "epoch": 0.014372333258477431,
   "eval_steps": 4,
-  "global_step": 44,
+  "global_step": 48,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -411,6 +411,42 @@
       "eval_samples_per_second": 8.45,
       "eval_steps_per_second": 8.45,
       "step": 44
+    },
+    {
+      "epoch": 0.013474062429822591,
+      "grad_norm": 34.80168151855469,
+      "learning_rate": 7.612046748871327e-06,
+      "loss": 4.7818,
+      "step": 45
+    },
+    {
+      "epoch": 0.013773486039374205,
+      "grad_norm": 25.594444274902344,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 3.0296,
+      "step": 46
+    },
+    {
+      "epoch": 0.014072909648925817,
+      "grad_norm": 39.9898567199707,
+      "learning_rate": 2.7630079602323442e-06,
+      "loss": 1.9811,
+      "step": 47
+    },
+    {
+      "epoch": 0.014372333258477431,
+      "grad_norm": 58.41755294799805,
+      "learning_rate": 1.231165940486234e-06,
+      "loss": 2.8259,
+      "step": 48
+    },
+    {
+      "epoch": 0.014372333258477431,
+      "eval_loss": 3.2478675842285156,
+      "eval_runtime": 83.2404,
+      "eval_samples_per_second": 8.457,
+      "eval_steps_per_second": 8.457,
+      "step": 48
     }
   ],
   "logging_steps": 1,
@@ -430,7 +466,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.6321520238329856e+16,
+  "total_flos": 1.7805294805450752e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null