Femboyuwu2000 committed on
Commit f546f4d
1 Parent(s): fb7a77c

Training in progress, step 2800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:654b23694cdd1bbec4f90862155f3f41c2a6bcac9229cb226a66d80cc205f4cc
+oid sha256:1a6d51afb5613c7e07b57dba34d6e18086edbcf93686c18e65caf50ce10b821e
 size 13982248
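Each of these entries is a Git LFS pointer, not the weights themselves: the repository only tracks the spec version, the sha256 oid, and the byte size, while the actual blob lives in LFS storage. A minimal sketch of checking a locally downloaded file against such a pointer (the paths in the commented call are placeholders, not files from this commit):

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_against_pointer(blob_path: str, pointer_path: str) -> bool:
    """Return True if the blob's size and sha256 digest match the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False
    return hashlib.sha256(blob.read_bytes()).hexdigest() == expected_oid


# Hypothetical local paths; adjust to wherever the checkpoint was downloaded.
# print(verify_against_pointer("adapter_model.safetensors",
#                              "adapter_model.safetensors.pointer"))
```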
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2fdce7da28ed66b3d91cfa2a1cbe53544b2858c9aa4b494826630ef96e4b87c
+oid sha256:0c78f94cf2f5d394f6ffbcbada32faaf7421d6e7e2210fc0b7aa3f9304f69fb1
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d27a260316eae40f12eb26cf979926c13542623ae9f28e7854a86a259b8f86ac
+oid sha256:e56e2dd61d2120005a26d903703b6fbaef4472282e0e02d16683e4bcff04ca50
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6154d037b631b5f05d1997c63d1a8dc99cf7eb96d3d6cf27edbd3ad94f018941
+oid sha256:8bfb1808e171f97eba17f5beca7d0a41ebaaf65e4eb012e17bca2fb8e72cb499
 size 1064
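Besides the adapter weights, the checkpoint updates optimizer.pt, scheduler.pt, and rng_state.pth, which is what allows transformers' Trainer to pick the run back up mid-epoch instead of restarting from step 0. A minimal sketch of resuming from this folder, assuming a Trainer already configured with the same model, TrainingArguments, and datasets as the original run:

```python
from transformers import Trainer


def resume(trainer: Trainer, checkpoint_dir: str = "last-checkpoint"):
    # Trainer.train(resume_from_checkpoint=...) restores the optimizer,
    # LR scheduler, RNG states, and the global_step from trainer_state.json
    # before continuing training.
    return trainer.train(resume_from_checkpoint=checkpoint_dir)
```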
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2224,
+  "epoch": 0.224,
   "eval_steps": 500,
-  "global_step": 2780,
+  "global_step": 2800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -980,6 +980,13 @@
       "learning_rate": 2.9074970738873054e-05,
       "loss": 3.6409,
       "step": 2780
+    },
+    {
+      "epoch": 0.22,
+      "grad_norm": 27.53672218322754,
+      "learning_rate": 2.9058100575095156e-05,
+      "loss": 3.5903,
+      "step": 2800
     }
   ],
   "logging_steps": 20,
@@ -987,7 +994,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 6577585471488000.0,
+  "total_flos": 6625810162974720.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
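trainer_state.json accumulates one log entry every logging_steps (20 here), so the file alone is enough to follow the loss curve without a separate logger. A small sketch of pulling the most recent entry out of it; the default path is a placeholder for wherever the checkpoint lives locally:

```python
import json
from pathlib import Path


def latest_log_entry(state_path: str = "last-checkpoint/trainer_state.json") -> dict:
    """Return the most recent entry from the trainer's log history."""
    state = json.loads(Path(state_path).read_text())
    return state["log_history"][-1]


entry = latest_log_entry()
print(f"step {entry['step']}: loss={entry['loss']}, lr={entry['learning_rate']}")
```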