Femboyuwu2000 committed on
Commit cf9ffc1
1 Parent(s): 9de744b

Training in progress, step 2780, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b7c4a44abf8634f7abd4ae2e7c69c9985e580503513a18830a2ecd874d0f563
+oid sha256:654b23694cdd1bbec4f90862155f3f41c2a6bcac9229cb226a66d80cc205f4cc
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6523b809018c23a175a6391f7281cb9e9f51b92a78e512668164068571daab52
+oid sha256:e2fdce7da28ed66b3d91cfa2a1cbe53544b2858c9aa4b494826630ef96e4b87c
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d02bb0ca8829dd2c6092c231d8f86f0365111ccb21a1e6b4322c7eda5086789
+oid sha256:d27a260316eae40f12eb26cf979926c13542623ae9f28e7854a86a259b8f86ac
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d857a77763b85adde3e5e30fb04f1ce5b7dbf57906196936546a9f424267aa89
+oid sha256:6154d037b631b5f05d1997c63d1a8dc99cf7eb96d3d6cf27edbd3ad94f018941
 size 1064
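
The four files above are Git LFS pointer files: only the sha256 object id changes, while the byte size stays the same and the actual checkpoint blobs live in LFS storage. A minimal sketch for checking that a locally downloaded file matches the new pointer, assuming the repository has already been fetched so that last-checkpoint/adapter_model.safetensors exists on disk at that path:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so large checkpoint blobs need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the updated adapter_model.safetensors pointer above.
expected = "654b23694cdd1bbec4f90862155f3f41c2a6bcac9229cb226a66d80cc205f4cc"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("ok" if actual == expected else f"hash mismatch: {actual}")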
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2208,
+  "epoch": 0.2224,
   "eval_steps": 500,
-  "global_step": 2760,
+  "global_step": 2780,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -973,6 +973,13 @@
       "learning_rate": 2.909169326286807e-05,
       "loss": 3.4967,
       "step": 2760
+    },
+    {
+      "epoch": 0.22,
+      "grad_norm": 41.78467559814453,
+      "learning_rate": 2.9074970738873054e-05,
+      "loss": 3.6409,
+      "step": 2780
     }
   ],
   "logging_steps": 20,
@@ -980,7 +987,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 6537004557631488.0,
+  "total_flos": 6577585471488000.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null