Femboyuwu2000 committed on
Commit edbc89f
1 Parent(s): 7c6c8b6

Training in progress, step 3460, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:88bd2b571093acbc27fcd8536450a3527fc0193315a3c3ce86c40e2966b6388d
+oid sha256:406424f725b9028141f772e089cad085915f5e7009a4360642947df9e09aef62
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1e87e2cffd6499ab28af59d0b4ee7a16fdb3892b678d0ef0cb0afe39a44a7524
+oid sha256:22136554903baf5dc10cdaf8959e7db746437ffba579d74c15cecb3532f4bd87
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ee292670562564a7bb33c2459a592a62918de5efa5b5e8582d3b7a88297d572c
+oid sha256:9773ea3fb71f0f0969063d9b7bfcc76343c6e53cf42371dca5deed02dc682e43
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c459e5a971be0d13a40d59c3e5859b24b3694b53687b127d583761b575636ed7
+oid sha256:fced894a4b71f5250dfe4d1c7fd9bf448653b2cdd683ed6e3b7c3ba78979133a
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2752,
+  "epoch": 0.2768,
   "eval_steps": 500,
-  "global_step": 3440,
+  "global_step": 3460,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1211,6 +1211,13 @@
       "learning_rate": 2.8441431650084018e-05,
       "loss": 3.4839,
       "step": 3440
+    },
+    {
+      "epoch": 0.28,
+      "grad_norm": 46.23302459716797,
+      "learning_rate": 2.841979781287424e-05,
+      "loss": 3.5411,
+      "step": 3460
     }
   ],
   "logging_steps": 20,
@@ -1218,7 +1225,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 8161651766132736.0,
+  "total_flos": 8203774557880320.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null