Femboyuwu2000 committed
Commit ed6e52a
1 Parent(s): 412b1a9

Training in progress, step 6500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a30a008dae072195fc28eccb26ab5bd9eb4cd09d5723d01bd5eef06c2f548cc
+oid sha256:98e08bf1a33a98f5ad95a313e7a7f611270dd303d602c0511a878d938534fca8
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f4c548ed51d455157bc66ff27bbdf7cc0ece0b25d6b6583885fd9166381cfa1f
+oid sha256:4631ab9889a8ec8ca19b9d63d135f464285d7b10f4a57df37d5602c15eb4157e
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab96dece0057048172e676099e7a2287bdd92408cbb9619002ab46488332801f
+oid sha256:327156111fc189e7e1e06944c3644c0b0e602db7096e3ae9ec3b005b4a71556b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f067d06bd847c479f71bde77c89318403c2c2dbcdb7f1dd86724b30b9fac060c
+oid sha256:559bbd1bba34568007cb524880da5ab92a31da308e4db32d6963a8a220ceec7d
 size 1064
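
The four files above are tracked with Git LFS, so each diff only changes the three-line pointer (version, oid, size); the oid is the SHA-256 of the file contents. A minimal sketch, assuming the checkpoint has been downloaded locally, for checking that a file matches the pointer recorded in this commit (the local path is a placeholder):

import hashlib
from pathlib import Path

# Placeholder path to a locally downloaded copy of the checkpoint file.
path = Path("last-checkpoint/adapter_model.safetensors")

# A Git LFS pointer stores the SHA-256 of the file contents ("oid sha256:...")
# and the size in bytes, so both can be verified directly.
digest = hashlib.sha256(path.read_bytes()).hexdigest()
print("oid sha256:", digest)          # expect 98e08bf1a33a98f5ad95a313e7a7f611270dd303d602c0511a878d938534fca8
print("size:", path.stat().st_size)   # expect 13982248
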
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.5184,
+  "epoch": 0.52,
   "eval_steps": 500,
-  "global_step": 6480,
+  "global_step": 6500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2275,6 +2275,13 @@
       "learning_rate": 2.3698455101245052e-05,
       "loss": 3.5403,
       "step": 6480
+    },
+    {
+      "epoch": 0.52,
+      "grad_norm": 28.261327743530273,
+      "learning_rate": 2.3658830835585294e-05,
+      "loss": 3.5093,
+      "step": 6500
     }
   ],
   "logging_steps": 20,
@@ -2282,7 +2289,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 1.5298774882516992e+16,
+  "total_flos": 1.5342734805368832e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null