Femboyuwu2000 committed
Commit 0db53fc
1 parent: 9dba93d

Training in progress, step 420, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1826199ce99703a7d2d59648b49ee661301c1ea410965f9387eb99140722aea4
+oid sha256:7c65255ddd80a79d081a462fb0e47c505c70cc4ec1f5c180d5893fc538c0cf46
 size 13982248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:21072423999cc222ac587607d166171f947ed78c90ac333ffbd138f589bf8a35
+oid sha256:5a0b0d2ee83c41e0a6310838f74db01ddc2200d37e671a4de13bb392a19faf7b
 size 7062522
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f7b286d49b95ae88580bf2c664acf642fad5bbdb5e1fe3eacfc43452c8836952
+oid sha256:466b75100f91cee5a56826b8b389b4e82eaca68c0c9410222ac8faaf583279b1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d6578a81728865849e42e6628772ec9d1e42fa52835b77cf7f6701c70eb964a
+oid sha256:98c8022bf7a9a1164892708673761968b7e2c729d993619227f343f725c5c5ae
 size 1064
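
The four files above are tracked with Git LFS, so the repository only versions small pointer files: each records the spec version, the SHA-256 of the actual blob, and its byte size, which is why only the oid line changes from checkpoint to checkpoint. A minimal Python sketch of how such a pointer text is derived from a local file (illustrative; it assumes the LFS objects themselves have been fetched, not just the pointers):

    import hashlib
    import os

    def lfs_pointer(path: str) -> str:
        # Git LFS pointer: spec version, SHA-256 of the file contents, and size in bytes,
        # matching the three lines shown in the diffs above.
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).hexdigest()
        return (
            "version https://git-lfs.github.com/spec/v1\n"
            f"oid sha256:{digest}\n"
            f"size {os.path.getsize(path)}\n"
        )

    # Example (hypothetical local path from this checkpoint):
    print(lfs_pointer("last-checkpoint/adapter_model.safetensors"))
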
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.032,
+  "epoch": 0.0336,
   "eval_steps": 500,
-  "global_step": 400,
+  "global_step": 420,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -147,6 +147,13 @@
       "learning_rate": 1.9999999999999998e-05,
       "loss": 3.9454,
       "step": 400
+    },
+    {
+      "epoch": 0.03,
+      "grad_norm": 19.407127380371094,
+      "learning_rate": 2.1e-05,
+      "loss": 4.0119,
+      "step": 420
     }
   ],
   "logging_steps": 20,
@@ -154,7 +161,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 2,
   "save_steps": 20,
-  "total_flos": 961410073952256.0,
+  "total_flos": 1006518203744256.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null