ardaspear committed (verified)
Commit: e3b3c86
Parent(s): d739149

Training in progress, step 63, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b5bc3665eb969d55941f958da6a3d13c244b9eb8ee809445f4a65b52931f858c
+oid sha256:0421aa29105f02d352b3e31dec1837b8b4c6b078120ab76e42ec9cf8461c1663
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c52c5a3f256efcce06dadccc2702edeef3814a5c812633b60970144030ebb435
+oid sha256:46462f547f758d2c2c12bf486d24e294ac00c4663f542674bb46aec4385a2083
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87951bd1d28cfc789427636c05788fc21db0e7e40be21589b4278413c44c66bd
+oid sha256:49644ad8f5452e53758ca52013eb3650a3941bcec6083d72c46c935a677f7d88
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec430a8fba90f7f39f74e916eb32712c363a0fd20bb4904251fce0eb82f2b9cf
+oid sha256:922634a168fad3088c2a461ec82359f2941891b1472f492b835996e27c3cba9d
 size 1064
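
Each of the four checkpoint binaries above is tracked with Git LFS, so the commit only rewrites the pointer files (the `version` / `oid sha256:...` / `size` triplet); the actual tensors live in LFS storage. Below is a minimal sketch, assuming a locally downloaded copy of the checkpoint, of how one could check a file against the oid and size recorded in its pointer. The local path and the inlined pointer text are illustrative, taken from the adapter_model.safetensors hunk above.

```python
import hashlib
from pathlib import Path


def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file so large checkpoints don't need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def check_lfs_pointer(pointer_text: str, local_file: Path) -> bool:
    """Compare a git-lfs v1 pointer (version/oid/size lines) against a local file."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    return (
        local_file.stat().st_size == expected_size
        and sha256_of(local_file) == expected_oid
    )


# Illustrative values copied from the adapter_model.safetensors pointer above.
pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:0421aa29105f02d352b3e31dec1837b8b4c6b078120ab76e42ec9cf8461c1663
size 167832240
"""
print(check_lfs_pointer(pointer, Path("last-checkpoint/adapter_model.safetensors")))
```

Streaming the hash keeps memory usage flat even for multi-gigabyte optimizer states.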
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.004886214541012532,
+  "epoch": 0.005700583631181287,
   "eval_steps": 9,
-  "global_step": 54,
+  "global_step": 63,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -189,6 +189,35 @@
       "eval_samples_per_second": 12.932,
       "eval_steps_per_second": 1.617,
       "step": 54
+    },
+    {
+      "epoch": 0.005157670904402117,
+      "grad_norm": 1.517822504043579,
+      "learning_rate": 2.3256088156396868e-05,
+      "loss": 0.5922,
+      "step": 57
+    },
+    {
+      "epoch": 0.005429127267791703,
+      "grad_norm": 1.7860591411590576,
+      "learning_rate": 2.0658795558326743e-05,
+      "loss": 0.437,
+      "step": 60
+    },
+    {
+      "epoch": 0.005700583631181287,
+      "grad_norm": 4.32674503326416,
+      "learning_rate": 1.8109066104575023e-05,
+      "loss": 0.5061,
+      "step": 63
+    },
+    {
+      "epoch": 0.005700583631181287,
+      "eval_loss": 0.4976564645767212,
+      "eval_runtime": 1439.5584,
+      "eval_samples_per_second": 12.93,
+      "eval_steps_per_second": 1.616,
+      "step": 63
     }
   ],
   "logging_steps": 3,
@@ -208,7 +237,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.798615858741248e+16,
+  "total_flos": 9.098385168531456e+16,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
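
The trainer_state.json diff above adds three training log entries (steps 57, 60, 63) and the step-63 evaluation, alongside the updated epoch, global_step, and total_flos. A small sketch, assuming the checkpoint directory has been pulled locally and that the log array is the usual `log_history` field of the Transformers Trainer state, that prints the latest entries and the steps-per-epoch implied by the epoch fraction:

```python
import json
from pathlib import Path

# Assumed local path; adjust to wherever the checkpoint was downloaded.
state_path = Path("last-checkpoint/trainer_state.json")
state = json.loads(state_path.read_text())

print(f"global_step = {state['global_step']}, epoch = {state['epoch']:.6f}")

# Epoch advances linearly with the optimizer step, so the per-step increment
# implies the number of steps in one full epoch.
steps_per_epoch = state["global_step"] / state["epoch"]
print(f"~{steps_per_epoch:,.0f} steps per epoch at the current batch size")

# Show the last few logged entries (the step-57/60/63 losses and the latest eval).
for entry in state.get("log_history", [])[-4:]:
    print(entry)
```

At step 63 the recorded epoch of about 0.0057 works out to roughly 11,000 optimizer steps per full epoch for this dataset and batch size.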