dimasik87 committed (verified)
Commit: 194ff34
1 Parent(s): 827f267

Training in progress, step 24, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8bf3b554a722161d949f66405978a8112bd762c52b942d8b436544e3dad153d9
+oid sha256:c465b0e3b15658575943591e823cbcdecd76aced8e5b9eb3b46fb7c3e5f89def
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9659418d3605e5b40babd95284fbf5ec8686f8df71e70dc60388ce063f42d584
+oid sha256:38b8f98710483c8d3cd51b5ea223c460194d9ac7e6e5c31f0946ed657c6a258f
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5f121b3f8e6b3ed310c94a49893eb97a3ee7cfc910bf43c67bb9322868d758fd
+oid sha256:49b94774303e58a13c6692326b1b2c8587483979f24ee2e9470fb045acc1a400
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
+oid sha256:470670603e8fdc5330cdb9a9152c4fd9c3d8c5a74dd26bffbbb0d869d097eafa
 size 1064
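
The four files above are Git LFS pointers, so this commit only changes the recorded `oid sha256:` digest of each payload (the sizes are unchanged). As a sanity check after downloading the checkpoint, a digest comparison like the minimal sketch below confirms a local file matches its pointer; it assumes the files sit in a local `last-checkpoint/` directory, and the path is illustrative.

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex-encoded SHA-256 digest."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected digest taken from the new LFS pointer for adapter_model.safetensors above.
expected = "c465b0e3b15658575943591e823cbcdecd76aced8e5b9eb3b46fb7c3e5f89def"
local = Path("last-checkpoint/adapter_model.safetensors")  # assumed local download path

if sha256_of(local) == expected:
    print("checkpoint file matches the pointer oid")
else:
    print("digest mismatch: re-download the file")
```

Streaming the file in fixed-size chunks keeps memory use flat even for the ~336 MB optimizer state.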
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.005988472191032263,
+  "epoch": 0.007186166629238716,
   "eval_steps": 4,
-  "global_step": 20,
+  "global_step": 24,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -195,6 +195,42 @@
       "eval_samples_per_second": 8.467,
       "eval_steps_per_second": 8.467,
       "step": 20
+    },
+    {
+      "epoch": 0.006287895800583876,
+      "grad_norm": 18.60074806213379,
+      "learning_rate": 0.00016494480483301836,
+      "loss": 4.4333,
+      "step": 21
+    },
+    {
+      "epoch": 0.006587319410135489,
+      "grad_norm": 15.545724868774414,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 3.6316,
+      "step": 22
+    },
+    {
+      "epoch": 0.0068867430196871025,
+      "grad_norm": 23.216941833496094,
+      "learning_rate": 0.0001522498564715949,
+      "loss": 3.2189,
+      "step": 23
+    },
+    {
+      "epoch": 0.007186166629238716,
+      "grad_norm": 16.716583251953125,
+      "learning_rate": 0.00014539904997395468,
+      "loss": 3.0378,
+      "step": 24
+    },
+    {
+      "epoch": 0.007186166629238716,
+      "eval_loss": 3.7071402072906494,
+      "eval_runtime": 83.2372,
+      "eval_samples_per_second": 8.458,
+      "eval_steps_per_second": 8.458,
+      "step": 24
     }
   ],
   "logging_steps": 1,
@@ -214,7 +250,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7418872835604480.0,
+  "total_flos": 8902647402725376.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null