vdos committed
Commit 84a17ce
1 parent: 54b5e6b

Training in progress, step 47, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8919b7e782d1c2443ad66ca535b4a4af3e464654c2ca832d1f58cc6bf9aede84
+ oid sha256:b3a466b9f1ccfb57b29855eb8f50e6de7c9e0e9e10c39928b8d478bd2533a028
  size 125048
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b014331bddf69a90069c01eca0c250f24c6225b57f51a04f7e7124c27d999e54
+ oid sha256:977af5186bcb5246476c6fbc1a6a5081bb3b83b1f244499888dcb58420fba1b1
  size 259694
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:319d6b8fda060d0294fd97158033c18d9c48822021690e34a7cd8af00ab53617
+ oid sha256:33227ebdc5d770ec0ff71c6f47b6abcdc96ce349fa2484b414d214f5542ba1e6
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26f8b3a1736e564a4b137739bad0216b115210f6249caaa3cc1297f0a2de1098
+ oid sha256:91d2ac2b7eabf057fe3a35c0bbc4681bb9253f386e25e6e7cbf956cd50e43d02
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:970a46bb7467d976a43f0beb9e7c74dbeb8f4ff725d368f66e8bfebd5270fe4d
+ oid sha256:7dfbe37ee33752ada5cb964d53b19a0358e420903b7ebbd188f18e5b9e51658c
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:07ffb34c2329cff24c81267ba92e305440f5541296091d810f80fdc7a8f09d8f
+ oid sha256:1c0449131b67708924f0d9f9675c17daaa11a086eb1e024cfcd64d74452c62ef
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:698599bc6a23f90bcfa6fe6477fe271a75dd69b61906d06e949f339ef005f13a
+ oid sha256:35151b95c3cc502d9e4fc6d9dc99d112a424ad3624e33428ecbe517e837a6fc0
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 11.071593284606934,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 1.6,
+ "epoch": 3.008,
  "eval_steps": 25,
- "global_step": 25,
+ "global_step": 47,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -198,6 +198,160 @@
  "eval_samples_per_second": 1047.442,
  "eval_steps_per_second": 138.341,
  "step": 25
+ },
+ {
+ "epoch": 1.6640000000000001,
+ "grad_norm": 4.809144020080566,
+ "learning_rate": 4.477357683661734e-05,
+ "loss": 11.0729,
+ "step": 26
+ },
+ {
+ "epoch": 1.728,
+ "grad_norm": 5.203744888305664,
+ "learning_rate": 4.131759111665349e-05,
+ "loss": 11.078,
+ "step": 27
+ },
+ {
+ "epoch": 1.792,
+ "grad_norm": 3.483856678009033,
+ "learning_rate": 3.790390522001662e-05,
+ "loss": 11.0775,
+ "step": 28
+ },
+ {
+ "epoch": 1.8559999999999999,
+ "grad_norm": 4.308492660522461,
+ "learning_rate": 3.4549150281252636e-05,
+ "loss": 11.0724,
+ "step": 29
+ },
+ {
+ "epoch": 1.92,
+ "grad_norm": 4.462175369262695,
+ "learning_rate": 3.12696703292044e-05,
+ "loss": 11.0692,
+ "step": 30
+ },
+ {
+ "epoch": 1.984,
+ "grad_norm": 4.674779891967773,
+ "learning_rate": 2.8081442660546125e-05,
+ "loss": 11.0758,
+ "step": 31
+ },
+ {
+ "epoch": 2.048,
+ "grad_norm": 4.3319244384765625,
+ "learning_rate": 2.500000000000001e-05,
+ "loss": 11.0778,
+ "step": 32
+ },
+ {
+ "epoch": 2.112,
+ "grad_norm": 4.53694486618042,
+ "learning_rate": 2.2040354826462668e-05,
+ "loss": 11.078,
+ "step": 33
+ },
+ {
+ "epoch": 2.176,
+ "grad_norm": 4.895163536071777,
+ "learning_rate": 1.9216926233717085e-05,
+ "loss": 11.0718,
+ "step": 34
+ },
+ {
+ "epoch": 2.24,
+ "grad_norm": 6.260696887969971,
+ "learning_rate": 1.6543469682057106e-05,
+ "loss": 11.0733,
+ "step": 35
+ },
+ {
+ "epoch": 2.304,
+ "grad_norm": 3.851774215698242,
+ "learning_rate": 1.4033009983067452e-05,
+ "loss": 11.0782,
+ "step": 36
+ },
+ {
+ "epoch": 2.368,
+ "grad_norm": 4.45391845703125,
+ "learning_rate": 1.1697777844051105e-05,
+ "loss": 11.0703,
+ "step": 37
+ },
+ {
+ "epoch": 2.432,
+ "grad_norm": 5.240525722503662,
+ "learning_rate": 9.549150281252633e-06,
+ "loss": 11.0769,
+ "step": 38
+ },
+ {
+ "epoch": 2.496,
+ "grad_norm": 5.4221577644348145,
+ "learning_rate": 7.597595192178702e-06,
+ "loss": 11.0718,
+ "step": 39
+ },
+ {
+ "epoch": 2.56,
+ "grad_norm": 4.008364200592041,
+ "learning_rate": 5.852620357053651e-06,
+ "loss": 11.0728,
+ "step": 40
+ },
+ {
+ "epoch": 2.624,
+ "grad_norm": 4.672859191894531,
+ "learning_rate": 4.322727117869951e-06,
+ "loss": 11.0769,
+ "step": 41
+ },
+ {
+ "epoch": 2.6879999999999997,
+ "grad_norm": 5.85573148727417,
+ "learning_rate": 3.0153689607045845e-06,
+ "loss": 11.0651,
+ "step": 42
+ },
+ {
+ "epoch": 2.752,
+ "grad_norm": 5.035050868988037,
+ "learning_rate": 1.9369152030840556e-06,
+ "loss": 11.0731,
+ "step": 43
+ },
+ {
+ "epoch": 2.816,
+ "grad_norm": 4.608683109283447,
+ "learning_rate": 1.0926199633097157e-06,
+ "loss": 11.0689,
+ "step": 44
+ },
+ {
+ "epoch": 2.88,
+ "grad_norm": 5.071901321411133,
+ "learning_rate": 4.865965629214819e-07,
+ "loss": 11.0771,
+ "step": 45
+ },
+ {
+ "epoch": 2.944,
+ "grad_norm": 6.193559169769287,
+ "learning_rate": 1.2179748700879012e-07,
+ "loss": 11.0724,
+ "step": 46
+ },
+ {
+ "epoch": 3.008,
+ "grad_norm": 3.6927578449249268,
+ "learning_rate": 0.0,
+ "loss": 11.0724,
+ "step": 47
  }
  ],
  "logging_steps": 1,
@@ -221,12 +375,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 2101346304000.0,
+ "total_flos": 3950531051520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null