dada22231 committed
Commit 1012244 · verified · 1 Parent(s): 7ade3b3

Training in progress, step 50, checkpoint

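This checkpoint follows the cadence recorded in trainer_state.json below (evaluate and save every 25 steps, log every step, per-device batch size 1). As a minimal sketch only, the TrainingArguments below would reproduce that cadence; the output_dir and every flag not present in trainer_state.json are assumptions for illustration, not taken from this commit.

# Minimal sketch (not the actual training script): TrainingArguments consistent
# with the schedule recorded in trainer_state.json. output_dir and any setting
# not shown in that file are assumptions.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="miner_id_24",        # checkpoints appear as miner_id_24/checkpoint-<step>
    per_device_train_batch_size=1,   # "train_batch_size": 1
    logging_steps=1,                 # "logging_steps": 1
    eval_strategy="steps",           # named evaluation_strategy on older transformers releases
    eval_steps=25,                   # "eval_steps": 25
    save_steps=25,                   # produced checkpoint-25 and checkpoint-50
    load_best_model_at_end=True,     # maintains "best_metric" / "best_model_checkpoint"
)
print(args.eval_steps, args.save_steps)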
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1882108af6d608e2676ecaa6664223ceb00dc7174189258500bfc93339e2e029
+oid sha256:793dfc603f2c7a7c15dcdf4bb51da84a72125a8219b19ce1cbd254729d64737e
 size 237402
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46030a6e9332c869aa2458095b8e3dd9ba63a49424c9fd44d7b74b1609df6982
+oid sha256:856e7d4eb9bd346bf124dff87a8946f1671a604bfd7531d553ff39abb77add29
 size 222294
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:91636dec793429ee602fdaf9b9faa0afdc47095f72ae969f5c58810a97da0987
+oid sha256:1c54bf62083df637d1530b0f4e42da7a5383f1440dcf59914ac070a0cc961306
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e50f70672872f3b8010a720ca817361faa4c57b49eed6c138b47d944908fb4fb
+oid sha256:45b3ae6d3c75fc9a883598570634d2d02e94adb288116ee96016d8e66593f33c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f07cf1bebaf9d9405dc7edc08f6baf744cadc9272e707450483ddf1b8d071a67
+oid sha256:ca4e985a3b2ce7e77c43587f7b4ad3d34862356bce8859e611d669cac953ad1a
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:25a4d2fc00d7f1440110e76ad6cee684c0489b4e4ff90c7bcb515e8b99f55976
+oid sha256:2338ab34fe89067e42575e3154cbe5a7bc3334783ac35e5c51d9e7df9a240fec
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f5c305e4a92be904895c02f0f0a1da666e6e7555f6043a8f089990c87f4ce88
+oid sha256:df19ed1a9610a5422497073697cbf4575f80de47fbb46ef0cdd2779386b031fa
 size 1064
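
Each of the files above is stored as a git-lfs pointer: the pointer records only the object's sha256 ("oid sha256:<hash>") and its size in bytes, both of which change with every new checkpoint. A small sketch of verifying a downloaded file against its pointer follows; the path is an example, and the expected values are the new oid and size of adapter_model.safetensors from this commit.

# Sketch: verify a locally downloaded file against its git-lfs pointer fields.
import hashlib
import os

def lfs_fields(path: str) -> tuple[str, int]:
    """Return (sha256 hex digest, size in bytes) for a local file."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest(), os.path.getsize(path)

oid, size = lfs_fields("last-checkpoint/adapter_model.safetensors")  # example path
print(oid == "793dfc603f2c7a7c15dcdf4bb51da84a72125a8219b19ce1cbd254729d64737e")
print(size == 237402)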
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 6.91409969329834,
-  "best_model_checkpoint": "miner_id_24/checkpoint-25",
-  "epoch": 0.012340346763744061,
+  "best_metric": 6.8887457847595215,
+  "best_model_checkpoint": "miner_id_24/checkpoint-50",
+  "epoch": 0.024680693527488123,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 593.267,
       "eval_steps_per_second": 154.249,
       "step": 25
+    },
+    {
+      "epoch": 0.012833960634293824,
+      "grad_norm": 0.15400898456573486,
+      "learning_rate": 8.681980515339464e-05,
+      "loss": 6.92,
+      "step": 26
+    },
+    {
+      "epoch": 0.013327574504843585,
+      "grad_norm": 0.16461063921451569,
+      "learning_rate": 8.571489144483944e-05,
+      "loss": 6.9187,
+      "step": 27
+    },
+    {
+      "epoch": 0.013821188375393348,
+      "grad_norm": 0.15997587144374847,
+      "learning_rate": 8.457416554680877e-05,
+      "loss": 6.9185,
+      "step": 28
+    },
+    {
+      "epoch": 0.014314802245943111,
+      "grad_norm": 0.17501673102378845,
+      "learning_rate": 8.339895749467238e-05,
+      "loss": 6.915,
+      "step": 29
+    },
+    {
+      "epoch": 0.014808416116492874,
+      "grad_norm": 0.16417518258094788,
+      "learning_rate": 8.219063752844926e-05,
+      "loss": 6.9176,
+      "step": 30
+    },
+    {
+      "epoch": 0.015302029987042635,
+      "grad_norm": 0.17748786509037018,
+      "learning_rate": 8.095061449516903e-05,
+      "loss": 6.9119,
+      "step": 31
+    },
+    {
+      "epoch": 0.015795643857592398,
+      "grad_norm": 0.17625170946121216,
+      "learning_rate": 7.968033420621935e-05,
+      "loss": 6.9124,
+      "step": 32
+    },
+    {
+      "epoch": 0.01628925772814216,
+      "grad_norm": 0.16539260745048523,
+      "learning_rate": 7.838127775159452e-05,
+      "loss": 6.914,
+      "step": 33
+    },
+    {
+      "epoch": 0.016782871598691924,
+      "grad_norm": 0.18232646584510803,
+      "learning_rate": 7.705495977301078e-05,
+      "loss": 6.9073,
+      "step": 34
+    },
+    {
+      "epoch": 0.017276485469241685,
+      "grad_norm": 0.17349576950073242,
+      "learning_rate": 7.570292669790186e-05,
+      "loss": 6.9101,
+      "step": 35
+    },
+    {
+      "epoch": 0.01777009933979145,
+      "grad_norm": 0.1874023824930191,
+      "learning_rate": 7.43267549363537e-05,
+      "loss": 6.9062,
+      "step": 36
+    },
+    {
+      "epoch": 0.01826371321034121,
+      "grad_norm": 0.17965620756149292,
+      "learning_rate": 7.292804904308087e-05,
+      "loss": 6.905,
+      "step": 37
+    },
+    {
+      "epoch": 0.01875732708089097,
+      "grad_norm": 0.17918899655342102,
+      "learning_rate": 7.150843984658754e-05,
+      "loss": 6.9092,
+      "step": 38
+    },
+    {
+      "epoch": 0.019250940951440736,
+      "grad_norm": 0.18893581628799438,
+      "learning_rate": 7.006958254769438e-05,
+      "loss": 6.9072,
+      "step": 39
+    },
+    {
+      "epoch": 0.019744554821990497,
+      "grad_norm": 0.189700648188591,
+      "learning_rate": 6.861315478964841e-05,
+      "loss": 6.9048,
+      "step": 40
+    },
+    {
+      "epoch": 0.020238168692540262,
+      "grad_norm": 0.19713561236858368,
+      "learning_rate": 6.714085470206609e-05,
+      "loss": 6.9022,
+      "step": 41
+    },
+    {
+      "epoch": 0.020731782563090023,
+      "grad_norm": 0.18430104851722717,
+      "learning_rate": 6.56543989209901e-05,
+      "loss": 6.9024,
+      "step": 42
+    },
+    {
+      "epoch": 0.021225396433639784,
+      "grad_norm": 0.18701981008052826,
+      "learning_rate": 6.415552058736854e-05,
+      "loss": 6.903,
+      "step": 43
+    },
+    {
+      "epoch": 0.02171901030418955,
+      "grad_norm": 0.18464837968349457,
+      "learning_rate": 6.264596732629e-05,
+      "loss": 6.902,
+      "step": 44
+    },
+    {
+      "epoch": 0.02221262417473931,
+      "grad_norm": 0.18956860899925232,
+      "learning_rate": 6.112749920933111e-05,
+      "loss": 6.8958,
+      "step": 45
+    },
+    {
+      "epoch": 0.02270623804528907,
+      "grad_norm": 0.18333902955055237,
+      "learning_rate": 5.960188670239154e-05,
+      "loss": 6.9006,
+      "step": 46
+    },
+    {
+      "epoch": 0.023199851915838836,
+      "grad_norm": 0.17345166206359863,
+      "learning_rate": 5.80709086014102e-05,
+      "loss": 6.8991,
+      "step": 47
+    },
+    {
+      "epoch": 0.023693465786388597,
+      "grad_norm": 0.17997267842292786,
+      "learning_rate": 5.653634995836856e-05,
+      "loss": 6.9001,
+      "step": 48
+    },
+    {
+      "epoch": 0.02418707965693836,
+      "grad_norm": 0.18579334020614624,
+      "learning_rate": 5.500000000000001e-05,
+      "loss": 6.8974,
+      "step": 49
+    },
+    {
+      "epoch": 0.024680693527488123,
+      "grad_norm": 0.1840241253376007,
+      "learning_rate": 5.346365004163145e-05,
+      "loss": 6.8931,
+      "step": 50
+    },
+    {
+      "epoch": 0.024680693527488123,
+      "eval_loss": 6.8887457847595215,
+      "eval_runtime": 0.0853,
+      "eval_samples_per_second": 586.013,
+      "eval_steps_per_second": 152.363,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -226,7 +409,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2852978688000.0,
+  "total_flos": 5705957376000.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null