dsakerkwq committed
Commit 0f68ee7
1 Parent(s): 8190d5a

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a41e360a3f9a0f257b98c7648325387bb4020dd54dc51900d2cefe84a0222086
+oid sha256:fbfbef29435175efd23bfc37642586d740969cfd25ff74fb4cc959e07ecb1d2a
 size 432223744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:caed606f85ab19b4e8558b9adac2606d5d3366fcee973cbba542f96558053b86
+oid sha256:1fb478cc2e004c1ed1f7c132a00ca1a364eac184825be56ad9130d5e7b2818fe
 size 864785974
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:72d041ac83f563cb84d466918d05b0736705ff9a650d41535e8aaa0c8bb48f1d
+oid sha256:8b2dbd8deb17cb1a4779249b931ef0d90c93ba7158b48e40f7f92fe04adf4964
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a698e31fe920afc66628b9262859dcd8a507a4bec2ac734959d91e961117e09
+oid sha256:65e19fc3b88ea273d99a9b0f99b3f3ed651f4874a11cfa7c0a9e33f8e8cbdbdc
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1483aca8b157fb4873afaa6f0f77c7e2c462b06b46af771efd5efe4007afa4e0
+oid sha256:74d6c7d4fafe2186f063bf07be8bbf59a16a55c57458c0e5aca00c6a98961cde
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b2ffa4cd34c8ebb680343b0218aec67793563c31785a8c2ae7c354827246ea80
+oid sha256:c61c260d7148ac1b78bd4e31b931f4ca26b2744aea16185a2b56640637a3d4c7
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d271cdb95f63cd655315f063ca2e25c78dc5ae4275523c5d4f80f367586b3351
+oid sha256:5607f6de446164d9d9adb8b91c44cec55b14aa391e24ba5637c08b834eedda2a
 size 1064
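
All of the binary checkpoint files above are tracked with Git LFS, so the commit only rewrites their pointer files: the `oid sha256:` line changes for each artifact while the recorded `size` stays the same. As a minimal sketch (not part of this commit, with hypothetical local paths), a pointer file can be parsed and checked against a downloaded blob roughly like this:

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file (version / oid sha256:... / size ... lines)."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that a local blob matches the oid and size recorded in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid


# Hypothetical paths: in a normal LFS checkout the pointer is replaced by the blob,
# so you would point these at wherever you keep the raw pointer and the download.
print(verify_blob("adapter_model.safetensors.pointer",
                  "last-checkpoint/adapter_model.safetensors"))
```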
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8159880042076111,
-  "best_model_checkpoint": "miner_id_24/checkpoint-25",
-  "epoch": 0.7036059806508356,
+  "best_metric": 0.8043988943099976,
+  "best_model_checkpoint": "miner_id_24/checkpoint-50",
+  "epoch": 1.4195250659630607,
   "eval_steps": 25,
-  "global_step": 25,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -198,6 +198,189 @@
       "eval_samples_per_second": 9.807,
       "eval_steps_per_second": 2.55,
       "step": 25
+    },
+    {
+      "epoch": 0.731750219876869,
+      "grad_norm": 1.0224896669387817,
+      "learning_rate": 5.500000000000001e-05,
+      "loss": 0.809,
+      "step": 26
+    },
+    {
+      "epoch": 0.7598944591029023,
+      "grad_norm": 0.7973669171333313,
+      "learning_rate": 5.205685918464356e-05,
+      "loss": 0.7901,
+      "step": 27
+    },
+    {
+      "epoch": 0.7880386983289358,
+      "grad_norm": 0.9329594969749451,
+      "learning_rate": 4.912632135009769e-05,
+      "loss": 0.746,
+      "step": 28
+    },
+    {
+      "epoch": 0.8161829375549692,
+      "grad_norm": 0.45507898926734924,
+      "learning_rate": 4.6220935509274235e-05,
+      "loss": 0.7476,
+      "step": 29
+    },
+    {
+      "epoch": 0.8443271767810027,
+      "grad_norm": 0.4434109926223755,
+      "learning_rate": 4.3353142970386564e-05,
+      "loss": 0.7383,
+      "step": 30
+    },
+    {
+      "epoch": 0.872471416007036,
+      "grad_norm": 0.5660054087638855,
+      "learning_rate": 4.053522406135775e-05,
+      "loss": 0.8037,
+      "step": 31
+    },
+    {
+      "epoch": 0.9006156552330695,
+      "grad_norm": 0.256831556558609,
+      "learning_rate": 3.777924554357096e-05,
+      "loss": 0.754,
+      "step": 32
+    },
+    {
+      "epoch": 0.9287598944591029,
+      "grad_norm": 0.424334853887558,
+      "learning_rate": 3.509700894014496e-05,
+      "loss": 0.7718,
+      "step": 33
+    },
+    {
+      "epoch": 0.9569041336851363,
+      "grad_norm": 0.6750025749206543,
+      "learning_rate": 3.250000000000001e-05,
+      "loss": 0.8117,
+      "step": 34
+    },
+    {
+      "epoch": 0.9850483729111698,
+      "grad_norm": 0.5063544511795044,
+      "learning_rate": 2.9999339514117912e-05,
+      "loss": 0.7905,
+      "step": 35
+    },
+    {
+      "epoch": 1.0255057167985928,
+      "grad_norm": 0.7037585377693176,
+      "learning_rate": 2.760573569460757e-05,
+      "loss": 1.3675,
+      "step": 36
+    },
+    {
+      "epoch": 1.0536499560246262,
+      "grad_norm": 0.29729217290878296,
+      "learning_rate": 2.53294383204969e-05,
+      "loss": 0.6966,
+      "step": 37
+    },
+    {
+      "epoch": 1.0817941952506596,
+      "grad_norm": 0.1905137151479721,
+      "learning_rate": 2.3180194846605367e-05,
+      "loss": 0.7452,
+      "step": 38
+    },
+    {
+      "epoch": 1.109938434476693,
+      "grad_norm": 0.3099241852760315,
+      "learning_rate": 2.1167208663446025e-05,
+      "loss": 0.7634,
+      "step": 39
+    },
+    {
+      "epoch": 1.1380826737027265,
+      "grad_norm": 0.35693028569221497,
+      "learning_rate": 1.9299099686894423e-05,
+      "loss": 0.7997,
+      "step": 40
+    },
+    {
+      "epoch": 1.16622691292876,
+      "grad_norm": 0.22667019069194794,
+      "learning_rate": 1.758386744638546e-05,
+      "loss": 0.7956,
+      "step": 41
+    },
+    {
+      "epoch": 1.1943711521547933,
+      "grad_norm": 0.3850700259208679,
+      "learning_rate": 1.602885682970026e-05,
+      "loss": 0.7755,
+      "step": 42
+    },
+    {
+      "epoch": 1.2225153913808267,
+      "grad_norm": 0.2817460596561432,
+      "learning_rate": 1.464072663102903e-05,
+      "loss": 0.8038,
+      "step": 43
+    },
+    {
+      "epoch": 1.2506596306068603,
+      "grad_norm": 0.30248430371284485,
+      "learning_rate": 1.3425421036992098e-05,
+      "loss": 0.8167,
+      "step": 44
+    },
+    {
+      "epoch": 1.2788038698328936,
+      "grad_norm": 0.28060272336006165,
+      "learning_rate": 1.2388144172720251e-05,
+      "loss": 0.6941,
+      "step": 45
+    },
+    {
+      "epoch": 1.306948109058927,
+      "grad_norm": 0.2564436197280884,
+      "learning_rate": 1.1533337816991932e-05,
+      "loss": 0.7092,
+      "step": 46
+    },
+    {
+      "epoch": 1.3350923482849604,
+      "grad_norm": 0.23669637739658356,
+      "learning_rate": 1.0864662381854632e-05,
+      "loss": 0.7473,
+      "step": 47
+    },
+    {
+      "epoch": 1.3632365875109937,
+      "grad_norm": 0.2533303499221802,
+      "learning_rate": 1.0384981238178534e-05,
+      "loss": 0.7641,
+      "step": 48
+    },
+    {
+      "epoch": 1.3913808267370273,
+      "grad_norm": 0.33936166763305664,
+      "learning_rate": 1.0096348454262845e-05,
+      "loss": 0.8011,
+      "step": 49
+    },
+    {
+      "epoch": 1.4195250659630607,
+      "grad_norm": 0.2788606882095337,
+      "learning_rate": 1e-05,
+      "loss": 0.7872,
+      "step": 50
+    },
+    {
+      "epoch": 1.4195250659630607,
+      "eval_loss": 0.8043988943099976,
+      "eval_runtime": 5.2242,
+      "eval_samples_per_second": 9.571,
+      "eval_steps_per_second": 2.488,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -221,12 +404,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.315690861756416e+17,
+  "total_flos": 6.631381723512832e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null