oldiday committed · Commit 62aeed6 · verified · 1 Parent(s): ee83768

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:07752f8f824d33e5969ebb88cf88efbbeef0cd239ec5655d3832bbba227715b0
+oid sha256:efbf0dccdb814b2d7d2fa13cc5d2c8e81230b0c54483411cb309264084e7150e
 size 78207176
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81a1d369a6c699e3f4a3f42d8d54df7feee285b627e8600204c369be3c35c20e
+oid sha256:cab0c65b01baf01fd0aabb5d5cbc483748638bef85e11eb209aac3b660581e6e
 size 40177764
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1543d263cdd4788ae1417e978dead81d20d5840d1e3b4221fcebdc0828dd8bd1
+oid sha256:de115c072803c252542859e1816fd43f8d4ef7d5909a1cde82f59143886bebba
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.44607609510421753,
-  "best_model_checkpoint": "miner_id_24/checkpoint-150",
-  "epoch": 0.14566642388929352,
+  "best_metric": 0.4413750171661377,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.19422189851905802,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -389,6 +389,126 @@
       "eval_samples_per_second": 65.662,
       "eval_steps_per_second": 16.425,
       "step": 150
+    },
+    {
+      "epoch": 0.14857975236707938,
+      "grad_norm": 0.38435041904449463,
+      "learning_rate": 1.435357758543015e-05,
+      "loss": 0.5631,
+      "step": 153
+    },
+    {
+      "epoch": 0.15149308084486526,
+      "grad_norm": 0.33270201086997986,
+      "learning_rate": 1.2658926150792322e-05,
+      "loss": 0.623,
+      "step": 156
+    },
+    {
+      "epoch": 0.15440640932265112,
+      "grad_norm": 0.22977907955646515,
+      "learning_rate": 1.1056136061894384e-05,
+      "loss": 0.4877,
+      "step": 159
+    },
+    {
+      "epoch": 0.157319737800437,
+      "grad_norm": 0.21603785455226898,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 0.4403,
+      "step": 162
+    },
+    {
+      "epoch": 0.16023306627822287,
+      "grad_norm": 0.209615558385849,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 0.4224,
+      "step": 165
+    },
+    {
+      "epoch": 0.16314639475600873,
+      "grad_norm": 0.18484285473823547,
+      "learning_rate": 6.837175952121306e-06,
+      "loss": 0.4118,
+      "step": 168
+    },
+    {
+      "epoch": 0.16605972323379461,
+      "grad_norm": 0.18960103392601013,
+      "learning_rate": 5.6388590278194096e-06,
+      "loss": 0.4072,
+      "step": 171
+    },
+    {
+      "epoch": 0.16897305171158047,
+      "grad_norm": 0.16774511337280273,
+      "learning_rate": 4.549673247541875e-06,
+      "loss": 0.366,
+      "step": 174
+    },
+    {
+      "epoch": 0.17188638018936636,
+      "grad_norm": 0.18589498102664948,
+      "learning_rate": 3.5722980755146517e-06,
+      "loss": 0.3681,
+      "step": 177
+    },
+    {
+      "epoch": 0.17479970866715222,
+      "grad_norm": 0.18734581768512726,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 0.3482,
+      "step": 180
+    },
+    {
+      "epoch": 0.17771303714493808,
+      "grad_norm": 0.19667771458625793,
+      "learning_rate": 1.962316193157593e-06,
+      "loss": 0.3446,
+      "step": 183
+    },
+    {
+      "epoch": 0.18062636562272397,
+      "grad_norm": 0.2224486619234085,
+      "learning_rate": 1.333670137599713e-06,
+      "loss": 0.3361,
+      "step": 186
+    },
+    {
+      "epoch": 0.18353969410050983,
+      "grad_norm": 0.18418388068675995,
+      "learning_rate": 8.247462563808817e-07,
+      "loss": 0.3035,
+      "step": 189
+    },
+    {
+      "epoch": 0.1864530225782957,
+      "grad_norm": 0.21602746844291687,
+      "learning_rate": 4.367965336512403e-07,
+      "loss": 0.311,
+      "step": 192
+    },
+    {
+      "epoch": 0.18936635105608157,
+      "grad_norm": 0.23305264115333557,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 0.2924,
+      "step": 195
+    },
+    {
+      "epoch": 0.19227967953386743,
+      "grad_norm": 0.290690541267395,
+      "learning_rate": 2.7337132953697554e-08,
+      "loss": 0.5002,
+      "step": 198
+    },
+    {
+      "epoch": 0.19422189851905802,
+      "eval_loss": 0.4413750171661377,
+      "eval_runtime": 25.429,
+      "eval_samples_per_second": 68.229,
+      "eval_steps_per_second": 17.067,
+      "step": 200
     }
   ],
   "logging_steps": 3,
@@ -412,12 +532,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4320127639093248.0,
+  "total_flos": 5747809305231360.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null