{
"best_metric": NaN,
"best_model_checkpoint": "miner_id_24/checkpoint-25",
"epoch": 0.24106066693451186,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0032141422257934912,
"grad_norm": NaN,
"learning_rate": 2e-05,
"loss": 0.0,
"step": 1
},
{
"epoch": 0.0032141422257934912,
"eval_loss": NaN,
"eval_runtime": 48.7668,
"eval_samples_per_second": 10.766,
"eval_steps_per_second": 5.393,
"step": 1
},
{
"epoch": 0.0064282844515869825,
"grad_norm": NaN,
"learning_rate": 4e-05,
"loss": 0.0,
"step": 2
},
{
"epoch": 0.009642426677380474,
"grad_norm": NaN,
"learning_rate": 6e-05,
"loss": 0.0,
"step": 3
},
{
"epoch": 0.012856568903173965,
"grad_norm": NaN,
"learning_rate": 8e-05,
"loss": 0.0,
"step": 4
},
{
"epoch": 0.016070711128967456,
"grad_norm": NaN,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 5
},
{
"epoch": 0.019284853354760947,
"grad_norm": NaN,
"learning_rate": 0.00012,
"loss": 0.0,
"step": 6
},
{
"epoch": 0.02249899558055444,
"grad_norm": NaN,
"learning_rate": 0.00014,
"loss": 0.0,
"step": 7
},
{
"epoch": 0.02571313780634793,
"grad_norm": NaN,
"learning_rate": 0.00016,
"loss": 0.0,
"step": 8
},
{
"epoch": 0.02892728003214142,
"grad_norm": NaN,
"learning_rate": 0.00018,
"loss": 0.0,
"step": 9
},
{
"epoch": 0.03214142225793491,
"grad_norm": NaN,
"learning_rate": 0.0002,
"loss": 0.0,
"step": 10
},
{
"epoch": 0.035355564483728404,
"grad_norm": NaN,
"learning_rate": 0.00019988322268323268,
"loss": 0.0,
"step": 11
},
{
"epoch": 0.038569706709521895,
"grad_norm": NaN,
"learning_rate": 0.00019953316347176488,
"loss": 0.0,
"step": 12
},
{
"epoch": 0.041783848935315386,
"grad_norm": NaN,
"learning_rate": 0.0001989506399451051,
"loss": 0.0,
"step": 13
},
{
"epoch": 0.04499799116110888,
"grad_norm": NaN,
"learning_rate": 0.00019813701261394136,
"loss": 0.0,
"step": 14
},
{
"epoch": 0.04821213338690237,
"grad_norm": NaN,
"learning_rate": 0.0001970941817426052,
"loss": 0.0,
"step": 15
},
{
"epoch": 0.05142627561269586,
"grad_norm": NaN,
"learning_rate": 0.00019582458291091663,
"loss": 0.0,
"step": 16
},
{
"epoch": 0.05464041783848935,
"grad_norm": NaN,
"learning_rate": 0.0001943311813257743,
"loss": 0.0,
"step": 17
},
{
"epoch": 0.05785456006428284,
"grad_norm": NaN,
"learning_rate": 0.00019261746489577765,
"loss": 0.0,
"step": 18
},
{
"epoch": 0.061068702290076333,
"grad_norm": NaN,
"learning_rate": 0.00019068743608505455,
"loss": 0.0,
"step": 19
},
{
"epoch": 0.06428284451586982,
"grad_norm": NaN,
"learning_rate": 0.000188545602565321,
"loss": 0.0,
"step": 20
},
{
"epoch": 0.06749698674166332,
"grad_norm": NaN,
"learning_rate": 0.00018619696668800492,
"loss": 0.0,
"step": 21
},
{
"epoch": 0.07071112896745681,
"grad_norm": NaN,
"learning_rate": 0.00018364701380102266,
"loss": 0.0,
"step": 22
},
{
"epoch": 0.0739252711932503,
"grad_norm": NaN,
"learning_rate": 0.00018090169943749476,
"loss": 0.0,
"step": 23
},
{
"epoch": 0.07713941341904379,
"grad_norm": NaN,
"learning_rate": 0.00017796743540632223,
"loss": 0.0,
"step": 24
},
{
"epoch": 0.08035355564483729,
"grad_norm": NaN,
"learning_rate": 0.00017485107481711012,
"loss": 0.0,
"step": 25
},
{
"epoch": 0.08035355564483729,
"eval_loss": NaN,
"eval_runtime": 48.3741,
"eval_samples_per_second": 10.853,
"eval_steps_per_second": 5.437,
"step": 25
},
{
"epoch": 0.08356769787063077,
"grad_norm": NaN,
"learning_rate": 0.00017155989607441213,
"loss": 0.0,
"step": 26
},
{
"epoch": 0.08678184009642427,
"grad_norm": NaN,
"learning_rate": 0.00016810158587867973,
"loss": 0.0,
"step": 27
},
{
"epoch": 0.08999598232221775,
"grad_norm": NaN,
"learning_rate": 0.00016448422127361706,
"loss": 0.0,
"step": 28
},
{
"epoch": 0.09321012454801125,
"grad_norm": NaN,
"learning_rate": 0.00016071625078187114,
"loss": 0.0,
"step": 29
},
{
"epoch": 0.09642426677380474,
"grad_norm": NaN,
"learning_rate": 0.00015680647467311557,
"loss": 0.0,
"step": 30
},
{
"epoch": 0.09963840899959824,
"grad_norm": NaN,
"learning_rate": 0.0001527640244106133,
"loss": 0.0,
"step": 31
},
{
"epoch": 0.10285255122539172,
"grad_norm": NaN,
"learning_rate": 0.0001485983413242606,
"loss": 0.0,
"step": 32
},
{
"epoch": 0.10606669345118522,
"grad_norm": NaN,
"learning_rate": 0.00014431915455992414,
"loss": 0.0,
"step": 33
},
{
"epoch": 0.1092808356769787,
"grad_norm": NaN,
"learning_rate": 0.00013993645835656953,
"loss": 0.0,
"step": 34
},
{
"epoch": 0.1124949779027722,
"grad_norm": NaN,
"learning_rate": 0.00013546048870425356,
"loss": 0.0,
"step": 35
},
{
"epoch": 0.11570912012856568,
"grad_norm": NaN,
"learning_rate": 0.00013090169943749476,
"loss": 0.0,
"step": 36
},
{
"epoch": 0.11892326235435918,
"grad_norm": NaN,
"learning_rate": 0.0001262707378198587,
"loss": 0.0,
"step": 37
},
{
"epoch": 0.12213740458015267,
"grad_norm": NaN,
"learning_rate": 0.00012157841967678063,
"loss": 0.0,
"step": 38
},
{
"epoch": 0.12535154680594615,
"grad_norm": NaN,
"learning_rate": 0.00011683570413470383,
"loss": 0.0,
"step": 39
},
{
"epoch": 0.12856568903173965,
"grad_norm": NaN,
"learning_rate": 0.0001120536680255323,
"loss": 0.0,
"step": 40
},
{
"epoch": 0.13177983125753315,
"grad_norm": NaN,
"learning_rate": 0.00010724348001617625,
"loss": 0.0,
"step": 41
},
{
"epoch": 0.13499397348332665,
"grad_norm": NaN,
"learning_rate": 0.00010241637452361323,
"loss": 0.0,
"step": 42
},
{
"epoch": 0.13820811570912012,
"grad_norm": NaN,
"learning_rate": 9.75836254763868e-05,
"loss": 0.0,
"step": 43
},
{
"epoch": 0.14142225793491361,
"grad_norm": NaN,
"learning_rate": 9.275651998382377e-05,
"loss": 0.0,
"step": 44
},
{
"epoch": 0.1446364001607071,
"grad_norm": NaN,
"learning_rate": 8.79463319744677e-05,
"loss": 0.0,
"step": 45
},
{
"epoch": 0.1478505423865006,
"grad_norm": NaN,
"learning_rate": 8.316429586529615e-05,
"loss": 0.0,
"step": 46
},
{
"epoch": 0.15106468461229408,
"grad_norm": NaN,
"learning_rate": 7.84215803232194e-05,
"loss": 0.0,
"step": 47
},
{
"epoch": 0.15427882683808758,
"grad_norm": NaN,
"learning_rate": 7.372926218014131e-05,
"loss": 0.0,
"step": 48
},
{
"epoch": 0.15749296906388108,
"grad_norm": NaN,
"learning_rate": 6.909830056250527e-05,
"loss": 0.0,
"step": 49
},
{
"epoch": 0.16070711128967458,
"grad_norm": NaN,
"learning_rate": 6.453951129574644e-05,
"loss": 0.0,
"step": 50
},
{
"epoch": 0.16070711128967458,
"eval_loss": NaN,
"eval_runtime": 48.3643,
"eval_samples_per_second": 10.855,
"eval_steps_per_second": 5.438,
"step": 50
},
{
"epoch": 0.16392125351546805,
"grad_norm": NaN,
"learning_rate": 6.006354164343046e-05,
"loss": 0.0,
"step": 51
},
{
"epoch": 0.16713539574126154,
"grad_norm": NaN,
"learning_rate": 5.568084544007588e-05,
"loss": 0.0,
"step": 52
},
{
"epoch": 0.17034953796705504,
"grad_norm": NaN,
"learning_rate": 5.14016586757394e-05,
"loss": 0.0,
"step": 53
},
{
"epoch": 0.17356368019284854,
"grad_norm": NaN,
"learning_rate": 4.723597558938672e-05,
"loss": 0.0,
"step": 54
},
{
"epoch": 0.176777822418642,
"grad_norm": NaN,
"learning_rate": 4.3193525326884435e-05,
"loss": 0.0,
"step": 55
},
{
"epoch": 0.1799919646444355,
"grad_norm": NaN,
"learning_rate": 3.9283749218128885e-05,
"loss": 0.0,
"step": 56
},
{
"epoch": 0.183206106870229,
"grad_norm": NaN,
"learning_rate": 3.5515778726382966e-05,
"loss": 0.0,
"step": 57
},
{
"epoch": 0.1864202490960225,
"grad_norm": NaN,
"learning_rate": 3.1898414121320276e-05,
"loss": 0.0,
"step": 58
},
{
"epoch": 0.189634391321816,
"grad_norm": NaN,
"learning_rate": 2.84401039255879e-05,
"loss": 0.0,
"step": 59
},
{
"epoch": 0.19284853354760947,
"grad_norm": NaN,
"learning_rate": 2.514892518288988e-05,
"loss": 0.0,
"step": 60
},
{
"epoch": 0.19606267577340297,
"grad_norm": NaN,
"learning_rate": 2.2032564593677774e-05,
"loss": 0.0,
"step": 61
},
{
"epoch": 0.19927681799919647,
"grad_norm": NaN,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.0,
"step": 62
},
{
"epoch": 0.20249096022498997,
"grad_norm": NaN,
"learning_rate": 1.6352986198977325e-05,
"loss": 0.0,
"step": 63
},
{
"epoch": 0.20570510245078344,
"grad_norm": NaN,
"learning_rate": 1.3803033311995072e-05,
"loss": 0.0,
"step": 64
},
{
"epoch": 0.20891924467657694,
"grad_norm": NaN,
"learning_rate": 1.1454397434679021e-05,
"loss": 0.0,
"step": 65
},
{
"epoch": 0.21213338690237044,
"grad_norm": NaN,
"learning_rate": 9.31256391494546e-06,
"loss": 0.0,
"step": 66
},
{
"epoch": 0.21534752912816393,
"grad_norm": NaN,
"learning_rate": 7.382535104222366e-06,
"loss": 0.0,
"step": 67
},
{
"epoch": 0.2185616713539574,
"grad_norm": NaN,
"learning_rate": 5.668818674225685e-06,
"loss": 0.0,
"step": 68
},
{
"epoch": 0.2217758135797509,
"grad_norm": NaN,
"learning_rate": 4.175417089083378e-06,
"loss": 0.0,
"step": 69
},
{
"epoch": 0.2249899558055444,
"grad_norm": NaN,
"learning_rate": 2.905818257394799e-06,
"loss": 0.0,
"step": 70
},
{
"epoch": 0.2282040980313379,
"grad_norm": NaN,
"learning_rate": 1.8629873860586566e-06,
"loss": 0.0,
"step": 71
},
{
"epoch": 0.23141824025713137,
"grad_norm": NaN,
"learning_rate": 1.0493600548948878e-06,
"loss": 0.0,
"step": 72
},
{
"epoch": 0.23463238248292487,
"grad_norm": NaN,
"learning_rate": 4.668365282351372e-07,
"loss": 0.0,
"step": 73
},
{
"epoch": 0.23784652470871837,
"grad_norm": NaN,
"learning_rate": 1.1677731676733584e-07,
"loss": 0.0,
"step": 74
},
{
"epoch": 0.24106066693451186,
"grad_norm": NaN,
"learning_rate": 0.0,
"loss": 0.0,
"step": 75
},
{
"epoch": 0.24106066693451186,
"eval_loss": NaN,
"eval_runtime": 48.3572,
"eval_samples_per_second": 10.857,
"eval_steps_per_second": 5.439,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 2
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.064189343563776e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}