|
{
  "best_metric": 1.4757866859436035,
  "best_model_checkpoint": "miner_id_24/checkpoint-75",
  "epoch": 0.9327633113097551,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012436844150796735,
      "grad_norm": 4.334837913513184,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 4.2908,
      "step": 1
    },
    {
      "epoch": 0.012436844150796735,
      "eval_loss": 6.697080135345459,
      "eval_runtime": 4.4995,
      "eval_samples_per_second": 11.112,
      "eval_steps_per_second": 2.889,
      "step": 1
    },
    {
      "epoch": 0.02487368830159347,
      "grad_norm": 4.578608512878418,
      "learning_rate": 6.666666666666667e-05,
      "loss": 4.86,
      "step": 2
    },
    {
      "epoch": 0.03731053245239021,
      "grad_norm": 4.818152904510498,
      "learning_rate": 0.0001,
      "loss": 4.925,
      "step": 3
    },
    {
      "epoch": 0.04974737660318694,
      "grad_norm": 5.355129241943359,
      "learning_rate": 9.99571699711836e-05,
      "loss": 4.5105,
      "step": 4
    },
    {
      "epoch": 0.06218422075398368,
      "grad_norm": 5.519801616668701,
      "learning_rate": 9.982876141412856e-05,
      "loss": 3.597,
      "step": 5
    },
    {
      "epoch": 0.07462106490478042,
      "grad_norm": 4.105515480041504,
      "learning_rate": 9.961501876182148e-05,
      "loss": 2.9274,
      "step": 6
    },
    {
      "epoch": 0.08705790905557714,
      "grad_norm": 3.8129172325134277,
      "learning_rate": 9.931634888554937e-05,
      "loss": 2.7116,
      "step": 7
    },
    {
      "epoch": 0.09949475320637388,
      "grad_norm": 3.111717939376831,
      "learning_rate": 9.893332032039701e-05,
      "loss": 2.4759,
      "step": 8
    },
    {
      "epoch": 0.11193159735717062,
      "grad_norm": 3.8541200160980225,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.2602,
      "step": 9
    },
    {
      "epoch": 0.12436844150796736,
      "grad_norm": 3.1858394145965576,
      "learning_rate": 9.791726278367022e-05,
      "loss": 2.1403,
      "step": 10
    },
    {
      "epoch": 0.1368052856587641,
      "grad_norm": 2.9167511463165283,
      "learning_rate": 9.728616793536588e-05,
      "loss": 1.8919,
      "step": 11
    },
    {
      "epoch": 0.14924212980956084,
      "grad_norm": 6.798462390899658,
      "learning_rate": 9.657457896300791e-05,
      "loss": 1.6077,
      "step": 12
    },
    {
      "epoch": 0.16167897396035755,
      "grad_norm": 11.831518173217773,
      "learning_rate": 9.578385041664925e-05,
      "loss": 3.0053,
      "step": 13
    },
    {
      "epoch": 0.17411581811115429,
      "grad_norm": 9.798856735229492,
      "learning_rate": 9.491548749301997e-05,
      "loss": 2.8547,
      "step": 14
    },
    {
      "epoch": 0.18655266226195102,
      "grad_norm": 4.359902858734131,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.5411,
      "step": 15
    },
    {
      "epoch": 0.19898950641274776,
      "grad_norm": 2.2944040298461914,
      "learning_rate": 9.295261506157986e-05,
      "loss": 2.3035,
      "step": 16
    },
    {
      "epoch": 0.2114263505635445,
      "grad_norm": 2.36320424079895,
      "learning_rate": 9.186184199300464e-05,
      "loss": 2.1875,
      "step": 17
    },
    {
      "epoch": 0.22386319471434124,
      "grad_norm": 2.428184986114502,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.1433,
      "step": 18
    },
    {
      "epoch": 0.23630003886513798,
      "grad_norm": 2.2989890575408936,
      "learning_rate": 8.947199994035401e-05,
      "loss": 2.1427,
      "step": 19
    },
    {
      "epoch": 0.24873688301593472,
      "grad_norm": 2.1419265270233154,
      "learning_rate": 8.817748015645558e-05,
      "loss": 1.9281,
      "step": 20
    },
    {
      "epoch": 0.26117372716673143,
      "grad_norm": 1.9683345556259155,
      "learning_rate": 8.681980515339464e-05,
      "loss": 1.7075,
      "step": 21
    },
    {
      "epoch": 0.2736105713175282,
      "grad_norm": 1.7316274642944336,
      "learning_rate": 8.540155934270471e-05,
      "loss": 1.7915,
      "step": 22
    },
    {
      "epoch": 0.2860474154683249,
      "grad_norm": 1.917214035987854,
      "learning_rate": 8.392544243589427e-05,
      "loss": 1.6485,
      "step": 23
    },
    {
      "epoch": 0.2984842596191217,
      "grad_norm": 1.8824145793914795,
      "learning_rate": 8.239426430539243e-05,
      "loss": 1.3475,
      "step": 24
    },
    {
      "epoch": 0.3109211037699184,
      "grad_norm": 8.887231826782227,
      "learning_rate": 8.081093963579707e-05,
      "loss": 0.772,
      "step": 25
    },
    {
      "epoch": 0.3109211037699184,
      "eval_loss": 1.8483147621154785,
      "eval_runtime": 4.4758,
      "eval_samples_per_second": 11.171,
      "eval_steps_per_second": 2.905,
      "step": 25
    },
    {
      "epoch": 0.3233579479207151,
      "grad_norm": 3.4082753658294678,
      "learning_rate": 7.917848237560709e-05,
      "loss": 2.41,
      "step": 26
    },
    {
      "epoch": 0.33579479207151186,
      "grad_norm": 3.109001874923706,
      "learning_rate": 7.75e-05,
      "loss": 2.3198,
      "step": 27
    },
    {
      "epoch": 0.34823163622230857,
      "grad_norm": 2.304497718811035,
      "learning_rate": 7.577868759557654e-05,
      "loss": 2.0177,
      "step": 28
    },
    {
      "epoch": 0.36066848037310534,
      "grad_norm": 1.9224731922149658,
      "learning_rate": 7.401782177833148e-05,
      "loss": 2.0373,
      "step": 29
    },
    {
      "epoch": 0.37310532452390205,
      "grad_norm": 1.6497129201889038,
      "learning_rate": 7.222075445642904e-05,
      "loss": 1.807,
      "step": 30
    },
    {
      "epoch": 0.3855421686746988,
      "grad_norm": 1.6339839696884155,
      "learning_rate": 7.03909064496551e-05,
      "loss": 1.8076,
      "step": 31
    },
    {
      "epoch": 0.3979790128254955,
      "grad_norm": 1.8460805416107178,
      "learning_rate": 6.853176097769229e-05,
      "loss": 1.907,
      "step": 32
    },
    {
      "epoch": 0.4104158569762923,
      "grad_norm": 1.7650914192199707,
      "learning_rate": 6.664685702961344e-05,
      "loss": 1.8927,
      "step": 33
    },
    {
      "epoch": 0.422852701127089,
      "grad_norm": 1.8454054594039917,
      "learning_rate": 6.473978262721463e-05,
      "loss": 1.5441,
      "step": 34
    },
    {
      "epoch": 0.4352895452778857,
      "grad_norm": 1.8942859172821045,
      "learning_rate": 6.281416799501188e-05,
      "loss": 1.5689,
      "step": 35
    },
    {
      "epoch": 0.4477263894286825,
      "grad_norm": 2.334362268447876,
      "learning_rate": 6.087367864990233e-05,
      "loss": 1.2838,
      "step": 36
    },
    {
      "epoch": 0.4601632335794792,
      "grad_norm": 2.4197657108306885,
      "learning_rate": 5.8922008423644624e-05,
      "loss": 0.9639,
      "step": 37
    },
    {
      "epoch": 0.47260007773027596,
      "grad_norm": 1.9846646785736084,
      "learning_rate": 5.696287243144013e-05,
      "loss": 1.8122,
      "step": 38
    },
    {
      "epoch": 0.48503692188107267,
      "grad_norm": 1.9487015008926392,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.0742,
      "step": 39
    },
    {
      "epoch": 0.49747376603186944,
      "grad_norm": 2.0693061351776123,
      "learning_rate": 5.303712756855988e-05,
      "loss": 1.9641,
      "step": 40
    },
    {
      "epoch": 0.5099106101826661,
      "grad_norm": 1.666406273841858,
      "learning_rate": 5.107799157635538e-05,
      "loss": 1.7359,
      "step": 41
    },
    {
      "epoch": 0.5223474543334629,
      "grad_norm": 1.9289875030517578,
      "learning_rate": 4.912632135009769e-05,
      "loss": 1.887,
      "step": 42
    },
    {
      "epoch": 0.5347842984842596,
      "grad_norm": 1.6595007181167603,
      "learning_rate": 4.718583200498814e-05,
      "loss": 1.7954,
      "step": 43
    },
    {
      "epoch": 0.5472211426350564,
      "grad_norm": 1.7720122337341309,
      "learning_rate": 4.526021737278538e-05,
      "loss": 1.7259,
      "step": 44
    },
    {
      "epoch": 0.5596579867858531,
      "grad_norm": 1.9382787942886353,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 1.7288,
      "step": 45
    },
    {
      "epoch": 0.5720948309366498,
      "grad_norm": 1.5037600994110107,
      "learning_rate": 4.146823902230772e-05,
      "loss": 1.5281,
      "step": 46
    },
    {
      "epoch": 0.5845316750874465,
      "grad_norm": 1.6816551685333252,
      "learning_rate": 3.960909355034491e-05,
      "loss": 1.5025,
      "step": 47
    },
    {
      "epoch": 0.5969685192382433,
      "grad_norm": 1.8008204698562622,
      "learning_rate": 3.777924554357096e-05,
      "loss": 1.3675,
      "step": 48
    },
    {
      "epoch": 0.6094053633890401,
      "grad_norm": 1.7607805728912354,
      "learning_rate": 3.598217822166854e-05,
      "loss": 0.9439,
      "step": 49
    },
    {
      "epoch": 0.6218422075398368,
      "grad_norm": 8.726191520690918,
      "learning_rate": 3.422131240442349e-05,
      "loss": 0.7328,
      "step": 50
    },
    {
      "epoch": 0.6218422075398368,
      "eval_loss": 1.5857478380203247,
      "eval_runtime": 4.4715,
      "eval_samples_per_second": 11.182,
      "eval_steps_per_second": 2.907,
      "step": 50
    },
    {
      "epoch": 0.6342790516906335,
      "grad_norm": 1.9586689472198486,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.9342,
      "step": 51
    },
    {
      "epoch": 0.6467158958414302,
      "grad_norm": 1.7377395629882812,
      "learning_rate": 3.082151762439293e-05,
      "loss": 1.9846,
      "step": 52
    },
    {
      "epoch": 0.659152739992227,
      "grad_norm": 1.4514222145080566,
      "learning_rate": 2.9189060364202943e-05,
      "loss": 1.6922,
      "step": 53
    },
    {
      "epoch": 0.6715895841430237,
      "grad_norm": 1.5051312446594238,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.872,
      "step": 54
    },
    {
      "epoch": 0.6840264282938204,
      "grad_norm": 1.3607957363128662,
      "learning_rate": 2.6074557564105727e-05,
      "loss": 1.563,
      "step": 55
    },
    {
      "epoch": 0.6964632724446171,
      "grad_norm": 1.4457933902740479,
      "learning_rate": 2.459844065729529e-05,
      "loss": 1.5738,
      "step": 56
    },
    {
      "epoch": 0.708900116595414,
      "grad_norm": 1.5340603590011597,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 1.4182,
      "step": 57
    },
    {
      "epoch": 0.7213369607462107,
      "grad_norm": 1.8126493692398071,
      "learning_rate": 2.1822519843544424e-05,
      "loss": 1.4997,
      "step": 58
    },
    {
      "epoch": 0.7337738048970074,
      "grad_norm": 1.8174535036087036,
      "learning_rate": 2.0528000059645997e-05,
      "loss": 1.4925,
      "step": 59
    },
    {
      "epoch": 0.7462106490478041,
      "grad_norm": 1.6950277090072632,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 1.3651,
      "step": 60
    },
    {
      "epoch": 0.7586474931986008,
      "grad_norm": 1.7184079885482788,
      "learning_rate": 1.8138158006995364e-05,
      "loss": 1.258,
      "step": 61
    },
    {
      "epoch": 0.7710843373493976,
      "grad_norm": 2.286306142807007,
      "learning_rate": 1.7047384938420154e-05,
      "loss": 0.9343,
      "step": 62
    },
    {
      "epoch": 0.7835211815001943,
      "grad_norm": 1.4997891187667847,
      "learning_rate": 1.602885682970026e-05,
      "loss": 1.5661,
      "step": 63
    },
    {
      "epoch": 0.795958025650991,
      "grad_norm": 1.3938400745391846,
      "learning_rate": 1.5084512506980026e-05,
      "loss": 1.7878,
      "step": 64
    },
    {
      "epoch": 0.8083948698017878,
      "grad_norm": 1.4616719484329224,
      "learning_rate": 1.4216149583350754e-05,
      "loss": 1.7966,
      "step": 65
    },
    {
      "epoch": 0.8208317139525846,
      "grad_norm": 1.3521653413772583,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 1.7621,
      "step": 66
    },
    {
      "epoch": 0.8332685581033813,
      "grad_norm": 1.436239242553711,
      "learning_rate": 1.2713832064634126e-05,
      "loss": 1.4504,
      "step": 67
    },
    {
      "epoch": 0.845705402254178,
      "grad_norm": 1.5169296264648438,
      "learning_rate": 1.2082737216329794e-05,
      "loss": 1.5677,
      "step": 68
    },
    {
      "epoch": 0.8581422464049747,
      "grad_norm": 1.4382579326629639,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 1.5528,
      "step": 69
    },
    {
      "epoch": 0.8705790905557714,
      "grad_norm": 1.535103440284729,
      "learning_rate": 1.1066679679603e-05,
      "loss": 1.5379,
      "step": 70
    },
    {
      "epoch": 0.8830159347065683,
      "grad_norm": 1.5666173696517944,
      "learning_rate": 1.0683651114450641e-05,
      "loss": 1.4208,
      "step": 71
    },
    {
      "epoch": 0.895452778857365,
      "grad_norm": 1.581671953201294,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.3917,
      "step": 72
    },
    {
      "epoch": 0.9078896230081617,
      "grad_norm": 1.4792799949645996,
      "learning_rate": 1.017123858587145e-05,
      "loss": 1.1024,
      "step": 73
    },
    {
      "epoch": 0.9203264671589584,
      "grad_norm": 1.4445815086364746,
      "learning_rate": 1.00428300288164e-05,
      "loss": 0.9231,
      "step": 74
    },
    {
      "epoch": 0.9327633113097551,
      "grad_norm": 1.419348120689392,
      "learning_rate": 1e-05,
      "loss": 0.3999,
      "step": 75
    },
    {
      "epoch": 0.9327633113097551,
      "eval_loss": 1.4757866859436035,
      "eval_runtime": 4.3684,
      "eval_samples_per_second": 11.446,
      "eval_steps_per_second": 2.976,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.952125425975296e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
|
|