{
  "best_metric": 1.7020063400268555,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.56657223796034,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0113314447592068,
      "grad_norm": 0.33666571974754333,
      "learning_rate": 2e-05,
      "loss": 1.7268,
      "step": 1
    },
    {
      "epoch": 0.0113314447592068,
      "eval_loss": 1.8638567924499512,
      "eval_runtime": 1.406,
      "eval_samples_per_second": 105.975,
      "eval_steps_per_second": 13.514,
      "step": 1
    },
    {
      "epoch": 0.0226628895184136,
      "grad_norm": 0.34872931241989136,
      "learning_rate": 4e-05,
      "loss": 1.7041,
      "step": 2
    },
    {
      "epoch": 0.0339943342776204,
      "grad_norm": 0.32515448331832886,
      "learning_rate": 6e-05,
      "loss": 1.6899,
      "step": 3
    },
    {
      "epoch": 0.0453257790368272,
      "grad_norm": 0.39392638206481934,
      "learning_rate": 8e-05,
      "loss": 1.771,
      "step": 4
    },
    {
      "epoch": 0.056657223796033995,
      "grad_norm": 0.32078537344932556,
      "learning_rate": 0.0001,
      "loss": 1.7116,
      "step": 5
    },
    {
      "epoch": 0.0679886685552408,
      "grad_norm": 0.3297460675239563,
      "learning_rate": 9.987820251299122e-05,
      "loss": 1.7778,
      "step": 6
    },
    {
      "epoch": 0.07932011331444759,
      "grad_norm": 0.2444680780172348,
      "learning_rate": 9.951340343707852e-05,
      "loss": 1.6437,
      "step": 7
    },
    {
      "epoch": 0.0906515580736544,
      "grad_norm": 0.25901690125465393,
      "learning_rate": 9.890738003669029e-05,
      "loss": 1.7814,
      "step": 8
    },
    {
      "epoch": 0.10198300283286119,
      "grad_norm": 0.2574293911457062,
      "learning_rate": 9.806308479691595e-05,
      "loss": 1.76,
      "step": 9
    },
    {
      "epoch": 0.11331444759206799,
      "grad_norm": 0.22195817530155182,
      "learning_rate": 9.698463103929542e-05,
      "loss": 1.7619,
      "step": 10
    },
    {
      "epoch": 0.12464589235127478,
      "grad_norm": 0.25635820627212524,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.7202,
      "step": 11
    },
    {
      "epoch": 0.1359773371104816,
      "grad_norm": 0.25191575288772583,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.7746,
      "step": 12
    },
    {
      "epoch": 0.14730878186968838,
      "grad_norm": 0.2583252489566803,
      "learning_rate": 9.24024048078213e-05,
      "loss": 1.7015,
      "step": 13
    },
    {
      "epoch": 0.15864022662889518,
      "grad_norm": 0.2822646498680115,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.7986,
      "step": 14
    },
    {
      "epoch": 0.16997167138810199,
      "grad_norm": 0.27486222982406616,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.7921,
      "step": 15
    },
    {
      "epoch": 0.1813031161473088,
      "grad_norm": 0.2897486090660095,
      "learning_rate": 8.596699001693255e-05,
      "loss": 1.68,
      "step": 16
    },
    {
      "epoch": 0.19263456090651557,
      "grad_norm": 0.2819582223892212,
      "learning_rate": 8.345653031794292e-05,
      "loss": 1.9592,
      "step": 17
    },
    {
      "epoch": 0.20396600566572237,
      "grad_norm": 0.3186541199684143,
      "learning_rate": 8.07830737662829e-05,
      "loss": 1.8444,
      "step": 18
    },
    {
      "epoch": 0.21529745042492918,
      "grad_norm": 0.32152897119522095,
      "learning_rate": 7.795964517353735e-05,
      "loss": 1.9009,
      "step": 19
    },
    {
      "epoch": 0.22662889518413598,
      "grad_norm": 0.33151161670684814,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.8928,
      "step": 20
    },
    {
      "epoch": 0.23796033994334279,
      "grad_norm": 0.33403679728507996,
      "learning_rate": 7.191855733945387e-05,
      "loss": 1.9462,
      "step": 21
    },
    {
      "epoch": 0.24929178470254956,
      "grad_norm": 0.37862664461135864,
      "learning_rate": 6.873032967079561e-05,
      "loss": 1.8694,
      "step": 22
    },
    {
      "epoch": 0.26062322946175637,
      "grad_norm": 0.20253029465675354,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.6101,
      "step": 23
    },
    {
      "epoch": 0.2719546742209632,
      "grad_norm": 0.18948052823543549,
      "learning_rate": 6.209609477998338e-05,
      "loss": 1.6217,
      "step": 24
    },
    {
      "epoch": 0.28328611898017,
      "grad_norm": 0.19364707171916962,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.5635,
      "step": 25
    },
    {
      "epoch": 0.28328611898017,
      "eval_loss": 1.7303435802459717,
      "eval_runtime": 1.3795,
      "eval_samples_per_second": 108.007,
      "eval_steps_per_second": 13.773,
      "step": 25
    },
    {
      "epoch": 0.29461756373937675,
      "grad_norm": 0.2151980847120285,
      "learning_rate": 5.522642316338268e-05,
      "loss": 1.7348,
      "step": 26
    },
    {
      "epoch": 0.3059490084985836,
      "grad_norm": 0.20931684970855713,
      "learning_rate": 5.174497483512506e-05,
      "loss": 1.5753,
      "step": 27
    },
    {
      "epoch": 0.31728045325779036,
      "grad_norm": 0.19959765672683716,
      "learning_rate": 4.825502516487497e-05,
      "loss": 1.5703,
      "step": 28
    },
    {
      "epoch": 0.3286118980169972,
      "grad_norm": 0.20375734567642212,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.5869,
      "step": 29
    },
    {
      "epoch": 0.33994334277620397,
      "grad_norm": 0.2205701470375061,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.6535,
      "step": 30
    },
    {
      "epoch": 0.35127478753541075,
      "grad_norm": 0.24109084904193878,
      "learning_rate": 3.790390522001662e-05,
      "loss": 1.7494,
      "step": 31
    },
    {
      "epoch": 0.3626062322946176,
      "grad_norm": 0.1876843124628067,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 1.651,
      "step": 32
    },
    {
      "epoch": 0.37393767705382436,
      "grad_norm": 0.20250895619392395,
      "learning_rate": 3.12696703292044e-05,
      "loss": 1.6442,
      "step": 33
    },
    {
      "epoch": 0.38526912181303113,
      "grad_norm": 0.20786873996257782,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 1.6759,
      "step": 34
    },
    {
      "epoch": 0.39660056657223797,
      "grad_norm": 0.20187856256961823,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.7009,
      "step": 35
    },
    {
      "epoch": 0.40793201133144474,
      "grad_norm": 0.23279821872711182,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 1.649,
      "step": 36
    },
    {
      "epoch": 0.4192634560906516,
      "grad_norm": 0.2545683681964874,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 1.6021,
      "step": 37
    },
    {
      "epoch": 0.43059490084985835,
      "grad_norm": 0.2432079017162323,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 1.771,
      "step": 38
    },
    {
      "epoch": 0.44192634560906513,
      "grad_norm": 0.2748010754585266,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 1.8674,
      "step": 39
    },
    {
      "epoch": 0.45325779036827196,
      "grad_norm": 0.2268821895122528,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.8442,
      "step": 40
    },
    {
      "epoch": 0.46458923512747874,
      "grad_norm": 0.28976666927337646,
      "learning_rate": 9.549150281252633e-06,
      "loss": 1.7986,
      "step": 41
    },
    {
      "epoch": 0.47592067988668557,
      "grad_norm": 0.2901630401611328,
      "learning_rate": 7.597595192178702e-06,
      "loss": 1.7726,
      "step": 42
    },
    {
      "epoch": 0.48725212464589235,
      "grad_norm": 0.34646856784820557,
      "learning_rate": 5.852620357053651e-06,
      "loss": 1.7965,
      "step": 43
    },
    {
      "epoch": 0.4985835694050991,
      "grad_norm": 0.372428297996521,
      "learning_rate": 4.322727117869951e-06,
      "loss": 1.7613,
      "step": 44
    },
    {
      "epoch": 0.509915014164306,
      "grad_norm": 0.17116321623325348,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 1.5638,
      "step": 45
    },
    {
      "epoch": 0.5212464589235127,
      "grad_norm": 0.1613224297761917,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 1.4856,
      "step": 46
    },
    {
      "epoch": 0.5325779036827195,
      "grad_norm": 0.1517619788646698,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 1.661,
      "step": 47
    },
    {
      "epoch": 0.5439093484419264,
      "grad_norm": 0.19363200664520264,
      "learning_rate": 4.865965629214819e-07,
      "loss": 1.6942,
      "step": 48
    },
    {
      "epoch": 0.5552407932011332,
      "grad_norm": 0.18895885348320007,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 1.5129,
      "step": 49
    },
    {
      "epoch": 0.56657223796034,
      "grad_norm": 0.20351547002792358,
      "learning_rate": 0.0,
      "loss": 1.5714,
      "step": 50
    },
    {
      "epoch": 0.56657223796034,
      "eval_loss": 1.7020063400268555,
      "eval_runtime": 1.3844,
      "eval_samples_per_second": 107.631,
      "eval_steps_per_second": 13.725,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1236088022630400.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}