{
  "best_metric": 10.363144874572754,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0569105691056913,
  "eval_steps": 25,
  "global_step": 47,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06504065040650407,
      "grad_norm": 0.20289339125156403,
      "learning_rate": 5e-05,
      "loss": 10.3736,
      "step": 1
    },
    {
      "epoch": 0.06504065040650407,
      "eval_loss": 10.376363754272461,
      "eval_runtime": 0.1413,
      "eval_samples_per_second": 736.244,
      "eval_steps_per_second": 92.03,
      "step": 1
    },
    {
      "epoch": 0.13008130081300814,
      "grad_norm": 0.1323336660861969,
      "learning_rate": 0.0001,
      "loss": 10.3719,
      "step": 2
    },
    {
      "epoch": 0.1951219512195122,
      "grad_norm": 0.17840038239955902,
      "learning_rate": 9.987820251299122e-05,
      "loss": 10.3803,
      "step": 3
    },
    {
      "epoch": 0.2601626016260163,
      "grad_norm": 0.17275120317935944,
      "learning_rate": 9.951340343707852e-05,
      "loss": 10.3763,
      "step": 4
    },
    {
      "epoch": 0.3252032520325203,
      "grad_norm": 0.1751108169555664,
      "learning_rate": 9.890738003669029e-05,
      "loss": 10.3712,
      "step": 5
    },
    {
      "epoch": 0.3902439024390244,
      "grad_norm": 0.13938479125499725,
      "learning_rate": 9.806308479691595e-05,
      "loss": 10.373,
      "step": 6
    },
    {
      "epoch": 0.45528455284552843,
      "grad_norm": 0.17337481677532196,
      "learning_rate": 9.698463103929542e-05,
      "loss": 10.3757,
      "step": 7
    },
    {
      "epoch": 0.5203252032520326,
      "grad_norm": 0.22474798560142517,
      "learning_rate": 9.567727288213005e-05,
      "loss": 10.3718,
      "step": 8
    },
    {
      "epoch": 0.5853658536585366,
      "grad_norm": 0.13538026809692383,
      "learning_rate": 9.414737964294636e-05,
      "loss": 10.3692,
      "step": 9
    },
    {
      "epoch": 0.6504065040650406,
      "grad_norm": 0.17122657597064972,
      "learning_rate": 9.24024048078213e-05,
      "loss": 10.3734,
      "step": 10
    },
    {
      "epoch": 0.7154471544715447,
      "grad_norm": 0.17080749571323395,
      "learning_rate": 9.045084971874738e-05,
      "loss": 10.3726,
      "step": 11
    },
    {
      "epoch": 0.7804878048780488,
      "grad_norm": 0.22811958193778992,
      "learning_rate": 8.83022221559489e-05,
      "loss": 10.3704,
      "step": 12
    },
    {
      "epoch": 0.8455284552845529,
      "grad_norm": 0.14899323880672455,
      "learning_rate": 8.596699001693255e-05,
      "loss": 10.367,
      "step": 13
    },
    {
      "epoch": 0.9105691056910569,
      "grad_norm": 0.2465336173772812,
      "learning_rate": 8.345653031794292e-05,
      "loss": 10.3744,
      "step": 14
    },
    {
      "epoch": 0.975609756097561,
      "grad_norm": 0.17647525668144226,
      "learning_rate": 8.07830737662829e-05,
      "loss": 10.3707,
      "step": 15
    },
    {
      "epoch": 1.040650406504065,
      "grad_norm": 0.3739725649356842,
      "learning_rate": 7.795964517353735e-05,
      "loss": 18.1444,
      "step": 16
    },
    {
      "epoch": 1.1056910569105691,
      "grad_norm": 0.14059346914291382,
      "learning_rate": 7.500000000000001e-05,
      "loss": 10.2548,
      "step": 17
    },
    {
      "epoch": 1.170731707317073,
      "grad_norm": 0.25436219573020935,
      "learning_rate": 7.191855733945387e-05,
      "loss": 9.4334,
      "step": 18
    },
    {
      "epoch": 1.2357723577235773,
      "grad_norm": 0.09833599627017975,
      "learning_rate": 6.873032967079561e-05,
      "loss": 5.198,
      "step": 19
    },
    {
      "epoch": 1.3008130081300813,
      "grad_norm": 0.3984502851963043,
      "learning_rate": 6.545084971874738e-05,
      "loss": 16.4165,
      "step": 20
    },
    {
      "epoch": 1.3658536585365852,
      "grad_norm": 0.183476984500885,
      "learning_rate": 6.209609477998338e-05,
      "loss": 10.9875,
      "step": 21
    },
    {
      "epoch": 1.4308943089430894,
      "grad_norm": 0.21129857003688812,
      "learning_rate": 5.868240888334653e-05,
      "loss": 9.0419,
      "step": 22
    },
    {
      "epoch": 1.4959349593495934,
      "grad_norm": 0.11231347918510437,
      "learning_rate": 5.522642316338268e-05,
      "loss": 7.1111,
      "step": 23
    },
    {
      "epoch": 1.5609756097560976,
      "grad_norm": 0.3311237394809723,
      "learning_rate": 5.174497483512506e-05,
      "loss": 13.8825,
      "step": 24
    },
    {
      "epoch": 1.6260162601626016,
      "grad_norm": 0.19978109002113342,
      "learning_rate": 4.825502516487497e-05,
      "loss": 10.5704,
      "step": 25
    },
    {
      "epoch": 1.6260162601626016,
      "eval_loss": 10.363144874572754,
      "eval_runtime": 0.1376,
      "eval_samples_per_second": 755.6,
      "eval_steps_per_second": 94.45,
      "step": 25
    },
    {
      "epoch": 1.6910569105691056,
      "grad_norm": 0.20653556287288666,
      "learning_rate": 4.477357683661734e-05,
      "loss": 10.0741,
      "step": 26
    },
    {
      "epoch": 1.7560975609756098,
      "grad_norm": 0.2042083591222763,
      "learning_rate": 4.131759111665349e-05,
      "loss": 9.9069,
      "step": 27
    },
    {
      "epoch": 1.821138211382114,
      "grad_norm": 0.17668259143829346,
      "learning_rate": 3.790390522001662e-05,
      "loss": 11.2898,
      "step": 28
    },
    {
      "epoch": 1.886178861788618,
      "grad_norm": 0.30113402009010315,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 9.5075,
      "step": 29
    },
    {
      "epoch": 1.951219512195122,
      "grad_norm": 0.2606578469276428,
      "learning_rate": 3.12696703292044e-05,
      "loss": 11.0706,
      "step": 30
    },
    {
      "epoch": 2.016260162601626,
      "grad_norm": 0.29925504326820374,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 17.1891,
      "step": 31
    },
    {
      "epoch": 2.08130081300813,
      "grad_norm": 0.18933387100696564,
      "learning_rate": 2.500000000000001e-05,
      "loss": 10.4637,
      "step": 32
    },
    {
      "epoch": 2.1463414634146343,
      "grad_norm": 0.20285484194755554,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 9.0417,
      "step": 33
    },
    {
      "epoch": 2.2113821138211383,
      "grad_norm": 0.18055562674999237,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 8.3783,
      "step": 34
    },
    {
      "epoch": 2.2764227642276422,
      "grad_norm": 0.3930016756057739,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 13.4143,
      "step": 35
    },
    {
      "epoch": 2.341463414634146,
      "grad_norm": 0.18804241716861725,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 11.0764,
      "step": 36
    },
    {
      "epoch": 2.40650406504065,
      "grad_norm": 0.20714622735977173,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 8.692,
      "step": 37
    },
    {
      "epoch": 2.4715447154471546,
      "grad_norm": 0.16578172147274017,
      "learning_rate": 9.549150281252633e-06,
      "loss": 7.9278,
      "step": 38
    },
    {
      "epoch": 2.5365853658536586,
      "grad_norm": 0.38254594802856445,
      "learning_rate": 7.597595192178702e-06,
      "loss": 13.5877,
      "step": 39
    },
    {
      "epoch": 2.6016260162601625,
      "grad_norm": 0.19641678035259247,
      "learning_rate": 5.852620357053651e-06,
      "loss": 11.453,
      "step": 40
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.2300872653722763,
      "learning_rate": 4.322727117869951e-06,
      "loss": 8.5011,
      "step": 41
    },
    {
      "epoch": 2.7317073170731705,
      "grad_norm": 0.1619451344013214,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 7.7501,
      "step": 42
    },
    {
      "epoch": 2.796747967479675,
      "grad_norm": 0.3057861328125,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 13.4289,
      "step": 43
    },
    {
      "epoch": 2.861788617886179,
      "grad_norm": 0.19348877668380737,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 10.0297,
      "step": 44
    },
    {
      "epoch": 2.926829268292683,
      "grad_norm": 0.27544501423835754,
      "learning_rate": 4.865965629214819e-07,
      "loss": 10.6125,
      "step": 45
    },
    {
      "epoch": 2.991869918699187,
      "grad_norm": 0.38650020956993103,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 15.4729,
      "step": 46
    },
    {
      "epoch": 3.0569105691056913,
      "grad_norm": 0.31395643949508667,
      "learning_rate": 0.0,
      "loss": 11.6441,
      "step": 47
    }
  ],
  "logging_steps": 1,
  "max_steps": 47,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 47285435891712.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|