{
  "best_metric": 1.0394631624221802,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.03244646333549643,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0006489292667099286,
      "grad_norm": 2.683505058288574,
      "learning_rate": 2e-05,
      "loss": 3.5951,
      "step": 1
    },
    {
      "epoch": 0.0006489292667099286,
      "eval_loss": 1.4502065181732178,
      "eval_runtime": 148.1881,
      "eval_samples_per_second": 17.518,
      "eval_steps_per_second": 2.193,
      "step": 1
    },
    {
      "epoch": 0.0012978585334198572,
      "grad_norm": 3.0936529636383057,
      "learning_rate": 4e-05,
      "loss": 3.7839,
      "step": 2
    },
    {
      "epoch": 0.001946787800129786,
      "grad_norm": 2.257113456726074,
      "learning_rate": 6e-05,
      "loss": 3.907,
      "step": 3
    },
    {
      "epoch": 0.0025957170668397143,
      "grad_norm": 2.010742425918579,
      "learning_rate": 8e-05,
      "loss": 3.6334,
      "step": 4
    },
    {
      "epoch": 0.003244646333549643,
      "grad_norm": 3.200777769088745,
      "learning_rate": 0.0001,
      "loss": 3.932,
      "step": 5
    },
    {
      "epoch": 0.003893575600259572,
      "grad_norm": 2.122283935546875,
      "learning_rate": 9.987820251299122e-05,
      "loss": 3.7128,
      "step": 6
    },
    {
      "epoch": 0.004542504866969501,
      "grad_norm": 1.8623450994491577,
      "learning_rate": 9.951340343707852e-05,
      "loss": 3.7839,
      "step": 7
    },
    {
      "epoch": 0.005191434133679429,
      "grad_norm": 2.3359696865081787,
      "learning_rate": 9.890738003669029e-05,
      "loss": 3.7878,
      "step": 8
    },
    {
      "epoch": 0.005840363400389357,
      "grad_norm": 1.9105135202407837,
      "learning_rate": 9.806308479691595e-05,
      "loss": 3.8119,
      "step": 9
    },
    {
      "epoch": 0.006489292667099286,
      "grad_norm": 1.7457245588302612,
      "learning_rate": 9.698463103929542e-05,
      "loss": 3.7471,
      "step": 10
    },
    {
      "epoch": 0.007138221933809215,
      "grad_norm": 2.1727206707000732,
      "learning_rate": 9.567727288213005e-05,
      "loss": 3.9428,
      "step": 11
    },
    {
      "epoch": 0.007787151200519144,
      "grad_norm": 1.9664866924285889,
      "learning_rate": 9.414737964294636e-05,
      "loss": 3.9362,
      "step": 12
    },
    {
      "epoch": 0.008436080467229072,
      "grad_norm": 2.474402904510498,
      "learning_rate": 9.24024048078213e-05,
      "loss": 3.9396,
      "step": 13
    },
    {
      "epoch": 0.009085009733939001,
      "grad_norm": 3.0328118801116943,
      "learning_rate": 9.045084971874738e-05,
      "loss": 3.6997,
      "step": 14
    },
    {
      "epoch": 0.00973393900064893,
      "grad_norm": 2.00704026222229,
      "learning_rate": 8.83022221559489e-05,
      "loss": 3.9555,
      "step": 15
    },
    {
      "epoch": 0.010382868267358857,
      "grad_norm": 1.9274024963378906,
      "learning_rate": 8.596699001693255e-05,
      "loss": 3.6365,
      "step": 16
    },
    {
      "epoch": 0.011031797534068787,
      "grad_norm": 2.011793851852417,
      "learning_rate": 8.345653031794292e-05,
      "loss": 4.1249,
      "step": 17
    },
    {
      "epoch": 0.011680726800778715,
      "grad_norm": 2.1976027488708496,
      "learning_rate": 8.07830737662829e-05,
      "loss": 4.2458,
      "step": 18
    },
    {
      "epoch": 0.012329656067488644,
      "grad_norm": 1.9308854341506958,
      "learning_rate": 7.795964517353735e-05,
      "loss": 4.0175,
      "step": 19
    },
    {
      "epoch": 0.012978585334198572,
      "grad_norm": 2.092264175415039,
      "learning_rate": 7.500000000000001e-05,
      "loss": 4.0406,
      "step": 20
    },
    {
      "epoch": 0.0136275146009085,
      "grad_norm": 2.3463144302368164,
      "learning_rate": 7.191855733945387e-05,
      "loss": 4.1564,
      "step": 21
    },
    {
      "epoch": 0.01427644386761843,
      "grad_norm": 2.3224618434906006,
      "learning_rate": 6.873032967079561e-05,
      "loss": 3.9462,
      "step": 22
    },
    {
      "epoch": 0.014925373134328358,
      "grad_norm": 2.443256139755249,
      "learning_rate": 6.545084971874738e-05,
      "loss": 3.692,
      "step": 23
    },
    {
      "epoch": 0.015574302401038288,
      "grad_norm": 3.4629366397857666,
      "learning_rate": 6.209609477998338e-05,
      "loss": 4.3253,
      "step": 24
    },
    {
      "epoch": 0.016223231667748216,
      "grad_norm": 2.778249740600586,
      "learning_rate": 5.868240888334653e-05,
      "loss": 4.1526,
      "step": 25
    },
    {
      "epoch": 0.016223231667748216,
      "eval_loss": 1.1567610502243042,
      "eval_runtime": 148.826,
      "eval_samples_per_second": 17.443,
      "eval_steps_per_second": 2.184,
      "step": 25
    },
    {
      "epoch": 0.016872160934458143,
      "grad_norm": 423.1258239746094,
      "learning_rate": 5.522642316338268e-05,
      "loss": 4.3783,
      "step": 26
    },
    {
      "epoch": 0.01752109020116807,
      "grad_norm": 19.292482376098633,
      "learning_rate": 5.174497483512506e-05,
      "loss": 3.7328,
      "step": 27
    },
    {
      "epoch": 0.018170019467878003,
      "grad_norm": 3.9839913845062256,
      "learning_rate": 4.825502516487497e-05,
      "loss": 3.9131,
      "step": 28
    },
    {
      "epoch": 0.01881894873458793,
      "grad_norm": 5.250899314880371,
      "learning_rate": 4.477357683661734e-05,
      "loss": 3.8158,
      "step": 29
    },
    {
      "epoch": 0.01946787800129786,
      "grad_norm": 3.4630203247070312,
      "learning_rate": 4.131759111665349e-05,
      "loss": 4.1871,
      "step": 30
    },
    {
      "epoch": 0.020116807268007787,
      "grad_norm": 4.317551612854004,
      "learning_rate": 3.790390522001662e-05,
      "loss": 4.7563,
      "step": 31
    },
    {
      "epoch": 0.020765736534717714,
      "grad_norm": 4.866771221160889,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 4.3659,
      "step": 32
    },
    {
      "epoch": 0.021414665801427646,
      "grad_norm": 5.2690839767456055,
      "learning_rate": 3.12696703292044e-05,
      "loss": 4.1365,
      "step": 33
    },
    {
      "epoch": 0.022063595068137574,
      "grad_norm": 5.08319091796875,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 4.5015,
      "step": 34
    },
    {
      "epoch": 0.0227125243348475,
      "grad_norm": 4.986558437347412,
      "learning_rate": 2.500000000000001e-05,
      "loss": 4.2522,
      "step": 35
    },
    {
      "epoch": 0.02336145360155743,
      "grad_norm": 5.291998386383057,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 3.7079,
      "step": 36
    },
    {
      "epoch": 0.024010382868267358,
      "grad_norm": 7.075346946716309,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 4.3515,
      "step": 37
    },
    {
      "epoch": 0.02465931213497729,
      "grad_norm": 6.59442663192749,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 4.4537,
      "step": 38
    },
    {
      "epoch": 0.025308241401687217,
      "grad_norm": 7.289731502532959,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 4.6425,
      "step": 39
    },
    {
      "epoch": 0.025957170668397145,
      "grad_norm": 8.14652156829834,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 4.7023,
      "step": 40
    },
    {
      "epoch": 0.026606099935107073,
      "grad_norm": 8.087379455566406,
      "learning_rate": 9.549150281252633e-06,
      "loss": 4.0411,
      "step": 41
    },
    {
      "epoch": 0.027255029201817,
      "grad_norm": 8.625016212463379,
      "learning_rate": 7.597595192178702e-06,
      "loss": 4.4971,
      "step": 42
    },
    {
      "epoch": 0.027903958468526932,
      "grad_norm": 8.698261260986328,
      "learning_rate": 5.852620357053651e-06,
      "loss": 4.0275,
      "step": 43
    },
    {
      "epoch": 0.02855288773523686,
      "grad_norm": 9.690203666687012,
      "learning_rate": 4.322727117869951e-06,
      "loss": 4.6694,
      "step": 44
    },
    {
      "epoch": 0.029201817001946788,
      "grad_norm": 10.64246940612793,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 4.3358,
      "step": 45
    },
    {
      "epoch": 0.029850746268656716,
      "grad_norm": 9.350142478942871,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 3.6146,
      "step": 46
    },
    {
      "epoch": 0.030499675535366644,
      "grad_norm": 9.56218433380127,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 3.4644,
      "step": 47
    },
    {
      "epoch": 0.031148604802076575,
      "grad_norm": 16.128480911254883,
      "learning_rate": 4.865965629214819e-07,
      "loss": 5.3455,
      "step": 48
    },
    {
      "epoch": 0.0317975340687865,
      "grad_norm": 15.799928665161133,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 4.5824,
      "step": 49
    },
    {
      "epoch": 0.03244646333549643,
      "grad_norm": 13.623414993286133,
      "learning_rate": 0.0,
      "loss": 3.5276,
      "step": 50
    },
    {
      "epoch": 0.03244646333549643,
      "eval_loss": 1.0394631624221802,
      "eval_runtime": 148.7645,
      "eval_samples_per_second": 17.45,
      "eval_steps_per_second": 2.185,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.09238892150784e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}