{
  "best_metric": 0.8514222502708435,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.7634408602150538,
  "eval_steps": 25,
  "global_step": 71,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010752688172043012,
      "grad_norm": 2.4238014221191406,
      "learning_rate": 5e-05,
      "loss": 1.4918,
      "step": 1
    },
    {
      "epoch": 0.010752688172043012,
      "eval_loss": 1.3816931247711182,
      "eval_runtime": 2.4913,
      "eval_samples_per_second": 20.07,
      "eval_steps_per_second": 3.613,
      "step": 1
    },
    {
      "epoch": 0.021505376344086023,
      "grad_norm": 2.2297356128692627,
      "learning_rate": 0.0001,
      "loss": 1.2278,
      "step": 2
    },
    {
      "epoch": 0.03225806451612903,
      "grad_norm": 2.2073001861572266,
      "learning_rate": 9.995336531410274e-05,
      "loss": 1.3697,
      "step": 3
    },
    {
      "epoch": 0.043010752688172046,
      "grad_norm": 1.6624882221221924,
      "learning_rate": 9.981355791391891e-05,
      "loss": 1.1219,
      "step": 4
    },
    {
      "epoch": 0.053763440860215055,
      "grad_norm": 1.6604645252227783,
      "learning_rate": 9.958086757163489e-05,
      "loss": 1.1475,
      "step": 5
    },
    {
      "epoch": 0.06451612903225806,
      "grad_norm": 1.6998720169067383,
      "learning_rate": 9.92557765735184e-05,
      "loss": 1.0778,
      "step": 6
    },
    {
      "epoch": 0.07526881720430108,
      "grad_norm": 1.714016318321228,
      "learning_rate": 9.883895872030657e-05,
      "loss": 1.0594,
      "step": 7
    },
    {
      "epoch": 0.08602150537634409,
      "grad_norm": 1.6773078441619873,
      "learning_rate": 9.833127793065098e-05,
      "loss": 1.0144,
      "step": 8
    },
    {
      "epoch": 0.0967741935483871,
      "grad_norm": 1.657386064529419,
      "learning_rate": 9.773378645051438e-05,
      "loss": 1.0591,
      "step": 9
    },
    {
      "epoch": 0.10752688172043011,
      "grad_norm": 1.5666390657424927,
      "learning_rate": 9.70477226722302e-05,
      "loss": 1.046,
      "step": 10
    },
    {
      "epoch": 0.11827956989247312,
      "grad_norm": 1.3943883180618286,
      "learning_rate": 9.627450856774539e-05,
      "loss": 0.9919,
      "step": 11
    },
    {
      "epoch": 0.12903225806451613,
      "grad_norm": 1.389440655708313,
      "learning_rate": 9.541574674136633e-05,
      "loss": 1.0354,
      "step": 12
    },
    {
      "epoch": 0.13978494623655913,
      "grad_norm": 1.302047848701477,
      "learning_rate": 9.447321710811675e-05,
      "loss": 0.9882,
      "step": 13
    },
    {
      "epoch": 0.15053763440860216,
      "grad_norm": 1.1911146640777588,
      "learning_rate": 9.3448873204592e-05,
      "loss": 0.8856,
      "step": 14
    },
    {
      "epoch": 0.16129032258064516,
      "grad_norm": 1.1654369831085205,
      "learning_rate": 9.234483813995613e-05,
      "loss": 0.9539,
      "step": 15
    },
    {
      "epoch": 0.17204301075268819,
      "grad_norm": 1.4484726190567017,
      "learning_rate": 9.116340019547403e-05,
      "loss": 1.0391,
      "step": 16
    },
    {
      "epoch": 0.1827956989247312,
      "grad_norm": 1.2450820207595825,
      "learning_rate": 8.990700808169889e-05,
      "loss": 1.0139,
      "step": 17
    },
    {
      "epoch": 0.1935483870967742,
      "grad_norm": 1.2063860893249512,
      "learning_rate": 8.857826586314586e-05,
      "loss": 1.0437,
      "step": 18
    },
    {
      "epoch": 0.20430107526881722,
      "grad_norm": 1.1757769584655762,
      "learning_rate": 8.717992756097048e-05,
      "loss": 0.9011,
      "step": 19
    },
    {
      "epoch": 0.21505376344086022,
      "grad_norm": 1.050092339515686,
      "learning_rate": 8.571489144483944e-05,
      "loss": 0.8287,
      "step": 20
    },
    {
      "epoch": 0.22580645161290322,
      "grad_norm": 1.1617540121078491,
      "learning_rate": 8.418619402582402e-05,
      "loss": 0.8964,
      "step": 21
    },
    {
      "epoch": 0.23655913978494625,
      "grad_norm": 1.3131704330444336,
      "learning_rate": 8.259700376276725e-05,
      "loss": 1.0047,
      "step": 22
    },
    {
      "epoch": 0.24731182795698925,
      "grad_norm": 1.2672499418258667,
      "learning_rate": 8.095061449516903e-05,
      "loss": 0.934,
      "step": 23
    },
    {
      "epoch": 0.25806451612903225,
      "grad_norm": 1.506225824356079,
      "learning_rate": 7.925043861620091e-05,
      "loss": 0.9909,
      "step": 24
    },
    {
      "epoch": 0.26881720430107525,
      "grad_norm": 1.1839021444320679,
      "learning_rate": 7.75e-05,
      "loss": 0.7994,
      "step": 25
    },
    {
      "epoch": 0.26881720430107525,
      "eval_loss": 0.8984464406967163,
      "eval_runtime": 2.556,
      "eval_samples_per_second": 19.562,
      "eval_steps_per_second": 3.521,
      "step": 25
    },
    {
      "epoch": 0.27956989247311825,
      "grad_norm": 1.2005083560943604,
      "learning_rate": 7.570292669790186e-05,
      "loss": 0.8812,
      "step": 26
    },
    {
      "epoch": 0.2903225806451613,
      "grad_norm": 1.1817283630371094,
      "learning_rate": 7.386294341874989e-05,
      "loss": 0.918,
      "step": 27
    },
    {
      "epoch": 0.3010752688172043,
      "grad_norm": 1.283726453781128,
      "learning_rate": 7.198386380886765e-05,
      "loss": 0.9037,
      "step": 28
    },
    {
      "epoch": 0.3118279569892473,
      "grad_norm": 1.1192891597747803,
      "learning_rate": 7.006958254769438e-05,
      "loss": 0.8936,
      "step": 29
    },
    {
      "epoch": 0.3225806451612903,
      "grad_norm": 1.106978178024292,
      "learning_rate": 6.812406727546713e-05,
      "loss": 0.9516,
      "step": 30
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 1.1660184860229492,
      "learning_rate": 6.61513503696805e-05,
      "loss": 0.9056,
      "step": 31
    },
    {
      "epoch": 0.34408602150537637,
      "grad_norm": 1.0723466873168945,
      "learning_rate": 6.415552058736854e-05,
      "loss": 0.823,
      "step": 32
    },
    {
      "epoch": 0.3548387096774194,
      "grad_norm": 1.0570652484893799,
      "learning_rate": 6.214071459053132e-05,
      "loss": 0.8688,
      "step": 33
    },
    {
      "epoch": 0.3655913978494624,
      "grad_norm": 1.0927484035491943,
      "learning_rate": 6.0111108372271376e-05,
      "loss": 0.901,
      "step": 34
    },
    {
      "epoch": 0.3763440860215054,
      "grad_norm": 1.2289843559265137,
      "learning_rate": 5.80709086014102e-05,
      "loss": 0.9617,
      "step": 35
    },
    {
      "epoch": 0.3870967741935484,
      "grad_norm": 1.010915994644165,
      "learning_rate": 5.6024343903524755e-05,
      "loss": 0.7569,
      "step": 36
    },
    {
      "epoch": 0.3978494623655914,
      "grad_norm": 0.9901615381240845,
      "learning_rate": 5.397565609647525e-05,
      "loss": 0.8592,
      "step": 37
    },
    {
      "epoch": 0.40860215053763443,
      "grad_norm": 1.1046444177627563,
      "learning_rate": 5.192909139858981e-05,
      "loss": 0.7586,
      "step": 38
    },
    {
      "epoch": 0.41935483870967744,
      "grad_norm": 1.07370126247406,
      "learning_rate": 4.988889162772863e-05,
      "loss": 0.8669,
      "step": 39
    },
    {
      "epoch": 0.43010752688172044,
      "grad_norm": 1.0143686532974243,
      "learning_rate": 4.785928540946869e-05,
      "loss": 0.8277,
      "step": 40
    },
    {
      "epoch": 0.44086021505376344,
      "grad_norm": 0.9582819938659668,
      "learning_rate": 4.584447941263149e-05,
      "loss": 0.8128,
      "step": 41
    },
    {
      "epoch": 0.45161290322580644,
      "grad_norm": 1.1986384391784668,
      "learning_rate": 4.384864963031951e-05,
      "loss": 0.8032,
      "step": 42
    },
    {
      "epoch": 0.46236559139784944,
      "grad_norm": 1.00569748878479,
      "learning_rate": 4.1875932724532885e-05,
      "loss": 0.8394,
      "step": 43
    },
    {
      "epoch": 0.4731182795698925,
      "grad_norm": 1.094247817993164,
      "learning_rate": 3.9930417452305626e-05,
      "loss": 0.7881,
      "step": 44
    },
    {
      "epoch": 0.4838709677419355,
      "grad_norm": 1.1001094579696655,
      "learning_rate": 3.8016136191132356e-05,
      "loss": 0.7501,
      "step": 45
    },
    {
      "epoch": 0.4946236559139785,
      "grad_norm": 1.1981114149093628,
      "learning_rate": 3.613705658125014e-05,
      "loss": 0.8529,
      "step": 46
    },
    {
      "epoch": 0.5053763440860215,
      "grad_norm": 1.2741680145263672,
      "learning_rate": 3.4297073302098156e-05,
      "loss": 0.8109,
      "step": 47
    },
    {
      "epoch": 0.5161290322580645,
      "grad_norm": 0.9917993545532227,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.7039,
      "step": 48
    },
    {
      "epoch": 0.5268817204301075,
      "grad_norm": 0.9533838033676147,
      "learning_rate": 3.074956138379911e-05,
      "loss": 0.7464,
      "step": 49
    },
    {
      "epoch": 0.5376344086021505,
      "grad_norm": 1.1153020858764648,
      "learning_rate": 2.9049385504830985e-05,
      "loss": 1.0776,
      "step": 50
    },
    {
      "epoch": 0.5376344086021505,
      "eval_loss": 0.8514222502708435,
      "eval_runtime": 2.5572,
      "eval_samples_per_second": 19.552,
      "eval_steps_per_second": 3.519,
      "step": 50
    },
    {
      "epoch": 0.5483870967741935,
      "grad_norm": 0.9954845309257507,
      "learning_rate": 2.740299623723276e-05,
      "loss": 0.7883,
      "step": 51
    },
    {
      "epoch": 0.5591397849462365,
      "grad_norm": 0.9178506135940552,
      "learning_rate": 2.5813805974175988e-05,
      "loss": 0.8541,
      "step": 52
    },
    {
      "epoch": 0.5698924731182796,
      "grad_norm": 1.0208925008773804,
      "learning_rate": 2.4285108555160577e-05,
      "loss": 0.7694,
      "step": 53
    },
    {
      "epoch": 0.5806451612903226,
      "grad_norm": 1.0292917490005493,
      "learning_rate": 2.2820072439029525e-05,
      "loss": 0.8974,
      "step": 54
    },
    {
      "epoch": 0.5913978494623656,
      "grad_norm": 1.062528133392334,
      "learning_rate": 2.1421734136854156e-05,
      "loss": 0.8796,
      "step": 55
    },
    {
      "epoch": 0.6021505376344086,
      "grad_norm": 0.994775652885437,
      "learning_rate": 2.0092991918301108e-05,
      "loss": 0.7694,
      "step": 56
    },
    {
      "epoch": 0.6129032258064516,
      "grad_norm": 0.9980999231338501,
      "learning_rate": 1.883659980452598e-05,
      "loss": 0.8256,
      "step": 57
    },
    {
      "epoch": 0.6236559139784946,
      "grad_norm": 0.891745924949646,
      "learning_rate": 1.765516186004387e-05,
      "loss": 0.7038,
      "step": 58
    },
    {
      "epoch": 0.6344086021505376,
      "grad_norm": 1.0422624349594116,
      "learning_rate": 1.6551126795408016e-05,
      "loss": 0.8348,
      "step": 59
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 0.9354367852210999,
      "learning_rate": 1.552678289188326e-05,
      "loss": 0.7824,
      "step": 60
    },
    {
      "epoch": 0.6559139784946236,
      "grad_norm": 1.0644829273223877,
      "learning_rate": 1.4584253258633682e-05,
      "loss": 0.8215,
      "step": 61
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.8735445141792297,
      "learning_rate": 1.3725491432254624e-05,
      "loss": 0.7796,
      "step": 62
    },
    {
      "epoch": 0.6774193548387096,
      "grad_norm": 0.9756590723991394,
      "learning_rate": 1.2952277327769804e-05,
      "loss": 0.7496,
      "step": 63
    },
    {
      "epoch": 0.6881720430107527,
      "grad_norm": 1.0405932664871216,
      "learning_rate": 1.2266213549485638e-05,
      "loss": 0.7398,
      "step": 64
    },
    {
      "epoch": 0.6989247311827957,
      "grad_norm": 1.0412120819091797,
      "learning_rate": 1.1668722069349041e-05,
      "loss": 0.7613,
      "step": 65
    },
    {
      "epoch": 0.7096774193548387,
      "grad_norm": 1.0129188299179077,
      "learning_rate": 1.1161041279693446e-05,
      "loss": 0.8097,
      "step": 66
    },
    {
      "epoch": 0.7204301075268817,
      "grad_norm": 1.0584628582000732,
      "learning_rate": 1.074422342648161e-05,
      "loss": 0.877,
      "step": 67
    },
    {
      "epoch": 0.7311827956989247,
      "grad_norm": 1.030712366104126,
      "learning_rate": 1.0419132428365116e-05,
      "loss": 0.8288,
      "step": 68
    },
    {
      "epoch": 0.7419354838709677,
      "grad_norm": 1.2178075313568115,
      "learning_rate": 1.0186442086081093e-05,
      "loss": 0.8672,
      "step": 69
    },
    {
      "epoch": 0.7526881720430108,
      "grad_norm": 1.0627042055130005,
      "learning_rate": 1.0046634685897261e-05,
      "loss": 0.9266,
      "step": 70
    },
    {
      "epoch": 0.7634408602150538,
      "grad_norm": 0.9612053632736206,
      "learning_rate": 1e-05,
      "loss": 0.8116,
      "step": 71
    }
  ],
  "logging_steps": 1,
  "max_steps": 71,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8284068806262784e+16,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}