{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.28155795401220085,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003754106053496011,
      "grad_norm": 5.032824516296387,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 10.732,
      "step": 1
    },
    {
      "epoch": 0.003754106053496011,
      "eval_loss": 10.386579513549805,
      "eval_runtime": 23.6744,
      "eval_samples_per_second": 9.504,
      "eval_steps_per_second": 4.773,
      "step": 1
    },
    {
      "epoch": 0.007508212106992022,
      "grad_norm": 4.45830774307251,
      "learning_rate": 6.666666666666667e-05,
      "loss": 10.1977,
      "step": 2
    },
    {
      "epoch": 0.011262318160488035,
      "grad_norm": 4.931914806365967,
      "learning_rate": 0.0001,
      "loss": 10.4683,
      "step": 3
    },
    {
      "epoch": 0.015016424213984044,
      "grad_norm": 5.310385227203369,
      "learning_rate": 9.99524110790929e-05,
      "loss": 10.0276,
      "step": 4
    },
    {
      "epoch": 0.018770530267480056,
      "grad_norm": 5.923955917358398,
      "learning_rate": 9.980973490458728e-05,
      "loss": 8.8427,
      "step": 5
    },
    {
      "epoch": 0.02252463632097607,
      "grad_norm": 6.323431015014648,
      "learning_rate": 9.957224306869053e-05,
      "loss": 8.1762,
      "step": 6
    },
    {
      "epoch": 0.02627874237447208,
      "grad_norm": 7.335643291473389,
      "learning_rate": 9.924038765061042e-05,
      "loss": 6.8706,
      "step": 7
    },
    {
      "epoch": 0.03003284842796809,
      "grad_norm": 8.605307579040527,
      "learning_rate": 9.881480035599667e-05,
      "loss": 5.6587,
      "step": 8
    },
    {
      "epoch": 0.0337869544814641,
      "grad_norm": 8.2649564743042,
      "learning_rate": 9.829629131445342e-05,
      "loss": 4.7449,
      "step": 9
    },
    {
      "epoch": 0.03754106053496011,
      "grad_norm": 8.937447547912598,
      "learning_rate": 9.768584753741134e-05,
      "loss": 3.5373,
      "step": 10
    },
    {
      "epoch": 0.041295166588456125,
      "grad_norm": 7.276700019836426,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.5148,
      "step": 11
    },
    {
      "epoch": 0.04504927264195214,
      "grad_norm": 6.3739237785339355,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.8054,
      "step": 12
    },
    {
      "epoch": 0.048803378695448145,
      "grad_norm": 9.503493309020996,
      "learning_rate": 9.53153893518325e-05,
      "loss": 1.4151,
      "step": 13
    },
    {
      "epoch": 0.05255748474894416,
      "grad_norm": 7.375895977020264,
      "learning_rate": 9.435054165891109e-05,
      "loss": 1.3681,
      "step": 14
    },
    {
      "epoch": 0.05631159080244017,
      "grad_norm": 5.152674674987793,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.6395,
      "step": 15
    },
    {
      "epoch": 0.06006569685593618,
      "grad_norm": 3.77114200592041,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.529,
      "step": 16
    },
    {
      "epoch": 0.06381980290943219,
      "grad_norm": 5.566516876220703,
      "learning_rate": 9.09576022144496e-05,
      "loss": 1.0785,
      "step": 17
    },
    {
      "epoch": 0.0675739089629282,
      "grad_norm": 6.849836826324463,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.4344,
      "step": 18
    },
    {
      "epoch": 0.07132801501642422,
      "grad_norm": 1.5408703088760376,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.2114,
      "step": 19
    },
    {
      "epoch": 0.07508212106992022,
      "grad_norm": 4.369248390197754,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.4968,
      "step": 20
    },
    {
      "epoch": 0.07883622712341623,
      "grad_norm": 7.375030040740967,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.6239,
      "step": 21
    },
    {
      "epoch": 0.08259033317691225,
      "grad_norm": 3.9516844749450684,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.2511,
      "step": 22
    },
    {
      "epoch": 0.08634443923040826,
      "grad_norm": 12.229116439819336,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.3429,
      "step": 23
    },
    {
      "epoch": 0.09009854528390428,
      "grad_norm": 5.0463337898254395,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.156,
      "step": 24
    },
    {
      "epoch": 0.09385265133740028,
      "grad_norm": 6.648087978363037,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.1689,
      "step": 25
    },
    {
      "epoch": 0.09385265133740028,
      "eval_loss": 0.1127062663435936,
      "eval_runtime": 23.1821,
      "eval_samples_per_second": 9.706,
      "eval_steps_per_second": 4.874,
      "step": 25
    },
    {
      "epoch": 0.09760675739089629,
      "grad_norm": 1.7941869497299194,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.1451,
      "step": 26
    },
    {
      "epoch": 0.10136086344439231,
      "grad_norm": 4.530095100402832,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2568,
      "step": 27
    },
    {
      "epoch": 0.10511496949788832,
      "grad_norm": 2.566866636276245,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.1024,
      "step": 28
    },
    {
      "epoch": 0.10886907555138432,
      "grad_norm": 1.405090570449829,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.0991,
      "step": 29
    },
    {
      "epoch": 0.11262318160488034,
      "grad_norm": 1.2196756601333618,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0372,
      "step": 30
    },
    {
      "epoch": 0.11637728765837635,
      "grad_norm": 2.8575334548950195,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.1209,
      "step": 31
    },
    {
      "epoch": 0.12013139371187236,
      "grad_norm": 1.67503821849823,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.0985,
      "step": 32
    },
    {
      "epoch": 0.12388549976536838,
      "grad_norm": 2.7599682807922363,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.1226,
      "step": 33
    },
    {
      "epoch": 0.12763960581886438,
      "grad_norm": 0.700145423412323,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.0623,
      "step": 34
    },
    {
      "epoch": 0.1313937118723604,
      "grad_norm": 1.034289836883545,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0414,
      "step": 35
    },
    {
      "epoch": 0.1351478179258564,
      "grad_norm": 2.3948283195495605,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.0645,
      "step": 36
    },
    {
      "epoch": 0.13890192397935242,
      "grad_norm": 4.111328125,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.1108,
      "step": 37
    },
    {
      "epoch": 0.14265603003284844,
      "grad_norm": 2.2279419898986816,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.0874,
      "step": 38
    },
    {
      "epoch": 0.14641013608634443,
      "grad_norm": 1.1830275058746338,
      "learning_rate": 5e-05,
      "loss": 0.0498,
      "step": 39
    },
    {
      "epoch": 0.15016424213984045,
      "grad_norm": 0.3365573585033417,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.0223,
      "step": 40
    },
    {
      "epoch": 0.15391834819333647,
      "grad_norm": 2.0155816078186035,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.1617,
      "step": 41
    },
    {
      "epoch": 0.15767245424683246,
      "grad_norm": 3.003706455230713,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.102,
      "step": 42
    },
    {
      "epoch": 0.16142656030032848,
      "grad_norm": 1.5898525714874268,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0484,
      "step": 43
    },
    {
      "epoch": 0.1651806663538245,
      "grad_norm": 0.8386490345001221,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.0363,
      "step": 44
    },
    {
      "epoch": 0.1689347724073205,
      "grad_norm": 1.006565809249878,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0238,
      "step": 45
    },
    {
      "epoch": 0.17268887846081651,
      "grad_norm": 1.485731601715088,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0654,
      "step": 46
    },
    {
      "epoch": 0.17644298451431253,
      "grad_norm": 1.6186491250991821,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.1113,
      "step": 47
    },
    {
      "epoch": 0.18019709056780855,
      "grad_norm": 2.721684217453003,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.1347,
      "step": 48
    },
    {
      "epoch": 0.18395119662130455,
      "grad_norm": 0.671211302280426,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.0177,
      "step": 49
    },
    {
      "epoch": 0.18770530267480057,
      "grad_norm": 0.7374471426010132,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.0291,
      "step": 50
    },
    {
      "epoch": 0.18770530267480057,
      "eval_loss": 0.03728002309799194,
      "eval_runtime": 23.2151,
      "eval_samples_per_second": 9.692,
      "eval_steps_per_second": 4.868,
      "step": 50
    },
    {
      "epoch": 0.1914594087282966,
      "grad_norm": 0.2194223254919052,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.006,
      "step": 51
    },
    {
      "epoch": 0.19521351478179258,
      "grad_norm": 0.8518089056015015,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.0223,
      "step": 52
    },
    {
      "epoch": 0.1989676208352886,
      "grad_norm": 1.0204380750656128,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.027,
      "step": 53
    },
    {
      "epoch": 0.20272172688878462,
      "grad_norm": 1.9320348501205444,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.0502,
      "step": 54
    },
    {
      "epoch": 0.2064758329422806,
      "grad_norm": 1.0254801511764526,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.0232,
      "step": 55
    },
    {
      "epoch": 0.21022993899577663,
      "grad_norm": 0.39294230937957764,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.0112,
      "step": 56
    },
    {
      "epoch": 0.21398404504927265,
      "grad_norm": 0.06195160001516342,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0033,
      "step": 57
    },
    {
      "epoch": 0.21773815110276865,
      "grad_norm": 0.6369668841362,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.038,
      "step": 58
    },
    {
      "epoch": 0.22149225715626467,
      "grad_norm": 0.4076542556285858,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.0095,
      "step": 59
    },
    {
      "epoch": 0.22524636320976069,
      "grad_norm": 0.6492363214492798,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.0206,
      "step": 60
    },
    {
      "epoch": 0.22900046926325668,
      "grad_norm": 0.630257248878479,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.0199,
      "step": 61
    },
    {
      "epoch": 0.2327545753167527,
      "grad_norm": 0.5601248145103455,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.0139,
      "step": 62
    },
    {
      "epoch": 0.23650868137024872,
      "grad_norm": 0.7685927152633667,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0222,
      "step": 63
    },
    {
      "epoch": 0.2402627874237447,
      "grad_norm": 0.995885968208313,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.0187,
      "step": 64
    },
    {
      "epoch": 0.24401689347724073,
      "grad_norm": 0.505293071269989,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.0163,
      "step": 65
    },
    {
      "epoch": 0.24777099953073675,
      "grad_norm": 0.4831348955631256,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0372,
      "step": 66
    },
    {
      "epoch": 0.25152510558423274,
      "grad_norm": 0.6265993118286133,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0232,
      "step": 67
    },
    {
      "epoch": 0.25527921163772876,
      "grad_norm": 0.8600174188613892,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.0164,
      "step": 68
    },
    {
      "epoch": 0.2590333176912248,
      "grad_norm": 0.46614569425582886,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.022,
      "step": 69
    },
    {
      "epoch": 0.2627874237447208,
      "grad_norm": 0.19857588410377502,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.0041,
      "step": 70
    },
    {
      "epoch": 0.2665415297982168,
      "grad_norm": 0.3438130021095276,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.0181,
      "step": 71
    },
    {
      "epoch": 0.2702956358517128,
      "grad_norm": 1.681471347808838,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.0286,
      "step": 72
    },
    {
      "epoch": 0.2740497419052088,
      "grad_norm": 0.05950739607214928,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.0024,
      "step": 73
    },
    {
      "epoch": 0.27780384795870483,
      "grad_norm": 0.18584494292736053,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.0057,
      "step": 74
    },
    {
      "epoch": 0.28155795401220085,
      "grad_norm": 0.41768908500671387,
      "learning_rate": 0.0,
      "loss": 0.0132,
      "step": 75
    },
    {
      "epoch": 0.28155795401220085,
      "eval_loss": 0.022396452724933624,
      "eval_runtime": 23.2271,
      "eval_samples_per_second": 9.687,
      "eval_steps_per_second": 4.865,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.048558039990272e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}