{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.023937761819269897,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0003191701575902653,
      "grad_norm": 74.66423797607422,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 11.8509,
      "step": 1
    },
    {
      "epoch": 0.0003191701575902653,
      "eval_loss": 12.318764686584473,
      "eval_runtime": 286.3176,
      "eval_samples_per_second": 36.861,
      "eval_steps_per_second": 4.61,
      "step": 1
    },
    {
      "epoch": 0.0006383403151805306,
      "grad_norm": 71.02066802978516,
      "learning_rate": 6.666666666666667e-05,
      "loss": 11.9938,
      "step": 2
    },
    {
      "epoch": 0.0009575104727707959,
      "grad_norm": 59.003684997558594,
      "learning_rate": 0.0001,
      "loss": 9.6107,
      "step": 3
    },
    {
      "epoch": 0.0012766806303610612,
      "grad_norm": 73.98766326904297,
      "learning_rate": 9.99524110790929e-05,
      "loss": 3.6539,
      "step": 4
    },
    {
      "epoch": 0.0015958507879513266,
      "grad_norm": 7.199901580810547,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.8517,
      "step": 5
    },
    {
      "epoch": 0.0019150209455415918,
      "grad_norm": 3.2114429473876953,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.1353,
      "step": 6
    },
    {
      "epoch": 0.002234191103131857,
      "grad_norm": 108.24579620361328,
      "learning_rate": 9.924038765061042e-05,
      "loss": 3.1604,
      "step": 7
    },
    {
      "epoch": 0.0025533612607221224,
      "grad_norm": 14.213573455810547,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.293,
      "step": 8
    },
    {
      "epoch": 0.002872531418312388,
      "grad_norm": 3.7268381118774414,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.2232,
      "step": 9
    },
    {
      "epoch": 0.003191701575902653,
      "grad_norm": 1.227923035621643,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.0856,
      "step": 10
    },
    {
      "epoch": 0.0035108717334929184,
      "grad_norm": 14.622164726257324,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.3813,
      "step": 11
    },
    {
      "epoch": 0.0038300418910831835,
      "grad_norm": 5.4367194175720215,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.1709,
      "step": 12
    },
    {
      "epoch": 0.004149212048673449,
      "grad_norm": 1.4004822969436646,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.1118,
      "step": 13
    },
    {
      "epoch": 0.004468382206263714,
      "grad_norm": 0.2667771279811859,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.0067,
      "step": 14
    },
    {
      "epoch": 0.0047875523638539795,
      "grad_norm": 2.9109885692596436,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.2121,
      "step": 15
    },
    {
      "epoch": 0.005106722521444245,
      "grad_norm": 1.8067930936813354,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.1049,
      "step": 16
    },
    {
      "epoch": 0.00542589267903451,
      "grad_norm": 0.8027642965316772,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.0282,
      "step": 17
    },
    {
      "epoch": 0.005745062836624776,
      "grad_norm": 1.0130234956741333,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.0292,
      "step": 18
    },
    {
      "epoch": 0.006064232994215041,
      "grad_norm": 1.2449007034301758,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0761,
      "step": 19
    },
    {
      "epoch": 0.006383403151805306,
      "grad_norm": 1.0734630823135376,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.0299,
      "step": 20
    },
    {
      "epoch": 0.0067025733093955715,
      "grad_norm": 0.5908615589141846,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.0137,
      "step": 21
    },
    {
      "epoch": 0.007021743466985837,
      "grad_norm": 0.6940059661865234,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.0322,
      "step": 22
    },
    {
      "epoch": 0.007340913624576102,
      "grad_norm": 1.4336614608764648,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.1108,
      "step": 23
    },
    {
      "epoch": 0.007660083782166367,
      "grad_norm": 2.7778196334838867,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.0822,
      "step": 24
    },
    {
      "epoch": 0.007979253939756632,
      "grad_norm": 1.1669725179672241,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.0476,
      "step": 25
    },
    {
      "epoch": 0.007979253939756632,
      "eval_loss": 0.03445420786738396,
      "eval_runtime": 286.9529,
      "eval_samples_per_second": 36.78,
      "eval_steps_per_second": 4.6,
      "step": 25
    },
    {
      "epoch": 0.008298424097346898,
      "grad_norm": 1.6094774007797241,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.0902,
      "step": 26
    },
    {
      "epoch": 0.008617594254937163,
      "grad_norm": 0.9928753972053528,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0321,
      "step": 27
    },
    {
      "epoch": 0.008936764412527429,
      "grad_norm": 0.8845332264900208,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.0358,
      "step": 28
    },
    {
      "epoch": 0.009255934570117695,
      "grad_norm": 0.6543906331062317,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.0209,
      "step": 29
    },
    {
      "epoch": 0.009575104727707959,
      "grad_norm": 0.648944079875946,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.0231,
      "step": 30
    },
    {
      "epoch": 0.009894274885298225,
      "grad_norm": 1.3208541870117188,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.06,
      "step": 31
    },
    {
      "epoch": 0.01021344504288849,
      "grad_norm": 0.25852757692337036,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.0066,
      "step": 32
    },
    {
      "epoch": 0.010532615200478756,
      "grad_norm": 0.7767801284790039,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.0324,
      "step": 33
    },
    {
      "epoch": 0.01085178535806902,
      "grad_norm": 0.7248371243476868,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.0134,
      "step": 34
    },
    {
      "epoch": 0.011170955515659286,
      "grad_norm": 0.9914122223854065,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0302,
      "step": 35
    },
    {
      "epoch": 0.011490125673249552,
      "grad_norm": 0.7037466764450073,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.0148,
      "step": 36
    },
    {
      "epoch": 0.011809295830839816,
      "grad_norm": 0.8041982650756836,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.0064,
      "step": 37
    },
    {
      "epoch": 0.012128465988430082,
      "grad_norm": 0.8569905757904053,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.0107,
      "step": 38
    },
    {
      "epoch": 0.012447636146020347,
      "grad_norm": 0.8578211665153503,
      "learning_rate": 5e-05,
      "loss": 0.0098,
      "step": 39
    },
    {
      "epoch": 0.012766806303610613,
      "grad_norm": 1.557311773300171,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.0157,
      "step": 40
    },
    {
      "epoch": 0.013085976461200877,
      "grad_norm": 1.648313283920288,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.036,
      "step": 41
    },
    {
      "epoch": 0.013405146618791143,
      "grad_norm": 1.1188888549804688,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.0183,
      "step": 42
    },
    {
      "epoch": 0.013724316776381409,
      "grad_norm": 0.10944560170173645,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0013,
      "step": 43
    },
    {
      "epoch": 0.014043486933971673,
      "grad_norm": 0.8294479846954346,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.0061,
      "step": 44
    },
    {
      "epoch": 0.01436265709156194,
      "grad_norm": 0.2965063452720642,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.0023,
      "step": 45
    },
    {
      "epoch": 0.014681827249152204,
      "grad_norm": 0.07751064747571945,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.0007,
      "step": 46
    },
    {
      "epoch": 0.01500099740674247,
      "grad_norm": 2.2420613765716553,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.0523,
      "step": 47
    },
    {
      "epoch": 0.015320167564332734,
      "grad_norm": 0.05525254085659981,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.0009,
      "step": 48
    },
    {
      "epoch": 0.015639337721923,
      "grad_norm": 0.6241516470909119,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.0039,
      "step": 49
    },
    {
      "epoch": 0.015958507879513265,
      "grad_norm": 0.018162360414862633,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.0004,
      "step": 50
    },
    {
      "epoch": 0.015958507879513265,
      "eval_loss": 0.03291398659348488,
      "eval_runtime": 286.6445,
      "eval_samples_per_second": 36.819,
      "eval_steps_per_second": 4.605,
      "step": 50
    },
    {
      "epoch": 0.016277678037103532,
      "grad_norm": 0.6963501572608948,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0272,
      "step": 51
    },
    {
      "epoch": 0.016596848194693797,
      "grad_norm": 1.651873230934143,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.0693,
      "step": 52
    },
    {
      "epoch": 0.01691601835228406,
      "grad_norm": 3.7848258018493652,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.0339,
      "step": 53
    },
    {
      "epoch": 0.017235188509874325,
      "grad_norm": 0.4375050663948059,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.0031,
      "step": 54
    },
    {
      "epoch": 0.017554358667464593,
      "grad_norm": 3.5927422046661377,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.0997,
      "step": 55
    },
    {
      "epoch": 0.017873528825054857,
      "grad_norm": 2.43323016166687,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.0267,
      "step": 56
    },
    {
      "epoch": 0.01819269898264512,
      "grad_norm": 0.128454327583313,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.0019,
      "step": 57
    },
    {
      "epoch": 0.01851186914023539,
      "grad_norm": 0.32172662019729614,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.003,
      "step": 58
    },
    {
      "epoch": 0.018831039297825654,
      "grad_norm": 2.459465265274048,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.02,
      "step": 59
    },
    {
      "epoch": 0.019150209455415918,
      "grad_norm": 1.4478123188018799,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.0096,
      "step": 60
    },
    {
      "epoch": 0.019469379613006182,
      "grad_norm": 1.4985058307647705,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.0774,
      "step": 61
    },
    {
      "epoch": 0.01978854977059645,
      "grad_norm": 2.816249370574951,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.0525,
      "step": 62
    },
    {
      "epoch": 0.020107719928186715,
      "grad_norm": 2.870896339416504,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.1261,
      "step": 63
    },
    {
      "epoch": 0.02042689008577698,
      "grad_norm": 0.29331719875335693,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.0018,
      "step": 64
    },
    {
      "epoch": 0.020746060243367247,
      "grad_norm": 2.912463426589966,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.0731,
      "step": 65
    },
    {
      "epoch": 0.02106523040095751,
      "grad_norm": 1.086990237236023,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.0129,
      "step": 66
    },
    {
      "epoch": 0.021384400558547775,
      "grad_norm": 3.1444857120513916,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0484,
      "step": 67
    },
    {
      "epoch": 0.02170357071613804,
      "grad_norm": 0.1576186716556549,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.0021,
      "step": 68
    },
    {
      "epoch": 0.022022740873728307,
      "grad_norm": 1.069075107574463,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.0609,
      "step": 69
    },
    {
      "epoch": 0.022341911031318572,
      "grad_norm": 1.1245722770690918,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.118,
      "step": 70
    },
    {
      "epoch": 0.022661081188908836,
      "grad_norm": 1.8234457969665527,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.0816,
      "step": 71
    },
    {
      "epoch": 0.022980251346499104,
      "grad_norm": 0.014433772303164005,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.0004,
      "step": 72
    },
    {
      "epoch": 0.023299421504089368,
      "grad_norm": 2.4126806259155273,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.0283,
      "step": 73
    },
    {
      "epoch": 0.023618591661679633,
      "grad_norm": 0.9039496779441833,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.006,
      "step": 74
    },
    {
      "epoch": 0.023937761819269897,
      "grad_norm": 1.8179197311401367,
      "learning_rate": 0.0,
      "loss": 0.0769,
      "step": 75
    },
    {
      "epoch": 0.023937761819269897,
      "eval_loss": 0.02614620514214039,
      "eval_runtime": 286.7682,
      "eval_samples_per_second": 36.803,
      "eval_steps_per_second": 4.603,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.476062712987648e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}