{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.009538647419795872,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0001271819655972783,
      "grad_norm": 14.686725616455078,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 5.7949,
      "step": 1
    },
    {
      "epoch": 0.0001271819655972783,
      "eval_loss": 5.837724208831787,
      "eval_runtime": 1012.7963,
      "eval_samples_per_second": 6.538,
      "eval_steps_per_second": 3.269,
      "step": 1
    },
    {
      "epoch": 0.0002543639311945566,
      "grad_norm": 14.532427787780762,
      "learning_rate": 6.666666666666667e-05,
      "loss": 5.9049,
      "step": 2
    },
    {
      "epoch": 0.0003815458967918349,
      "grad_norm": 14.580315589904785,
      "learning_rate": 0.0001,
      "loss": 5.5549,
      "step": 3
    },
    {
      "epoch": 0.0005087278623891132,
      "grad_norm": 12.386919975280762,
      "learning_rate": 9.99524110790929e-05,
      "loss": 5.2155,
      "step": 4
    },
    {
      "epoch": 0.0006359098279863915,
      "grad_norm": 8.52061939239502,
      "learning_rate": 9.980973490458728e-05,
      "loss": 3.5045,
      "step": 5
    },
    {
      "epoch": 0.0007630917935836698,
      "grad_norm": 7.900720596313477,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.7008,
      "step": 6
    },
    {
      "epoch": 0.0008902737591809482,
      "grad_norm": 8.71509838104248,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.4308,
      "step": 7
    },
    {
      "epoch": 0.0010174557247782264,
      "grad_norm": 5.258134841918945,
      "learning_rate": 9.881480035599667e-05,
      "loss": 2.1263,
      "step": 8
    },
    {
      "epoch": 0.0011446376903755048,
      "grad_norm": 5.729340076446533,
      "learning_rate": 9.829629131445342e-05,
      "loss": 2.1367,
      "step": 9
    },
    {
      "epoch": 0.001271819655972783,
      "grad_norm": 7.960080623626709,
      "learning_rate": 9.768584753741134e-05,
      "loss": 2.0017,
      "step": 10
    },
    {
      "epoch": 0.0013990016215700615,
      "grad_norm": 4.6670241355896,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.021,
      "step": 11
    },
    {
      "epoch": 0.0015261835871673397,
      "grad_norm": 4.256051063537598,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.9393,
      "step": 12
    },
    {
      "epoch": 0.0016533655527646179,
      "grad_norm": 6.798515319824219,
      "learning_rate": 9.53153893518325e-05,
      "loss": 1.8432,
      "step": 13
    },
    {
      "epoch": 0.0017805475183618963,
      "grad_norm": 5.227590084075928,
      "learning_rate": 9.435054165891109e-05,
      "loss": 1.7585,
      "step": 14
    },
    {
      "epoch": 0.0019077294839591745,
      "grad_norm": 4.927859306335449,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.8781,
      "step": 15
    },
    {
      "epoch": 0.0020349114495564528,
      "grad_norm": 6.839364051818848,
      "learning_rate": 9.21695722906443e-05,
      "loss": 1.7892,
      "step": 16
    },
    {
      "epoch": 0.002162093415153731,
      "grad_norm": 8.424290657043457,
      "learning_rate": 9.09576022144496e-05,
      "loss": 1.9716,
      "step": 17
    },
    {
      "epoch": 0.0022892753807510096,
      "grad_norm": 3.870750904083252,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.734,
      "step": 18
    },
    {
      "epoch": 0.0024164573463482876,
      "grad_norm": 3.9411606788635254,
      "learning_rate": 8.83022221559489e-05,
      "loss": 1.7441,
      "step": 19
    },
    {
      "epoch": 0.002543639311945566,
      "grad_norm": 4.28488302230835,
      "learning_rate": 8.68638668405062e-05,
      "loss": 1.5066,
      "step": 20
    },
    {
      "epoch": 0.0026708212775428445,
      "grad_norm": 2.7083420753479004,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.6947,
      "step": 21
    },
    {
      "epoch": 0.002798003243140123,
      "grad_norm": 3.216218948364258,
      "learning_rate": 8.377951038078302e-05,
      "loss": 1.7181,
      "step": 22
    },
    {
      "epoch": 0.002925185208737401,
      "grad_norm": 3.545102119445801,
      "learning_rate": 8.213938048432697e-05,
      "loss": 1.5627,
      "step": 23
    },
    {
      "epoch": 0.0030523671743346793,
      "grad_norm": 5.308266639709473,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.81,
      "step": 24
    },
    {
      "epoch": 0.0031795491399319578,
      "grad_norm": 2.468996286392212,
      "learning_rate": 7.86788218175523e-05,
      "loss": 1.6267,
      "step": 25
    },
    {
      "epoch": 0.0031795491399319578,
      "eval_loss": 1.6478782892227173,
      "eval_runtime": 1016.413,
      "eval_samples_per_second": 6.515,
      "eval_steps_per_second": 3.258,
      "step": 25
    },
    {
      "epoch": 0.0033067311055292358,
      "grad_norm": 2.405796766281128,
      "learning_rate": 7.68649804173412e-05,
      "loss": 1.6712,
      "step": 26
    },
    {
      "epoch": 0.003433913071126514,
      "grad_norm": 2.1344990730285645,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.657,
      "step": 27
    },
    {
      "epoch": 0.0035610950367237926,
      "grad_norm": 3.931316375732422,
      "learning_rate": 7.308743066175172e-05,
      "loss": 1.7584,
      "step": 28
    },
    {
      "epoch": 0.003688277002321071,
      "grad_norm": 2.662752628326416,
      "learning_rate": 7.113091308703498e-05,
      "loss": 1.605,
      "step": 29
    },
    {
      "epoch": 0.003815458967918349,
      "grad_norm": 3.1903748512268066,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.6863,
      "step": 30
    },
    {
      "epoch": 0.0039426409335156275,
      "grad_norm": 2.342129945755005,
      "learning_rate": 6.710100716628344e-05,
      "loss": 1.5341,
      "step": 31
    },
    {
      "epoch": 0.0040698228991129055,
      "grad_norm": 2.9486243724823,
      "learning_rate": 6.503528997521366e-05,
      "loss": 1.6403,
      "step": 32
    },
    {
      "epoch": 0.004197004864710184,
      "grad_norm": 2.807129383087158,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.6695,
      "step": 33
    },
    {
      "epoch": 0.004324186830307462,
      "grad_norm": 2.300341844558716,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 1.5375,
      "step": 34
    },
    {
      "epoch": 0.00445136879590474,
      "grad_norm": 5.008623123168945,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.7217,
      "step": 35
    },
    {
      "epoch": 0.004578550761502019,
      "grad_norm": 3.538121461868286,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.6573,
      "step": 36
    },
    {
      "epoch": 0.004705732727099297,
      "grad_norm": 2.3192837238311768,
      "learning_rate": 5.435778713738292e-05,
      "loss": 1.4853,
      "step": 37
    },
    {
      "epoch": 0.004832914692696575,
      "grad_norm": 2.5325701236724854,
      "learning_rate": 5.218096936826681e-05,
      "loss": 1.5451,
      "step": 38
    },
    {
      "epoch": 0.004960096658293854,
      "grad_norm": 2.0213067531585693,
      "learning_rate": 5e-05,
      "loss": 1.6081,
      "step": 39
    },
    {
      "epoch": 0.005087278623891132,
      "grad_norm": 3.6637330055236816,
      "learning_rate": 4.781903063173321e-05,
      "loss": 1.7933,
      "step": 40
    },
    {
      "epoch": 0.005214460589488411,
      "grad_norm": 2.7776637077331543,
      "learning_rate": 4.564221286261709e-05,
      "loss": 1.4603,
      "step": 41
    },
    {
      "epoch": 0.005341642555085689,
      "grad_norm": 3.7768895626068115,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.6809,
      "step": 42
    },
    {
      "epoch": 0.005468824520682967,
      "grad_norm": 3.5375547409057617,
      "learning_rate": 4.131759111665349e-05,
      "loss": 1.6731,
      "step": 43
    },
    {
      "epoch": 0.005596006486280246,
      "grad_norm": 2.548534393310547,
      "learning_rate": 3.917801930309486e-05,
      "loss": 1.6962,
      "step": 44
    },
    {
      "epoch": 0.005723188451877524,
      "grad_norm": 1.7797907590866089,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.6327,
      "step": 45
    },
    {
      "epoch": 0.005850370417474802,
      "grad_norm": 1.6917822360992432,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 1.543,
      "step": 46
    },
    {
      "epoch": 0.005977552383072081,
      "grad_norm": 2.2675538063049316,
      "learning_rate": 3.289899283371657e-05,
      "loss": 1.6209,
      "step": 47
    },
    {
      "epoch": 0.006104734348669359,
      "grad_norm": 1.9940866231918335,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.5505,
      "step": 48
    },
    {
      "epoch": 0.006231916314266637,
      "grad_norm": 1.4850102663040161,
      "learning_rate": 2.886908691296504e-05,
      "loss": 1.6082,
      "step": 49
    },
    {
      "epoch": 0.0063590982798639156,
      "grad_norm": 1.984756350517273,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 1.6368,
      "step": 50
    },
    {
      "epoch": 0.0063590982798639156,
      "eval_loss": 1.5464518070220947,
      "eval_runtime": 1017.6554,
      "eval_samples_per_second": 6.507,
      "eval_steps_per_second": 3.254,
      "step": 50
    },
    {
      "epoch": 0.0064862802454611936,
      "grad_norm": 2.5585479736328125,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.6511,
      "step": 51
    },
    {
      "epoch": 0.0066134622110584716,
      "grad_norm": 3.03650164604187,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 1.465,
      "step": 52
    },
    {
      "epoch": 0.00674064417665575,
      "grad_norm": 2.079188108444214,
      "learning_rate": 2.132117818244771e-05,
      "loss": 1.5449,
      "step": 53
    },
    {
      "epoch": 0.006867826142253028,
      "grad_norm": 2.6188244819641113,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.5621,
      "step": 54
    },
    {
      "epoch": 0.006995008107850306,
      "grad_norm": 2.1260628700256348,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 1.5848,
      "step": 55
    },
    {
      "epoch": 0.007122190073447585,
      "grad_norm": 1.6332706212997437,
      "learning_rate": 1.622048961921699e-05,
      "loss": 1.5695,
      "step": 56
    },
    {
      "epoch": 0.007249372039044863,
      "grad_norm": 2.4522626399993896,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.6052,
      "step": 57
    },
    {
      "epoch": 0.007376554004642142,
      "grad_norm": 2.603383779525757,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 1.6324,
      "step": 58
    },
    {
      "epoch": 0.00750373597023942,
      "grad_norm": 2.176748752593994,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.3584,
      "step": 59
    },
    {
      "epoch": 0.007630917935836698,
      "grad_norm": 2.0545296669006348,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.5658,
      "step": 60
    },
    {
      "epoch": 0.007758099901433977,
      "grad_norm": 2.176485061645508,
      "learning_rate": 9.042397785550405e-06,
      "loss": 1.5229,
      "step": 61
    },
    {
      "epoch": 0.007885281867031255,
      "grad_norm": 2.3088207244873047,
      "learning_rate": 7.830427709355725e-06,
      "loss": 1.4633,
      "step": 62
    },
    {
      "epoch": 0.008012463832628533,
      "grad_norm": 3.608034372329712,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.6021,
      "step": 63
    },
    {
      "epoch": 0.008139645798225811,
      "grad_norm": 1.7870920896530151,
      "learning_rate": 5.649458341088915e-06,
      "loss": 1.5331,
      "step": 64
    },
    {
      "epoch": 0.00826682776382309,
      "grad_norm": 2.5201566219329834,
      "learning_rate": 4.684610648167503e-06,
      "loss": 1.5798,
      "step": 65
    },
    {
      "epoch": 0.008394009729420369,
      "grad_norm": 3.3685851097106934,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.6904,
      "step": 66
    },
    {
      "epoch": 0.008521191695017647,
      "grad_norm": 4.109929084777832,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 1.6286,
      "step": 67
    },
    {
      "epoch": 0.008648373660614925,
      "grad_norm": 3.135014533996582,
      "learning_rate": 2.314152462588659e-06,
      "loss": 1.5616,
      "step": 68
    },
    {
      "epoch": 0.008775555626212203,
      "grad_norm": 2.6625030040740967,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.5772,
      "step": 69
    },
    {
      "epoch": 0.00890273759180948,
      "grad_norm": 2.5769317150115967,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 1.4481,
      "step": 70
    },
    {
      "epoch": 0.00902991955740676,
      "grad_norm": 2.8375887870788574,
      "learning_rate": 7.596123493895991e-07,
      "loss": 1.6209,
      "step": 71
    },
    {
      "epoch": 0.009157101523004038,
      "grad_norm": 2.410177707672119,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.6444,
      "step": 72
    },
    {
      "epoch": 0.009284283488601316,
      "grad_norm": 2.377715826034546,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 1.5977,
      "step": 73
    },
    {
      "epoch": 0.009411465454198594,
      "grad_norm": 2.3886666297912598,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 1.6901,
      "step": 74
    },
    {
      "epoch": 0.009538647419795872,
      "grad_norm": 2.8022537231445312,
      "learning_rate": 0.0,
      "loss": 1.3744,
      "step": 75
    },
    {
      "epoch": 0.009538647419795872,
      "eval_loss": 1.5396223068237305,
      "eval_runtime": 1016.8842,
      "eval_samples_per_second": 6.512,
      "eval_steps_per_second": 3.256,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.01589487157248e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}