{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.058519457719691796,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007802594362625573,
      "grad_norm": 0.3376312553882599,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.4989,
      "step": 1
    },
    {
      "epoch": 0.0007802594362625573,
      "eval_loss": 0.8446513414382935,
      "eval_runtime": 321.1678,
      "eval_samples_per_second": 3.363,
      "eval_steps_per_second": 1.681,
      "step": 1
    },
    {
      "epoch": 0.0015605188725251145,
      "grad_norm": 0.4070318341255188,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.6737,
      "step": 2
    },
    {
      "epoch": 0.002340778308787672,
      "grad_norm": 0.4004059433937073,
      "learning_rate": 0.0001,
      "loss": 0.5249,
      "step": 3
    },
    {
      "epoch": 0.003121037745050229,
      "grad_norm": 0.43589961528778076,
      "learning_rate": 9.99524110790929e-05,
      "loss": 0.6278,
      "step": 4
    },
    {
      "epoch": 0.0039012971813127866,
      "grad_norm": 0.5050280690193176,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.6549,
      "step": 5
    },
    {
      "epoch": 0.004681556617575344,
      "grad_norm": 0.40668612718582153,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.5572,
      "step": 6
    },
    {
      "epoch": 0.005461816053837901,
      "grad_norm": 0.4934292733669281,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.6219,
      "step": 7
    },
    {
      "epoch": 0.006242075490100458,
      "grad_norm": 0.5753898620605469,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.5658,
      "step": 8
    },
    {
      "epoch": 0.007022334926363016,
      "grad_norm": 0.5027673840522766,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.6963,
      "step": 9
    },
    {
      "epoch": 0.007802594362625573,
      "grad_norm": 0.47841089963912964,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.6117,
      "step": 10
    },
    {
      "epoch": 0.00858285379888813,
      "grad_norm": 0.4685758948326111,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.561,
      "step": 11
    },
    {
      "epoch": 0.009363113235150688,
      "grad_norm": 0.5550876259803772,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.7298,
      "step": 12
    },
    {
      "epoch": 0.010143372671413246,
      "grad_norm": 0.48063692450523376,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.7098,
      "step": 13
    },
    {
      "epoch": 0.010923632107675801,
      "grad_norm": 0.4520593583583832,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.6776,
      "step": 14
    },
    {
      "epoch": 0.011703891543938359,
      "grad_norm": 0.5039393305778503,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.8103,
      "step": 15
    },
    {
      "epoch": 0.012484150980200916,
      "grad_norm": 0.4985882043838501,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.702,
      "step": 16
    },
    {
      "epoch": 0.013264410416463474,
      "grad_norm": 0.4833678603172302,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.7118,
      "step": 17
    },
    {
      "epoch": 0.014044669852726031,
      "grad_norm": 0.45505037903785706,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.6413,
      "step": 18
    },
    {
      "epoch": 0.014824929288988589,
      "grad_norm": 0.5279719829559326,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.7873,
      "step": 19
    },
    {
      "epoch": 0.015605188725251146,
      "grad_norm": 0.5257707834243774,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.7481,
      "step": 20
    },
    {
      "epoch": 0.016385448161513702,
      "grad_norm": 0.5363844633102417,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7771,
      "step": 21
    },
    {
      "epoch": 0.01716570759777626,
      "grad_norm": 0.5524177551269531,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.8244,
      "step": 22
    },
    {
      "epoch": 0.017945967034038817,
      "grad_norm": 0.5710583329200745,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.7883,
      "step": 23
    },
    {
      "epoch": 0.018726226470301376,
      "grad_norm": 0.5935850739479065,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.8224,
      "step": 24
    },
    {
      "epoch": 0.019506485906563932,
      "grad_norm": 0.6716726422309875,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.9925,
      "step": 25
    },
    {
      "epoch": 0.019506485906563932,
      "eval_loss": 0.7827931642532349,
      "eval_runtime": 323.3131,
      "eval_samples_per_second": 3.34,
      "eval_steps_per_second": 1.67,
      "step": 25
    },
    {
      "epoch": 0.02028674534282649,
      "grad_norm": 0.7108809351921082,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.8741,
      "step": 26
    },
    {
      "epoch": 0.021067004779089047,
      "grad_norm": 0.7109420299530029,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.9355,
      "step": 27
    },
    {
      "epoch": 0.021847264215351603,
      "grad_norm": 0.6290819048881531,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.8823,
      "step": 28
    },
    {
      "epoch": 0.022627523651614162,
      "grad_norm": 0.6449434161186218,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.8747,
      "step": 29
    },
    {
      "epoch": 0.023407783087876718,
      "grad_norm": 0.7190560102462769,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.9264,
      "step": 30
    },
    {
      "epoch": 0.024188042524139277,
      "grad_norm": 0.6574559807777405,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.8356,
      "step": 31
    },
    {
      "epoch": 0.024968301960401833,
      "grad_norm": 0.6734890937805176,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.9268,
      "step": 32
    },
    {
      "epoch": 0.025748561396664392,
      "grad_norm": 0.7330193519592285,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.8603,
      "step": 33
    },
    {
      "epoch": 0.026528820832926948,
      "grad_norm": 0.7113292813301086,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.8205,
      "step": 34
    },
    {
      "epoch": 0.027309080269189507,
      "grad_norm": 0.6743346452713013,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.8219,
      "step": 35
    },
    {
      "epoch": 0.028089339705452063,
      "grad_norm": 0.6742866039276123,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.8635,
      "step": 36
    },
    {
      "epoch": 0.02886959914171462,
      "grad_norm": 0.7305226922035217,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.818,
      "step": 37
    },
    {
      "epoch": 0.029649858577977178,
      "grad_norm": 0.6999145150184631,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.8282,
      "step": 38
    },
    {
      "epoch": 0.030430118014239733,
      "grad_norm": 0.6912006139755249,
      "learning_rate": 5e-05,
      "loss": 0.7905,
      "step": 39
    },
    {
      "epoch": 0.031210377450502293,
      "grad_norm": 0.7328338623046875,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.8069,
      "step": 40
    },
    {
      "epoch": 0.03199063688676485,
      "grad_norm": 0.7178049087524414,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.7919,
      "step": 41
    },
    {
      "epoch": 0.032770896323027404,
      "grad_norm": 0.7273955941200256,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.8151,
      "step": 42
    },
    {
      "epoch": 0.03355115575928996,
      "grad_norm": 0.7298927307128906,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.8524,
      "step": 43
    },
    {
      "epoch": 0.03433141519555252,
      "grad_norm": 0.740074872970581,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.8144,
      "step": 44
    },
    {
      "epoch": 0.03511167463181508,
      "grad_norm": 0.7409592866897583,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.8217,
      "step": 45
    },
    {
      "epoch": 0.035891934068077634,
      "grad_norm": 0.757175087928772,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.7642,
      "step": 46
    },
    {
      "epoch": 0.03667219350434019,
      "grad_norm": 0.8015655875205994,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.7827,
      "step": 47
    },
    {
      "epoch": 0.03745245294060275,
      "grad_norm": 0.7460522651672363,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.7761,
      "step": 48
    },
    {
      "epoch": 0.038232712376865305,
      "grad_norm": 0.8603819608688354,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.7828,
      "step": 49
    },
    {
      "epoch": 0.039012971813127864,
      "grad_norm": 1.0592442750930786,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.8512,
      "step": 50
    },
    {
      "epoch": 0.039012971813127864,
      "eval_loss": 0.7553926706314087,
      "eval_runtime": 323.1681,
      "eval_samples_per_second": 3.342,
      "eval_steps_per_second": 1.671,
      "step": 50
    },
    {
      "epoch": 0.03979323124939042,
      "grad_norm": 0.2862579822540283,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.507,
      "step": 51
    },
    {
      "epoch": 0.04057349068565298,
      "grad_norm": 0.3461783230304718,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.5549,
      "step": 52
    },
    {
      "epoch": 0.041353750121915535,
      "grad_norm": 0.4043627977371216,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.6321,
      "step": 53
    },
    {
      "epoch": 0.042134009558178094,
      "grad_norm": 0.37553367018699646,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.571,
      "step": 54
    },
    {
      "epoch": 0.04291426899444065,
      "grad_norm": 0.36022651195526123,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.5375,
      "step": 55
    },
    {
      "epoch": 0.043694528430703206,
      "grad_norm": 0.4114338457584381,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.6081,
      "step": 56
    },
    {
      "epoch": 0.044474787866965765,
      "grad_norm": 0.3871912956237793,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.6099,
      "step": 57
    },
    {
      "epoch": 0.045255047303228324,
      "grad_norm": 0.40150532126426697,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.6333,
      "step": 58
    },
    {
      "epoch": 0.04603530673949088,
      "grad_norm": 0.49420806765556335,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.773,
      "step": 59
    },
    {
      "epoch": 0.046815566175753436,
      "grad_norm": 0.42875587940216064,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.5722,
      "step": 60
    },
    {
      "epoch": 0.047595825612015995,
      "grad_norm": 0.4141090512275696,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.6371,
      "step": 61
    },
    {
      "epoch": 0.048376085048278554,
      "grad_norm": 0.42042094469070435,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.6135,
      "step": 62
    },
    {
      "epoch": 0.04915634448454111,
      "grad_norm": 0.4620630145072937,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.7216,
      "step": 63
    },
    {
      "epoch": 0.049936603920803666,
      "grad_norm": 0.47221145033836365,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.6438,
      "step": 64
    },
    {
      "epoch": 0.050716863357066225,
      "grad_norm": 0.4742897152900696,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.7366,
      "step": 65
    },
    {
      "epoch": 0.051497122793328784,
      "grad_norm": 0.4719804525375366,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.6938,
      "step": 66
    },
    {
      "epoch": 0.052277382229591336,
      "grad_norm": 0.49254298210144043,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.7749,
      "step": 67
    },
    {
      "epoch": 0.053057641665853895,
      "grad_norm": 0.521112859249115,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.7219,
      "step": 68
    },
    {
      "epoch": 0.053837901102116455,
      "grad_norm": 0.5138411521911621,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.832,
      "step": 69
    },
    {
      "epoch": 0.054618160538379014,
      "grad_norm": 0.5415810942649841,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.8144,
      "step": 70
    },
    {
      "epoch": 0.055398419974641566,
      "grad_norm": 0.6076228022575378,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.9642,
      "step": 71
    },
    {
      "epoch": 0.056178679410904125,
      "grad_norm": 0.5630006194114685,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.7559,
      "step": 72
    },
    {
      "epoch": 0.056958938847166685,
      "grad_norm": 0.608703076839447,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.8729,
      "step": 73
    },
    {
      "epoch": 0.05773919828342924,
      "grad_norm": 0.569412887096405,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.7729,
      "step": 74
    },
    {
      "epoch": 0.058519457719691796,
      "grad_norm": 0.6440850496292114,
      "learning_rate": 0.0,
      "loss": 0.8444,
      "step": 75
    },
    {
      "epoch": 0.058519457719691796,
      "eval_loss": 0.7578212022781372,
      "eval_runtime": 323.0666,
      "eval_samples_per_second": 3.343,
      "eval_steps_per_second": 1.671,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.163503502056489e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}