{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.004013270547945206,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.351027397260274e-05,
      "grad_norm": 3.4412171840667725,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 9.5299,
      "step": 1
    },
    {
      "epoch": 5.351027397260274e-05,
      "eval_loss": 1.1119320392608643,
      "eval_runtime": 2160.453,
      "eval_samples_per_second": 7.285,
      "eval_steps_per_second": 3.642,
      "step": 1
    },
    {
      "epoch": 0.00010702054794520547,
      "grad_norm": 3.5267574787139893,
      "learning_rate": 6.666666666666667e-05,
      "loss": 8.5445,
      "step": 2
    },
    {
      "epoch": 0.0001605308219178082,
      "grad_norm": 4.200962543487549,
      "learning_rate": 0.0001,
      "loss": 9.5069,
      "step": 3
    },
    {
      "epoch": 0.00021404109589041095,
      "grad_norm": 2.991516351699829,
      "learning_rate": 9.99524110790929e-05,
      "loss": 7.5923,
      "step": 4
    },
    {
      "epoch": 0.0002675513698630137,
      "grad_norm": 4.5141520500183105,
      "learning_rate": 9.980973490458728e-05,
      "loss": 9.3669,
      "step": 5
    },
    {
      "epoch": 0.0003210616438356164,
      "grad_norm": 3.5825226306915283,
      "learning_rate": 9.957224306869053e-05,
      "loss": 8.0533,
      "step": 6
    },
    {
      "epoch": 0.00037457191780821916,
      "grad_norm": 3.585103750228882,
      "learning_rate": 9.924038765061042e-05,
      "loss": 9.2888,
      "step": 7
    },
    {
      "epoch": 0.0004280821917808219,
      "grad_norm": 3.0140938758850098,
      "learning_rate": 9.881480035599667e-05,
      "loss": 7.9157,
      "step": 8
    },
    {
      "epoch": 0.00048159246575342463,
      "grad_norm": 2.723397970199585,
      "learning_rate": 9.829629131445342e-05,
      "loss": 7.173,
      "step": 9
    },
    {
      "epoch": 0.0005351027397260274,
      "grad_norm": 3.36850643157959,
      "learning_rate": 9.768584753741134e-05,
      "loss": 8.088,
      "step": 10
    },
    {
      "epoch": 0.0005886130136986301,
      "grad_norm": 4.041073799133301,
      "learning_rate": 9.698463103929542e-05,
      "loss": 8.5535,
      "step": 11
    },
    {
      "epoch": 0.0006421232876712328,
      "grad_norm": 4.422593116760254,
      "learning_rate": 9.619397662556435e-05,
      "loss": 6.9489,
      "step": 12
    },
    {
      "epoch": 0.0006956335616438356,
      "grad_norm": 4.800517559051514,
      "learning_rate": 9.53153893518325e-05,
      "loss": 7.9887,
      "step": 13
    },
    {
      "epoch": 0.0007491438356164383,
      "grad_norm": 3.9624767303466797,
      "learning_rate": 9.435054165891109e-05,
      "loss": 7.9977,
      "step": 14
    },
    {
      "epoch": 0.000802654109589041,
      "grad_norm": 3.386883497238159,
      "learning_rate": 9.330127018922194e-05,
      "loss": 7.6339,
      "step": 15
    },
    {
      "epoch": 0.0008561643835616438,
      "grad_norm": 4.2820539474487305,
      "learning_rate": 9.21695722906443e-05,
      "loss": 7.1733,
      "step": 16
    },
    {
      "epoch": 0.0009096746575342465,
      "grad_norm": 3.999424934387207,
      "learning_rate": 9.09576022144496e-05,
      "loss": 7.1131,
      "step": 17
    },
    {
      "epoch": 0.0009631849315068493,
      "grad_norm": 3.930689573287964,
      "learning_rate": 8.966766701456177e-05,
      "loss": 7.4491,
      "step": 18
    },
    {
      "epoch": 0.001016695205479452,
      "grad_norm": 3.9529831409454346,
      "learning_rate": 8.83022221559489e-05,
      "loss": 7.2537,
      "step": 19
    },
    {
      "epoch": 0.0010702054794520547,
      "grad_norm": 4.9496307373046875,
      "learning_rate": 8.68638668405062e-05,
      "loss": 7.0658,
      "step": 20
    },
    {
      "epoch": 0.0011237157534246575,
      "grad_norm": 3.3722610473632812,
      "learning_rate": 8.535533905932738e-05,
      "loss": 6.6348,
      "step": 21
    },
    {
      "epoch": 0.0011772260273972602,
      "grad_norm": 3.6756198406219482,
      "learning_rate": 8.377951038078302e-05,
      "loss": 8.3285,
      "step": 22
    },
    {
      "epoch": 0.001230736301369863,
      "grad_norm": 3.552631378173828,
      "learning_rate": 8.213938048432697e-05,
      "loss": 8.7068,
      "step": 23
    },
    {
      "epoch": 0.0012842465753424657,
      "grad_norm": 3.2053675651550293,
      "learning_rate": 8.043807145043604e-05,
      "loss": 7.7342,
      "step": 24
    },
    {
      "epoch": 0.0013377568493150684,
      "grad_norm": 3.083847999572754,
      "learning_rate": 7.86788218175523e-05,
      "loss": 7.9201,
      "step": 25
    },
    {
      "epoch": 0.0013377568493150684,
      "eval_loss": 0.9074388742446899,
      "eval_runtime": 2160.8303,
      "eval_samples_per_second": 7.283,
      "eval_steps_per_second": 3.642,
      "step": 25
    },
    {
      "epoch": 0.0013912671232876712,
      "grad_norm": 3.115147590637207,
      "learning_rate": 7.68649804173412e-05,
      "loss": 5.6922,
      "step": 26
    },
    {
      "epoch": 0.001444777397260274,
      "grad_norm": 3.4430694580078125,
      "learning_rate": 7.500000000000001e-05,
      "loss": 5.9599,
      "step": 27
    },
    {
      "epoch": 0.0014982876712328766,
      "grad_norm": 3.203700542449951,
      "learning_rate": 7.308743066175172e-05,
      "loss": 6.4039,
      "step": 28
    },
    {
      "epoch": 0.0015517979452054794,
      "grad_norm": 3.7065799236297607,
      "learning_rate": 7.113091308703498e-05,
      "loss": 7.1779,
      "step": 29
    },
    {
      "epoch": 0.001605308219178082,
      "grad_norm": 3.401582717895508,
      "learning_rate": 6.91341716182545e-05,
      "loss": 7.5066,
      "step": 30
    },
    {
      "epoch": 0.0016588184931506848,
      "grad_norm": 3.41133975982666,
      "learning_rate": 6.710100716628344e-05,
      "loss": 7.854,
      "step": 31
    },
    {
      "epoch": 0.0017123287671232876,
      "grad_norm": 3.8782904148101807,
      "learning_rate": 6.503528997521366e-05,
      "loss": 8.3734,
      "step": 32
    },
    {
      "epoch": 0.0017658390410958903,
      "grad_norm": 3.4962656497955322,
      "learning_rate": 6.294095225512603e-05,
      "loss": 6.9779,
      "step": 33
    },
    {
      "epoch": 0.001819349315068493,
      "grad_norm": 3.0437915325164795,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 6.3532,
      "step": 34
    },
    {
      "epoch": 0.0018728595890410958,
      "grad_norm": 3.930208921432495,
      "learning_rate": 5.868240888334653e-05,
      "loss": 6.8118,
      "step": 35
    },
    {
      "epoch": 0.0019263698630136985,
      "grad_norm": 3.7718610763549805,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 7.3981,
      "step": 36
    },
    {
      "epoch": 0.0019798801369863015,
      "grad_norm": 3.5606725215911865,
      "learning_rate": 5.435778713738292e-05,
      "loss": 6.9738,
      "step": 37
    },
    {
      "epoch": 0.002033390410958904,
      "grad_norm": 3.5234267711639404,
      "learning_rate": 5.218096936826681e-05,
      "loss": 7.7004,
      "step": 38
    },
    {
      "epoch": 0.002086900684931507,
      "grad_norm": 3.824514389038086,
      "learning_rate": 5e-05,
      "loss": 7.7513,
      "step": 39
    },
    {
      "epoch": 0.0021404109589041095,
      "grad_norm": 4.543697357177734,
      "learning_rate": 4.781903063173321e-05,
      "loss": 8.0452,
      "step": 40
    },
    {
      "epoch": 0.0021939212328767124,
      "grad_norm": 4.090927600860596,
      "learning_rate": 4.564221286261709e-05,
      "loss": 6.377,
      "step": 41
    },
    {
      "epoch": 0.002247431506849315,
      "grad_norm": 3.5463740825653076,
      "learning_rate": 4.347369038899744e-05,
      "loss": 7.5359,
      "step": 42
    },
    {
      "epoch": 0.002300941780821918,
      "grad_norm": 3.0797812938690186,
      "learning_rate": 4.131759111665349e-05,
      "loss": 6.253,
      "step": 43
    },
    {
      "epoch": 0.0023544520547945204,
      "grad_norm": 3.326676368713379,
      "learning_rate": 3.917801930309486e-05,
      "loss": 6.6982,
      "step": 44
    },
    {
      "epoch": 0.0024079623287671234,
      "grad_norm": 2.822622299194336,
      "learning_rate": 3.705904774487396e-05,
      "loss": 6.012,
      "step": 45
    },
    {
      "epoch": 0.002461472602739726,
      "grad_norm": 2.613372802734375,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 5.9406,
      "step": 46
    },
    {
      "epoch": 0.002514982876712329,
      "grad_norm": 3.6091737747192383,
      "learning_rate": 3.289899283371657e-05,
      "loss": 7.7066,
      "step": 47
    },
    {
      "epoch": 0.0025684931506849314,
      "grad_norm": 4.005115032196045,
      "learning_rate": 3.086582838174551e-05,
      "loss": 8.5424,
      "step": 48
    },
    {
      "epoch": 0.0026220034246575343,
      "grad_norm": 3.131537675857544,
      "learning_rate": 2.886908691296504e-05,
      "loss": 6.281,
      "step": 49
    },
    {
      "epoch": 0.002675513698630137,
      "grad_norm": 3.379215717315674,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 6.5812,
      "step": 50
    },
    {
      "epoch": 0.002675513698630137,
      "eval_loss": 0.8749306201934814,
      "eval_runtime": 2159.0999,
      "eval_samples_per_second": 7.289,
      "eval_steps_per_second": 3.645,
      "step": 50
    },
    {
      "epoch": 0.00272902397260274,
      "grad_norm": 3.2952077388763428,
      "learning_rate": 2.500000000000001e-05,
      "loss": 7.3355,
      "step": 51
    },
    {
      "epoch": 0.0027825342465753423,
      "grad_norm": 3.3396549224853516,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 6.6649,
      "step": 52
    },
    {
      "epoch": 0.0028360445205479453,
      "grad_norm": 4.649153232574463,
      "learning_rate": 2.132117818244771e-05,
      "loss": 8.7628,
      "step": 53
    },
    {
      "epoch": 0.002889554794520548,
      "grad_norm": 3.6606626510620117,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 6.8996,
      "step": 54
    },
    {
      "epoch": 0.0029430650684931507,
      "grad_norm": 3.4278030395507812,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 7.3124,
      "step": 55
    },
    {
      "epoch": 0.0029965753424657533,
      "grad_norm": 3.267777681350708,
      "learning_rate": 1.622048961921699e-05,
      "loss": 6.8136,
      "step": 56
    },
    {
      "epoch": 0.003050085616438356,
      "grad_norm": 3.700079917907715,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 6.78,
      "step": 57
    },
    {
      "epoch": 0.0031035958904109587,
      "grad_norm": 3.482595682144165,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 7.8277,
      "step": 58
    },
    {
      "epoch": 0.0031571061643835617,
      "grad_norm": 3.9059576988220215,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 6.7461,
      "step": 59
    },
    {
      "epoch": 0.003210616438356164,
      "grad_norm": 3.3149898052215576,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 7.1818,
      "step": 60
    },
    {
      "epoch": 0.003264126712328767,
      "grad_norm": 2.8718600273132324,
      "learning_rate": 9.042397785550405e-06,
      "loss": 6.8146,
      "step": 61
    },
    {
      "epoch": 0.0033176369863013697,
      "grad_norm": 3.4928603172302246,
      "learning_rate": 7.830427709355725e-06,
      "loss": 5.8661,
      "step": 62
    },
    {
      "epoch": 0.0033711472602739726,
      "grad_norm": 3.289541482925415,
      "learning_rate": 6.698729810778065e-06,
      "loss": 6.6667,
      "step": 63
    },
    {
      "epoch": 0.003424657534246575,
      "grad_norm": 3.2029364109039307,
      "learning_rate": 5.649458341088915e-06,
      "loss": 7.2747,
      "step": 64
    },
    {
      "epoch": 0.003478167808219178,
      "grad_norm": 3.1482632160186768,
      "learning_rate": 4.684610648167503e-06,
      "loss": 7.4577,
      "step": 65
    },
    {
      "epoch": 0.0035316780821917806,
      "grad_norm": 3.4329683780670166,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 7.6697,
      "step": 66
    },
    {
      "epoch": 0.0035851883561643836,
      "grad_norm": 3.0778117179870605,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 7.5335,
      "step": 67
    },
    {
      "epoch": 0.003638698630136986,
      "grad_norm": 3.8979506492614746,
      "learning_rate": 2.314152462588659e-06,
      "loss": 8.1609,
      "step": 68
    },
    {
      "epoch": 0.003692208904109589,
      "grad_norm": 3.739900588989258,
      "learning_rate": 1.70370868554659e-06,
      "loss": 7.5071,
      "step": 69
    },
    {
      "epoch": 0.0037457191780821916,
      "grad_norm": 3.5693302154541016,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 6.7223,
      "step": 70
    },
    {
      "epoch": 0.0037992294520547945,
      "grad_norm": 4.022164821624756,
      "learning_rate": 7.596123493895991e-07,
      "loss": 7.1291,
      "step": 71
    },
    {
      "epoch": 0.003852739726027397,
      "grad_norm": 3.5596697330474854,
      "learning_rate": 4.277569313094809e-07,
      "loss": 6.016,
      "step": 72
    },
    {
      "epoch": 0.00390625,
      "grad_norm": 4.030635356903076,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 7.129,
      "step": 73
    },
    {
      "epoch": 0.003959760273972603,
      "grad_norm": 2.96244740486145,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 6.2029,
      "step": 74
    },
    {
      "epoch": 0.004013270547945206,
      "grad_norm": 3.192884683609009,
      "learning_rate": 0.0,
      "loss": 6.3423,
      "step": 75
    },
    {
      "epoch": 0.004013270547945206,
      "eval_loss": 0.8689998984336853,
      "eval_runtime": 2162.9086,
      "eval_samples_per_second": 7.276,
      "eval_steps_per_second": 3.638,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.442508980701102e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}