{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04324947740214806,
  "eval_steps": 25,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0005766596986953074,
      "grad_norm": 0.5187491774559021,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.8061,
      "step": 1
    },
    {
      "epoch": 0.0005766596986953074,
      "eval_loss": 0.7736253142356873,
      "eval_runtime": 802.3416,
      "eval_samples_per_second": 1.821,
      "eval_steps_per_second": 0.911,
      "step": 1
    },
    {
      "epoch": 0.0011533193973906149,
      "grad_norm": 0.48624253273010254,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.8548,
      "step": 2
    },
    {
      "epoch": 0.0017299790960859224,
      "grad_norm": 0.428561806678772,
      "learning_rate": 0.0001,
      "loss": 0.5315,
      "step": 3
    },
    {
      "epoch": 0.0023066387947812297,
      "grad_norm": 0.43896380066871643,
      "learning_rate": 9.99524110790929e-05,
      "loss": 0.7549,
      "step": 4
    },
    {
      "epoch": 0.0028832984934765373,
      "grad_norm": 0.3388628661632538,
      "learning_rate": 9.980973490458728e-05,
      "loss": 0.655,
      "step": 5
    },
    {
      "epoch": 0.003459958192171845,
      "grad_norm": 0.3486120104789734,
      "learning_rate": 9.957224306869053e-05,
      "loss": 0.6753,
      "step": 6
    },
    {
      "epoch": 0.004036617890867152,
      "grad_norm": 0.37648850679397583,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.6459,
      "step": 7
    },
    {
      "epoch": 0.0046132775895624594,
      "grad_norm": 0.4131505787372589,
      "learning_rate": 9.881480035599667e-05,
      "loss": 0.5957,
      "step": 8
    },
    {
      "epoch": 0.005189937288257767,
      "grad_norm": 0.46566611528396606,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.6302,
      "step": 9
    },
    {
      "epoch": 0.0057665969869530745,
      "grad_norm": 0.3140219449996948,
      "learning_rate": 9.768584753741134e-05,
      "loss": 0.6051,
      "step": 10
    },
    {
      "epoch": 0.006343256685648382,
      "grad_norm": 0.2500462532043457,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.6843,
      "step": 11
    },
    {
      "epoch": 0.00691991638434369,
      "grad_norm": 0.30054011940956116,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.6803,
      "step": 12
    },
    {
      "epoch": 0.007496576083038996,
      "grad_norm": 0.3195454776287079,
      "learning_rate": 9.53153893518325e-05,
      "loss": 0.557,
      "step": 13
    },
    {
      "epoch": 0.008073235781734304,
      "grad_norm": 0.294595867395401,
      "learning_rate": 9.435054165891109e-05,
      "loss": 0.7265,
      "step": 14
    },
    {
      "epoch": 0.008649895480429612,
      "grad_norm": 0.3167280852794647,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.8634,
      "step": 15
    },
    {
      "epoch": 0.009226555179124919,
      "grad_norm": 0.2780252993106842,
      "learning_rate": 9.21695722906443e-05,
      "loss": 0.7195,
      "step": 16
    },
    {
      "epoch": 0.009803214877820226,
      "grad_norm": 0.2810973823070526,
      "learning_rate": 9.09576022144496e-05,
      "loss": 0.6641,
      "step": 17
    },
    {
      "epoch": 0.010379874576515534,
      "grad_norm": 0.2534291446208954,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.7635,
      "step": 18
    },
    {
      "epoch": 0.01095653427521084,
      "grad_norm": 0.25898292660713196,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.6175,
      "step": 19
    },
    {
      "epoch": 0.011533193973906149,
      "grad_norm": 0.2497512251138687,
      "learning_rate": 8.68638668405062e-05,
      "loss": 0.6251,
      "step": 20
    },
    {
      "epoch": 0.012109853672601456,
      "grad_norm": 0.3540191352367401,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7004,
      "step": 21
    },
    {
      "epoch": 0.012686513371296764,
      "grad_norm": 0.25450509786605835,
      "learning_rate": 8.377951038078302e-05,
      "loss": 0.6049,
      "step": 22
    },
    {
      "epoch": 0.01326317306999207,
      "grad_norm": 0.33254560828208923,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.5982,
      "step": 23
    },
    {
      "epoch": 0.01383983276868738,
      "grad_norm": 0.33598387241363525,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.9153,
      "step": 24
    },
    {
      "epoch": 0.014416492467382686,
      "grad_norm": 0.23375222086906433,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.677,
      "step": 25
    },
    {
      "epoch": 0.014416492467382686,
      "eval_loss": 0.670843780040741,
      "eval_runtime": 804.822,
      "eval_samples_per_second": 1.815,
      "eval_steps_per_second": 0.908,
      "step": 25
    },
    {
      "epoch": 0.014993152166077993,
      "grad_norm": 0.3155810236930847,
      "learning_rate": 7.68649804173412e-05,
      "loss": 0.9014,
      "step": 26
    },
    {
      "epoch": 0.015569811864773301,
      "grad_norm": 0.3011918067932129,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.4967,
      "step": 27
    },
    {
      "epoch": 0.016146471563468608,
      "grad_norm": 0.3234626054763794,
      "learning_rate": 7.308743066175172e-05,
      "loss": 0.6157,
      "step": 28
    },
    {
      "epoch": 0.016723131262163914,
      "grad_norm": 0.28305330872535706,
      "learning_rate": 7.113091308703498e-05,
      "loss": 0.6846,
      "step": 29
    },
    {
      "epoch": 0.017299790960859224,
      "grad_norm": 0.2650391161441803,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.6766,
      "step": 30
    },
    {
      "epoch": 0.01787645065955453,
      "grad_norm": 0.2921651005744934,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.4771,
      "step": 31
    },
    {
      "epoch": 0.018453110358249838,
      "grad_norm": 0.26164016127586365,
      "learning_rate": 6.503528997521366e-05,
      "loss": 0.6642,
      "step": 32
    },
    {
      "epoch": 0.019029770056945144,
      "grad_norm": 0.28726956248283386,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.5747,
      "step": 33
    },
    {
      "epoch": 0.01960642975564045,
      "grad_norm": 0.3075750470161438,
      "learning_rate": 6.0821980696905146e-05,
      "loss": 0.5039,
      "step": 34
    },
    {
      "epoch": 0.02018308945433576,
      "grad_norm": 0.2462749481201172,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.5682,
      "step": 35
    },
    {
      "epoch": 0.020759749153031068,
      "grad_norm": 0.3078925311565399,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.6167,
      "step": 36
    },
    {
      "epoch": 0.021336408851726375,
      "grad_norm": 0.30313003063201904,
      "learning_rate": 5.435778713738292e-05,
      "loss": 0.6987,
      "step": 37
    },
    {
      "epoch": 0.02191306855042168,
      "grad_norm": 0.30238619446754456,
      "learning_rate": 5.218096936826681e-05,
      "loss": 0.6085,
      "step": 38
    },
    {
      "epoch": 0.02248972824911699,
      "grad_norm": 0.34323957562446594,
      "learning_rate": 5e-05,
      "loss": 0.6488,
      "step": 39
    },
    {
      "epoch": 0.023066387947812298,
      "grad_norm": 0.3006385862827301,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.5428,
      "step": 40
    },
    {
      "epoch": 0.023643047646507605,
      "grad_norm": 0.28719136118888855,
      "learning_rate": 4.564221286261709e-05,
      "loss": 0.6068,
      "step": 41
    },
    {
      "epoch": 0.02421970734520291,
      "grad_norm": 0.34415391087532043,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.6461,
      "step": 42
    },
    {
      "epoch": 0.024796367043898218,
      "grad_norm": 0.257804811000824,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.6974,
      "step": 43
    },
    {
      "epoch": 0.025373026742593528,
      "grad_norm": 0.3095720112323761,
      "learning_rate": 3.917801930309486e-05,
      "loss": 0.7856,
      "step": 44
    },
    {
      "epoch": 0.025949686441288835,
      "grad_norm": 0.3070933520793915,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.8322,
      "step": 45
    },
    {
      "epoch": 0.02652634613998414,
      "grad_norm": 0.28544628620147705,
      "learning_rate": 3.4964710024786354e-05,
      "loss": 0.591,
      "step": 46
    },
    {
      "epoch": 0.027103005838679448,
      "grad_norm": 0.3209655284881592,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.9191,
      "step": 47
    },
    {
      "epoch": 0.02767966553737476,
      "grad_norm": 0.30374830961227417,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.5097,
      "step": 48
    },
    {
      "epoch": 0.028256325236070065,
      "grad_norm": 0.41248422861099243,
      "learning_rate": 2.886908691296504e-05,
      "loss": 0.6574,
      "step": 49
    },
    {
      "epoch": 0.028832984934765372,
      "grad_norm": 0.3581903576850891,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.8603,
      "step": 50
    },
    {
      "epoch": 0.028832984934765372,
      "eval_loss": 0.6622347831726074,
      "eval_runtime": 804.7816,
      "eval_samples_per_second": 1.815,
      "eval_steps_per_second": 0.908,
      "step": 50
    },
    {
      "epoch": 0.02940964463346068,
      "grad_norm": 0.2843848168849945,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.7885,
      "step": 51
    },
    {
      "epoch": 0.029986304332155985,
      "grad_norm": 0.2316364049911499,
      "learning_rate": 2.3135019582658802e-05,
      "loss": 0.662,
      "step": 52
    },
    {
      "epoch": 0.030562964030851295,
      "grad_norm": 0.2592354118824005,
      "learning_rate": 2.132117818244771e-05,
      "loss": 0.7197,
      "step": 53
    },
    {
      "epoch": 0.031139623729546602,
      "grad_norm": 0.20807218551635742,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.4957,
      "step": 54
    },
    {
      "epoch": 0.03171628342824191,
      "grad_norm": 0.22142092883586884,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.516,
      "step": 55
    },
    {
      "epoch": 0.032292943126937215,
      "grad_norm": 0.2078356295824051,
      "learning_rate": 1.622048961921699e-05,
      "loss": 0.565,
      "step": 56
    },
    {
      "epoch": 0.032869602825632525,
      "grad_norm": 0.24698881804943085,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.9348,
      "step": 57
    },
    {
      "epoch": 0.03344626252432783,
      "grad_norm": 0.19391882419586182,
      "learning_rate": 1.3136133159493802e-05,
      "loss": 0.6557,
      "step": 58
    },
    {
      "epoch": 0.03402292222302314,
      "grad_norm": 0.22842319309711456,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.8331,
      "step": 59
    },
    {
      "epoch": 0.03459958192171845,
      "grad_norm": 0.2654271125793457,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.6458,
      "step": 60
    },
    {
      "epoch": 0.03517624162041375,
      "grad_norm": 0.23442628979682922,
      "learning_rate": 9.042397785550405e-06,
      "loss": 0.5227,
      "step": 61
    },
    {
      "epoch": 0.03575290131910906,
      "grad_norm": 0.24609215557575226,
      "learning_rate": 7.830427709355725e-06,
      "loss": 0.5817,
      "step": 62
    },
    {
      "epoch": 0.036329561017804365,
      "grad_norm": 0.23174376785755157,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.9031,
      "step": 63
    },
    {
      "epoch": 0.036906220716499676,
      "grad_norm": 0.2596435248851776,
      "learning_rate": 5.649458341088915e-06,
      "loss": 0.712,
      "step": 64
    },
    {
      "epoch": 0.037482880415194986,
      "grad_norm": 0.21098394691944122,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.7175,
      "step": 65
    },
    {
      "epoch": 0.03805954011389029,
      "grad_norm": 0.22552695870399475,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.6059,
      "step": 66
    },
    {
      "epoch": 0.0386361998125856,
      "grad_norm": 0.2653113305568695,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.5966,
      "step": 67
    },
    {
      "epoch": 0.0392128595112809,
      "grad_norm": 0.22038479149341583,
      "learning_rate": 2.314152462588659e-06,
      "loss": 0.7124,
      "step": 68
    },
    {
      "epoch": 0.03978951920997621,
      "grad_norm": 0.22684645652770996,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.6121,
      "step": 69
    },
    {
      "epoch": 0.04036617890867152,
      "grad_norm": 0.2504723370075226,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.6044,
      "step": 70
    },
    {
      "epoch": 0.040942838607366826,
      "grad_norm": 0.26648664474487305,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.6483,
      "step": 71
    },
    {
      "epoch": 0.041519498306062136,
      "grad_norm": 0.2237248718738556,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.5942,
      "step": 72
    },
    {
      "epoch": 0.04209615800475744,
      "grad_norm": 0.22325119376182556,
      "learning_rate": 1.9026509541272275e-07,
      "loss": 0.6714,
      "step": 73
    },
    {
      "epoch": 0.04267281770345275,
      "grad_norm": 0.2624514400959015,
      "learning_rate": 4.7588920907110094e-08,
      "loss": 0.7899,
      "step": 74
    },
    {
      "epoch": 0.04324947740214806,
      "grad_norm": 0.2703254520893097,
      "learning_rate": 0.0,
      "loss": 0.5225,
      "step": 75
    },
    {
      "epoch": 0.04324947740214806,
      "eval_loss": 0.6601256728172302,
      "eval_runtime": 803.8545,
      "eval_samples_per_second": 1.817,
      "eval_steps_per_second": 0.909,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 75,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.148511261875241e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}