{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.889763779527559,
  "eval_steps": 5,
  "global_step": 120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015748031496062992,
      "grad_norm": 5.510926246643066,
      "learning_rate": 1.9894179894179895e-05,
      "loss": 0.9249,
      "step": 1
    },
    {
      "epoch": 0.031496062992125984,
      "grad_norm": 8.61505126953125,
      "learning_rate": 1.978835978835979e-05,
      "loss": 0.8445,
      "step": 2
    },
    {
      "epoch": 0.047244094488188976,
      "grad_norm": 7.036591529846191,
      "learning_rate": 1.9682539682539684e-05,
      "loss": 0.9654,
      "step": 3
    },
    {
      "epoch": 0.06299212598425197,
      "grad_norm": 5.803933143615723,
      "learning_rate": 1.9576719576719577e-05,
      "loss": 0.9276,
      "step": 4
    },
    {
      "epoch": 0.07874015748031496,
      "grad_norm": 5.716428756713867,
      "learning_rate": 1.947089947089947e-05,
      "loss": 0.9241,
      "step": 5
    },
    {
      "epoch": 0.07874015748031496,
      "eval_accuracy": 0.5678496868475992,
      "eval_loss": 0.6996241807937622,
      "eval_runtime": 108.7291,
      "eval_samples_per_second": 4.405,
      "eval_steps_per_second": 0.552,
      "step": 5
    },
    {
      "epoch": 0.09448818897637795,
      "grad_norm": 7.55866813659668,
      "learning_rate": 1.9365079365079367e-05,
      "loss": 0.9947,
      "step": 6
    },
    {
      "epoch": 0.11023622047244094,
      "grad_norm": 6.801171779632568,
      "learning_rate": 1.925925925925926e-05,
      "loss": 0.972,
      "step": 7
    },
    {
      "epoch": 0.12598425196850394,
      "grad_norm": 4.845946311950684,
      "learning_rate": 1.9153439153439156e-05,
      "loss": 0.6478,
      "step": 8
    },
    {
      "epoch": 0.14173228346456693,
      "grad_norm": 10.487945556640625,
      "learning_rate": 1.904761904761905e-05,
      "loss": 0.8597,
      "step": 9
    },
    {
      "epoch": 0.15748031496062992,
      "grad_norm": 5.452786445617676,
      "learning_rate": 1.8941798941798943e-05,
      "loss": 0.7708,
      "step": 10
    },
    {
      "epoch": 0.15748031496062992,
      "eval_accuracy": 0.6659707724425887,
      "eval_loss": 0.6283570528030396,
      "eval_runtime": 108.7155,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 10
    },
    {
      "epoch": 0.1732283464566929,
      "grad_norm": 4.522532939910889,
      "learning_rate": 1.8835978835978836e-05,
      "loss": 0.6848,
      "step": 11
    },
    {
      "epoch": 0.1889763779527559,
      "grad_norm": 6.4987688064575195,
      "learning_rate": 1.8730158730158732e-05,
      "loss": 0.6644,
      "step": 12
    },
    {
      "epoch": 0.2047244094488189,
      "grad_norm": 4.2297682762146,
      "learning_rate": 1.8624338624338625e-05,
      "loss": 0.7227,
      "step": 13
    },
    {
      "epoch": 0.2204724409448819,
      "grad_norm": 6.5658063888549805,
      "learning_rate": 1.851851851851852e-05,
      "loss": 0.6991,
      "step": 14
    },
    {
      "epoch": 0.23622047244094488,
      "grad_norm": 6.549685001373291,
      "learning_rate": 1.8412698412698415e-05,
      "loss": 0.7875,
      "step": 15
    },
    {
      "epoch": 0.23622047244094488,
      "eval_accuracy": 0.7244258872651357,
      "eval_loss": 0.5749094486236572,
      "eval_runtime": 108.7121,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 15
    },
    {
      "epoch": 0.25196850393700787,
      "grad_norm": 3.6349198818206787,
      "learning_rate": 1.8306878306878308e-05,
      "loss": 0.5732,
      "step": 16
    },
    {
      "epoch": 0.2677165354330709,
      "grad_norm": 4.741979598999023,
      "learning_rate": 1.82010582010582e-05,
      "loss": 0.5774,
      "step": 17
    },
    {
      "epoch": 0.28346456692913385,
      "grad_norm": 4.751223087310791,
      "learning_rate": 1.8095238095238097e-05,
      "loss": 0.5738,
      "step": 18
    },
    {
      "epoch": 0.2992125984251969,
      "grad_norm": 5.214819431304932,
      "learning_rate": 1.798941798941799e-05,
      "loss": 0.7182,
      "step": 19
    },
    {
      "epoch": 0.31496062992125984,
      "grad_norm": 5.566962718963623,
      "learning_rate": 1.7883597883597884e-05,
      "loss": 0.6575,
      "step": 20
    },
    {
      "epoch": 0.31496062992125984,
      "eval_accuracy": 0.7390396659707724,
      "eval_loss": 0.5360159873962402,
      "eval_runtime": 108.7252,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 20
    },
    {
      "epoch": 0.33070866141732286,
      "grad_norm": 4.060683727264404,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.5976,
      "step": 21
    },
    {
      "epoch": 0.3464566929133858,
      "grad_norm": 5.9868621826171875,
      "learning_rate": 1.7671957671957673e-05,
      "loss": 0.7734,
      "step": 22
    },
    {
      "epoch": 0.36220472440944884,
      "grad_norm": 3.4295496940612793,
      "learning_rate": 1.7566137566137566e-05,
      "loss": 0.5543,
      "step": 23
    },
    {
      "epoch": 0.3779527559055118,
      "grad_norm": 4.587719917297363,
      "learning_rate": 1.7460317460317463e-05,
      "loss": 0.6497,
      "step": 24
    },
    {
      "epoch": 0.3937007874015748,
      "grad_norm": 4.520890235900879,
      "learning_rate": 1.7354497354497356e-05,
      "loss": 0.6802,
      "step": 25
    },
    {
      "epoch": 0.3937007874015748,
      "eval_accuracy": 0.7432150313152401,
      "eval_loss": 0.5086582899093628,
      "eval_runtime": 108.7194,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 25
    },
    {
      "epoch": 0.4094488188976378,
      "grad_norm": 3.653116464614868,
      "learning_rate": 1.724867724867725e-05,
      "loss": 0.4888,
      "step": 26
    },
    {
      "epoch": 0.4251968503937008,
      "grad_norm": 4.042315483093262,
      "learning_rate": 1.7142857142857142e-05,
      "loss": 0.6004,
      "step": 27
    },
    {
      "epoch": 0.4409448818897638,
      "grad_norm": 5.317520618438721,
      "learning_rate": 1.7037037037037038e-05,
      "loss": 0.6253,
      "step": 28
    },
    {
      "epoch": 0.4566929133858268,
      "grad_norm": 3.8642020225524902,
      "learning_rate": 1.693121693121693e-05,
      "loss": 0.5778,
      "step": 29
    },
    {
      "epoch": 0.47244094488188976,
      "grad_norm": 2.3941361904144287,
      "learning_rate": 1.6825396825396828e-05,
      "loss": 0.3982,
      "step": 30
    },
    {
      "epoch": 0.47244094488188976,
      "eval_accuracy": 0.7578288100208769,
      "eval_loss": 0.4889708459377289,
      "eval_runtime": 108.7183,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 30
    },
    {
      "epoch": 0.4881889763779528,
      "grad_norm": 4.1248650550842285,
      "learning_rate": 1.671957671957672e-05,
      "loss": 0.777,
      "step": 31
    },
    {
      "epoch": 0.5039370078740157,
      "grad_norm": 3.369483470916748,
      "learning_rate": 1.6613756613756614e-05,
      "loss": 0.5675,
      "step": 32
    },
    {
      "epoch": 0.5196850393700787,
      "grad_norm": 3.8457119464874268,
      "learning_rate": 1.6507936507936507e-05,
      "loss": 0.6227,
      "step": 33
    },
    {
      "epoch": 0.5354330708661418,
      "grad_norm": 4.809354782104492,
      "learning_rate": 1.6402116402116404e-05,
      "loss": 0.7111,
      "step": 34
    },
    {
      "epoch": 0.5511811023622047,
      "grad_norm": 2.84769868850708,
      "learning_rate": 1.6296296296296297e-05,
      "loss": 0.4555,
      "step": 35
    },
    {
      "epoch": 0.5511811023622047,
      "eval_accuracy": 0.7599164926931107,
      "eval_loss": 0.4774630665779114,
      "eval_runtime": 108.7145,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 35
    },
    {
      "epoch": 0.5669291338582677,
      "grad_norm": 4.496406555175781,
      "learning_rate": 1.6190476190476193e-05,
      "loss": 0.6703,
      "step": 36
    },
    {
      "epoch": 0.5826771653543307,
      "grad_norm": 5.721245288848877,
      "learning_rate": 1.6084656084656086e-05,
      "loss": 0.7066,
      "step": 37
    },
    {
      "epoch": 0.5984251968503937,
      "grad_norm": 4.494580268859863,
      "learning_rate": 1.597883597883598e-05,
      "loss": 0.4907,
      "step": 38
    },
    {
      "epoch": 0.6141732283464567,
      "grad_norm": 2.8905560970306396,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.5501,
      "step": 39
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 9.776362419128418,
      "learning_rate": 1.576719576719577e-05,
      "loss": 0.8838,
      "step": 40
    },
    {
      "epoch": 0.6299212598425197,
      "eval_accuracy": 0.7661795407098121,
      "eval_loss": 0.46829721331596375,
      "eval_runtime": 108.7189,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 40
    },
    {
      "epoch": 0.6456692913385826,
      "grad_norm": 3.8481881618499756,
      "learning_rate": 1.5661375661375662e-05,
      "loss": 0.5309,
      "step": 41
    },
    {
      "epoch": 0.6614173228346457,
      "grad_norm": 6.0327839851379395,
      "learning_rate": 1.555555555555556e-05,
      "loss": 0.6414,
      "step": 42
    },
    {
      "epoch": 0.6771653543307087,
      "grad_norm": 4.993657112121582,
      "learning_rate": 1.544973544973545e-05,
      "loss": 0.5727,
      "step": 43
    },
    {
      "epoch": 0.6929133858267716,
      "grad_norm": 4.3265252113342285,
      "learning_rate": 1.5343915343915344e-05,
      "loss": 0.4913,
      "step": 44
    },
    {
      "epoch": 0.7086614173228346,
      "grad_norm": 3.6012353897094727,
      "learning_rate": 1.523809523809524e-05,
      "loss": 0.4692,
      "step": 45
    },
    {
      "epoch": 0.7086614173228346,
      "eval_accuracy": 0.7661795407098121,
      "eval_loss": 0.4610559344291687,
      "eval_runtime": 108.7229,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 45
    },
    {
      "epoch": 0.7244094488188977,
      "grad_norm": 4.319406509399414,
      "learning_rate": 1.5132275132275134e-05,
      "loss": 0.5203,
      "step": 46
    },
    {
      "epoch": 0.7401574803149606,
      "grad_norm": 3.885263442993164,
      "learning_rate": 1.5026455026455027e-05,
      "loss": 0.5084,
      "step": 47
    },
    {
      "epoch": 0.7559055118110236,
      "grad_norm": 3.547327995300293,
      "learning_rate": 1.4920634920634922e-05,
      "loss": 0.442,
      "step": 48
    },
    {
      "epoch": 0.7716535433070866,
      "grad_norm": 3.8868982791900635,
      "learning_rate": 1.4814814814814815e-05,
      "loss": 0.5848,
      "step": 49
    },
    {
      "epoch": 0.7874015748031497,
      "grad_norm": 2.222346544265747,
      "learning_rate": 1.470899470899471e-05,
      "loss": 0.5455,
      "step": 50
    },
    {
      "epoch": 0.7874015748031497,
      "eval_accuracy": 0.7620041753653445,
      "eval_loss": 0.4531377851963043,
      "eval_runtime": 108.7528,
      "eval_samples_per_second": 4.404,
      "eval_steps_per_second": 0.552,
      "step": 50
    },
    {
      "epoch": 0.8031496062992126,
      "grad_norm": 3.129575252532959,
      "learning_rate": 1.4603174603174603e-05,
      "loss": 0.4861,
      "step": 51
    },
    {
      "epoch": 0.8188976377952756,
      "grad_norm": 4.924710750579834,
      "learning_rate": 1.44973544973545e-05,
      "loss": 0.5782,
      "step": 52
    },
    {
      "epoch": 0.8346456692913385,
      "grad_norm": 5.2157182693481445,
      "learning_rate": 1.4391534391534392e-05,
      "loss": 0.7203,
      "step": 53
    },
    {
      "epoch": 0.8503937007874016,
      "grad_norm": 4.697371959686279,
      "learning_rate": 1.4285714285714287e-05,
      "loss": 0.4261,
      "step": 54
    },
    {
      "epoch": 0.8661417322834646,
      "grad_norm": 2.8899056911468506,
      "learning_rate": 1.417989417989418e-05,
      "loss": 0.5696,
      "step": 55
    },
    {
      "epoch": 0.8661417322834646,
      "eval_accuracy": 0.7661795407098121,
      "eval_loss": 0.4459321200847626,
      "eval_runtime": 108.7951,
      "eval_samples_per_second": 4.403,
      "eval_steps_per_second": 0.551,
      "step": 55
    },
    {
      "epoch": 0.8818897637795275,
      "grad_norm": 4.532041072845459,
      "learning_rate": 1.4074074074074075e-05,
      "loss": 0.5723,
      "step": 56
    },
    {
      "epoch": 0.8976377952755905,
      "grad_norm": 2.3436343669891357,
      "learning_rate": 1.3968253968253968e-05,
      "loss": 0.3629,
      "step": 57
    },
    {
      "epoch": 0.9133858267716536,
      "grad_norm": 3.333158493041992,
      "learning_rate": 1.3862433862433865e-05,
      "loss": 0.5433,
      "step": 58
    },
    {
      "epoch": 0.9291338582677166,
      "grad_norm": 4.177884101867676,
      "learning_rate": 1.3756613756613758e-05,
      "loss": 0.3747,
      "step": 59
    },
    {
      "epoch": 0.9448818897637795,
      "grad_norm": 5.238712310791016,
      "learning_rate": 1.3650793650793652e-05,
      "loss": 0.7453,
      "step": 60
    },
    {
      "epoch": 0.9448818897637795,
      "eval_accuracy": 0.7766179540709812,
      "eval_loss": 0.4413756728172302,
      "eval_runtime": 108.7463,
      "eval_samples_per_second": 4.405,
      "eval_steps_per_second": 0.552,
      "step": 60
    },
    {
      "epoch": 0.9606299212598425,
      "grad_norm": 4.022979736328125,
      "learning_rate": 1.3544973544973545e-05,
      "loss": 0.6177,
      "step": 61
    },
    {
      "epoch": 0.9763779527559056,
      "grad_norm": 2.0528969764709473,
      "learning_rate": 1.343915343915344e-05,
      "loss": 0.3505,
      "step": 62
    },
    {
      "epoch": 0.9921259842519685,
      "grad_norm": 3.9705586433410645,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.5858,
      "step": 63
    },
    {
      "epoch": 1.0078740157480315,
      "grad_norm": 8.341585159301758,
      "learning_rate": 1.322751322751323e-05,
      "loss": 0.6721,
      "step": 64
    },
    {
      "epoch": 1.0236220472440944,
      "grad_norm": 4.031370162963867,
      "learning_rate": 1.3121693121693123e-05,
      "loss": 0.5369,
      "step": 65
    },
    {
      "epoch": 1.0236220472440944,
      "eval_accuracy": 0.7828810020876826,
      "eval_loss": 0.43705105781555176,
      "eval_runtime": 108.7278,
      "eval_samples_per_second": 4.405,
      "eval_steps_per_second": 0.552,
      "step": 65
    },
    {
      "epoch": 1.0393700787401574,
      "grad_norm": 2.898926019668579,
      "learning_rate": 1.3015873015873018e-05,
      "loss": 0.3628,
      "step": 66
    },
    {
      "epoch": 1.0551181102362204,
      "grad_norm": 2.9200918674468994,
      "learning_rate": 1.291005291005291e-05,
      "loss": 0.3311,
      "step": 67
    },
    {
      "epoch": 1.0708661417322836,
      "grad_norm": 4.506103992462158,
      "learning_rate": 1.2804232804232805e-05,
      "loss": 0.5813,
      "step": 68
    },
    {
      "epoch": 1.0866141732283465,
      "grad_norm": 4.187809944152832,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 0.4802,
      "step": 69
    },
    {
      "epoch": 1.1023622047244095,
      "grad_norm": 3.5520920753479004,
      "learning_rate": 1.2592592592592593e-05,
      "loss": 0.3994,
      "step": 70
    },
    {
      "epoch": 1.1023622047244095,
      "eval_accuracy": 0.7849686847599165,
      "eval_loss": 0.43335652351379395,
      "eval_runtime": 108.738,
      "eval_samples_per_second": 4.405,
      "eval_steps_per_second": 0.552,
      "step": 70
    },
    {
      "epoch": 1.1181102362204725,
      "grad_norm": 3.6081998348236084,
      "learning_rate": 1.2486772486772486e-05,
      "loss": 0.5266,
      "step": 71
    },
    {
      "epoch": 1.1338582677165354,
      "grad_norm": 3.6554276943206787,
      "learning_rate": 1.2380952380952383e-05,
      "loss": 0.5231,
      "step": 72
    },
    {
      "epoch": 1.1496062992125984,
      "grad_norm": 3.551367998123169,
      "learning_rate": 1.2275132275132276e-05,
      "loss": 0.4538,
      "step": 73
    },
    {
      "epoch": 1.1653543307086613,
      "grad_norm": 4.252958297729492,
      "learning_rate": 1.216931216931217e-05,
      "loss": 0.4688,
      "step": 74
    },
    {
      "epoch": 1.1811023622047245,
      "grad_norm": 4.337672710418701,
      "learning_rate": 1.2063492063492064e-05,
      "loss": 0.4235,
      "step": 75
    },
    {
      "epoch": 1.1811023622047245,
      "eval_accuracy": 0.791231732776618,
      "eval_loss": 0.429808109998703,
      "eval_runtime": 108.723,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 75
    },
    {
      "epoch": 1.1968503937007875,
      "grad_norm": 2.607356548309326,
      "learning_rate": 1.1957671957671959e-05,
      "loss": 0.3639,
      "step": 76
    },
    {
      "epoch": 1.2125984251968505,
      "grad_norm": 3.198551654815674,
      "learning_rate": 1.1851851851851852e-05,
      "loss": 0.4066,
      "step": 77
    },
    {
      "epoch": 1.2283464566929134,
      "grad_norm": 4.820532321929932,
      "learning_rate": 1.1746031746031748e-05,
      "loss": 0.5906,
      "step": 78
    },
    {
      "epoch": 1.2440944881889764,
      "grad_norm": 3.5706419944763184,
      "learning_rate": 1.1640211640211641e-05,
      "loss": 0.5065,
      "step": 79
    },
    {
      "epoch": 1.2598425196850394,
      "grad_norm": 4.763455867767334,
      "learning_rate": 1.1534391534391536e-05,
      "loss": 0.4811,
      "step": 80
    },
    {
      "epoch": 1.2598425196850394,
      "eval_accuracy": 0.791231732776618,
      "eval_loss": 0.4265703856945038,
      "eval_runtime": 108.7158,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 80
    },
    {
      "epoch": 1.2755905511811023,
      "grad_norm": 5.053676605224609,
      "learning_rate": 1.1428571428571429e-05,
      "loss": 0.5317,
      "step": 81
    },
    {
      "epoch": 1.2913385826771653,
      "grad_norm": 4.484920024871826,
      "learning_rate": 1.1322751322751324e-05,
      "loss": 0.5474,
      "step": 82
    },
    {
      "epoch": 1.3070866141732282,
      "grad_norm": 4.059377193450928,
      "learning_rate": 1.1216931216931217e-05,
      "loss": 0.4936,
      "step": 83
    },
    {
      "epoch": 1.3228346456692912,
      "grad_norm": 4.017063617706299,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.3574,
      "step": 84
    },
    {
      "epoch": 1.3385826771653544,
      "grad_norm": 3.275650978088379,
      "learning_rate": 1.1005291005291006e-05,
      "loss": 0.5072,
      "step": 85
    },
    {
      "epoch": 1.3385826771653544,
      "eval_accuracy": 0.791231732776618,
      "eval_loss": 0.4252822697162628,
      "eval_runtime": 108.7176,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 85
    },
    {
      "epoch": 1.3543307086614174,
      "grad_norm": 5.258458614349365,
      "learning_rate": 1.0899470899470901e-05,
      "loss": 0.6735,
      "step": 86
    },
    {
      "epoch": 1.3700787401574803,
      "grad_norm": 3.070061445236206,
      "learning_rate": 1.0793650793650794e-05,
      "loss": 0.4365,
      "step": 87
    },
    {
      "epoch": 1.3858267716535433,
      "grad_norm": 3.556974172592163,
      "learning_rate": 1.0687830687830689e-05,
      "loss": 0.5113,
      "step": 88
    },
    {
      "epoch": 1.4015748031496063,
      "grad_norm": 2.5072743892669678,
      "learning_rate": 1.0582010582010582e-05,
      "loss": 0.286,
      "step": 89
    },
    {
      "epoch": 1.4173228346456692,
      "grad_norm": 4.407125949859619,
      "learning_rate": 1.0476190476190477e-05,
      "loss": 0.4405,
      "step": 90
    },
    {
      "epoch": 1.4173228346456692,
      "eval_accuracy": 0.7849686847599165,
      "eval_loss": 0.42280957102775574,
      "eval_runtime": 108.7146,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 90
    },
    {
      "epoch": 1.4330708661417324,
      "grad_norm": 6.310215473175049,
      "learning_rate": 1.037037037037037e-05,
      "loss": 0.6205,
      "step": 91
    },
    {
      "epoch": 1.4488188976377954,
      "grad_norm": 3.586291551589966,
      "learning_rate": 1.0264550264550266e-05,
      "loss": 0.4571,
      "step": 92
    },
    {
      "epoch": 1.4645669291338583,
      "grad_norm": 4.950135707855225,
      "learning_rate": 1.015873015873016e-05,
      "loss": 0.7159,
      "step": 93
    },
    {
      "epoch": 1.4803149606299213,
      "grad_norm": 2.9908485412597656,
      "learning_rate": 1.0052910052910054e-05,
      "loss": 0.4769,
      "step": 94
    },
    {
      "epoch": 1.4960629921259843,
      "grad_norm": 4.945335865020752,
      "learning_rate": 9.947089947089947e-06,
      "loss": 0.5349,
      "step": 95
    },
    {
      "epoch": 1.4960629921259843,
      "eval_accuracy": 0.7870563674321504,
      "eval_loss": 0.41962236166000366,
      "eval_runtime": 108.7194,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 95
    },
    {
      "epoch": 1.5118110236220472,
      "grad_norm": 4.648383140563965,
      "learning_rate": 9.841269841269842e-06,
      "loss": 0.5035,
      "step": 96
    },
    {
      "epoch": 1.5275590551181102,
      "grad_norm": 4.447684288024902,
      "learning_rate": 9.735449735449735e-06,
      "loss": 0.5128,
      "step": 97
    },
    {
      "epoch": 1.5433070866141732,
      "grad_norm": 3.652973175048828,
      "learning_rate": 9.62962962962963e-06,
      "loss": 0.3454,
      "step": 98
    },
    {
      "epoch": 1.5590551181102361,
      "grad_norm": 3.083529472351074,
      "learning_rate": 9.523809523809525e-06,
      "loss": 0.4522,
      "step": 99
    },
    {
      "epoch": 1.574803149606299,
      "grad_norm": 2.6377124786376953,
      "learning_rate": 9.417989417989418e-06,
      "loss": 0.3342,
      "step": 100
    },
    {
      "epoch": 1.574803149606299,
      "eval_accuracy": 0.7828810020876826,
      "eval_loss": 0.4169768989086151,
      "eval_runtime": 108.7386,
      "eval_samples_per_second": 4.405,
      "eval_steps_per_second": 0.552,
      "step": 100
    },
    {
      "epoch": 1.590551181102362,
      "grad_norm": 5.530861854553223,
      "learning_rate": 9.312169312169313e-06,
      "loss": 0.6154,
      "step": 101
    },
    {
      "epoch": 1.6062992125984252,
      "grad_norm": 2.849217176437378,
      "learning_rate": 9.206349206349207e-06,
      "loss": 0.3823,
      "step": 102
    },
    {
      "epoch": 1.6220472440944882,
      "grad_norm": 2.8741447925567627,
      "learning_rate": 9.1005291005291e-06,
      "loss": 0.2884,
      "step": 103
    },
    {
      "epoch": 1.6377952755905512,
      "grad_norm": 5.099402904510498,
      "learning_rate": 8.994708994708995e-06,
      "loss": 0.4426,
      "step": 104
    },
    {
      "epoch": 1.6535433070866141,
      "grad_norm": 3.130911350250244,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.5271,
      "step": 105
    },
    {
      "epoch": 1.6535433070866141,
      "eval_accuracy": 0.7933194154488518,
      "eval_loss": 0.41489914059638977,
      "eval_runtime": 108.7149,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 105
    },
    {
      "epoch": 1.6692913385826773,
      "grad_norm": 3.4253950119018555,
      "learning_rate": 8.783068783068783e-06,
      "loss": 0.4482,
      "step": 106
    },
    {
      "epoch": 1.6850393700787403,
      "grad_norm": 3.2515480518341064,
      "learning_rate": 8.677248677248678e-06,
      "loss": 0.5227,
      "step": 107
    },
    {
      "epoch": 1.7007874015748032,
      "grad_norm": 3.6166484355926514,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.4545,
      "step": 108
    },
    {
      "epoch": 1.7165354330708662,
      "grad_norm": 2.7220921516418457,
      "learning_rate": 8.465608465608466e-06,
      "loss": 0.3609,
      "step": 109
    },
    {
      "epoch": 1.7322834645669292,
      "grad_norm": 2.6449429988861084,
      "learning_rate": 8.35978835978836e-06,
      "loss": 0.3463,
      "step": 110
    },
    {
      "epoch": 1.7322834645669292,
      "eval_accuracy": 0.7974947807933194,
      "eval_loss": 0.41358983516693115,
      "eval_runtime": 108.7211,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 110
    },
    {
      "epoch": 1.7480314960629921,
      "grad_norm": 3.9665286540985107,
      "learning_rate": 8.253968253968254e-06,
      "loss": 0.5416,
      "step": 111
    },
    {
      "epoch": 1.763779527559055,
      "grad_norm": 3.658632516860962,
      "learning_rate": 8.148148148148148e-06,
      "loss": 0.423,
      "step": 112
    },
    {
      "epoch": 1.779527559055118,
      "grad_norm": 3.2784206867218018,
      "learning_rate": 8.042328042328043e-06,
      "loss": 0.4253,
      "step": 113
    },
    {
      "epoch": 1.795275590551181,
      "grad_norm": 2.654160737991333,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.4836,
      "step": 114
    },
    {
      "epoch": 1.811023622047244,
      "grad_norm": 4.960519313812256,
      "learning_rate": 7.830687830687831e-06,
      "loss": 0.4867,
      "step": 115
    },
    {
      "epoch": 1.811023622047244,
      "eval_accuracy": 0.7995824634655533,
      "eval_loss": 0.4127565622329712,
      "eval_runtime": 108.7361,
      "eval_samples_per_second": 4.405,
      "eval_steps_per_second": 0.552,
      "step": 115
    },
    {
      "epoch": 1.826771653543307,
      "grad_norm": 2.9885411262512207,
      "learning_rate": 7.724867724867726e-06,
      "loss": 0.3218,
      "step": 116
    },
    {
      "epoch": 1.84251968503937,
      "grad_norm": 3.868762254714966,
      "learning_rate": 7.61904761904762e-06,
      "loss": 0.5335,
      "step": 117
    },
    {
      "epoch": 1.858267716535433,
      "grad_norm": 3.111746072769165,
      "learning_rate": 7.5132275132275136e-06,
      "loss": 0.4498,
      "step": 118
    },
    {
      "epoch": 1.874015748031496,
      "grad_norm": 3.94144868850708,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 0.3923,
      "step": 119
    },
    {
      "epoch": 1.889763779527559,
      "grad_norm": 3.6796834468841553,
      "learning_rate": 7.301587301587301e-06,
      "loss": 0.3221,
      "step": 120
    },
    {
      "epoch": 1.889763779527559,
      "eval_accuracy": 0.7995824634655533,
      "eval_loss": 0.4124543368816376,
      "eval_runtime": 108.7265,
      "eval_samples_per_second": 4.406,
      "eval_steps_per_second": 0.552,
      "step": 120
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 189,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 20,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}