{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 36.8,
  "eval_steps": 20,
  "global_step": 92,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4,
      "learning_rate": 2e-05,
      "loss": 1.2516,
      "step": 1
    },
    {
      "epoch": 0.8,
      "learning_rate": 4e-05,
      "loss": 1.2581,
      "step": 2
    },
    {
      "epoch": 1.2,
      "learning_rate": 6e-05,
      "loss": 1.2684,
      "step": 3
    },
    {
      "epoch": 1.6,
      "learning_rate": 8e-05,
      "loss": 1.2529,
      "step": 4
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.0001,
      "loss": 1.2441,
      "step": 5
    },
    {
      "epoch": 2.4,
      "learning_rate": 0.00012,
      "loss": 1.2166,
      "step": 6
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.00014,
      "loss": 1.225,
      "step": 7
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.00016,
      "loss": 1.1785,
      "step": 8
    },
    {
      "epoch": 3.6,
      "learning_rate": 0.00018,
      "loss": 1.1504,
      "step": 9
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0002,
      "loss": 1.1148,
      "step": 10
    },
    {
      "epoch": 4.4,
      "learning_rate": 0.0001999390827019096,
      "loss": 1.0771,
      "step": 11
    },
    {
      "epoch": 4.8,
      "learning_rate": 0.00019975640502598244,
      "loss": 1.0768,
      "step": 12
    },
    {
      "epoch": 5.2,
      "learning_rate": 0.00019945218953682734,
      "loss": 1.0422,
      "step": 13
    },
    {
      "epoch": 5.6,
      "learning_rate": 0.00019902680687415705,
      "loss": 1.0305,
      "step": 14
    },
    {
      "epoch": 6.0,
      "learning_rate": 0.00019848077530122083,
      "loss": 1.0148,
      "step": 15
    },
    {
      "epoch": 6.4,
      "learning_rate": 0.00019781476007338058,
      "loss": 1.0024,
      "step": 16
    },
    {
      "epoch": 6.8,
      "learning_rate": 0.00019702957262759965,
      "loss": 0.9914,
      "step": 17
    },
    {
      "epoch": 7.2,
      "learning_rate": 0.0001961261695938319,
      "loss": 0.9746,
      "step": 18
    },
    {
      "epoch": 7.6,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.9764,
      "step": 19
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.00019396926207859084,
      "loss": 0.9548,
      "step": 20
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.9240104556083679,
      "eval_runtime": 1.9642,
      "eval_samples_per_second": 10.182,
      "eval_steps_per_second": 2.036,
      "step": 20
    },
    {
      "epoch": 8.4,
      "learning_rate": 0.00019271838545667876,
      "loss": 0.9726,
      "step": 21
    },
    {
      "epoch": 8.8,
      "learning_rate": 0.0001913545457642601,
      "loss": 0.9381,
      "step": 22
    },
    {
      "epoch": 9.2,
      "learning_rate": 0.0001898794046299167,
      "loss": 0.9339,
      "step": 23
    },
    {
      "epoch": 9.6,
      "learning_rate": 0.00018829475928589271,
      "loss": 0.9256,
      "step": 24
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.00018660254037844388,
      "loss": 0.9266,
      "step": 25
    },
    {
      "epoch": 10.4,
      "learning_rate": 0.0001848048096156426,
      "loss": 0.9146,
      "step": 26
    },
    {
      "epoch": 10.8,
      "learning_rate": 0.00018290375725550417,
      "loss": 0.9155,
      "step": 27
    },
    {
      "epoch": 11.2,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.8992,
      "step": 28
    },
    {
      "epoch": 11.6,
      "learning_rate": 0.00017880107536067218,
      "loss": 0.8951,
      "step": 29
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.0001766044443118978,
      "loss": 0.8855,
      "step": 30
    },
    {
      "epoch": 12.4,
      "learning_rate": 0.00017431448254773944,
      "loss": 0.8901,
      "step": 31
    },
    {
      "epoch": 12.8,
      "learning_rate": 0.0001719339800338651,
      "loss": 0.8704,
      "step": 32
    },
    {
      "epoch": 13.2,
      "learning_rate": 0.00016946583704589973,
      "loss": 0.869,
      "step": 33
    },
    {
      "epoch": 13.6,
      "learning_rate": 0.00016691306063588583,
      "loss": 0.864,
      "step": 34
    },
    {
      "epoch": 14.0,
      "learning_rate": 0.00016427876096865394,
      "loss": 0.862,
      "step": 35
    },
    {
      "epoch": 14.4,
      "learning_rate": 0.0001615661475325658,
      "loss": 0.8577,
      "step": 36
    },
    {
      "epoch": 14.8,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.8544,
      "step": 37
    },
    {
      "epoch": 15.2,
      "learning_rate": 0.0001559192903470747,
      "loss": 0.8467,
      "step": 38
    },
    {
      "epoch": 15.6,
      "learning_rate": 0.0001529919264233205,
      "loss": 0.8379,
      "step": 39
    },
    {
      "epoch": 16.0,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.8514,
      "step": 40
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.8522964119911194,
      "eval_runtime": 1.9623,
      "eval_samples_per_second": 10.192,
      "eval_steps_per_second": 2.038,
      "step": 40
    },
    {
      "epoch": 16.4,
      "learning_rate": 0.00014694715627858908,
      "loss": 0.8333,
      "step": 41
    },
    {
      "epoch": 16.8,
      "learning_rate": 0.00014383711467890774,
      "loss": 0.8379,
      "step": 42
    },
    {
      "epoch": 17.2,
      "learning_rate": 0.00014067366430758004,
      "loss": 0.8278,
      "step": 43
    },
    {
      "epoch": 17.6,
      "learning_rate": 0.00013746065934159123,
      "loss": 0.8273,
      "step": 44
    },
    {
      "epoch": 18.0,
      "learning_rate": 0.00013420201433256689,
      "loss": 0.8382,
      "step": 45
    },
    {
      "epoch": 18.4,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.8208,
      "step": 46
    },
    {
      "epoch": 18.8,
      "learning_rate": 0.0001275637355816999,
      "loss": 0.8203,
      "step": 47
    },
    {
      "epoch": 19.2,
      "learning_rate": 0.00012419218955996676,
      "loss": 0.8118,
      "step": 48
    },
    {
      "epoch": 19.6,
      "learning_rate": 0.00012079116908177593,
      "loss": 0.8175,
      "step": 49
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.8059,
      "step": 50
    },
    {
      "epoch": 20.4,
      "learning_rate": 0.00011391731009600654,
      "loss": 0.8114,
      "step": 51
    },
    {
      "epoch": 20.8,
      "learning_rate": 0.00011045284632676536,
      "loss": 0.8004,
      "step": 52
    },
    {
      "epoch": 21.2,
      "learning_rate": 0.00010697564737441252,
      "loss": 0.8026,
      "step": 53
    },
    {
      "epoch": 21.6,
      "learning_rate": 0.00010348994967025012,
      "loss": 0.7943,
      "step": 54
    },
    {
      "epoch": 22.0,
      "learning_rate": 0.0001,
      "loss": 0.79,
      "step": 55
    },
    {
      "epoch": 22.4,
      "learning_rate": 9.651005032974994e-05,
      "loss": 0.7952,
      "step": 56
    },
    {
      "epoch": 22.8,
      "learning_rate": 9.302435262558747e-05,
      "loss": 0.7898,
      "step": 57
    },
    {
      "epoch": 23.2,
      "learning_rate": 8.954715367323468e-05,
      "loss": 0.7769,
      "step": 58
    },
    {
      "epoch": 23.6,
      "learning_rate": 8.608268990399349e-05,
      "loss": 0.7812,
      "step": 59
    },
    {
      "epoch": 24.0,
      "learning_rate": 8.263518223330697e-05,
      "loss": 0.7774,
      "step": 60
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.8498085141181946,
      "eval_runtime": 1.9653,
      "eval_samples_per_second": 10.177,
      "eval_steps_per_second": 2.035,
      "step": 60
    },
    {
      "epoch": 24.4,
      "learning_rate": 7.920883091822408e-05,
      "loss": 0.7651,
      "step": 61
    },
    {
      "epoch": 24.8,
      "learning_rate": 7.580781044003324e-05,
      "loss": 0.778,
      "step": 62
    },
    {
      "epoch": 25.2,
      "learning_rate": 7.243626441830009e-05,
      "loss": 0.772,
      "step": 63
    },
    {
      "epoch": 25.6,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.767,
      "step": 64
    },
    {
      "epoch": 26.0,
      "learning_rate": 6.579798566743314e-05,
      "loss": 0.7569,
      "step": 65
    },
    {
      "epoch": 26.4,
      "learning_rate": 6.25393406584088e-05,
      "loss": 0.7554,
      "step": 66
    },
    {
      "epoch": 26.8,
      "learning_rate": 5.9326335692419995e-05,
      "loss": 0.76,
      "step": 67
    },
    {
      "epoch": 27.2,
      "learning_rate": 5.616288532109225e-05,
      "loss": 0.7554,
      "step": 68
    },
    {
      "epoch": 27.6,
      "learning_rate": 5.305284372141095e-05,
      "loss": 0.748,
      "step": 69
    },
    {
      "epoch": 28.0,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.7527,
      "step": 70
    },
    {
      "epoch": 28.4,
      "learning_rate": 4.700807357667952e-05,
      "loss": 0.7595,
      "step": 71
    },
    {
      "epoch": 28.8,
      "learning_rate": 4.4080709652925336e-05,
      "loss": 0.7394,
      "step": 72
    },
    {
      "epoch": 29.2,
      "learning_rate": 4.12214747707527e-05,
      "loss": 0.7348,
      "step": 73
    },
    {
      "epoch": 29.6,
      "learning_rate": 3.843385246743417e-05,
      "loss": 0.7497,
      "step": 74
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.7305,
      "step": 75
    },
    {
      "epoch": 30.4,
      "learning_rate": 3.308693936411421e-05,
      "loss": 0.7317,
      "step": 76
    },
    {
      "epoch": 30.8,
      "learning_rate": 3.053416295410026e-05,
      "loss": 0.7329,
      "step": 77
    },
    {
      "epoch": 31.2,
      "learning_rate": 2.8066019966134904e-05,
      "loss": 0.7494,
      "step": 78
    },
    {
      "epoch": 31.6,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 0.7306,
      "step": 79
    },
    {
      "epoch": 32.0,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.7178,
      "step": 80
    },
    {
      "epoch": 32.0,
      "eval_loss": 0.8597297072410583,
      "eval_runtime": 1.9653,
      "eval_samples_per_second": 10.177,
      "eval_steps_per_second": 2.035,
      "step": 80
    },
    {
      "epoch": 32.4,
      "learning_rate": 2.119892463932781e-05,
      "loss": 0.7243,
      "step": 81
    },
    {
      "epoch": 32.8,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.7393,
      "step": 82
    },
    {
      "epoch": 33.2,
      "learning_rate": 1.7096242744495837e-05,
      "loss": 0.7148,
      "step": 83
    },
    {
      "epoch": 33.6,
      "learning_rate": 1.5195190384357404e-05,
      "loss": 0.7261,
      "step": 84
    },
    {
      "epoch": 34.0,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.7228,
      "step": 85
    },
    {
      "epoch": 34.4,
      "learning_rate": 1.1705240714107302e-05,
      "loss": 0.7194,
      "step": 86
    },
    {
      "epoch": 34.8,
      "learning_rate": 1.0120595370083318e-05,
      "loss": 0.7252,
      "step": 87
    },
    {
      "epoch": 35.2,
      "learning_rate": 8.645454235739903e-06,
      "loss": 0.7188,
      "step": 88
    },
    {
      "epoch": 35.6,
      "learning_rate": 7.281614543321269e-06,
      "loss": 0.7199,
      "step": 89
    },
    {
      "epoch": 36.0,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.7239,
      "step": 90
    },
    {
      "epoch": 36.4,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.7099,
      "step": 91
    },
    {
      "epoch": 36.8,
      "learning_rate": 3.873830406168111e-06,
      "loss": 0.7265,
      "step": 92
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 7.297007876466278e+17,
  "trial_name": null,
  "trial_params": null
}