{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.9375,
  "eval_steps": 25,
  "global_step": 496,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0,
      "loss": 4.0553,
      "step": 1
    },
    {
      "epoch": 0.01,
      "eval_loss": 4.052073955535889,
      "eval_runtime": 57.3905,
      "eval_samples_per_second": 4.356,
      "eval_steps_per_second": 4.356,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 2e-05,
      "loss": 4.3696,
      "step": 2
    },
    {
      "epoch": 0.02,
      "learning_rate": 4e-05,
      "loss": 4.2401,
      "step": 3
    },
    {
      "epoch": 0.03,
      "learning_rate": 6e-05,
      "loss": 4.3077,
      "step": 4
    },
    {
      "epoch": 0.04,
      "learning_rate": 8e-05,
      "loss": 4.0741,
      "step": 5
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001,
      "loss": 4.1673,
      "step": 6
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00012,
      "loss": 3.9378,
      "step": 7
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00014,
      "loss": 4.0167,
      "step": 8
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00016,
      "loss": 3.5818,
      "step": 9
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018,
      "loss": 3.9022,
      "step": 10
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.0002,
      "loss": 3.7906,
      "step": 11
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0001999979107245606,
      "loss": 3.6776,
      "step": 12
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019999164298554375,
      "loss": 3.4291,
      "step": 13
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019998119704485014,
      "loss": 3.6101,
      "step": 14
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019996657333896877,
      "loss": 3.5537,
      "step": 15
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00019994777247895855,
      "loss": 3.386,
      "step": 16
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019992479525042303,
      "loss": 3.6359,
      "step": 17
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019989764261347736,
      "loss": 3.6132,
      "step": 18
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019986631570270832,
      "loss": 3.4794,
      "step": 19
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019983081582712685,
      "loss": 3.2395,
      "step": 20
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019979114447011323,
      "loss": 3.2974,
      "step": 21
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019974730328935534,
      "loss": 3.2698,
      "step": 22
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001996992941167792,
      "loss": 3.3091,
      "step": 23
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00019964711895847258,
      "loss": 3.2017,
      "step": 24
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019959077999460095,
      "loss": 3.2469,
      "step": 25
    },
    {
      "epoch": 0.2,
      "eval_loss": 3.0720062255859375,
      "eval_runtime": 61.8274,
      "eval_samples_per_second": 4.044,
      "eval_steps_per_second": 4.044,
      "step": 25
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00019953027957931658,
      "loss": 3.1102,
      "step": 26
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019946562024066014,
      "loss": 3.1641,
      "step": 27
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019939680468045499,
      "loss": 2.9779,
      "step": 28
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019932383577419432,
      "loss": 3.104,
      "step": 29
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00019924671657092096,
      "loss": 2.7981,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00019916545029310012,
      "loss": 2.7673,
      "step": 31
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00019908004033648453,
      "loss": 2.8478,
      "step": 32
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00019899049026997272,
      "loss": 2.7539,
      "step": 33
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00019889680383545973,
      "loss": 2.726,
      "step": 34
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00019879898494768093,
      "loss": 2.81,
      "step": 35
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00019869703769404828,
      "loss": 2.6177,
      "step": 36
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00019859096633447965,
      "loss": 2.6046,
      "step": 37
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019848077530122083,
      "loss": 2.5004,
      "step": 38
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0001983664691986601,
      "loss": 2.4993,
      "step": 39
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00019824805280313623,
      "loss": 2.2775,
      "step": 40
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00019812553106273847,
      "loss": 2.3306,
      "step": 41
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00019799890909710013,
      "loss": 2.236,
      "step": 42
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00019786819219718443,
      "loss": 2.2798,
      "step": 43
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.0001977333858250636,
      "loss": 2.2178,
      "step": 44
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00019759449561369038,
      "loss": 1.9915,
      "step": 45
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00019745152736666302,
      "loss": 2.3066,
      "step": 46
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00019730448705798239,
      "loss": 2.1409,
      "step": 47
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00019715338083180269,
      "loss": 1.9976,
      "step": 48
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019699821500217434,
      "loss": 2.1843,
      "step": 49
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.0001968389960527806,
      "loss": 2.1539,
      "step": 50
    },
    {
      "epoch": 0.4,
      "eval_loss": 1.9486372470855713,
      "eval_runtime": 61.7944,
      "eval_samples_per_second": 4.046,
      "eval_steps_per_second": 4.046,
      "step": 50
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0001966757306366662,
      "loss": 2.2096,
      "step": 51
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00019650842557595967,
      "loss": 2.0241,
      "step": 52
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00019633708786158806,
      "loss": 1.8814,
      "step": 53
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00019616172465298492,
      "loss": 1.9953,
      "step": 54
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00019598234327779118,
      "loss": 1.9807,
      "step": 55
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0001957989512315489,
      "loss": 1.9743,
      "step": 56
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00019561155617738797,
      "loss": 1.867,
      "step": 57
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00019542016594570615,
      "loss": 1.8289,
      "step": 58
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00019522478853384155,
      "loss": 1.9712,
      "step": 59
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00019502543210573881,
      "loss": 1.6946,
      "step": 60
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00019482210499160765,
      "loss": 1.8674,
      "step": 61
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00019461481568757506,
      "loss": 1.6849,
      "step": 62
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00019440357285533,
      "loss": 1.7003,
      "step": 63
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00019418838532176173,
      "loss": 1.7021,
      "step": 64
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00019396926207859084,
      "loss": 1.8302,
      "step": 65
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001937462122819935,
      "loss": 1.8549,
      "step": 66
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.000193519245252219,
      "loss": 1.8946,
      "step": 67
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0001932883704732001,
      "loss": 1.799,
      "step": 68
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00019305359759215685,
      "loss": 1.7541,
      "step": 69
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00019281493641919368,
      "loss": 1.7145,
      "step": 70
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00019257239692688907,
      "loss": 1.8254,
      "step": 71
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00019232598924987903,
      "loss": 1.6161,
      "step": 72
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00019207572368443385,
      "loss": 1.5698,
      "step": 73
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00019182161068802741,
      "loss": 1.8155,
      "step": 74
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0001915636608789006,
      "loss": 1.6997,
      "step": 75
    },
    {
      "epoch": 0.6,
      "eval_loss": 1.6153544187545776,
      "eval_runtime": 61.8069,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 4.045,
      "step": 75
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00019130188503561741,
      "loss": 1.6507,
      "step": 76
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0001910362940966147,
      "loss": 1.6035,
      "step": 77
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.00019076689915974488,
      "loss": 1.4493,
      "step": 78
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.00019049371148181253,
      "loss": 1.5494,
      "step": 79
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0001902167424781038,
      "loss": 1.5386,
      "step": 80
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00018993600372190932,
      "loss": 1.467,
      "step": 81
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00018965150694404094,
      "loss": 1.6658,
      "step": 82
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00018936326403234125,
      "loss": 1.6175,
      "step": 83
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00018907128703118695,
      "loss": 1.6551,
      "step": 84
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00018877558814098561,
      "loss": 1.409,
      "step": 85
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00018847617971766577,
      "loss": 1.4879,
      "step": 86
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0001881730742721608,
      "loss": 1.3067,
      "step": 87
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00018786628446988593,
      "loss": 1.6869,
      "step": 88
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0001875558231302091,
      "loss": 1.5962,
      "step": 89
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00018724170322591537,
      "loss": 1.3352,
      "step": 90
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00018692393788266479,
      "loss": 1.5016,
      "step": 91
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.6951,
      "step": 92
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00018627752414301086,
      "loss": 1.5388,
      "step": 93
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.00018594890275733453,
      "loss": 1.3656,
      "step": 94
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00018561668995302667,
      "loss": 1.4599,
      "step": 95
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0001852808996117683,
      "loss": 1.3428,
      "step": 96
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00018494154576472976,
      "loss": 1.2147,
      "step": 97
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0001845986425919841,
      "loss": 1.4761,
      "step": 98
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00018425220442191495,
      "loss": 1.4571,
      "step": 99
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00018390224573061747,
      "loss": 1.5076,
      "step": 100
    },
    {
      "epoch": 0.81,
      "eval_loss": 1.49380362033844,
      "eval_runtime": 61.7975,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 4.045,
      "step": 100
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00018354878114129367,
      "loss": 1.3776,
      "step": 101
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00018319182542364117,
      "loss": 1.437,
      "step": 102
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00018283139349323634,
      "loss": 1.3558,
      "step": 103
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0001824675004109107,
      "loss": 1.4502,
      "step": 104
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00018210016138212187,
      "loss": 1.3451,
      "step": 105
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.00018172939175631808,
      "loss": 1.403,
      "step": 106
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00018135520702629675,
      "loss": 1.1523,
      "step": 107
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00018097762282755727,
      "loss": 1.2302,
      "step": 108
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00018059665493764743,
      "loss": 1.4151,
      "step": 109
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0001802123192755044,
      "loss": 1.4853,
      "step": 110
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0001798246319007893,
      "loss": 1.2571,
      "step": 111
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0001794336090132164,
      "loss": 1.3594,
      "step": 112
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00017903926695187595,
      "loss": 1.3997,
      "step": 113
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00017864162219455163,
      "loss": 1.5718,
      "step": 114
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00017824069135703198,
      "loss": 1.314,
      "step": 115
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00017783649119241602,
      "loss": 1.3595,
      "step": 116
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00017742903859041325,
      "loss": 1.4467,
      "step": 117
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00017701835057663798,
      "loss": 1.2837,
      "step": 118
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001766044443118978,
      "loss": 1.2669,
      "step": 119
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00017618733709147662,
      "loss": 1.3115,
      "step": 120
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0001757670463444118,
      "loss": 1.1836,
      "step": 121
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00017534358963276607,
      "loss": 1.6092,
      "step": 122
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00017491698465089362,
      "loss": 1.2288,
      "step": 123
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00017448724922470058,
      "loss": 1.2305,
      "step": 124
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00017405440131090048,
      "loss": 1.3794,
      "step": 125
    },
    {
      "epoch": 1.01,
      "eval_loss": 1.4039632081985474,
      "eval_runtime": 61.7879,
      "eval_samples_per_second": 4.046,
      "eval_steps_per_second": 4.046,
      "step": 125
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00017361845899626355,
      "loss": 1.2283,
      "step": 126
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00017317944049686124,
      "loss": 1.2627,
      "step": 127
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00017273736415730488,
      "loss": 1.2911,
      "step": 128
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00017229224844997928,
      "loss": 1.1225,
      "step": 129
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00017184411197427077,
      "loss": 1.3,
      "step": 130
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00017139297345578994,
      "loss": 1.1852,
      "step": 131
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0001709388517455893,
      "loss": 1.1986,
      "step": 132
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00017048176581937563,
      "loss": 1.3396,
      "step": 133
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00017002173477671686,
      "loss": 1.3584,
      "step": 134
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0001695587778402442,
      "loss": 1.2763,
      "step": 135
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0001690929143548488,
      "loss": 1.3017,
      "step": 136
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.0001686241637868734,
      "loss": 1.2383,
      "step": 137
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00016815254572329896,
      "loss": 1.238,
      "step": 138
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00016767807987092621,
      "loss": 1.2702,
      "step": 139
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00016720078605555224,
      "loss": 1.3573,
      "step": 140
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00016672068422114196,
      "loss": 1.2863,
      "step": 141
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0001662377944289948,
      "loss": 1.2192,
      "step": 142
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001657521368569064,
      "loss": 1.1934,
      "step": 143
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00016526373179832546,
      "loss": 1.1558,
      "step": 144
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00016477259966150588,
      "loss": 1.3684,
      "step": 145
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00016427876096865394,
      "loss": 1.5386,
      "step": 146
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0001637822363550706,
      "loss": 1.2404,
      "step": 147
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00016328304656828952,
      "loss": 1.2416,
      "step": 148
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00016278121246720987,
      "loss": 1.3613,
      "step": 149
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00016227675502122492,
      "loss": 1.2755,
      "step": 150
    },
    {
      "epoch": 1.19,
      "eval_loss": 1.3754363059997559,
      "eval_runtime": 61.7475,
      "eval_samples_per_second": 4.049,
      "eval_steps_per_second": 4.049,
      "step": 150
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00016176969530934572,
      "loss": 1.3912,
      "step": 151
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001612600545193203,
      "loss": 1.2778,
      "step": 152
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00016074785394674837,
      "loss": 1.2326,
      "step": 153
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.0001602331149941915,
      "loss": 1.2526,
      "step": 154
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00015971585917027862,
      "loss": 1.1176,
      "step": 155
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001591961080888076,
      "loss": 1.1617,
      "step": 156
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001586738834678418,
      "loss": 1.2921,
      "step": 157
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00015814920712880267,
      "loss": 1.3704,
      "step": 158
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00015762210099555803,
      "loss": 1.1412,
      "step": 159
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00015709258709350582,
      "loss": 1.1516,
      "step": 160
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00015656068754865387,
      "loss": 1.3323,
      "step": 161
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.00015602642458669528,
      "loss": 1.2082,
      "step": 162
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001554898205320797,
      "loss": 1.1952,
      "step": 163
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.0001549508978070806,
      "loss": 1.172,
      "step": 164
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00015440967893085828,
      "loss": 1.3049,
      "step": 165
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.0001538661865185188,
      "loss": 1.1573,
      "step": 166
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00015332044328016914,
      "loss": 1.3122,
      "step": 167
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.0001527724720199682,
      "loss": 1.2487,
      "step": 168
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00015222229563517385,
      "loss": 1.2054,
      "step": 169
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00015166993711518631,
      "loss": 1.1871,
      "step": 170
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00015111541954058734,
      "loss": 1.1853,
      "step": 171
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.0001505587660821759,
      "loss": 1.2743,
      "step": 172
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.1493,
      "step": 173
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00014943914464238468,
      "loss": 1.2354,
      "step": 174
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00014887622344495643,
      "loss": 1.1913,
      "step": 175
    },
    {
      "epoch": 1.39,
      "eval_loss": 1.3428610563278198,
      "eval_runtime": 61.7873,
      "eval_samples_per_second": 4.046,
      "eval_steps_per_second": 4.046,
      "step": 175
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00014831125992966385,
      "loss": 1.2415,
      "step": 176
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.0001477442777037949,
      "loss": 1.1343,
      "step": 177
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00014717530045899038,
      "loss": 1.325,
      "step": 178
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.0001466043519702539,
      "loss": 1.3118,
      "step": 179
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.0001460314560949586,
      "loss": 1.2941,
      "step": 180
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00014545663677185006,
      "loss": 1.1848,
      "step": 181
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00014487991802004623,
      "loss": 1.1337,
      "step": 182
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00014430132393803352,
      "loss": 1.1494,
      "step": 183
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.0001437208787026601,
      "loss": 1.2076,
      "step": 184
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00014313860656812536,
      "loss": 1.1794,
      "step": 185
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.00014255453186496673,
      "loss": 1.2402,
      "step": 186
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0001419686789990429,
      "loss": 1.2741,
      "step": 187
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00014138107245051392,
      "loss": 1.2486,
      "step": 188
    },
    {
      "epoch": 1.5,
      "learning_rate": 0.00014079173677281837,
      "loss": 1.2796,
      "step": 189
    },
    {
      "epoch": 1.51,
      "learning_rate": 0.0001402006965916474,
      "loss": 1.0193,
      "step": 190
    },
    {
      "epoch": 1.52,
      "learning_rate": 0.0001396079766039157,
      "loss": 1.1447,
      "step": 191
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.0001390136015767295,
      "loss": 1.0318,
      "step": 192
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00013841759634635178,
      "loss": 1.1047,
      "step": 193
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.00013781998581716427,
      "loss": 1.1895,
      "step": 194
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00013722079496062702,
      "loss": 1.1119,
      "step": 195
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0001366200488142348,
      "loss": 1.0248,
      "step": 196
    },
    {
      "epoch": 1.57,
      "learning_rate": 0.00013601777248047105,
      "loss": 1.0961,
      "step": 197
    },
    {
      "epoch": 1.57,
      "learning_rate": 0.00013541399112575878,
      "loss": 1.2622,
      "step": 198
    },
    {
      "epoch": 1.58,
      "learning_rate": 0.00013480872997940905,
      "loss": 1.1858,
      "step": 199
    },
    {
      "epoch": 1.59,
      "learning_rate": 0.00013420201433256689,
      "loss": 1.1458,
      "step": 200
    },
    {
      "epoch": 1.59,
      "eval_loss": 1.3336366415023804,
      "eval_runtime": 61.796,
      "eval_samples_per_second": 4.046,
      "eval_steps_per_second": 4.046,
      "step": 200
    },
    {
      "epoch": 1.6,
      "learning_rate": 0.00013359386953715421,
      "loss": 1.3068,
      "step": 201
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.00013298432100481079,
      "loss": 1.0935,
      "step": 202
    },
    {
      "epoch": 1.61,
      "learning_rate": 0.00013237339420583212,
      "loss": 1.1904,
      "step": 203
    },
    {
      "epoch": 1.62,
      "learning_rate": 0.00013176111466810532,
      "loss": 1.1859,
      "step": 204
    },
    {
      "epoch": 1.63,
      "learning_rate": 0.00013114750797604247,
      "loss": 1.1536,
      "step": 205
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.00013053259976951133,
      "loss": 1.2067,
      "step": 206
    },
    {
      "epoch": 1.65,
      "learning_rate": 0.00012991641574276418,
      "loss": 1.2649,
      "step": 207
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00012991641574276418,
      "loss": 1.2189,
      "step": 208
    },
    {
      "epoch": 1.66,
      "learning_rate": 0.00012929898164336408,
      "loss": 1.301,
      "step": 209
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.00012868032327110904,
      "loss": 1.1506,
      "step": 210
    },
    {
      "epoch": 1.68,
      "learning_rate": 0.0001280604664769539,
      "loss": 1.1964,
      "step": 211
    },
    {
      "epoch": 1.69,
      "learning_rate": 0.00012743943716193016,
      "loss": 1.139,
      "step": 212
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.00012681726127606376,
      "loss": 1.2197,
      "step": 213
    },
    {
      "epoch": 1.7,
      "learning_rate": 0.0001261939648172906,
      "loss": 1.1713,
      "step": 214
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.0001255695738303704,
      "loss": 1.2533,
      "step": 215
    },
    {
      "epoch": 1.72,
      "learning_rate": 0.00012494411440579814,
      "loss": 1.1755,
      "step": 216
    },
    {
      "epoch": 1.73,
      "learning_rate": 0.00012431761267871417,
      "loss": 0.9834,
      "step": 217
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00012369009482781192,
      "loss": 1.1805,
      "step": 218
    },
    {
      "epoch": 1.74,
      "learning_rate": 0.00012306158707424403,
      "loss": 1.0295,
      "step": 219
    },
    {
      "epoch": 1.75,
      "learning_rate": 0.00012243211568052677,
      "loss": 1.0861,
      "step": 220
    },
    {
      "epoch": 1.76,
      "learning_rate": 0.0001218017069494426,
      "loss": 1.3924,
      "step": 221
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.0001211703872229411,
      "loss": 1.1034,
      "step": 222
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.0001205381828810382,
      "loss": 1.0464,
      "step": 223
    },
    {
      "epoch": 1.78,
      "learning_rate": 0.00011990512034071406,
      "loss": 1.1107,
      "step": 224
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00011927122605480898,
      "loss": 1.1568,
      "step": 225
    },
    {
      "epoch": 1.79,
      "eval_loss": 1.31128990650177,
      "eval_runtime": 61.808,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 4.045,
      "step": 225
    },
    {
      "epoch": 1.8,
      "learning_rate": 0.00011863652651091823,
      "loss": 1.0532,
      "step": 226
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00011800104823028515,
      "loss": 1.0956,
      "step": 227
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00011736481776669306,
      "loss": 1.2576,
      "step": 228
    },
    {
      "epoch": 1.82,
      "learning_rate": 0.00011672786170535552,
      "loss": 1.081,
      "step": 229
    },
    {
      "epoch": 1.83,
      "learning_rate": 0.00011609020666180575,
      "loss": 1.097,
      "step": 230
    },
    {
      "epoch": 1.84,
      "learning_rate": 0.00011545187928078406,
      "loss": 1.1571,
      "step": 231
    },
    {
      "epoch": 1.85,
      "learning_rate": 0.0001148129062351249,
      "loss": 1.3143,
      "step": 232
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00011417331422464205,
      "loss": 1.2877,
      "step": 233
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00011353312997501313,
      "loss": 1.1032,
      "step": 234
    },
    {
      "epoch": 1.87,
      "learning_rate": 0.00011289238023666266,
      "loss": 1.1401,
      "step": 235
    },
    {
      "epoch": 1.88,
      "learning_rate": 0.00011225109178364455,
      "loss": 1.2596,
      "step": 236
    },
    {
      "epoch": 1.89,
      "learning_rate": 0.00011160929141252303,
      "loss": 1.1803,
      "step": 237
    },
    {
      "epoch": 1.9,
      "learning_rate": 0.00011096700594125318,
      "loss": 1.0706,
      "step": 238
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00011032426220806018,
      "loss": 1.1804,
      "step": 239
    },
    {
      "epoch": 1.91,
      "learning_rate": 0.00010968108707031792,
      "loss": 1.2334,
      "step": 240
    },
    {
      "epoch": 1.92,
      "learning_rate": 0.00010903750740342682,
      "loss": 1.1523,
      "step": 241
    },
    {
      "epoch": 1.93,
      "learning_rate": 0.00010839355009969068,
      "loss": 1.1336,
      "step": 242
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.0001077492420671931,
      "loss": 1.1287,
      "step": 243
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00010710461022867302,
      "loss": 1.181,
      "step": 244
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.0001064596815203998,
      "loss": 1.2882,
      "step": 245
    },
    {
      "epoch": 1.96,
      "learning_rate": 0.00010581448289104758,
      "loss": 1.0872,
      "step": 246
    },
    {
      "epoch": 1.97,
      "learning_rate": 0.00010516904130056946,
      "loss": 1.1117,
      "step": 247
    },
    {
      "epoch": 1.98,
      "learning_rate": 0.00010452338371907064,
      "loss": 1.1874,
      "step": 248
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.0001038775371256817,
      "loss": 1.0474,
      "step": 249
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.00010323152850743107,
      "loss": 1.1845,
      "step": 250
    },
    {
      "epoch": 1.99,
      "eval_loss": 1.2912580966949463,
      "eval_runtime": 61.8585,
      "eval_samples_per_second": 4.041,
      "eval_steps_per_second": 4.041,
      "step": 250
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00010258538485811765,
      "loss": 1.3517,
      "step": 251
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.00010193913317718244,
      "loss": 1.2637,
      "step": 252
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.00010129280046858086,
      "loss": 1.1032,
      "step": 253
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.00010064641373965393,
      "loss": 0.9574,
      "step": 254
    },
    {
      "epoch": 2.02,
      "learning_rate": 0.0001,
      "loss": 0.8302,
      "step": 255
    },
    {
      "epoch": 2.02,
      "learning_rate": 9.935358626034606e-05,
      "loss": 1.1663,
      "step": 256
    },
    {
      "epoch": 2.03,
      "learning_rate": 9.870719953141917e-05,
      "loss": 1.0611,
      "step": 257
    },
    {
      "epoch": 2.04,
      "learning_rate": 9.806086682281758e-05,
      "loss": 1.1039,
      "step": 258
    },
    {
      "epoch": 2.05,
      "learning_rate": 9.741461514188242e-05,
      "loss": 1.134,
      "step": 259
    },
    {
      "epoch": 2.06,
      "learning_rate": 9.676847149256895e-05,
      "loss": 0.8512,
      "step": 260
    },
    {
      "epoch": 2.06,
      "learning_rate": 9.612246287431831e-05,
      "loss": 1.01,
      "step": 261
    },
    {
      "epoch": 2.07,
      "learning_rate": 9.547661628092937e-05,
      "loss": 1.1625,
      "step": 262
    },
    {
      "epoch": 2.08,
      "learning_rate": 9.483095869943055e-05,
      "loss": 1.0704,
      "step": 263
    },
    {
      "epoch": 2.09,
      "learning_rate": 9.418551710895243e-05,
      "loss": 0.8177,
      "step": 264
    },
    {
      "epoch": 2.1,
      "learning_rate": 9.354031847960022e-05,
      "loss": 0.9913,
      "step": 265
    },
    {
      "epoch": 2.1,
      "learning_rate": 9.289538977132703e-05,
      "loss": 1.166,
      "step": 266
    },
    {
      "epoch": 2.11,
      "learning_rate": 9.225075793280692e-05,
      "loss": 1.125,
      "step": 267
    },
    {
      "epoch": 2.12,
      "learning_rate": 9.160644990030931e-05,
      "loss": 0.9657,
      "step": 268
    },
    {
      "epoch": 2.13,
      "learning_rate": 9.09624925965732e-05,
      "loss": 1.0344,
      "step": 269
    },
    {
      "epoch": 2.14,
      "learning_rate": 9.03189129296821e-05,
      "loss": 1.0579,
      "step": 270
    },
    {
      "epoch": 2.15,
      "learning_rate": 8.967573779193987e-05,
      "loss": 1.0357,
      "step": 271
    },
    {
      "epoch": 2.15,
      "learning_rate": 8.903299405874684e-05,
      "loss": 1.1066,
      "step": 272
    },
    {
      "epoch": 2.16,
      "learning_rate": 8.839070858747697e-05,
      "loss": 0.9976,
      "step": 273
    },
    {
      "epoch": 2.17,
      "learning_rate": 8.774890821635548e-05,
      "loss": 0.8978,
      "step": 274
    },
    {
      "epoch": 2.18,
      "learning_rate": 8.710761976333734e-05,
      "loss": 1.0132,
      "step": 275
    },
    {
      "epoch": 2.18,
      "eval_loss": 1.297065258026123,
      "eval_runtime": 61.8107,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 4.045,
      "step": 275
    },
    {
      "epoch": 2.19,
      "learning_rate": 8.646687002498692e-05,
      "loss": 0.9386,
      "step": 276
    },
    {
      "epoch": 2.19,
      "learning_rate": 8.582668577535797e-05,
      "loss": 0.8478,
      "step": 277
    },
    {
      "epoch": 2.2,
      "learning_rate": 8.518709376487515e-05,
      "loss": 1.0554,
      "step": 278
    },
    {
      "epoch": 2.21,
      "learning_rate": 8.454812071921596e-05,
      "loss": 1.0435,
      "step": 279
    },
    {
      "epoch": 2.22,
      "learning_rate": 8.390979333819426e-05,
      "loss": 1.04,
      "step": 280
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.327213829464449e-05,
      "loss": 1.0513,
      "step": 281
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.263518223330697e-05,
      "loss": 1.1234,
      "step": 282
    },
    {
      "epoch": 2.24,
      "learning_rate": 8.199895176971488e-05,
      "loss": 1.0556,
      "step": 283
    },
    {
      "epoch": 2.25,
      "learning_rate": 8.13634734890818e-05,
      "loss": 0.9852,
      "step": 284
    },
    {
      "epoch": 2.26,
      "learning_rate": 8.072877394519102e-05,
      "loss": 0.9753,
      "step": 285
    },
    {
      "epoch": 2.27,
      "learning_rate": 8.009487965928596e-05,
      "loss": 1.1802,
      "step": 286
    },
    {
      "epoch": 2.27,
      "learning_rate": 7.94618171189618e-05,
      "loss": 0.8883,
      "step": 287
    },
    {
      "epoch": 2.28,
      "learning_rate": 7.882961277705895e-05,
      "loss": 1.0442,
      "step": 288
    },
    {
      "epoch": 2.29,
      "learning_rate": 7.819829305055743e-05,
      "loss": 0.9863,
      "step": 289
    },
    {
      "epoch": 2.3,
      "learning_rate": 7.756788431947326e-05,
      "loss": 1.0352,
      "step": 290
    },
    {
      "epoch": 2.31,
      "learning_rate": 7.693841292575598e-05,
      "loss": 1.08,
      "step": 291
    },
    {
      "epoch": 2.31,
      "learning_rate": 7.630990517218808e-05,
      "loss": 1.0916,
      "step": 292
    },
    {
      "epoch": 2.32,
      "learning_rate": 7.568238732128585e-05,
      "loss": 0.956,
      "step": 293
    },
    {
      "epoch": 2.33,
      "learning_rate": 7.505588559420189e-05,
      "loss": 1.0018,
      "step": 294
    },
    {
      "epoch": 2.34,
      "learning_rate": 7.443042616962965e-05,
      "loss": 1.0782,
      "step": 295
    },
    {
      "epoch": 2.35,
      "learning_rate": 7.380603518270941e-05,
      "loss": 1.098,
      "step": 296
    },
    {
      "epoch": 2.35,
      "learning_rate": 7.318273872393625e-05,
      "loss": 1.1226,
      "step": 297
    },
    {
      "epoch": 2.36,
      "learning_rate": 7.256056283806986e-05,
      "loss": 0.9865,
      "step": 298
    },
    {
      "epoch": 2.37,
      "learning_rate": 7.193953352304613e-05,
      "loss": 0.9185,
      "step": 299
    },
    {
      "epoch": 2.38,
      "learning_rate": 7.131967672889101e-05,
      "loss": 0.9713,
      "step": 300
    },
    {
      "epoch": 2.38,
      "eval_loss": 1.2972489595413208,
      "eval_runtime": 61.7846,
      "eval_samples_per_second": 4.046,
      "eval_steps_per_second": 4.046,
      "step": 300
    },
    {
      "epoch": 2.39,
      "learning_rate": 7.070101835663594e-05,
      "loss": 1.0547,
      "step": 301
    },
    {
      "epoch": 2.4,
      "learning_rate": 7.008358425723585e-05,
      "loss": 1.1558,
      "step": 302
    },
    {
      "epoch": 2.4,
      "learning_rate": 6.94674002304887e-05,
      "loss": 1.0398,
      "step": 303
    },
    {
      "epoch": 2.41,
      "learning_rate": 6.885249202395754e-05,
      "loss": 0.8272,
      "step": 304
    },
    {
      "epoch": 2.42,
      "learning_rate": 6.823888533189469e-05,
      "loss": 0.9847,
      "step": 305
    },
    {
      "epoch": 2.43,
      "learning_rate": 6.762660579416791e-05,
      "loss": 0.919,
      "step": 306
    },
    {
      "epoch": 2.44,
      "learning_rate": 6.701567899518924e-05,
      "loss": 0.9437,
      "step": 307
    },
    {
      "epoch": 2.44,
      "learning_rate": 6.640613046284581e-05,
      "loss": 1.0459,
      "step": 308
    },
    {
      "epoch": 2.45,
      "learning_rate": 6.579798566743314e-05,
      "loss": 0.98,
      "step": 309
    },
    {
      "epoch": 2.46,
      "learning_rate": 6.519127002059095e-05,
      "loss": 1.1745,
      "step": 310
    },
    {
      "epoch": 2.47,
      "learning_rate": 6.458600887424125e-05,
      "loss": 0.9702,
      "step": 311
    },
    {
      "epoch": 2.48,
      "learning_rate": 6.398222751952899e-05,
      "loss": 0.9785,
      "step": 312
    },
    {
      "epoch": 2.48,
      "learning_rate": 6.337995118576521e-05,
      "loss": 0.9573,
      "step": 313
    },
    {
      "epoch": 2.49,
      "learning_rate": 6.277920503937303e-05,
      "loss": 0.9505,
      "step": 314
    },
    {
      "epoch": 2.5,
      "learning_rate": 6.218001418283576e-05,
      "loss": 1.0688,
      "step": 315
    },
    {
      "epoch": 2.51,
      "learning_rate": 6.158240365364823e-05,
      "loss": 0.9826,
      "step": 316
    },
    {
      "epoch": 2.52,
      "learning_rate": 6.098639842327052e-05,
      "loss": 0.9873,
      "step": 317
    },
    {
      "epoch": 2.52,
      "learning_rate": 6.039202339608432e-05,
      "loss": 0.969,
      "step": 318
    },
    {
      "epoch": 2.53,
      "learning_rate": 5.979930340835265e-05,
      "loss": 1.004,
      "step": 319
    },
    {
      "epoch": 2.54,
      "learning_rate": 5.920826322718165e-05,
      "loss": 1.0032,
      "step": 320
    },
    {
      "epoch": 2.55,
      "learning_rate": 5.861892754948609e-05,
      "loss": 1.0514,
      "step": 321
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.80313210009571e-05,
      "loss": 1.163,
      "step": 322
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.744546813503328e-05,
      "loss": 1.0799,
      "step": 323
    },
    {
      "epoch": 2.57,
      "learning_rate": 5.6861393431874675e-05,
      "loss": 0.8988,
      "step": 324
    },
    {
      "epoch": 2.58,
      "learning_rate": 5.6279121297339924e-05,
      "loss": 1.0341,
      "step": 325
    },
    {
      "epoch": 2.58,
      "eval_loss": 1.2901266813278198,
      "eval_runtime": 61.8384,
      "eval_samples_per_second": 4.043,
      "eval_steps_per_second": 4.043,
      "step": 325
    },
    {
      "epoch": 2.59,
      "learning_rate": 5.5698676061966515e-05,
      "loss": 1.1851,
      "step": 326
    },
    {
      "epoch": 2.6,
      "learning_rate": 5.5120081979953785e-05,
      "loss": 1.0744,
      "step": 327
    },
    {
      "epoch": 2.6,
      "learning_rate": 5.4543363228149946e-05,
      "loss": 0.9741,
      "step": 328
    },
    {
      "epoch": 2.61,
      "learning_rate": 5.3968543905041444e-05,
      "loss": 0.9947,
      "step": 329
    },
    {
      "epoch": 2.62,
      "learning_rate": 5.339564802974615e-05,
      "loss": 1.1458,
      "step": 330
    },
    {
      "epoch": 2.63,
      "learning_rate": 5.282469954100968e-05,
      "loss": 0.8714,
      "step": 331
    },
    {
      "epoch": 2.64,
      "learning_rate": 5.22557222962051e-05,
      "loss": 0.8682,
      "step": 332
    },
    {
      "epoch": 2.65,
      "learning_rate": 5.168874007033615e-05,
      "loss": 0.9135,
      "step": 333
    },
    {
      "epoch": 2.65,
      "learning_rate": 5.112377655504359e-05,
      "loss": 1.0744,
      "step": 334
    },
    {
      "epoch": 2.66,
      "learning_rate": 5.056085535761532e-05,
      "loss": 0.9575,
      "step": 335
    },
    {
      "epoch": 2.67,
      "learning_rate": 5.000000000000002e-05,
      "loss": 1.0823,
      "step": 336
    },
    {
      "epoch": 2.68,
      "learning_rate": 4.9441233917824106e-05,
      "loss": 0.8986,
      "step": 337
    },
    {
      "epoch": 2.69,
      "learning_rate": 4.888458045941269e-05,
      "loss": 0.9171,
      "step": 338
    },
    {
      "epoch": 2.69,
      "learning_rate": 4.833006288481371e-05,
      "loss": 1.0258,
      "step": 339
    },
    {
      "epoch": 2.7,
      "learning_rate": 4.777770436482617e-05,
      "loss": 1.0722,
      "step": 340
    },
    {
      "epoch": 2.71,
      "learning_rate": 4.722752798003184e-05,
      "loss": 1.0347,
      "step": 341
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.66795567198309e-05,
      "loss": 1.034,
      "step": 342
    },
    {
      "epoch": 2.73,
      "learning_rate": 4.6133813481481246e-05,
      "loss": 0.9559,
      "step": 343
    },
    {
      "epoch": 2.73,
      "learning_rate": 4.559032106914173e-05,
      "loss": 0.9619,
      "step": 344
    },
    {
      "epoch": 2.74,
      "learning_rate": 4.50491021929194e-05,
      "loss": 0.9795,
      "step": 345
    },
    {
      "epoch": 2.75,
      "learning_rate": 4.451017946792032e-05,
      "loss": 0.9236,
      "step": 346
    },
    {
      "epoch": 2.76,
      "learning_rate": 4.397357541330476e-05,
      "loss": 0.8438,
      "step": 347
    },
    {
      "epoch": 2.77,
      "learning_rate": 4.343931245134616e-05,
      "loss": 0.9684,
      "step": 348
    },
    {
      "epoch": 2.77,
      "learning_rate": 4.2907412906494174e-05,
      "loss": 0.9478,
      "step": 349
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.2377899004441966e-05,
      "loss": 1.0808,
      "step": 350
    },
    {
      "epoch": 2.78,
      "eval_loss": 1.2958766222000122,
      "eval_runtime": 61.8006,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 4.045,
      "step": 350
    },
    {
      "epoch": 2.79,
      "learning_rate": 4.185079287119733e-05,
      "loss": 1.112,
      "step": 351
    },
    {
      "epoch": 2.8,
      "learning_rate": 4.132611653215822e-05,
      "loss": 0.983,
      "step": 352
    },
    {
      "epoch": 2.81,
      "learning_rate": 4.080389191119241e-05,
      "loss": 1.1557,
      "step": 353
    },
    {
      "epoch": 2.81,
      "learning_rate": 4.028414082972141e-05,
      "loss": 0.9879,
      "step": 354
    },
    {
      "epoch": 2.82,
      "learning_rate": 3.9766885005808565e-05,
      "loss": 0.833,
      "step": 355
    },
    {
      "epoch": 2.83,
      "learning_rate": 3.9252146053251636e-05,
      "loss": 1.0431,
      "step": 356
    },
    {
      "epoch": 2.84,
      "learning_rate": 3.873994548067972e-05,
      "loss": 1.0371,
      "step": 357
    },
    {
      "epoch": 2.85,
      "learning_rate": 3.8230304690654304e-05,
      "loss": 0.9455,
      "step": 358
    },
    {
      "epoch": 2.85,
      "learning_rate": 3.772324497877511e-05,
      "loss": 0.9356,
      "step": 359
    },
    {
      "epoch": 2.86,
      "learning_rate": 3.721878753279017e-05,
      "loss": 0.9807,
      "step": 360
    },
    {
      "epoch": 2.87,
      "learning_rate": 3.671695343171052e-05,
      "loss": 0.9067,
      "step": 361
    },
    {
      "epoch": 2.88,
      "learning_rate": 3.621776364492939e-05,
      "loss": 0.9393,
      "step": 362
    },
    {
      "epoch": 2.89,
      "learning_rate": 3.5721239031346066e-05,
      "loss": 0.9856,
      "step": 363
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.522740033849411e-05,
      "loss": 1.0474,
      "step": 364
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.4736268201674574e-05,
      "loss": 0.9027,
      "step": 365
    },
    {
      "epoch": 2.91,
      "learning_rate": 3.424786314309365e-05,
      "loss": 0.9521,
      "step": 366
    },
    {
      "epoch": 2.92,
      "learning_rate": 3.376220557100523e-05,
      "loss": 0.9211,
      "step": 367
    },
    {
      "epoch": 2.93,
      "learning_rate": 3.3279315778858036e-05,
      "loss": 0.9576,
      "step": 368
    },
    {
      "epoch": 2.94,
      "learning_rate": 3.279921394444776e-05,
      "loss": 0.9643,
      "step": 369
    },
    {
      "epoch": 2.94,
      "learning_rate": 3.2321920129073816e-05,
      "loss": 1.0322,
      "step": 370
    },
    {
      "epoch": 2.95,
      "learning_rate": 3.18474542767011e-05,
      "loss": 0.9762,
      "step": 371
    },
    {
      "epoch": 2.96,
      "learning_rate": 3.137583621312665e-05,
      "loss": 1.1331,
      "step": 372
    },
    {
      "epoch": 2.97,
      "learning_rate": 3.090708564515124e-05,
      "loss": 1.104,
      "step": 373
    },
    {
      "epoch": 2.98,
      "learning_rate": 3.04412221597558e-05,
      "loss": 0.9667,
      "step": 374
    },
    {
      "epoch": 2.98,
      "learning_rate": 2.997826522328315e-05,
      "loss": 0.9492,
      "step": 375
    },
    {
      "epoch": 2.98,
      "eval_loss": 1.2812517881393433,
      "eval_runtime": 61.7806,
      "eval_samples_per_second": 4.047,
      "eval_steps_per_second": 4.047,
      "step": 375
    },
    {
      "epoch": 2.99,
      "learning_rate": 2.9518234180624393e-05,
      "loss": 1.0619,
      "step": 376
    },
    {
      "epoch": 3.0,
      "learning_rate": 2.906114825441072e-05,
      "loss": 0.9439,
      "step": 377
    },
    {
      "epoch": 3.01,
      "learning_rate": 2.8607026544210114e-05,
      "loss": 0.905,
      "step": 378
    },
    {
      "epoch": 3.02,
      "learning_rate": 2.8155888025729273e-05,
      "loss": 0.9329,
      "step": 379
    },
    {
      "epoch": 3.0,
      "learning_rate": 2.770775155002071e-05,
      "loss": 1.012,
      "step": 380
    },
    {
      "epoch": 3.01,
      "learning_rate": 2.7262635842695127e-05,
      "loss": 0.7935,
      "step": 381
    },
    {
      "epoch": 3.02,
      "learning_rate": 2.6820559503138797e-05,
      "loss": 0.9079,
      "step": 382
    },
    {
      "epoch": 3.03,
      "learning_rate": 2.6381541003736486e-05,
      "loss": 0.884,
      "step": 383
    },
    {
      "epoch": 3.03,
      "learning_rate": 2.594559868909956e-05,
      "loss": 0.8965,
      "step": 384
    },
    {
      "epoch": 3.04,
      "learning_rate": 2.5512750775299432e-05,
      "loss": 0.7722,
      "step": 385
    },
    {
      "epoch": 3.05,
      "learning_rate": 2.50830153491064e-05,
      "loss": 0.8773,
      "step": 386
    },
    {
      "epoch": 3.06,
      "learning_rate": 2.465641036723393e-05,
      "loss": 0.8756,
      "step": 387
    },
    {
      "epoch": 3.07,
      "learning_rate": 2.423295365558821e-05,
      "loss": 0.8886,
      "step": 388
    },
    {
      "epoch": 3.07,
      "learning_rate": 2.381266290852341e-05,
      "loss": 0.9629,
      "step": 389
    },
    {
      "epoch": 3.08,
      "learning_rate": 2.339555568810221e-05,
      "loss": 0.9036,
      "step": 390
    },
    {
      "epoch": 3.09,
      "learning_rate": 2.298164942336205e-05,
      "loss": 0.8959,
      "step": 391
    },
    {
      "epoch": 3.1,
      "learning_rate": 2.2570961409586754e-05,
      "loss": 0.8581,
      "step": 392
    },
    {
      "epoch": 3.11,
      "learning_rate": 2.2163508807583998e-05,
      "loss": 0.7808,
      "step": 393
    },
    {
      "epoch": 3.11,
      "learning_rate": 2.1759308642968025e-05,
      "loss": 0.8382,
      "step": 394
    },
    {
      "epoch": 3.12,
      "learning_rate": 2.1358377805448382e-05,
      "loss": 0.8431,
      "step": 395
    },
    {
      "epoch": 3.13,
      "learning_rate": 2.0960733048124083e-05,
      "loss": 0.9881,
      "step": 396
    },
    {
      "epoch": 3.14,
      "learning_rate": 2.0566390986783646e-05,
      "loss": 0.7971,
      "step": 397
    },
    {
      "epoch": 3.15,
      "learning_rate": 2.01753680992107e-05,
      "loss": 0.9015,
      "step": 398
    },
    {
      "epoch": 3.16,
      "learning_rate": 1.9787680724495617e-05,
      "loss": 0.8186,
      "step": 399
    },
    {
      "epoch": 3.16,
      "learning_rate": 1.9403345062352573e-05,
      "loss": 0.8436,
      "step": 400
    },
    {
      "epoch": 3.16,
      "eval_loss": 1.319449782371521,
      "eval_runtime": 61.8064,
      "eval_samples_per_second": 4.045,
      "eval_steps_per_second": 4.045,
      "step": 400
    },
    {
      "epoch": 3.17,
      "learning_rate": 1.9022377172442752e-05,
      "loss": 0.7585,
      "step": 401
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.864479297370325e-05,
      "loss": 0.8746,
      "step": 402
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.8270608243681953e-05,
      "loss": 0.9837,
      "step": 403
    },
    {
      "epoch": 3.2,
      "learning_rate": 1.7899838617878163e-05,
      "loss": 0.9746,
      "step": 404
    },
    {
      "epoch": 3.2,
      "learning_rate": 1.7532499589089323e-05,
      "loss": 0.7808,
      "step": 405
    },
    {
      "epoch": 3.21,
      "learning_rate": 1.7168606506763695e-05,
      "loss": 0.8529,
      "step": 406
    },
    {
      "epoch": 3.22,
      "learning_rate": 1.6808174576358848e-05,
      "loss": 1.0696,
      "step": 407
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.6451218858706374e-05,
      "loss": 0.9803,
      "step": 408
    },
    {
      "epoch": 3.24,
      "learning_rate": 1.6097754269382536e-05,
      "loss": 0.9196,
      "step": 409
    },
    {
      "epoch": 3.24,
      "learning_rate": 1.5747795578085046e-05,
      "loss": 0.8965,
      "step": 410
    },
    {
      "epoch": 3.25,
      "learning_rate": 1.5401357408015893e-05,
      "loss": 0.8625,
      "step": 411
    },
    {
      "epoch": 3.26,
      "learning_rate": 1.505845423527027e-05,
      "loss": 0.8837,
      "step": 412
    },
    {
      "epoch": 3.27,
      "learning_rate": 1.47191003882317e-05,
      "loss": 0.9438,
      "step": 413
    },
    {
      "epoch": 3.28,
      "learning_rate": 1.4383310046973365e-05,
      "loss": 0.8853,
      "step": 414
    },
    {
      "epoch": 3.28,
      "learning_rate": 1.40510972426655e-05,
      "loss": 0.8758,
      "step": 415
    },
    {
      "epoch": 3.29,
      "learning_rate": 1.3722475856989158e-05,
      "loss": 0.8716,
      "step": 416
    },
    {
      "epoch": 3.3,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.9002,
      "step": 417
    },
    {
      "epoch": 3.31,
      "learning_rate": 1.307606211733522e-05,
      "loss": 0.9933,
      "step": 418
    },
    {
      "epoch": 3.32,
      "learning_rate": 1.2758296774084632e-05,
      "loss": 0.857,
      "step": 419
    },
    {
      "epoch": 3.32,
      "learning_rate": 1.2444176869790925e-05,
      "loss": 0.8833,
      "step": 420
    },
    {
      "epoch": 3.33,
      "learning_rate": 1.213371553011411e-05,
      "loss": 0.7823,
      "step": 421
    },
    {
      "epoch": 3.34,
      "learning_rate": 1.18269257278392e-05,
      "loss": 0.806,
      "step": 422
    },
    {
      "epoch": 3.35,
      "learning_rate": 1.1523820282334219e-05,
      "loss": 1.0458,
      "step": 423
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.1224411859014417e-05,
      "loss": 0.821,
      "step": 424
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.092871296881307e-05,
      "loss": 1.0561,
      "step": 425
    },
    {
      "epoch": 3.36,
      "eval_loss": 1.3116503953933716,
      "eval_runtime": 61.8632,
      "eval_samples_per_second": 4.041,
      "eval_steps_per_second": 4.041,
      "step": 425
    },
    {
      "epoch": 3.37,
      "learning_rate": 1.0636735967658784e-05,
      "loss": 0.9725,
      "step": 426
    },
    {
      "epoch": 3.38,
      "learning_rate": 1.0348493055959062e-05,
      "loss": 0.9468,
      "step": 427
    },
    {
      "epoch": 3.39,
      "learning_rate": 1.0063996278090704e-05,
      "loss": 0.8698,
      "step": 428
    },
    {
      "epoch": 3.4,
      "learning_rate": 9.783257521896227e-06,
      "loss": 0.794,
      "step": 429
    },
    {
      "epoch": 3.41,
      "learning_rate": 9.506288518187467e-06,
      "loss": 0.7835,
      "step": 430
    },
    {
      "epoch": 3.41,
      "learning_rate": 9.233100840255127e-06,
      "loss": 0.9157,
      "step": 431
    },
    {
      "epoch": 3.42,
      "learning_rate": 8.963705903385345e-06,
      "loss": 0.8811,
      "step": 432
    },
    {
      "epoch": 3.43,
      "learning_rate": 8.698114964382598e-06,
      "loss": 0.7957,
      "step": 433
    },
    {
      "epoch": 3.44,
      "learning_rate": 8.436339121099412e-06,
      "loss": 0.8907,
      "step": 434
    },
    {
      "epoch": 3.45,
      "learning_rate": 8.178389311972612e-06,
      "loss": 0.8602,
      "step": 435
    },
    {
      "epoch": 3.45,
      "learning_rate": 7.92427631556617e-06,
      "loss": 0.8855,
      "step": 436
    },
    {
      "epoch": 3.46,
      "learning_rate": 7.674010750120964e-06,
      "loss": 0.9052,
      "step": 437
    },
    {
      "epoch": 3.47,
      "learning_rate": 7.427603073110967e-06,
      "loss": 1.01,
      "step": 438
    },
    {
      "epoch": 3.48,
      "learning_rate": 7.185063580806317e-06,
      "loss": 0.9242,
      "step": 439
    },
    {
      "epoch": 3.49,
      "learning_rate": 6.946402407843155e-06,
      "loss": 0.7778,
      "step": 440
    },
    {
      "epoch": 3.49,
      "learning_rate": 6.7116295267999455e-06,
      "loss": 0.9008,
      "step": 441
    },
    {
      "epoch": 3.5,
      "learning_rate": 6.480754747781037e-06,
      "loss": 0.8944,
      "step": 442
    },
    {
      "epoch": 3.51,
      "learning_rate": 6.253787718006498e-06,
      "loss": 0.9784,
      "step": 443
    },
    {
      "epoch": 3.52,
      "learning_rate": 6.030737921409169e-06,
      "loss": 0.827,
      "step": 444
    },
    {
      "epoch": 3.53,
      "learning_rate": 5.811614678238275e-06,
      "loss": 0.8666,
      "step": 445
    },
    {
      "epoch": 3.53,
      "learning_rate": 5.596427144670002e-06,
      "loss": 0.9152,
      "step": 446
    },
    {
      "epoch": 3.54,
      "learning_rate": 5.385184312424974e-06,
      "loss": 0.9269,
      "step": 447
    },
    {
      "epoch": 3.55,
      "learning_rate": 5.177895008392353e-06,
      "loss": 0.8852,
      "step": 448
    },
    {
      "epoch": 3.56,
      "learning_rate": 4.974567894261217e-06,
      "loss": 0.8283,
      "step": 449
    },
    {
      "epoch": 3.57,
      "learning_rate": 4.775211466158469e-06,
      "loss": 0.9367,
      "step": 450
    },
    {
      "epoch": 3.57,
      "eval_loss": 1.3111974000930786,
      "eval_runtime": 61.8414,
      "eval_samples_per_second": 4.043,
      "eval_steps_per_second": 4.043,
      "step": 450
    },
    {
      "epoch": 3.57,
      "learning_rate": 4.5798340542938855e-06,
      "loss": 0.7843,
      "step": 451
    },
    {
      "epoch": 3.58,
      "learning_rate": 4.3884438226120424e-06,
      "loss": 0.9498,
      "step": 452
    },
    {
      "epoch": 3.59,
      "learning_rate": 4.20104876845111e-06,
      "loss": 1.0189,
      "step": 453
    },
    {
      "epoch": 3.6,
      "learning_rate": 4.017656722208807e-06,
      "loss": 0.9243,
      "step": 454
    },
    {
      "epoch": 3.61,
      "learning_rate": 3.8382753470150854e-06,
      "loss": 0.8153,
      "step": 455
    },
    {
      "epoch": 3.61,
      "learning_rate": 3.662912138411967e-06,
      "loss": 0.8717,
      "step": 456
    },
    {
      "epoch": 3.62,
      "learning_rate": 3.4915744240403558e-06,
      "loss": 0.8371,
      "step": 457
    },
    {
      "epoch": 3.63,
      "learning_rate": 3.3242693633337983e-06,
      "loss": 0.9489,
      "step": 458
    },
    {
      "epoch": 3.64,
      "learning_rate": 3.161003947219421e-06,
      "loss": 0.9656,
      "step": 459
    },
    {
      "epoch": 3.65,
      "learning_rate": 3.0017849978256516e-06,
      "loss": 0.8834,
      "step": 460
    },
    {
      "epoch": 3.66,
      "learning_rate": 2.8466191681973507e-06,
      "loss": 0.8328,
      "step": 461
    },
    {
      "epoch": 3.66,
      "learning_rate": 2.6955129420176196e-06,
      "loss": 0.948,
      "step": 462
    },
    {
      "epoch": 3.67,
      "learning_rate": 2.548472633337007e-06,
      "loss": 0.6954,
      "step": 463
    },
    {
      "epoch": 3.68,
      "learning_rate": 2.4055043863096428e-06,
      "loss": 0.9203,
      "step": 464
    },
    {
      "epoch": 3.69,
      "learning_rate": 2.266614174936443e-06,
      "loss": 0.9194,
      "step": 465
    },
    {
      "epoch": 3.7,
      "learning_rate": 2.1318078028155888e-06,
      "loss": 0.8099,
      "step": 466
    },
    {
      "epoch": 3.7,
      "learning_rate": 2.0010909028998827e-06,
      "loss": 0.9428,
      "step": 467
    },
    {
      "epoch": 3.71,
      "learning_rate": 1.874468937261531e-06,
      "loss": 0.8694,
      "step": 468
    },
    {
      "epoch": 3.72,
      "learning_rate": 1.751947196863779e-06,
      "loss": 0.8197,
      "step": 469
    },
    {
      "epoch": 3.73,
      "learning_rate": 1.6335308013398886e-06,
      "loss": 0.9786,
      "step": 470
    },
    {
      "epoch": 3.74,
      "learning_rate": 1.5192246987791981e-06,
      "loss": 0.8906,
      "step": 471
    },
    {
      "epoch": 3.74,
      "learning_rate": 1.409033665520354e-06,
      "loss": 0.8925,
      "step": 472
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.3029623059517493e-06,
      "loss": 0.8353,
      "step": 473
    },
    {
      "epoch": 3.76,
      "learning_rate": 1.201015052319099e-06,
      "loss": 0.8909,
      "step": 474
    },
    {
      "epoch": 3.77,
      "learning_rate": 1.1031961645402877e-06,
      "loss": 0.9294,
      "step": 475
    },
    {
      "epoch": 3.77,
      "eval_loss": 1.3116556406021118,
      "eval_runtime": 61.8408,
      "eval_samples_per_second": 4.043,
      "eval_steps_per_second": 4.043,
      "step": 475
    },
    {
      "epoch": 3.78,
      "learning_rate": 1.0095097300273026e-06,
      "loss": 0.9107,
      "step": 476
    },
    {
      "epoch": 3.78,
      "learning_rate": 9.199596635154683e-07,
      "loss": 1.0287,
      "step": 477
    },
    {
      "epoch": 3.79,
      "learning_rate": 8.345497068998897e-07,
      "loss": 0.8321,
      "step": 478
    },
    {
      "epoch": 3.8,
      "learning_rate": 7.532834290790436e-07,
      "loss": 0.8194,
      "step": 479
    },
    {
      "epoch": 3.81,
      "learning_rate": 6.761642258056978e-07,
      "loss": 0.9389,
      "step": 480
    },
    {
      "epoch": 3.82,
      "learning_rate": 6.031953195450135e-07,
      "loss": 0.9331,
      "step": 481
    },
    {
      "epoch": 3.82,
      "learning_rate": 5.343797593398536e-07,
      "loss": 0.8131,
      "step": 482
    },
    {
      "epoch": 3.83,
      "learning_rate": 4.6972042068341714e-07,
      "loss": 0.8739,
      "step": 483
    },
    {
      "epoch": 3.84,
      "learning_rate": 4.092200053990691e-07,
      "loss": 1.1704,
      "step": 484
    },
    {
      "epoch": 3.85,
      "learning_rate": 3.5288104152743083e-07,
      "loss": 0.7947,
      "step": 485
    },
    {
      "epoch": 3.86,
      "learning_rate": 3.007058832207976e-07,
      "loss": 1.0854,
      "step": 486
    },
    {
      "epoch": 3.86,
      "learning_rate": 2.5269671064467313e-07,
      "loss": 0.7943,
      "step": 487
    },
    {
      "epoch": 3.87,
      "learning_rate": 2.088555298867978e-07,
      "loss": 0.8828,
      "step": 488
    },
    {
      "epoch": 3.88,
      "learning_rate": 1.6918417287318245e-07,
      "loss": 0.769,
      "step": 489
    },
    {
      "epoch": 3.89,
      "learning_rate": 1.3368429729168076e-07,
      "loss": 1.0282,
      "step": 490
    },
    {
      "epoch": 3.9,
      "learning_rate": 1.023573865226446e-07,
      "loss": 0.958,
      "step": 491
    },
    {
      "epoch": 3.91,
      "learning_rate": 7.520474957699586e-08,
      "loss": 0.9203,
      "step": 492
    },
    {
      "epoch": 3.91,
      "learning_rate": 5.2227521041470216e-08,
      "loss": 0.9252,
      "step": 493
    },
    {
      "epoch": 3.92,
      "learning_rate": 3.3426661031255026e-08,
      "loss": 1.0799,
      "step": 494
    },
    {
      "epoch": 3.93,
      "learning_rate": 1.8802955149865852e-08,
      "loss": 0.9298,
      "step": 495
    },
    {
      "epoch": 3.94,
      "learning_rate": 8.357014456272794e-09,
      "loss": 0.8072,
      "step": 496
    }
  ],
  "logging_steps": 1,
  "max_steps": 496,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 1.6132667358786355e+17,
  "trial_name": null,
  "trial_params": null
}