|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 3.937313432835821, |
|
"eval_steps": 42, |
|
"global_step": 668, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 2e-05, |
|
"loss": 1.6541, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"eval_loss": 1.7634071111679077, |
|
"eval_runtime": 14.5059, |
|
"eval_samples_per_second": 56.46, |
|
"eval_steps_per_second": 28.264, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 4e-05, |
|
"loss": 1.6887, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 6e-05, |
|
"loss": 1.828, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 8e-05, |
|
"loss": 1.589, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0001, |
|
"loss": 1.927, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00012, |
|
"loss": 1.6362, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00014, |
|
"loss": 1.8546, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00016, |
|
"loss": 1.5843, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00018, |
|
"loss": 1.5592, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0002, |
|
"loss": 1.5539, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001999988602302209, |
|
"loss": 1.4449, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.0001999954409468652, |
|
"loss": 1.8818, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.0001999897422278767, |
|
"loss": 1.6656, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00019998176420316002, |
|
"loss": 1.4607, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001999715070545774, |
|
"loss": 1.4013, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019995897101594454, |
|
"loss": 1.5258, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00019994415637302547, |
|
"loss": 1.404, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019992706346352577, |
|
"loss": 1.3919, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00019990769267708516, |
|
"loss": 1.355, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00019988604445526827, |
|
"loss": 1.3763, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.000199862119291555, |
|
"loss": 1.3314, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00019983591773132882, |
|
"loss": 1.4246, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019980744037186469, |
|
"loss": 1.5723, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00019977668786231534, |
|
"loss": 1.2536, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.0001997436609036963, |
|
"loss": 1.3087, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.0001997083602488702, |
|
"loss": 1.2783, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00019967078670252945, |
|
"loss": 1.2792, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019963094112117785, |
|
"loss": 1.2476, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00019958882441311126, |
|
"loss": 1.265, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00019954443753839667, |
|
"loss": 1.0884, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019949778150885042, |
|
"loss": 1.3294, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019944885738801518, |
|
"loss": 1.3434, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019939766629113566, |
|
"loss": 1.1457, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00019934420938513313, |
|
"loss": 1.2138, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019928848788857887, |
|
"loss": 1.2118, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00019923050307166655, |
|
"loss": 1.1426, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00019917025625618292, |
|
"loss": 1.6279, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.000199107748815478, |
|
"loss": 1.5021, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00019904298217443366, |
|
"loss": 1.3728, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019897595780943102, |
|
"loss": 1.2034, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00019890667724831707, |
|
"loss": 1.3718, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00019883514207036956, |
|
"loss": 1.2512, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_loss": 0.8978323936462402, |
|
"eval_runtime": 14.6352, |
|
"eval_samples_per_second": 55.961, |
|
"eval_steps_per_second": 28.015, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00019876135390626122, |
|
"loss": 1.1787, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001986853144380224, |
|
"loss": 1.1917, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019860702539900287, |
|
"loss": 1.1933, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00019852648857383222, |
|
"loss": 1.1922, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019844370579837927, |
|
"loss": 1.3017, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00019835867895971014, |
|
"loss": 1.1193, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.0001982714099960452, |
|
"loss": 1.1572, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019818190089671508, |
|
"loss": 1.3277, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00019809015370211502, |
|
"loss": 1.0658, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.0001979961705036587, |
|
"loss": 1.1656, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00019789995344373024, |
|
"loss": 1.4204, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00019780150471563558, |
|
"loss": 1.1551, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001977008265635525, |
|
"loss": 1.0993, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00019759792128247922, |
|
"loss": 1.1311, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00019749279121818235, |
|
"loss": 1.2163, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019738543876714334, |
|
"loss": 1.3178, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00019727586637650373, |
|
"loss": 1.3744, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019716407654400952, |
|
"loss": 1.1413, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 0.00019705007181795416, |
|
"loss": 1.0372, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00019693385479712048, |
|
"loss": 1.1601, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019681542813072145, |
|
"loss": 1.0976, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 0.00019669479451833976, |
|
"loss": 1.1584, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019657195670986637, |
|
"loss": 1.0962, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 0.00019644691750543767, |
|
"loss": 1.1044, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 0.0001963196797553718, |
|
"loss": 1.2431, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019619024636010363, |
|
"loss": 1.1651, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 0.00019605862027011856, |
|
"loss": 1.0513, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019592480448588542, |
|
"loss": 1.0175, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 0.00019578880205778793, |
|
"loss": 1.1306, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 0.00019565061608605526, |
|
"loss": 1.3121, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019551024972069126, |
|
"loss": 1.266, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 0.00019536770616140276, |
|
"loss": 1.1099, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.0001952229886575266, |
|
"loss": 1.1012, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 0.00019507610050795558, |
|
"loss": 1.1272, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 0.0001949270450610631, |
|
"loss": 1.2016, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00019477582571462705, |
|
"loss": 1.1746, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00019462244591575222, |
|
"loss": 1.1349, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.0001944669091607919, |
|
"loss": 1.4311, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 0.00019430921899526787, |
|
"loss": 1.1033, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 0.00019414937901378982, |
|
"loss": 1.2491, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.0001939873928599734, |
|
"loss": 1.1044, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 0.00019382326422635705, |
|
"loss": 1.1008, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_loss": 0.8307072520256042, |
|
"eval_runtime": 14.6668, |
|
"eval_samples_per_second": 55.841, |
|
"eval_steps_per_second": 27.954, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.0001936569968543179, |
|
"loss": 1.2317, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 0.00019348859453398646, |
|
"loss": 1.2317, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 0.00019331806110416027, |
|
"loss": 1.0989, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00019314540045221626, |
|
"loss": 1.0466, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 0.00019297061651402236, |
|
"loss": 1.0798, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001927937132738476, |
|
"loss": 1.1175, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 0.0001926146947642712, |
|
"loss": 1.1292, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 0.000192433565066091, |
|
"loss": 1.0464, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00019225032830823011, |
|
"loss": 1.1549, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.00019206498866764288, |
|
"loss": 1.1129, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.00019187755036921978, |
|
"loss": 0.9965, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 0.0001916880176856909, |
|
"loss": 1.0767, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 0.0001914963949375288, |
|
"loss": 1.1546, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.0001913026864928498, |
|
"loss": 0.9627, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 0.00019110689676731454, |
|
"loss": 1.0039, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00019090903022402729, |
|
"loss": 1.066, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 0.00019070909137343408, |
|
"loss": 1.0654, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00019050708477322018, |
|
"loss": 1.3323, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 0.00019030301502820596, |
|
"loss": 1.1247, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 0.0001900968867902419, |
|
"loss": 1.0261, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018988870475810282, |
|
"loss": 1.0935, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 0.00018967847367738048, |
|
"loss": 1.1043, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00018946619834037546, |
|
"loss": 1.1982, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 0.00018925188358598813, |
|
"loss": 1.0445, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00018903553429960802, |
|
"loss": 1.086, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00018881715541300276, |
|
"loss": 1.1509, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 0.00018859675190420537, |
|
"loss": 1.1473, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.00018837432879740114, |
|
"loss": 1.1677, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 0.0001881498911628127, |
|
"loss": 1.006, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 0.00018792344411658468, |
|
"loss": 1.0503, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00018769499282066717, |
|
"loss": 1.163, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 0.00018746454248269777, |
|
"loss": 1.1443, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.0001872320983558831, |
|
"loss": 0.9785, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 0.000186997665738879, |
|
"loss": 1.0605, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 0.0001867612499756697, |
|
"loss": 1.0203, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00018652285645544603, |
|
"loss": 1.146, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 0.00018628249061248262, |
|
"loss": 1.12, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00018604015792601396, |
|
"loss": 1.1499, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 0.00018579586392010943, |
|
"loss": 1.1273, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 0.0001855496141635476, |
|
"loss": 1.0404, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00018530141426968902, |
|
"loss": 1.1066, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.0001850512698963485, |
|
"loss": 1.0685, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_loss": 0.8026405572891235, |
|
"eval_runtime": 14.5968, |
|
"eval_samples_per_second": 56.108, |
|
"eval_steps_per_second": 28.088, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.000184799186745666, |
|
"loss": 1.0494, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 0.00018454517056397661, |
|
"loss": 1.1905, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 0.0001842892271416797, |
|
"loss": 1.2034, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00018403136231310684, |
|
"loss": 1.2088, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 0.00018377158195638876, |
|
"loss": 0.9303, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00018350989199332154, |
|
"loss": 1.1574, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 0.00018324629838923132, |
|
"loss": 1.1673, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 0.00018298080715283858, |
|
"loss": 1.1204, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00018271342433612113, |
|
"loss": 1.0752, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 0.00018244415603417603, |
|
"loss": 1.0121, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00018217300838508073, |
|
"loss": 1.0975, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 0.00018189998756975318, |
|
"loss": 0.9982, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 0.00018162509981181084, |
|
"loss": 1.1922, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.0001813483513774289, |
|
"loss": 0.9532, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00018106974857519736, |
|
"loss": 1.0813, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0001807892977559774, |
|
"loss": 0.9656, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 0.0001805070053127563, |
|
"loss": 1.1161, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 0.00018022287768050202, |
|
"loss": 1.0143, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.0001799369213360163, |
|
"loss": 1.1242, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 0.00017964914279778715, |
|
"loss": 1.0747, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.00017935954862584018, |
|
"loss": 1.1551, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 0.0001790681454215891, |
|
"loss": 0.9538, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 0.00017877493982768527, |
|
"loss": 1.0621, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.0001784799385278661, |
|
"loss": 1.0229, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 0.000178183148246803, |
|
"loss": 0.9991, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00017788457574994778, |
|
"loss": 1.0045, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 0.00017758422784337863, |
|
"loss": 1.0675, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 0.00017728211137364489, |
|
"loss": 0.962, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.000176978233227611, |
|
"loss": 1.0145, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.00017667260033229953, |
|
"loss": 0.9576, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00017636521965473323, |
|
"loss": 1.0692, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 0.00017605609820177617, |
|
"loss": 1.2074, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 0.00017574524301997423, |
|
"loss": 1.1489, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00017543266119539422, |
|
"loss": 0.9962, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 0.00017511835985346253, |
|
"loss": 0.8922, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.00017480234615880247, |
|
"loss": 0.9248, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 0.0001744846273150713, |
|
"loss": 1.1621, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 0.00017416521056479577, |
|
"loss": 1.0516, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00017384410318920697, |
|
"loss": 1.0086, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 0.00017352131250807467, |
|
"loss": 1.0909, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00017319684587954002, |
|
"loss": 1.1085, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.0001728707106999482, |
|
"loss": 1.1573, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 0.7850226163864136, |
|
"eval_runtime": 14.526, |
|
"eval_samples_per_second": 56.382, |
|
"eval_steps_per_second": 28.225, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00017254291440367968, |
|
"loss": 1.1107, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.0001722134644629807, |
|
"loss": 1.1537, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 0.00017188236838779295, |
|
"loss": 1.1293, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 1.01, |
|
"learning_rate": 0.00017154963372558246, |
|
"loss": 1.0653, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.00017121526806116748, |
|
"loss": 1.0901, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 0.00017087927901654557, |
|
"loss": 1.1569, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00017054167425071995, |
|
"loss": 1.022, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00017020246145952477, |
|
"loss": 1.0639, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"learning_rate": 0.00016986164837544987, |
|
"loss": 1.071, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00016951924276746425, |
|
"loss": 0.924, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 0.00016917525244083918, |
|
"loss": 1.0146, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00016882968523697028, |
|
"loss": 1.0599, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.06, |
|
"learning_rate": 0.00016848254903319867, |
|
"loss": 1.0237, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 1.07, |
|
"learning_rate": 0.00016813385174263137, |
|
"loss": 0.9614, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.0001677836013139611, |
|
"loss": 0.9791, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"learning_rate": 0.00016743180573128495, |
|
"loss": 1.0945, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.00016707847301392236, |
|
"loss": 1.0091, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 0.00016672361121623238, |
|
"loss": 1.1076, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 1.1, |
|
"learning_rate": 0.00016636722842743013, |
|
"loss": 0.9472, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0001660093327714022, |
|
"loss": 1.0485, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 0.0001656499324065217, |
|
"loss": 1.0056, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00016528903552546207, |
|
"loss": 1.0819, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.00016492665035501046, |
|
"loss": 1.1128, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 1.13, |
|
"learning_rate": 0.00016456278515588024, |
|
"loss": 1.056, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.00016419744822252253, |
|
"loss": 1.0468, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 1.14, |
|
"learning_rate": 0.0001638306478829373, |
|
"loss": 0.9966, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.0001634623924984833, |
|
"loss": 0.9898, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 1.15, |
|
"learning_rate": 0.00016309269046368776, |
|
"loss": 1.0431, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"learning_rate": 0.0001627215502060548, |
|
"loss": 1.0327, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.00016234898018587337, |
|
"loss": 1.0382, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 1.17, |
|
"learning_rate": 0.00016197498889602448, |
|
"loss": 1.0287, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0001615995848617876, |
|
"loss": 1.0244, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 0.0001612227766406461, |
|
"loss": 1.0989, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 0.00016084457282209243, |
|
"loss": 1.1219, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.00016046498202743233, |
|
"loss": 0.9456, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"learning_rate": 0.00016008401290958807, |
|
"loss": 1.0259, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.0001597016741529014, |
|
"loss": 1.0929, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.00015931797447293552, |
|
"loss": 1.0716, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 1.22, |
|
"learning_rate": 0.00015893292261627643, |
|
"loss": 0.9947, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00015854652736033354, |
|
"loss": 0.9707, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 1.23, |
|
"learning_rate": 0.00015815879751313955, |
|
"loss": 0.9492, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.0001577697419131497, |
|
"loss": 0.9346, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"eval_loss": 0.7729161381721497, |
|
"eval_runtime": 14.4823, |
|
"eval_samples_per_second": 56.552, |
|
"eval_steps_per_second": 28.31, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"learning_rate": 0.00015737936942904023, |
|
"loss": 0.9925, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 1.25, |
|
"learning_rate": 0.00015698768895950642, |
|
"loss": 1.032, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00015659470943305955, |
|
"loss": 1.0372, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 0.00015620043980782327, |
|
"loss": 0.9926, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.00015580488907132974, |
|
"loss": 0.9653, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 0.00015540806624031442, |
|
"loss": 1.1205, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 0.00015500998036051074, |
|
"loss": 1.0311, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.0001546106405064438, |
|
"loss": 0.9639, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 1.29, |
|
"learning_rate": 0.00015421005578122356, |
|
"loss": 1.0489, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.00015380823531633729, |
|
"loss": 1.0312, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.3, |
|
"learning_rate": 0.00015340518827144145, |
|
"loss": 1.0165, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00015300092383415282, |
|
"loss": 1.009, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.0001525954512198392, |
|
"loss": 1.0458, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"learning_rate": 0.0001521887796714092, |
|
"loss": 1.0606, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.0001517809184591017, |
|
"loss": 0.9358, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 0.00015137187688027436, |
|
"loss": 1.0373, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.00015096166425919175, |
|
"loss": 1.1689, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 1.34, |
|
"learning_rate": 0.00015055028994681284, |
|
"loss": 1.1171, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 1.35, |
|
"learning_rate": 0.00015013776332057786, |
|
"loss": 0.9495, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.0001497240937841944, |
|
"loss": 1.0251, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 0.00014930929076742316, |
|
"loss": 1.132, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.00014889336372586305, |
|
"loss": 1.0223, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 1.37, |
|
"learning_rate": 0.00014847632214073548, |
|
"loss": 0.9179, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 1.38, |
|
"learning_rate": 0.00014805817551866838, |
|
"loss": 0.915, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.0001476389333914794, |
|
"loss": 1.0096, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 1.39, |
|
"learning_rate": 0.0001472186053159587, |
|
"loss": 1.0108, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.00014679720087365096, |
|
"loss": 0.9954, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.0001463747296706372, |
|
"loss": 1.0534, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 1.41, |
|
"learning_rate": 0.00014595120133731565, |
|
"loss": 0.9521, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.0001455266255281821, |
|
"loss": 1.086, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.42, |
|
"learning_rate": 0.00014510101192161018, |
|
"loss": 0.986, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.0001446743702196304, |
|
"loss": 1.1084, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 1.43, |
|
"learning_rate": 0.00014424671014770906, |
|
"loss": 0.9904, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"learning_rate": 0.0001438180414545267, |
|
"loss": 1.019, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.00014338837391175582, |
|
"loss": 1.1684, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 0.00014295771731383797, |
|
"loss": 0.9792, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.00014252608147776065, |
|
"loss": 1.0375, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 1.46, |
|
"learning_rate": 0.0001420934762428335, |
|
"loss": 0.9413, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 0.00014165991147046403, |
|
"loss": 1.0807, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.00014122539704393265, |
|
"loss": 0.8643, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"learning_rate": 0.00014078994286816768, |
|
"loss": 0.9569, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.00014035355886951923, |
|
"loss": 1.0299, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"eval_loss": 0.761233925819397, |
|
"eval_runtime": 14.3861, |
|
"eval_samples_per_second": 56.93, |
|
"eval_steps_per_second": 28.5, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 1.49, |
|
"learning_rate": 0.00013991625499553325, |
|
"loss": 0.9685, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.0001394780412147245, |
|
"loss": 1.0123, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.00013903892751634947, |
|
"loss": 0.9601, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 1.51, |
|
"learning_rate": 0.00013859892391017865, |
|
"loss": 0.9661, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 0.00013815804042626828, |
|
"loss": 1.0132, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"learning_rate": 0.00013771628711473172, |
|
"loss": 1.0511, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 0.00013727367404551055, |
|
"loss": 1.0637, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.0001368302113081447, |
|
"loss": 0.9781, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 0.00013638590901154276, |
|
"loss": 1.0151, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 0.00013594077728375128, |
|
"loss": 0.9971, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 0.0001354948262717241, |
|
"loss": 0.9202, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"learning_rate": 0.00013504806614109098, |
|
"loss": 1.0605, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.0001346005070759258, |
|
"loss": 1.0303, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 1.57, |
|
"learning_rate": 0.0001341521592785145, |
|
"loss": 0.9228, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.00013370303296912249, |
|
"loss": 0.9659, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 1.58, |
|
"learning_rate": 0.0001332531383857616, |
|
"loss": 0.9742, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0001328024857839569, |
|
"loss": 1.0524, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 0.00013235108543651272, |
|
"loss": 1.0277, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"learning_rate": 0.0001318989476332785, |
|
"loss": 0.9731, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.00013144608268091435, |
|
"loss": 1.0629, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 0.000130992500902656, |
|
"loss": 0.8943, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 0.00013053821263807946, |
|
"loss": 0.9997, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.00013008322824286555, |
|
"loss": 1.0465, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 1.63, |
|
"learning_rate": 0.00012962755808856342, |
|
"loss": 1.0259, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.00012917121256235455, |
|
"loss": 0.9458, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 0.00012871420206681571, |
|
"loss": 1.0322, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 1.65, |
|
"learning_rate": 0.000128256537019682, |
|
"loss": 0.8538, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.00012779822785360912, |
|
"loss": 0.9602, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.66, |
|
"learning_rate": 0.00012733928501593587, |
|
"loss": 0.9953, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.00012687971896844575, |
|
"loss": 0.8757, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 0.00012641954018712863, |
|
"loss": 0.9382, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.00012595875916194188, |
|
"loss": 0.8791, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.00012549738639657115, |
|
"loss": 0.9245, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 1.69, |
|
"learning_rate": 0.00012503543240819127, |
|
"loss": 0.9611, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.00012457290772722608, |
|
"loss": 1.0328, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 0.00012410982289710865, |
|
"loss": 0.8988, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.0001236461884740409, |
|
"loss": 1.0435, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 1.71, |
|
"learning_rate": 0.00012318201502675285, |
|
"loss": 0.9486, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"learning_rate": 0.0001227173131362619, |
|
"loss": 0.8819, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.00012225209339563145, |
|
"loss": 1.024, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 0.00012178636640972953, |
|
"loss": 0.9137, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.00012132014279498703, |
|
"loss": 1.0057, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"eval_loss": 0.754410445690155, |
|
"eval_runtime": 14.5906, |
|
"eval_samples_per_second": 56.132, |
|
"eval_steps_per_second": 28.1, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 1.74, |
|
"learning_rate": 0.00012085343317915565, |
|
"loss": 1.0209, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 0.00012038624820106572, |
|
"loss": 0.9985, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.0001199185985103836, |
|
"loss": 0.7608, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"learning_rate": 0.00011945049476736905, |
|
"loss": 0.931, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.00011898194764263197, |
|
"loss": 1.0362, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 1.77, |
|
"learning_rate": 0.00011851296781688952, |
|
"loss": 1.097, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.00011804356598072223, |
|
"loss": 0.9304, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.00011757375283433076, |
|
"loss": 0.9989, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 0.00011710353908729156, |
|
"loss": 0.971, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 0.00011663293545831302, |
|
"loss": 0.9942, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"learning_rate": 0.00011616195267499102, |
|
"loss": 0.9824, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 1.81, |
|
"learning_rate": 0.00011569060147356441, |
|
"loss": 0.9901, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.00011521889259867032, |
|
"loss": 1.0445, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 0.00011474683680309912, |
|
"loss": 0.9421, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.0001142744448475494, |
|
"loss": 0.9692, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 1.83, |
|
"learning_rate": 0.00011380172750038269, |
|
"loss": 0.9473, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.84, |
|
"learning_rate": 0.0001133286955373779, |
|
"loss": 1.0931, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 0.00011285535974148576, |
|
"loss": 1.0236, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 1.85, |
|
"learning_rate": 0.00011238173090258293, |
|
"loss": 0.9002, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.00011190781981722623, |
|
"loss": 0.9209, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 1.86, |
|
"learning_rate": 0.00011143363728840625, |
|
"loss": 0.9155, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 0.00011095919412530136, |
|
"loss": 1.0064, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.0001104845011430311, |
|
"loss": 0.947, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"learning_rate": 0.00011000956916240985, |
|
"loss": 0.9293, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.00010953440900969994, |
|
"loss": 0.9596, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 0.00010905903151636501, |
|
"loss": 0.9835, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.9, |
|
"learning_rate": 0.00010858344751882304, |
|
"loss": 0.9722, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.00010810766785819946, |
|
"loss": 0.8318, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 0.00010763170338007978, |
|
"loss": 1.0073, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 0.00010715556493426262, |
|
"loss": 0.9922, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"learning_rate": 0.00010667926337451217, |
|
"loss": 0.9459, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 1.93, |
|
"learning_rate": 0.00010620280955831087, |
|
"loss": 0.9276, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.000105726214346612, |
|
"loss": 1.027, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 1.94, |
|
"learning_rate": 0.00010524948860359193, |
|
"loss": 0.8963, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.00010477264319640252, |
|
"loss": 1.0102, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 1.95, |
|
"learning_rate": 0.00010429568899492348, |
|
"loss": 0.9058, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.0001038186368715145, |
|
"loss": 1.137, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 0.00010334149770076747, |
|
"loss": 0.9578, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 1.97, |
|
"learning_rate": 0.00010286428235925849, |
|
"loss": 0.9282, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.00010238700172530009, |
|
"loss": 0.9325, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 1.98, |
|
"learning_rate": 0.0001019096666786931, |
|
"loss": 0.9145, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"learning_rate": 0.00010143228810047875, |
|
"loss": 0.976, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 1.99, |
|
"eval_loss": 0.7478241920471191, |
|
"eval_runtime": 14.477, |
|
"eval_samples_per_second": 56.573, |
|
"eval_steps_per_second": 28.321, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.00010095487687269054, |
|
"loss": 1.005, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 0.00010047744387810632, |
|
"loss": 0.9885, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 0.0001, |
|
"loss": 1.0033, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 9.952255612189368e-05, |
|
"loss": 0.867, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 9.904512312730948e-05, |
|
"loss": 1.0728, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 9.856771189952126e-05, |
|
"loss": 0.9978, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 2.01, |
|
"learning_rate": 9.809033332130693e-05, |
|
"loss": 0.8017, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 2.02, |
|
"learning_rate": 9.761299827469992e-05, |
|
"loss": 0.8687, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 9.713571764074152e-05, |
|
"loss": 1.1273, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 2.03, |
|
"learning_rate": 9.665850229923258e-05, |
|
"loss": 0.9474, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 9.618136312848551e-05, |
|
"loss": 0.9905, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 9.570431100507651e-05, |
|
"loss": 0.9277, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 2.05, |
|
"learning_rate": 9.52273568035975e-05, |
|
"loss": 0.8655, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 9.475051139640809e-05, |
|
"loss": 0.9271, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 9.4273785653388e-05, |
|
"loss": 1.0323, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 9.379719044168915e-05, |
|
"loss": 0.9624, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 2.07, |
|
"learning_rate": 9.332073662548784e-05, |
|
"loss": 0.8565, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"learning_rate": 9.28444350657374e-05, |
|
"loss": 0.99, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 9.236829661992023e-05, |
|
"loss": 1.0581, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 9.189233214180056e-05, |
|
"loss": 0.8963, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 9.141655248117698e-05, |
|
"loss": 0.9169, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 9.094096848363502e-05, |
|
"loss": 1.0023, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 9.046559099030012e-05, |
|
"loss": 0.8931, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 8.999043083759017e-05, |
|
"loss": 1.0086, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"learning_rate": 8.951549885696889e-05, |
|
"loss": 0.9737, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 8.904080587469868e-05, |
|
"loss": 1.006, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 8.856636271159377e-05, |
|
"loss": 0.8521, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 2.14, |
|
"learning_rate": 8.809218018277378e-05, |
|
"loss": 0.8799, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 8.761826909741709e-05, |
|
"loss": 0.86, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 8.714464025851427e-05, |
|
"loss": 0.9277, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 8.667130446262214e-05, |
|
"loss": 0.9753, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"learning_rate": 8.619827249961733e-05, |
|
"loss": 0.9232, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 2.17, |
|
"learning_rate": 8.57255551524506e-05, |
|
"loss": 1.0167, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 8.525316319690092e-05, |
|
"loss": 1.1038, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 8.47811074013297e-05, |
|
"loss": 0.9303, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 8.430939852643558e-05, |
|
"loss": 0.9683, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 2.19, |
|
"learning_rate": 8.383804732500902e-05, |
|
"loss": 0.9889, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"learning_rate": 8.336706454168701e-05, |
|
"loss": 0.7554, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 8.289646091270849e-05, |
|
"loss": 0.9968, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 8.242624716566927e-05, |
|
"loss": 1.0212, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 8.195643401927777e-05, |
|
"loss": 0.9033, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"learning_rate": 8.148703218311053e-05, |
|
"loss": 1.0765, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 2.22, |
|
"eval_loss": 0.7438732981681824, |
|
"eval_runtime": 14.4192, |
|
"eval_samples_per_second": 56.799, |
|
"eval_steps_per_second": 28.434, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 2.23, |
|
"learning_rate": 8.101805235736804e-05, |
|
"loss": 0.9057, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 8.054950523263096e-05, |
|
"loss": 0.9076, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 8.008140148961641e-05, |
|
"loss": 0.9456, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 7.96137517989343e-05, |
|
"loss": 1.0068, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 7.914656682084437e-05, |
|
"loss": 1.015, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 2.26, |
|
"learning_rate": 7.867985720501301e-05, |
|
"loss": 0.9624, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 7.821363359027048e-05, |
|
"loss": 0.9623, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 7.774790660436858e-05, |
|
"loss": 0.9155, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 7.728268686373814e-05, |
|
"loss": 1.0116, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 2.28, |
|
"learning_rate": 7.681798497324716e-05, |
|
"loss": 0.9035, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 2.29, |
|
"learning_rate": 7.635381152595915e-05, |
|
"loss": 0.9018, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 7.589017710289139e-05, |
|
"loss": 1.0231, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 7.542709227277396e-05, |
|
"loss": 0.9325, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 7.496456759180875e-05, |
|
"loss": 1.0224, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 2.31, |
|
"learning_rate": 7.450261360342888e-05, |
|
"loss": 0.9142, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 7.404124083805819e-05, |
|
"loss": 0.9583, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 7.358045981287141e-05, |
|
"loss": 1.007, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 7.312028103155426e-05, |
|
"loss": 0.8724, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 7.266071498406417e-05, |
|
"loss": 0.8942, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 7.220177214639088e-05, |
|
"loss": 0.9398, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 2.35, |
|
"learning_rate": 7.174346298031804e-05, |
|
"loss": 0.8393, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 7.128579793318428e-05, |
|
"loss": 0.9286, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 7.082878743764545e-05, |
|
"loss": 0.924, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 7.037244191143661e-05, |
|
"loss": 0.96, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 2.37, |
|
"learning_rate": 6.991677175713449e-05, |
|
"loss": 0.9981, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 2.38, |
|
"learning_rate": 6.946178736192053e-05, |
|
"loss": 1.0843, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 6.900749909734406e-05, |
|
"loss": 0.9198, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 6.855391731908567e-05, |
|
"loss": 0.9537, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 6.810105236672155e-05, |
|
"loss": 0.9219, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"learning_rate": 6.764891456348729e-05, |
|
"loss": 0.963, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 6.719751421604309e-05, |
|
"loss": 1.0003, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 2.41, |
|
"learning_rate": 6.674686161423843e-05, |
|
"loss": 1.0074, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 2.42, |
|
"learning_rate": 6.629696703087754e-05, |
|
"loss": 1.0361, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 6.584784072148555e-05, |
|
"loss": 0.9224, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 6.539949292407421e-05, |
|
"loss": 0.9658, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 6.495193385890901e-05, |
|
"loss": 1.0963, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"learning_rate": 6.450517372827591e-05, |
|
"loss": 1.0044, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 6.405922271624874e-05, |
|
"loss": 0.998, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 6.361409098845725e-05, |
|
"loss": 0.8947, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 6.316978869185532e-05, |
|
"loss": 1.057, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 6.272632595448947e-05, |
|
"loss": 1.0015, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 6.22837128852683e-05, |
|
"loss": 0.8845, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"eval_loss": 0.7409489750862122, |
|
"eval_runtime": 14.4788, |
|
"eval_samples_per_second": 56.566, |
|
"eval_steps_per_second": 28.317, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 6.184195957373176e-05, |
|
"loss": 0.8122, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 6.140107608982136e-05, |
|
"loss": 0.984, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 6.0961072483650526e-05, |
|
"loss": 1.065, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 6.05219587852755e-05, |
|
"loss": 0.9648, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 6.008374500446676e-05, |
|
"loss": 0.9267, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 5.964644113048079e-05, |
|
"loss": 0.9327, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 5.921005713183235e-05, |
|
"loss": 0.9545, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 5.877460295606738e-05, |
|
"loss": 0.8972, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 5.834008852953603e-05, |
|
"loss": 0.9857, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 5.790652375716652e-05, |
|
"loss": 0.9189, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 5.74739185222394e-05, |
|
"loss": 0.8486, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 5.704228268616208e-05, |
|
"loss": 0.9442, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 5.6611626088244194e-05, |
|
"loss": 0.9095, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 5.6181958545473325e-05, |
|
"loss": 1.0581, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 5.575328985229098e-05, |
|
"loss": 0.8151, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 5.5325629780369635e-05, |
|
"loss": 0.8941, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 5.48989880783898e-05, |
|
"loss": 0.9767, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 5.4473374471817906e-05, |
|
"loss": 0.8874, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 5.4048798662684376e-05, |
|
"loss": 0.8578, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 5.362527032936277e-05, |
|
"loss": 0.9191, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 5.320279912634907e-05, |
|
"loss": 0.7622, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 5.278139468404133e-05, |
|
"loss": 0.9143, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 5.236106660852058e-05, |
|
"loss": 0.9184, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 5.1941824481331626e-05, |
|
"loss": 0.9776, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 5.1523677859264516e-05, |
|
"loss": 0.9989, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 5.110663627413694e-05, |
|
"loss": 0.8539, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 5.069070923257685e-05, |
|
"loss": 1.0078, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 5.0275906215805625e-05, |
|
"loss": 0.9395, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.986223667942214e-05, |
|
"loss": 0.7978, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 4.944971005318716e-05, |
|
"loss": 0.9456, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 4.903833574080825e-05, |
|
"loss": 0.991, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4.862812311972567e-05, |
|
"loss": 0.8867, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 4.8219081540898295e-05, |
|
"loss": 0.9519, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.781122032859079e-05, |
|
"loss": 0.8153, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 4.740454878016084e-05, |
|
"loss": 0.8457, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 4.699907616584721e-05, |
|
"loss": 0.9878, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.6594811728558584e-05, |
|
"loss": 0.9616, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 4.6191764683662744e-05, |
|
"loss": 0.8892, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.578994421877645e-05, |
|
"loss": 0.8593, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 4.538935949355623e-05, |
|
"loss": 0.8618, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 4.499001963948929e-05, |
|
"loss": 1.0498, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.45919337596856e-05, |
|
"loss": 1.0198, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"eval_loss": 0.7379047870635986, |
|
"eval_runtime": 14.5017, |
|
"eval_samples_per_second": 56.476, |
|
"eval_steps_per_second": 28.273, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 4.41951109286703e-05, |
|
"loss": 0.8737, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.379956019217675e-05, |
|
"loss": 0.8282, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 4.340529056694047e-05, |
|
"loss": 0.8425, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 4.3012311040493594e-05, |
|
"loss": 0.8594, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.2620630570959775e-05, |
|
"loss": 1.0157, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 4.2230258086850374e-05, |
|
"loss": 1.0247, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 4.184120248686048e-05, |
|
"loss": 0.956, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 4.1453472639666457e-05, |
|
"loss": 0.9221, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 4.106707738372357e-05, |
|
"loss": 0.8249, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.0682025527064486e-05, |
|
"loss": 0.9179, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 4.029832584709864e-05, |
|
"loss": 1.0457, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 3.991598709041196e-05, |
|
"loss": 0.8553, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 3.9535017972567675e-05, |
|
"loss": 0.8563, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 3.915542717790759e-05, |
|
"loss": 0.9828, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 3.877722335935394e-05, |
|
"loss": 0.9853, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 3.840041513821243e-05, |
|
"loss": 0.9801, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 3.802501110397553e-05, |
|
"loss": 0.8679, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 3.7651019814126654e-05, |
|
"loss": 0.8846, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 3.727844979394526e-05, |
|
"loss": 0.8068, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 3.6907309536312276e-05, |
|
"loss": 0.8033, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 3.6537607501516715e-05, |
|
"loss": 0.9402, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 3.616935211706275e-05, |
|
"loss": 0.8132, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 3.5802551777477476e-05, |
|
"loss": 0.8682, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 3.543721484411976e-05, |
|
"loss": 0.9198, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 3.5073349644989564e-05, |
|
"loss": 0.958, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 3.4710964474537966e-05, |
|
"loss": 0.8973, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 3.4350067593478356e-05, |
|
"loss": 1.0462, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 3.399066722859782e-05, |
|
"loss": 0.9042, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 3.363277157256988e-05, |
|
"loss": 0.9075, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 3.327638878376764e-05, |
|
"loss": 1.0444, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 3.292152698607768e-05, |
|
"loss": 0.9098, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 3.256819426871507e-05, |
|
"loss": 1.0037, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 3.2216398686038926e-05, |
|
"loss": 0.8278, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 3.1866148257368665e-05, |
|
"loss": 0.9869, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 3.15174509668014e-05, |
|
"loss": 0.8511, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 3.117031476302975e-05, |
|
"loss": 0.9983, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 3.0824747559160836e-05, |
|
"loss": 1.0146, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 3.0480757232535772e-05, |
|
"loss": 0.9705, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 3.0138351624550164e-05, |
|
"loss": 0.8197, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 2.979753854047522e-05, |
|
"loss": 1.0835, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 2.97, |
|
"learning_rate": 2.9458325749280057e-05, |
|
"loss": 0.8451, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 2.9120720983454463e-05, |
|
"loss": 0.9712, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"eval_loss": 0.7352047562599182, |
|
"eval_runtime": 14.5599, |
|
"eval_samples_per_second": 56.251, |
|
"eval_steps_per_second": 28.16, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 2.8784731938832556e-05, |
|
"loss": 1.0257, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 2.845036627441755e-05, |
|
"loss": 1.1134, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 2.8117631612207084e-05, |
|
"loss": 0.8619, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.778653553701932e-05, |
|
"loss": 0.8906, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 2.745708559632032e-05, |
|
"loss": 0.8772, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 2.7129289300051787e-05, |
|
"loss": 0.8894, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 2.6803154120460007e-05, |
|
"loss": 0.8776, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 2.6478687491925357e-05, |
|
"loss": 0.8533, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 3.01, |
|
"learning_rate": 2.6155896810793036e-05, |
|
"loss": 0.8595, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 2.5834789435204243e-05, |
|
"loss": 0.9187, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 2.5515372684928683e-05, |
|
"loss": 0.8633, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 3.03, |
|
"learning_rate": 2.5197653841197543e-05, |
|
"loss": 0.9337, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 2.4881640146537498e-05, |
|
"loss": 0.9204, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 3.04, |
|
"learning_rate": 2.4567338804605756e-05, |
|
"loss": 0.9667, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 2.4254756980025773e-05, |
|
"loss": 0.8439, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 3.05, |
|
"learning_rate": 2.394390179822382e-05, |
|
"loss": 0.8652, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 3.06, |
|
"learning_rate": 2.3634780345266806e-05, |
|
"loss": 0.8782, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 2.3327399667700477e-05, |
|
"loss": 0.9652, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 2.3021766772388986e-05, |
|
"loss": 0.8984, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 2.2717888626355134e-05, |
|
"loss": 0.8815, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 2.2415772156621382e-05, |
|
"loss": 0.897, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 2.211542425005223e-05, |
|
"loss": 0.9867, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 2.181685175319702e-05, |
|
"loss": 0.8769, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 3.1, |
|
"learning_rate": 2.1520061472133902e-05, |
|
"loss": 0.987, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 2.122506017231477e-05, |
|
"loss": 0.8578, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 3.11, |
|
"learning_rate": 2.0931854578410905e-05, |
|
"loss": 1.0012, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 3.12, |
|
"learning_rate": 2.064045137415982e-05, |
|
"loss": 0.9574, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 2.035085720221288e-05, |
|
"loss": 1.0041, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 3.13, |
|
"learning_rate": 2.0063078663983714e-05, |
|
"loss": 0.9809, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 1.9777122319497986e-05, |
|
"loss": 0.912, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 3.14, |
|
"learning_rate": 1.9492994687243714e-05, |
|
"loss": 0.9197, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 1.9210702244022617e-05, |
|
"loss": 1.0735, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 1.893025142480265e-05, |
|
"loss": 0.8944, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 1.8651648622571128e-05, |
|
"loss": 0.9093, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 1.837490018818917e-05, |
|
"loss": 0.8556, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 3.17, |
|
"learning_rate": 1.8100012430246837e-05, |
|
"loss": 0.8463, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 1.7826991614919265e-05, |
|
"loss": 0.8961, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 1.7555843965823992e-05, |
|
"loss": 1.0002, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 3.19, |
|
"learning_rate": 1.7286575663878877e-05, |
|
"loss": 0.9748, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 1.7019192847161425e-05, |
|
"loss": 0.8891, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 3.2, |
|
"learning_rate": 1.6753701610768724e-05, |
|
"loss": 0.9421, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 1.6490108006678494e-05, |
|
"loss": 0.9069, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"eval_loss": 0.7350304126739502, |
|
"eval_runtime": 14.3775, |
|
"eval_samples_per_second": 56.964, |
|
"eval_steps_per_second": 28.517, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 3.21, |
|
"learning_rate": 1.6228418043611227e-05, |
|
"loss": 0.9492, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 3.22, |
|
"learning_rate": 1.5968637686893186e-05, |
|
"loss": 0.932, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 1.57107728583203e-05, |
|
"loss": 0.8966, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 1.545482943602341e-05, |
|
"loss": 0.8305, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 1.5200813254334012e-05, |
|
"loss": 0.8215, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 1.4948730103651498e-05, |
|
"loss": 0.8472, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 3.25, |
|
"learning_rate": 1.4698585730310998e-05, |
|
"loss": 0.8684, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 1.4450385836452429e-05, |
|
"loss": 0.8235, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 3.26, |
|
"learning_rate": 1.4204136079890584e-05, |
|
"loss": 0.862, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 1.3959842073986085e-05, |
|
"loss": 0.8883, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 1.371750938751739e-05, |
|
"loss": 0.9239, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 3.28, |
|
"learning_rate": 1.3477143544553995e-05, |
|
"loss": 0.873, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 1.3238750024330338e-05, |
|
"loss": 0.959, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 3.29, |
|
"learning_rate": 1.300233426112103e-05, |
|
"loss": 0.9025, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 1.2767901644116941e-05, |
|
"loss": 0.8984, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 1.2535457517302263e-05, |
|
"loss": 0.9417, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 3.31, |
|
"learning_rate": 1.230500717933285e-05, |
|
"loss": 0.836, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 1.207655588341534e-05, |
|
"loss": 0.9123, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 1.1850108837187335e-05, |
|
"loss": 1.0009, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 1.1625671202598875e-05, |
|
"loss": 1.0058, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 1.1403248095794628e-05, |
|
"loss": 0.9754, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 3.34, |
|
"learning_rate": 1.1182844586997266e-05, |
|
"loss": 0.9161, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 1.096446570039198e-05, |
|
"loss": 0.8327, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 3.35, |
|
"learning_rate": 1.0748116414011888e-05, |
|
"loss": 0.8486, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 1.0533801659624531e-05, |
|
"loss": 0.9521, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 1.0321526322619534e-05, |
|
"loss": 0.9869, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 1.0111295241897157e-05, |
|
"loss": 0.8474, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 9.903113209758096e-06, |
|
"loss": 0.9183, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 3.38, |
|
"learning_rate": 9.696984971794065e-06, |
|
"loss": 0.8979, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 9.492915226779808e-06, |
|
"loss": 0.8548, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 3.39, |
|
"learning_rate": 9.29090862656593e-06, |
|
"loss": 1.033, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 3.4, |
|
"learning_rate": 9.090969775972736e-06, |
|
"loss": 0.8503, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 8.89310323268544e-06, |
|
"loss": 0.9644, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 8.697313507150184e-06, |
|
"loss": 0.9816, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 8.503605062471187e-06, |
|
"loss": 1.0183, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 3.42, |
|
"learning_rate": 8.311982314309109e-06, |
|
"loss": 0.9839, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 3.43, |
|
"learning_rate": 8.122449630780238e-06, |
|
"loss": 0.8224, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 7.935011332357112e-06, |
|
"loss": 0.8238, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 7.749671691769911e-06, |
|
"loss": 0.9162, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 7.566434933909006e-06, |
|
"loss": 0.929, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 7.385305235728801e-06, |
|
"loss": 1.0165, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 7.206286726152434e-06, |
|
"loss": 0.8973, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"eval_loss": 0.7341805100440979, |
|
"eval_runtime": 14.5889, |
|
"eval_samples_per_second": 56.139, |
|
"eval_steps_per_second": 28.104, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 7.029383485977625e-06, |
|
"loss": 0.8952, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 3.47, |
|
"learning_rate": 6.854599547783736e-06, |
|
"loss": 0.9449, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 6.681938895839746e-06, |
|
"loss": 0.9427, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 3.48, |
|
"learning_rate": 6.5114054660135315e-06, |
|
"loss": 0.9323, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 6.343003145682114e-06, |
|
"loss": 0.9059, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 6.176735773642961e-06, |
|
"loss": 0.8749, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"learning_rate": 6.012607140026605e-06, |
|
"loss": 0.9846, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 5.850620986210198e-06, |
|
"loss": 0.8623, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 5.69078100473216e-06, |
|
"loss": 0.9239, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 3.52, |
|
"learning_rate": 5.533090839208133e-06, |
|
"loss": 0.9307, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 5.377554084247771e-06, |
|
"loss": 0.9144, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 3.53, |
|
"learning_rate": 5.224174285372974e-06, |
|
"loss": 0.9267, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 5.0729549389369245e-06, |
|
"loss": 0.9614, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 3.54, |
|
"learning_rate": 4.923899492044437e-06, |
|
"loss": 0.9391, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 4.777011342473392e-06, |
|
"loss": 1.0601, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 4.632293838597246e-06, |
|
"loss": 1.0046, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 3.56, |
|
"learning_rate": 4.489750279308757e-06, |
|
"loss": 0.9584, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 4.349383913944771e-06, |
|
"loss": 0.8467, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 3.57, |
|
"learning_rate": 4.211197942212086e-06, |
|
"loss": 0.9803, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 4.075195514114593e-06, |
|
"loss": 0.9995, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 3.941379729881456e-06, |
|
"loss": 0.9114, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 3.59, |
|
"learning_rate": 3.8097536398963963e-06, |
|
"loss": 0.9629, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 3.6803202446282214e-06, |
|
"loss": 1.0647, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 3.6, |
|
"learning_rate": 3.5530824945623542e-06, |
|
"loss": 0.8882, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 3.4280432901336425e-06, |
|
"loss": 0.9751, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 3.61, |
|
"learning_rate": 3.3052054816602452e-06, |
|
"loss": 0.9553, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 3.62, |
|
"learning_rate": 3.184571869278574e-06, |
|
"loss": 0.8738, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 3.0661452028795336e-06, |
|
"loss": 0.8258, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 3.63, |
|
"learning_rate": 2.9499281820458692e-06, |
|
"loss": 0.9245, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 2.835923455990508e-06, |
|
"loss": 0.9208, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 2.7241336234962944e-06, |
|
"loss": 1.0179, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 2.6145612328566717e-06, |
|
"loss": 0.8535, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 2.5072087818176382e-06, |
|
"loss": 0.889, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 2.402078717520795e-06, |
|
"loss": 0.9205, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 2.2991734364475215e-06, |
|
"loss": 0.7831, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 2.19849528436441e-06, |
|
"loss": 0.9609, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 3.68, |
|
"learning_rate": 2.1000465562697856e-06, |
|
"loss": 0.9499, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 2.003829496341325e-06, |
|
"loss": 0.9191, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 3.69, |
|
"learning_rate": 1.9098462978849873e-06, |
|
"loss": 0.8958, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 1.8180991032849426e-06, |
|
"loss": 0.9472, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 3.7, |
|
"learning_rate": 1.7285900039547998e-06, |
|
"loss": 0.9246, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"learning_rate": 1.6413210402898893e-06, |
|
"loss": 0.9359, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 3.71, |
|
"eval_loss": 0.7337182760238647, |
|
"eval_runtime": 14.3549, |
|
"eval_samples_per_second": 57.054, |
|
"eval_steps_per_second": 28.562, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 1.5562942016207338e-06, |
|
"loss": 0.8341, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 1.473511426167784e-06, |
|
"loss": 0.9046, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.3929746009971433e-06, |
|
"loss": 0.8575, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 1.3146855619776134e-06, |
|
"loss": 0.8378, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 1.2386460937387822e-06, |
|
"loss": 0.8821, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.1648579296304253e-06, |
|
"loss": 0.8412, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.0933227516829347e-06, |
|
"loss": 0.89, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 1.0240421905689745e-06, |
|
"loss": 0.8251, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 3.76, |
|
"learning_rate": 9.570178255663532e-07, |
|
"loss": 0.71, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 3.77, |
|
"learning_rate": 8.922511845219971e-07, |
|
"loss": 0.8918, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 8.297437438170797e-07, |
|
"loss": 0.8273, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 7.694969283334575e-07, |
|
"loss": 0.9645, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 7.115121114211199e-07, |
|
"loss": 0.9352, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 6.557906148669024e-07, |
|
"loss": 0.8733, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 3.8, |
|
"learning_rate": 6.023337088643665e-07, |
|
"loss": 0.8949, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 5.51142611984834e-07, |
|
"loss": 0.8571, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 3.81, |
|
"learning_rate": 5.022184911495864e-07, |
|
"loss": 0.8543, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 4.555624616033427e-07, |
|
"loss": 0.9077, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 4.111755868887346e-07, |
|
"loss": 0.8877, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 3.6905887882213717e-07, |
|
"loss": 1.1382, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 3.292132974705653e-07, |
|
"loss": 0.8221, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 3.84, |
|
"learning_rate": 2.916397511298019e-07, |
|
"loss": 0.8317, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 2.5633909630371487e-07, |
|
"loss": 0.9018, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 3.85, |
|
"learning_rate": 2.2331213768468363e-07, |
|
"loss": 0.9389, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 1.925596281353026e-07, |
|
"loss": 0.8858, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 1.6408226867118403e-07, |
|
"loss": 0.8439, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 3.87, |
|
"learning_rate": 1.378807084450151e-07, |
|
"loss": 0.9853, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 1.1395554473171422e-07, |
|
"loss": 0.7637, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 3.88, |
|
"learning_rate": 9.230732291485301e-08, |
|
"loss": 0.9044, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 3.89, |
|
"learning_rate": 7.293653647421073e-08, |
|
"loss": 0.8802, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 5.584362697453882e-08, |
|
"loss": 0.9122, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 3.9, |
|
"learning_rate": 4.102898405545785e-08, |
|
"loss": 1.0282, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 2.8492945422620155e-08, |
|
"loss": 0.8859, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 1.8235796839982665e-08, |
|
"loss": 0.8389, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 1.0257772123312137e-08, |
|
"loss": 0.9671, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 4.559053134822744e-09, |
|
"loss": 1.0085, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 1.1397697790793694e-09, |
|
"loss": 0.8158, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 3.94, |
|
"learning_rate": 0.0, |
|
"loss": 0.8907, |
|
"step": 668 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 668, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 4, |
|
"save_steps": 167, |
|
"total_flos": 1.391544932153426e+17, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|