|
{
  "best_metric": 1.7264798879623413,
  "best_model_checkpoint": "output/slava-kpss/checkpoint-933",
  "epoch": 1.0,
  "global_step": 933,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
"log_history": [ |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.0001371902778945302, |
|
"loss": 2.663, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 0.00013716111433378645, |
|
"loss": 2.4309, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.00013711251758398495, |
|
"loss": 2.2912, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 0.0001370445014195492, |
|
"loss": 2.3384, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.00013695708511920587, |
|
"loss": 2.2697, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 0.0001368502934605203, |
|
"loss": 2.2379, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00013672415671287354, |
|
"loss": 2.1656, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 0.00013657871062888258, |
|
"loss": 2.1628, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.00013641399643426666, |
|
"loss": 2.1289, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 0.000136230060816162, |
|
"loss": 2.09, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.00013602695590988865, |
|
"loss": 2.0049, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 0.0001358047392841732, |
|
"loss": 2.2023, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 0.00013556347392483116, |
|
"loss": 2.1472, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013530322821691406, |
|
"loss": 2.0605, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 0.00013502407592532636, |
|
"loss": 2.1713, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.00013472609617391705, |
|
"loss": 2.0815, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001344093734230526, |
|
"loss": 1.998, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00013407399744567734, |
|
"loss": 1.9623, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 0.00013372006330186772, |
|
"loss": 2.0139, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00013334767131188837, |
|
"loss": 2.0258, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 0.00013295692702775685, |
|
"loss": 1.9965, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00013254794120332568, |
|
"loss": 2.0578, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 0.00013212082976288994, |
|
"loss": 1.9713, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00013167571376832926, |
|
"loss": 2.0398, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 0.00013121271938479367, |
|
"loss": 1.9906, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.00013073197784494285, |
|
"loss": 1.9652, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 0.0001302336254117493, |
|
"loss": 1.9658, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 0.00012971780333987523, |
|
"loss": 2.0634, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00012918465783563518, |
|
"loss": 2.0384, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 0.00012863434001555456, |
|
"loss": 1.9783, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00012806700586353683, |
|
"loss": 2.0149, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 0.00012748281618665092, |
|
"loss": 1.9433, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.00012688193656955137, |
|
"loss": 2.0019, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 0.0001262645373275447, |
|
"loss": 2.036, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.0001256307934583145, |
|
"loss": 1.9862, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00012498088459231957, |
|
"loss": 1.9842, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.00012431499494187896, |
|
"loss": 2.0212, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 0.000123633313248958, |
|
"loss": 1.8205, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00012293603273167084, |
|
"loss": 2.0015, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 0.00012222335102951405, |
|
"loss": 1.8653, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 0.00012149547014734692, |
|
"loss": 2.0522, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00012075259639813464, |
|
"loss": 1.8885, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 0.00011999494034447026, |
|
"loss": 1.8864, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00011922271673889206, |
|
"loss": 1.9415, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 0.00011843614446301341, |
|
"loss": 1.9542, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00011763544646548234, |
|
"loss": 1.8528, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 0.00011682084969878809, |
|
"loss": 1.8431, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.00011599258505493302, |
|
"loss": 1.9018, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 0.0001151508872999878, |
|
"loss": 1.8971, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00011429599500754859, |
|
"loss": 1.9842, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 0.00011342815049111488, |
|
"loss": 1.8397, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00011254759973540735, |
|
"loss": 1.854, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.0001116545923266452, |
|
"loss": 1.9264, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00011074938138180258, |
|
"loss": 1.833, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 0.00010983222347686431, |
|
"loss": 1.8783, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 0.00010890337857410102, |
|
"loss": 1.8777, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00010796310994838476, |
|
"loss": 1.8851, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 0.00010701168411256533, |
|
"loss": 1.8735, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.0001060493707419291, |
|
"loss": 1.9696, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 0.00010507644259776136, |
|
"loss": 1.8878, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.00010409317545003389, |
|
"loss": 1.8279, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 0.0001030998479992398, |
|
"loss": 1.9672, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00010209674179739785, |
|
"loss": 1.889, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 0.00010108414116824834, |
|
"loss": 1.8617, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 0.00010006233312666341, |
|
"loss": 1.9077, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 9.90316072972947e-05, |
|
"loss": 1.8912, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 9.79922558324811e-05, |
|
"loss": 1.7622, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 9.694457332944009e-05, |
|
"loss": 1.8778, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 9.588885674676624e-05, |
|
"loss": 1.8761, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 9.482540532026027e-05, |
|
"loss": 1.8225, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 9.37545204781125e-05, |
|
"loss": 1.7817, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 9.26765057554653e-05, |
|
"loss": 1.7633, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 9.159166670837789e-05, |
|
"loss": 1.8406, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 9.05003108272186e-05, |
|
"loss": 1.9374, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 8.940274744950875e-05, |
|
"loss": 1.8444, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 8.829928767224302e-05, |
|
"loss": 1.8098, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 8.71902442637111e-05, |
|
"loss": 1.822, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 8.607593157484563e-05, |
|
"loss": 1.87, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 8.495666545012144e-05, |
|
"loss": 1.8821, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 8.383276313803162e-05, |
|
"loss": 1.8619, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 8.270454320116558e-05, |
|
"loss": 1.896, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 8.157232542591454e-05, |
|
"loss": 1.776, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 8.043643073183026e-05, |
|
"loss": 1.8886, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 7.92971810806626e-05, |
|
"loss": 1.8725, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 7.815489938510145e-05, |
|
"loss": 1.8305, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 7.700990941724947e-05, |
|
"loss": 1.8383, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 7.586253571685095e-05, |
|
"loss": 1.872, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 7.471310349930326e-05, |
|
"loss": 1.8617, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 7.356193856347655e-05, |
|
"loss": 1.8118, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 7.24093671993686e-05, |
|
"loss": 1.8363, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 7.125571609561963e-05, |
|
"loss": 1.7498, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 7.010131224691501e-05, |
|
"loss": 1.8902, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 6.894648286130055e-05, |
|
"loss": 1.9075, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 6.779155526743765e-05, |
|
"loss": 1.8081, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 6.66368568218242e-05, |
|
"loss": 1.8239, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 6.548271481600758e-05, |
|
"loss": 1.8584, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 6.432945638381598e-05, |
|
"loss": 1.9037, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 6.317740840863456e-05, |
|
"loss": 1.7551, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 6.202689743075261e-05, |
|
"loss": 1.8329, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 6.0878249554807756e-05, |
|
"loss": 1.7776, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 5.9731790357353845e-05, |
|
"loss": 1.8987, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 5.8587844794578496e-05, |
|
"loss": 1.6097, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 5.744673711019635e-05, |
|
"loss": 1.8081, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 5.630879074354446e-05, |
|
"loss": 1.8023, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 5.517432823790546e-05, |
|
"loss": 1.709, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 5.404367114908498e-05, |
|
"loss": 1.7632, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 5.291713995426862e-05, |
|
"loss": 1.796, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 5.179505396118502e-05, |
|
"loss": 1.7405, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 5.067773121760007e-05, |
|
"loss": 1.8264, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 4.9565488421168504e-05, |
|
"loss": 1.8083, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 4.8458640829668e-05, |
|
"loss": 1.7488, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 4.735750217164156e-05, |
|
"loss": 1.8385, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.6262384557473104e-05, |
|
"loss": 1.8445, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 4.517359839092207e-05, |
|
"loss": 1.7808, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 4.409145228114133e-05, |
|
"loss": 1.7932, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 4.3016252955204197e-05, |
|
"loss": 1.7931, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.1948305171164515e-05, |
|
"loss": 1.7508, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.08879116316751e-05, |
|
"loss": 1.7877, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 3.98353728981888e-05, |
|
"loss": 1.8233, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 3.879098730576618e-05, |
|
"loss": 1.8181, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 3.7755050878514536e-05, |
|
"loss": 1.6906, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 3.672785724568185e-05, |
|
"loss": 1.7465, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 3.570969755842952e-05, |
|
"loss": 1.7276, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 3.4700860407307565e-05, |
|
"loss": 1.7561, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 3.3701631740455454e-05, |
|
"loss": 1.8063, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 3.271229478255218e-05, |
|
"loss": 1.6319, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 3.173312995453793e-05, |
|
"loss": 1.7234, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 3.07644147941308e-05, |
|
"loss": 1.811, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 2.9806423877160492e-05, |
|
"loss": 1.8344, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 2.8859428739741754e-05, |
|
"loss": 1.7602, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 2.7923697801309092e-05, |
|
"loss": 1.8365, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 2.699949628853528e-05, |
|
"loss": 1.7506, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 2.60870861601545e-05, |
|
"loss": 1.7098, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 2.518672603271192e-05, |
|
"loss": 1.8215, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 2.429867110726057e-05, |
|
"loss": 1.8035, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 2.3423173097026407e-05, |
|
"loss": 1.7455, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 2.25604801560617e-05, |
|
"loss": 1.8008, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 2.1710836808907555e-05, |
|
"loss": 1.7584, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 2.0874483881285084e-05, |
|
"loss": 1.7836, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 2.0051658431834844e-05, |
|
"loss": 1.7353, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.924259368492425e-05, |
|
"loss": 1.7518, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.844751896454173e-05, |
|
"loss": 1.7968, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.766665962929623e-05, |
|
"loss": 1.751, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.6900237008540944e-05, |
|
"loss": 1.7056, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.6148468339638933e-05, |
|
"loss": 1.7457, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.5411566706388707e-05, |
|
"loss": 1.7712, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.4689740978626948e-05, |
|
"loss": 1.7609, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3983195753025887e-05, |
|
"loss": 1.7358, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3292131295101604e-05, |
|
"loss": 1.8011, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.2616743482450217e-05, |
|
"loss": 1.7338, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.1957223749227626e-05, |
|
"loss": 1.7185, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.1313759031888791e-05, |
|
"loss": 1.7987, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.0686531716201893e-05, |
|
"loss": 1.7855, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.0075719585552289e-05, |
|
"loss": 1.755, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 9.481495770550924e-06, |
|
"loss": 1.7749, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 8.90402869996171e-06, |
|
"loss": 1.7306, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 8.343482052961487e-06, |
|
"loss": 1.7385, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 7.800014712746244e-06, |
|
"loss": 1.8382, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 7.273780721496786e-06, |
|
"loss": 1.7857, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 6.7649292367164704e-06, |
|
"loss": 1.7224, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 6.2736044889534784e-06, |
|
"loss": 1.7514, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 5.799945740919712e-06, |
|
"loss": 1.7887, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 5.344087248017646e-06, |
|
"loss": 1.7605, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.906158220286551e-06, |
|
"loss": 1.7712, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 4.486282785778806e-06, |
|
"loss": 1.7334, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 4.084579955376559e-06, |
|
"loss": 1.749, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 3.7011635890589766e-06, |
|
"loss": 1.7343, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"learning_rate": 3.3361423636293224e-06, |
|
"loss": 1.7663, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 2.989619741911281e-06, |
|
"loss": 1.759, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 2.6616939434230985e-06, |
|
"loss": 1.7025, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 2.352457916537921e-06, |
|
"loss": 1.7564, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"learning_rate": 2.0619993121382247e-06, |
|
"loss": 1.8452, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 1.7904004587717305e-06, |
|
"loss": 1.7741, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 1.5377383393159132e-06, |
|
"loss": 1.7832, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.3040845691577635e-06, |
|
"loss": 1.8237, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.0895053758948607e-06, |
|
"loss": 1.7458, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 8.940615805635918e-07, |
|
"loss": 1.7009, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"learning_rate": 7.178085803998752e-07, |
|
"loss": 1.725, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 5.607963331371593e-07, |
|
"loss": 1.7469, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"learning_rate": 4.2306934284621745e-07, |
|
"loss": 1.6719, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"learning_rate": 3.04666647320803e-07, |
|
"loss": 1.8424, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 2.0562180701263117e-07, |
|
"loss": 1.7483, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 1.2596289551889364e-07, |
|
"loss": 1.7528, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 6.571249162498684e-08, |
|
"loss": 1.8179, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"learning_rate": 2.4887672904708548e-08, |
|
"loss": 1.7763, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 3.5000108797349717e-09, |
|
"loss": 1.7489, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"eval_loss": 1.7264798879623413, |
|
"eval_runtime": 80.022, |
|
"eval_samples_per_second": 20.494, |
|
"eval_steps_per_second": 2.562, |
|
"step": 933 |
|
} |
|
], |
|
"max_steps": 933, |
|
"num_train_epochs": 1, |
|
"total_flos": 974357987328000.0, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|