{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9997409997409998,
  "global_step": 1930,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.01, "learning_rate": 0.00015, "loss": 2.1476, "step": 5 },
    { "epoch": 0.01, "learning_rate": 0.0003, "loss": 2.1726, "step": 10 },
    { "epoch": 0.02, "learning_rate": 0.00045, "loss": 2.1825, "step": 15 },
    { "epoch": 0.02, "learning_rate": 0.0006, "loss": 2.1711, "step": 20 },
    { "epoch": 0.03, "learning_rate": 0.000599989854771552, "loss": 2.187, "step": 25 },
    { "epoch": 0.03, "learning_rate": 0.0005999594197723798, "loss": 2.2129, "step": 30 },
    { "epoch": 0.04, "learning_rate": 0.0005999086970609497, "loss": 2.1951, "step": 35 },
    { "epoch": 0.04, "learning_rate": 0.000599837690067885, "loss": 2.219, "step": 40 },
    { "epoch": 0.05, "learning_rate": 0.0005997464035957336, "loss": 2.1978, "step": 45 },
    { "epoch": 0.05, "learning_rate": 0.000599634843818643, "loss": 2.2003, "step": 50 },
    { "epoch": 0.06, "learning_rate": 0.0005995030182819427, "loss": 2.2148, "step": 55 },
    { "epoch": 0.06, "learning_rate": 0.0005993509359016338, "loss": 2.2266, "step": 60 },
    { "epoch": 0.07, "learning_rate": 0.0005991786069637863, "loss": 2.2008, "step": 65 },
    { "epoch": 0.07, "learning_rate": 0.0005989860431238432, "loss": 2.2105, "step": 70 },
    { "epoch": 0.08, "learning_rate": 0.000598773257405832, "loss": 2.203, "step": 75 },
    { "epoch": 0.08, "learning_rate": 0.0005985402642014843, "loss": 2.2201, "step": 80 },
    { "epoch": 0.09, "learning_rate": 0.000598287079269262, "loss": 2.2313, "step": 85 },
    { "epoch": 0.09, "learning_rate": 0.0005980137197332915, "loss": 2.1925, "step": 90 },
    { "epoch": 0.1, "learning_rate": 0.0005977202040822059, "loss": 2.2056, "step": 95 },
    { "epoch": 0.1, "learning_rate": 0.000597406552167894, "loss": 2.2045, "step": 100 },
    { "epoch": 0.11, "learning_rate": 0.0005970727852041579, "loss": 2.2218, "step": 105 },
    { "epoch": 0.11, "learning_rate": 0.0005967189257652783, "loss": 2.2225, "step": 110 },
    { "epoch": 0.12, "learning_rate": 0.0005963449977844876, "loss": 2.2218, "step": 115 },
    { "epoch": 0.12, "learning_rate": 0.0005959510265523509, "loss": 2.1974, "step": 120 },
    { "epoch": 0.13, "learning_rate": 0.0005955370387150558, "loss": 2.2206, "step": 125 },
    { "epoch": 0.13, "learning_rate": 0.0005951030622726105, "loss": 2.1967, "step": 130 },
    { "epoch": 0.14, "learning_rate": 0.0005946491265769492, "loss": 2.21, "step": 135 },
    { "epoch": 0.15, "learning_rate": 0.0005941752623299472, "loss": 2.2199, "step": 140 },
    { "epoch": 0.15, "learning_rate": 0.0005936815015813451, "loss": 2.2225, "step": 145 },
    { "epoch": 0.16, "learning_rate": 0.0005931678777265801, "loss": 2.2023, "step": 150 },
    { "epoch": 0.16, "learning_rate": 0.0005926344255045277, "loss": 2.1881, "step": 155 },
    { "epoch": 0.17, "learning_rate": 0.0005920811809951525, "loss": 2.1933, "step": 160 },
    { "epoch": 0.17, "learning_rate": 0.0005915081816170671, "loss": 2.2102, "step": 165 },
    { "epoch": 0.18, "learning_rate": 0.0005909154661250024, "loss": 2.219, "step": 170 },
    { "epoch": 0.18, "learning_rate": 0.0005903030746071855, "loss": 2.2166, "step": 175 },
    { "epoch": 0.19, "learning_rate": 0.0005896710484826284, "loss": 2.2027, "step": 180 },
    { "epoch": 0.19, "learning_rate": 0.0005890194304983277, "loss": 2.215, "step": 185 },
    { "epoch": 0.2, "learning_rate": 0.0005883482647263717, "loss": 2.2242, "step": 190 },
    { "epoch": 0.2, "learning_rate": 0.0005876575965609614, "loss": 2.1957, "step": 195 },
    { "epoch": 0.21, "learning_rate": 0.0005869474727153385, "loss": 2.2131, "step": 200 },
    { "epoch": 0.21, "learning_rate": 0.0005862179412186275, "loss": 2.222, "step": 205 },
    { "epoch": 0.22, "learning_rate": 0.0005854690514125865, "loss": 2.213, "step": 210 },
    { "epoch": 0.22, "learning_rate": 0.0005847008539482696, "loss": 2.2116, "step": 215 },
    { "epoch": 0.23, "learning_rate": 0.000583913400782602, "loss": 2.2077, "step": 220 },
    { "epoch": 0.23, "learning_rate": 0.0005831067451748655, "loss": 2.2131, "step": 225 },
    { "epoch": 0.24, "learning_rate": 0.0005822809416830963, "loss": 2.1922, "step": 230 },
    { "epoch": 0.24, "learning_rate": 0.0005814360461603949, "loss": 2.2152, "step": 235 },
    { "epoch": 0.25, "learning_rate": 0.0005805721157511485, "loss": 2.2043, "step": 240 },
    { "epoch": 0.25, "learning_rate": 0.0005796892088871661, "loss": 2.2169, "step": 245 },
    { "epoch": 0.26, "learning_rate": 0.0005787873852837267, "loss": 2.2207, "step": 250 },
    { "epoch": 0.26, "learning_rate": 0.0005778667059355403, "loss": 2.1908, "step": 255 },
    { "epoch": 0.27, "learning_rate": 0.0005769272331126221, "loss": 2.2244, "step": 260 },
    { "epoch": 0.27, "learning_rate": 0.0005759690303560815, "loss": 2.2172, "step": 265 },
    { "epoch": 0.28, "learning_rate": 0.0005749921624738244, "loss": 2.2123, "step": 270 },
    { "epoch": 0.28, "learning_rate": 0.0005739966955361696, "loss": 2.2207, "step": 275 },
    { "epoch": 0.29, "learning_rate": 0.0005729826968713804, "loss": 2.2079, "step": 280 },
    { "epoch": 0.3, "learning_rate": 0.0005719502350611107, "loss": 2.1984, "step": 285 },
    { "epoch": 0.3, "learning_rate": 0.0005708993799357668, "loss": 2.2085, "step": 290 },
    { "epoch": 0.31, "learning_rate": 0.0005698302025697842, "loss": 2.1944, "step": 295 },
    { "epoch": 0.31, "learning_rate": 0.0005687427752768203, "loss": 2.207, "step": 300 },
    { "epoch": 0.32, "learning_rate": 0.0005676371716048639, "loss": 2.1949, "step": 305 },
    { "epoch": 0.32, "learning_rate": 0.0005665134663312602, "loss": 2.2151, "step": 310 },
    { "epoch": 0.33, "learning_rate": 0.0005653717354576544, "loss": 2.2086, "step": 315 },
    { "epoch": 0.33, "learning_rate": 0.0005642120562048497, "loss": 2.2219, "step": 320 },
    { "epoch": 0.34, "learning_rate": 0.000563034507007586, "loss": 2.2198, "step": 325 },
    { "epoch": 0.34, "learning_rate": 0.000561839167509234, "loss": 2.1897, "step": 330 },
    { "epoch": 0.35, "learning_rate": 0.0005606261185564088, "loss": 2.1839, "step": 335 },
    { "epoch": 0.35, "learning_rate": 0.000559395442193502, "loss": 2.2104, "step": 340 },
    { "epoch": 0.36, "learning_rate": 0.0005581472216571329, "loss": 2.2147, "step": 345 },
    { "epoch": 0.36, "learning_rate": 0.0005568815413705178, "loss": 2.2079, "step": 350 },
    { "epoch": 0.37, "learning_rate": 0.0005555984869377613, "loss": 2.1798, "step": 355 },
    { "epoch": 0.37, "learning_rate": 0.0005542981451380654, "loss": 2.2226, "step": 360 },
    { "epoch": 0.38, "learning_rate": 0.0005529806039198609, "loss": 2.2157, "step": 365 },
    { "epoch": 0.38, "learning_rate": 0.0005516459523948589, "loss": 2.1927, "step": 370 },
    { "epoch": 0.39, "learning_rate": 0.0005502942808320234, "loss": 2.2048, "step": 375 },
    { "epoch": 0.39, "learning_rate": 0.0005489256806514665, "loss": 2.1928, "step": 380 },
    { "epoch": 0.4, "learning_rate": 0.0005475402444182646, "loss": 2.2055, "step": 385 },
    { "epoch": 0.4, "learning_rate": 0.0005461380658361984, "loss": 2.2125, "step": 390 },
    { "epoch": 0.41, "learning_rate": 0.0005447192397414148, "loss": 2.2053, "step": 395 },
    { "epoch": 0.41, "learning_rate": 0.0005432838620960128, "loss": 2.188, "step": 400 },
    { "epoch": 0.42, "learning_rate": 0.0005418320299815534, "loss": 2.2037, "step": 405 },
    { "epoch": 0.42, "learning_rate": 0.0005403638415924928, "loss": 2.2026, "step": 410 },
    { "epoch": 0.43, "learning_rate": 0.0005388793962295418, "loss": 2.1892, "step": 415 },
    { "epoch": 0.44, "learning_rate": 0.0005373787942929494, "loss": 2.2138, "step": 420 },
    { "epoch": 0.44, "learning_rate": 0.0005358621372757119, "loss": 2.2269, "step": 425 },
    { "epoch": 0.45, "learning_rate": 0.0005343295277567086, "loss": 2.2143, "step": 430 },
    { "epoch": 0.45, "learning_rate": 0.0005327810693937642, "loss": 2.209, "step": 435 },
    { "epoch": 0.46, "learning_rate": 0.0005312168669166378, "loss": 2.2103, "step": 440 },
    { "epoch": 0.46, "learning_rate": 0.0005296370261199389, "loss": 2.1948, "step": 445 },
    { "epoch": 0.47, "learning_rate": 0.000528041653855973, "loss": 2.2023, "step": 450 },
    { "epoch": 0.47, "learning_rate": 0.0005264308580275137, "loss": 2.1733, "step": 455 },
    { "epoch": 0.48, "learning_rate": 0.0005248047475805057, "loss": 2.1756, "step": 460 },
    { "epoch": 0.48, "learning_rate": 0.0005231634324966954, "loss": 2.2086, "step": 465 },
    { "epoch": 0.49, "learning_rate": 0.0005215070237861924, "loss": 2.203, "step": 470 },
    { "epoch": 0.49, "learning_rate": 0.000519835633479962, "loss": 2.1826, "step": 475 },
    { "epoch": 0.5, "learning_rate": 0.0005181493746222475, "loss": 2.1921, "step": 480 },
    { "epoch": 0.5, "learning_rate": 0.0005164483612629244, "loss": 2.1718, "step": 485 },
    { "epoch": 0.51, "learning_rate": 0.0005147327084497868, "loss": 2.2043, "step": 490 },
    { "epoch": 0.51, "learning_rate": 0.0005130025322207663, "loss": 2.2003, "step": 495 },
    { "epoch": 0.52, "learning_rate": 0.0005112579495960835, "loss": 2.1715, "step": 500 },
    { "epoch": 0.52, "learning_rate": 0.0005094990785703337, "loss": 2.1775, "step": 505 },
    { "epoch": 0.53, "learning_rate": 0.0005077260381045057, "loss": 2.2061, "step": 510 },
    { "epoch": 0.53, "learning_rate": 0.0005059389481179369, "loss": 2.1998, "step": 515 },
    { "epoch": 0.54, "learning_rate": 0.0005041379294802017, "loss": 2.1776, "step": 520 },
    { "epoch": 0.54, "learning_rate": 0.0005023231040029368, "loss": 2.1867, "step": 525 },
    { "epoch": 0.55, "learning_rate": 0.0005004945944316028, "loss": 2.1742, "step": 530 },
    { "epoch": 0.55, "learning_rate": 0.0004986525244371818, "loss": 2.1963, "step": 535 },
    { "epoch": 0.56, "learning_rate": 0.000496797018607813, "loss": 2.179, "step": 540 },
    { "epoch": 0.56, "learning_rate": 0.0004949282024403667, "loss": 2.1885, "step": 545 },
    { "epoch": 0.57, "learning_rate": 0.0004930462023319559, "loss": 2.1649, "step": 550 },
    { "epoch": 0.57, "learning_rate": 0.0004911511455713875, "loss": 2.1903, "step": 555 },
    { "epoch": 0.58, "learning_rate": 0.0004892431603305532, "loss": 2.1838, "step": 560 },
    { "epoch": 0.59, "learning_rate": 0.0004873223756557606, "loss": 2.1915, "step": 565 },
    { "epoch": 0.59, "learning_rate": 0.0004853889214590053, "loss": 2.2185, "step": 570 },
    { "epoch": 0.6, "learning_rate": 0.000483442928509184, "loss": 2.1799, "step": 575 },
    { "epoch": 0.6, "learning_rate": 0.00048148452842325027, "loss": 2.1717, "step": 580 },
    { "epoch": 0.61, "learning_rate": 0.0004795138536573126, "loss": 2.1903, "step": 585 },
    { "epoch": 0.61, "learning_rate": 0.0004775310374976755, "loss": 2.1837, "step": 590 },
    { "epoch": 0.62, "learning_rate": 0.00047553621405182513, "loss": 2.1972, "step": 595 },
    { "epoch": 0.62, "learning_rate": 0.0004735295182393587, "loss": 2.1858, "step": 600 },
    { "epoch": 0.63, "learning_rate": 0.000471511085782859, "loss": 2.1872, "step": 605 },
    { "epoch": 0.63, "learning_rate": 0.0004694810531987154, "loss": 2.1875, "step": 610 },
    { "epoch": 0.64, "learning_rate": 0.0004674395577878898, "loss": 2.1759, "step": 615 },
    { "epoch": 0.64, "learning_rate": 0.0004653867376266313, "loss": 2.1846, "step": 620 },
    { "epoch": 0.65, "learning_rate": 0.00046332273155713633, "loss": 2.1937, "step": 625 },
    { "epoch": 0.65, "learning_rate": 0.00046124767917815883, "loss": 2.2121, "step": 630 },
    { "epoch": 0.66, "learning_rate": 0.00045916172083556846, "loss": 2.1887, "step": 635 },
    { "epoch": 0.66, "learning_rate": 0.0004570649976128579, "loss": 2.1752, "step": 640 },
    { "epoch": 0.67, "learning_rate": 0.0004549576513216011, "loss": 2.1745, "step": 645 },
    { "epoch": 0.67, "learning_rate": 0.00045283982449186163, "loss": 2.1859, "step": 650 },
    { "epoch": 0.68, "learning_rate": 0.00045071166036255285, "loss": 2.1767, "step": 655 },
    { "epoch": 0.68, "learning_rate": 0.00044857330287174994, "loss": 2.1665, "step": 660 },
    { "epoch": 0.69, "learning_rate": 0.00044642489664695445, "loss": 2.1911, "step": 665 },
    { "epoch": 0.69, "learning_rate": 0.0004442665869953128, "loss": 2.1772, "step": 670 },
    { "epoch": 0.7, "learning_rate": 0.00044209851989378816, "loss": 2.1772, "step": 675 },
    { "epoch": 0.7, "learning_rate": 0.0004399208419792873, "loss": 2.1752, "step": 680 },
    { "epoch": 0.71, "learning_rate": 0.00043773370053874316, "loss": 2.1915, "step": 685 },
    { "epoch": 0.71, "learning_rate": 0.0004355372434991529, "loss": 2.1685, "step": 690 },
    { "epoch": 0.72, "learning_rate": 0.0004333316194175727, "loss": 2.196, "step": 695 },
    { "epoch": 0.73, "learning_rate": 0.00043111697747107044, "loss": 2.17, "step": 700 },
    { "epoch": 0.73, "learning_rate": 0.0004288934674466359, "loss": 2.1712, "step": 705 },
    { "epoch": 0.74, "learning_rate": 0.00042666123973105033, "loss": 2.1902, "step": 710 },
    { "epoch": 0.74, "learning_rate": 0.0004244204453007143, "loss": 2.1583, "step": 715 },
    { "epoch": 0.75, "learning_rate": 0.0004221712357114373, "loss": 2.1927, "step": 720 },
    { "epoch": 0.75, "learning_rate": 0.0004199137630881866, "loss": 2.154, "step": 725 },
    { "epoch": 0.76, "learning_rate": 0.0004176481801147988, "loss": 2.1736, "step": 730 },
    { "epoch": 0.76, "learning_rate": 0.0004153746400236528, "loss": 2.1706, "step": 735 },
    { "epoch": 0.77, "learning_rate": 0.0004130932965853059, "loss": 2.1758, "step": 740 },
    { "epoch": 0.77, "learning_rate": 0.0004108043040980939, "loss": 2.1842, "step": 745 },
    { "epoch": 0.78, "learning_rate": 0.0004085078173776946, "loss": 2.1766, "step": 750 },
    { "epoch": 0.78, "learning_rate": 0.0004062039917466577, "loss": 2.1855, "step": 755 },
    { "epoch": 0.79, "learning_rate": 0.00040389298302389844, "loss": 2.1974, "step": 760 },
    { "epoch": 0.79, "learning_rate": 0.00040157494751415977, "loss": 2.1611, "step": 765 },
    { "epoch": 0.8, "learning_rate": 0.0003992500419974405, "loss": 2.1799, "step": 770 },
    { "epoch": 0.8, "learning_rate": 0.00039691842371839106, "loss": 2.1736, "step": 775 },
    { "epoch": 0.81, "learning_rate": 0.0003945802503756788, "loss": 2.1534, "step": 780 },
    { "epoch": 0.81, "learning_rate": 0.0003922356801113217, "loss": 2.1655, "step": 785 },
    { "epoch": 0.82, "learning_rate": 0.00038988487149999284, "loss": 2.1789, "step": 790 },
    { "epoch": 0.82, "learning_rate": 0.0003875279835382949, "loss": 2.1745, "step": 795 },
    { "epoch": 0.83, "learning_rate": 0.0003851651756340065, "loss": 2.1541, "step": 800 },
    { "epoch": 0.83, "learning_rate": 0.00038279660759530054, "loss": 2.1576, "step": 805 },
    { "epoch": 0.84, "learning_rate": 0.00038042243961993637, "loss": 2.1808, "step": 810 },
    { "epoch": 0.84, "learning_rate": 0.0003780428322844236, "loss": 2.1623, "step": 815 },
    { "epoch": 0.85, "learning_rate": 0.0003756579465331627, "loss": 2.1794, "step": 820 },
    { "epoch": 0.85, "learning_rate": 0.0003732679436675586, "loss": 2.1674, "step": 825 },
    { "epoch": 0.86, "learning_rate": 0.0003708729853351117, "loss": 2.1941, "step": 830 },
    { "epoch": 0.87, "learning_rate": 0.00036847323351848484, "loss": 2.155, "step": 835 },
    { "epoch": 0.87, "learning_rate": 0.00036606885052454727, "loss": 2.159, "step": 840 },
    { "epoch": 0.88, "learning_rate": 0.00036365999897339723, "loss": 2.1824, "step": 845 },
    { "epoch": 0.88, "learning_rate": 0.00036124684178736346, "loss": 2.1599, "step": 850 },
    { "epoch": 0.89, "learning_rate": 0.00035882954217998544, "loss": 2.1567, "step": 855 },
    { "epoch": 0.89, "learning_rate": 0.00035640826364497473, "loss": 2.1514, "step": 860 },
    { "epoch": 0.9, "learning_rate": 0.00035398316994515717, "loss": 2.1693, "step": 865 },
    { "epoch": 0.9, "learning_rate": 0.0003515544251013968, "loss": 2.1699, "step": 870 },
    { "epoch": 0.91, "learning_rate": 0.0003491221933815021, "loss": 2.1672, "step": 875 },
    { "epoch": 0.91, "learning_rate": 0.000346686639289116, "loss": 2.1637, "step": 880 },
    { "epoch": 0.92, "learning_rate": 0.0003442479275525895, "loss": 2.1591, "step": 885 },
    { "epoch": 0.92, "learning_rate": 0.00034180622311384066, "loss": 2.1701, "step": 890 },
    { "epoch": 0.93, "learning_rate": 0.00033936169111719824, "loss": 2.1534, "step": 895 },
    { "epoch": 0.93, "learning_rate": 0.0003369144968982327, "loss": 2.178, "step": 900 },
    { "epoch": 0.94, "learning_rate": 0.00033446480597257327, "loss": 2.1516, "step": 905 },
    { "epoch": 0.94, "learning_rate": 0.0003320127840247139, "loss": 2.1581, "step": 910 },
    { "epoch": 0.95, "learning_rate": 0.00032955859689680654, "loss": 2.1609, "step": 915 },
    { "epoch": 0.95, "learning_rate": 0.00032710241057744527, "loss": 2.1462, "step": 920 },
    { "epoch": 0.96, "learning_rate": 0.0003246443911904385, "loss": 2.1576, "step": 925 },
    { "epoch": 0.96, "learning_rate": 0.0003221847049835746, "loss": 2.1653, "step": 930 },
    { "epoch": 0.97, "learning_rate": 0.0003197235183173767, "loss": 2.1584, "step": 935 },
    { "epoch": 0.97, "learning_rate": 0.0003172609976538513, "loss": 2.1607, "step": 940 },
    { "epoch": 0.98, "learning_rate": 0.0003147973095452296, "loss": 2.1645, "step": 945 },
    { "epoch": 0.98, "learning_rate": 0.00031233262062270293, "loss": 2.1436, "step": 950 },
    { "epoch": 0.99, "learning_rate": 0.00030986709758515244, "loss": 2.1427, "step": 955 },
    { "epoch": 0.99, "learning_rate": 0.00030740090718787445, "loss": 2.1642, "step": 960 },
    { "epoch": 1.0, "learning_rate": 0.0003049342162313021, "loss": 2.1637, "step": 965 },
    { "epoch": 1.0, "eval_loss": 2.0581421852111816, "eval_runtime": 38.5822, "eval_samples_per_second": 336.269, "eval_steps_per_second": 42.04, "step": 965 },
    { "epoch": 1.01, "learning_rate": 0.00030246719154972406, "loss": 2.2293, "step": 970 },
    { "epoch": 1.01, "learning_rate": 0.0003, "loss": 2.1221, "step": 975 },
    { "epoch": 1.02, "learning_rate": 0.00029753280845027595, "loss": 2.1087, "step": 980 },
    { "epoch": 1.02, "learning_rate": 0.00029506578376869785, "loss": 2.1146, "step": 985 },
    { "epoch": 1.03, "learning_rate": 0.0002925990928121255, "loss": 2.0984, "step": 990 },
    { "epoch": 1.03, "learning_rate": 0.00029013290241484757, "loss": 2.1275, "step": 995 },
    { "epoch": 1.04, "learning_rate": 0.00028766737937729707, "loss": 2.1265, "step": 1000 },
    { "epoch": 1.04, "learning_rate": 0.0002852026904547704, "loss": 2.1184, "step": 1005 },
    { "epoch": 1.05, "learning_rate": 0.00028273900234614867, "loss": 2.1145, "step": 1010 },
    { "epoch": 1.05, "learning_rate": 0.00028027648168262324, "loss": 2.1283, "step": 1015 },
    { "epoch": 1.06, "learning_rate": 0.0002778152950164254, "loss": 2.1153, "step": 1020 },
    { "epoch": 1.06, "learning_rate": 0.00027535560880956143, "loss": 2.1191, "step": 1025 },
    { "epoch": 1.07, "learning_rate": 0.0002728975894225548, "loss": 2.1281, "step": 1030 },
    { "epoch": 1.07, "learning_rate": 0.00027044140310319336, "loss": 2.1366, "step": 1035 },
    { "epoch": 1.08, "learning_rate": 0.0002679872159752861, "loss": 2.1116, "step": 1040 },
    { "epoch": 1.08, "learning_rate": 0.00026553519402742673, "loss": 2.1241, "step": 1045 },
    { "epoch": 1.09, "learning_rate": 0.00026308550310176736, "loss": 2.1125, "step": 1050 },
    { "epoch": 1.09, "learning_rate": 0.0002606383088828017, "loss": 2.1216, "step": 1055 },
    { "epoch": 1.1, "learning_rate": 0.0002581937768861593, "loss": 2.1337, "step": 1060 },
    { "epoch": 1.1, "learning_rate": 0.00025575207244741047, "loss": 2.1294, "step": 1065 },
    { "epoch": 1.11, "learning_rate": 0.00025331336071088403, "loss": 2.1172, "step": 1070 },
    { "epoch": 1.11, "learning_rate": 0.0002508778066184978, "loss": 2.115, "step": 1075 },
    { "epoch": 1.12, "learning_rate": 0.00024844557489860314, "loss": 2.1072, "step": 1080 },
    { "epoch": 1.12, "learning_rate": 0.0002460168300548428, "loss": 2.1149, "step": 1085 },
    { "epoch": 1.13, "learning_rate": 0.00024359173635502525, "loss": 2.1015, "step": 1090 },
    { "epoch": 1.13, "learning_rate": 0.0002411704578200146, "loss": 2.1159, "step": 1095 },
    { "epoch": 1.14, "learning_rate": 0.00023875315821263648, "loss": 2.1136, "step": 1100 },
    { "epoch": 1.15, "learning_rate": 0.00023634000102660274, "loss": 2.0963, "step": 1105 },
    { "epoch": 1.15, "learning_rate": 0.00023393114947545276, "loss": 2.1234, "step": 1110 },
    { "epoch": 1.16, "learning_rate": 0.0002315267664815152, "loss": 2.1048, "step": 1115 },
    { "epoch": 1.16, "learning_rate": 0.00022912701466488823, "loss": 2.1299, "step": 1120 },
    { "epoch": 1.17, "learning_rate": 0.00022673205633244145, "loss": 2.1266, "step": 1125 },
    { "epoch": 1.17, "learning_rate": 0.00022434205346683728, "loss": 2.0937, "step": 1130 },
    { "epoch": 1.18, "learning_rate": 0.00022195716771557632, "loss": 2.0877, "step": 1135 },
    { "epoch": 1.18, "learning_rate": 0.00021957756038006366, "loss": 2.1252, "step": 1140 },
    { "epoch": 1.19, "learning_rate": 0.00021720339240469935, "loss": 2.1106, "step": 1145 },
    { "epoch": 1.19, "learning_rate": 0.00021483482436599353, "loss": 2.121, "step": 1150 },
    { "epoch": 1.2, "learning_rate": 0.0002124720164617051, "loss": 2.0997, "step": 1155 },
    { "epoch": 1.2, "learning_rate": 0.00021011512850000716, "loss": 2.0981, "step": 1160 },
    { "epoch": 1.21, "learning_rate": 0.00020776431988867825, "loss": 2.1222, "step": 1165 },
    { "epoch": 1.21, "learning_rate": 0.00020541974962432123, "loss": 2.092, "step": 1170 },
    { "epoch": 1.22, "learning_rate": 0.00020308157628160894, "loss": 2.1113, "step": 1175 },
    { "epoch": 1.22, "learning_rate": 0.00020074995800255954, "loss": 2.1055, "step": 1180 },
    { "epoch": 1.23, "learning_rate": 0.00019842505248584018, "loss": 2.113, "step": 1185 },
    { "epoch": 1.23, "learning_rate": 0.00019610701697610154, "loss": 2.1106, "step": 1190 },
    { "epoch": 1.24, "learning_rate": 0.00019379600825334228, "loss": 2.1075, "step": 1195 },
    { "epoch": 1.24, "learning_rate": 0.0001914921826223053, "loss": 2.0971, "step": 1200 },
    { "epoch": 1.25, "learning_rate": 0.00018919569590190614, "loss": 2.1218, "step": 1205 },
    { "epoch": 1.25, "learning_rate": 0.00018690670341469403, "loss": 2.1019, "step": 1210 },
    { "epoch": 1.26, "learning_rate": 0.0001846253599763472, "loss": 2.1028, "step": 1215 },
    { "epoch": 1.26, "learning_rate": 0.0001823518198852012, "loss": 2.0939, "step": 1220 },
    { "epoch": 1.27, "learning_rate": 0.00018008623691181345, "loss": 2.1144, "step": 1225 },
    { "epoch": 1.27, "learning_rate": 0.00017782876428856274, "loss": 2.1009, "step": 1230 },
    { "epoch": 1.28, "learning_rate": 0.0001755795546992856, "loss": 2.1048, "step": 1235 },
    { "epoch": 1.28, "learning_rate": 0.00017333876026894967, "loss": 2.0939, "step": 1240 },
    { "epoch": 1.29, "learning_rate": 0.00017110653255336398, "loss": 2.1108, "step": 1245 },
    { "epoch": 1.3, "learning_rate": 0.0001688830225289296, "loss": 2.0995, "step": 1250 },
    { "epoch": 1.3, "learning_rate": 0.00016666838058242725, "loss": 2.1223, "step": 1255 },
    { "epoch": 1.31, "learning_rate": 0.0001644627565008471, "loss": 2.0965, "step": 1260 },
    { "epoch": 1.31, "learning_rate": 0.0001622662994612568, "loss": 2.1283, "step": 1265 },
    { "epoch": 1.32, "learning_rate": 0.00016007915802071268, "loss": 2.1075, "step": 1270 },
    { "epoch": 1.32, "learning_rate": 0.00015790148010621184, "loss": 2.1087, "step": 1275 },
    { "epoch": 1.33, "learning_rate": 0.0001557334130046871, "loss": 2.104, "step": 1280 },
    { "epoch": 1.33, "learning_rate": 0.00015357510335304542, "loss": 2.1068, "step": 1285 },
    { "epoch": 1.34, "learning_rate": 0.00015142669712825006, "loss": 2.1104, "step": 1290 },
    { "epoch": 1.34, "learning_rate": 0.00014928833963744712, "loss": 2.1141, "step": 1295 },
    { "epoch": 1.35, "learning_rate": 0.0001471601755081384, "loss": 2.1172, "step": 1300 },
    { "epoch": 1.35, "learning_rate": 0.0001450423486783989, "loss": 2.1235, "step": 1305 },
    { "epoch": 1.36, "learning_rate": 0.0001429350023871421, "loss": 2.093, "step": 1310 },
    { "epoch": 1.36, "learning_rate": 0.00014083827916443163, "loss": 2.1106, "step": 1315 },
    { "epoch": 1.37, "learning_rate": 0.00013875232082184111, "loss": 2.1041, "step": 1320 },
    { "epoch": 1.37, "learning_rate": 0.00013667726844286365, "loss": 2.1167, "step": 1325 },
    { "epoch": 1.38, "learning_rate": 0.00013461326237336873, "loss": 2.0964, "step": 1330 },
    { "epoch": 1.38, "learning_rate": 0.00013256044221211013, "loss": 2.1024, "step": 1335 },
    { "epoch": 1.39, "learning_rate": 0.00013051894680128463, "loss": 2.1037, "step": 1340 },
    { "epoch": 1.39, "learning_rate": 0.00012848891421714094, "loss": 2.1098, "step": 1345 },
    { "epoch": 1.4, "learning_rate": 0.00012647048176064125, "loss": 2.0885, "step": 1350 },
    { "epoch": 1.4, "learning_rate": 0.0001244637859481749, "loss": 2.0902, "step": 1355 },
    { "epoch": 1.41, "learning_rate": 0.0001224689625023245, "loss": 2.1136, "step": 1360 },
    { "epoch": 1.41, "learning_rate": 0.00012048614634268736, "loss": 2.1067, "step": 1365 },
    { "epoch": 1.42, "learning_rate": 0.00011851547157674971, "loss": 2.0993, "step": 1370 },
    { "epoch": 1.42, "learning_rate": 0.000116557071490816, "loss": 2.1005, "step": 1375 },
    { "epoch": 1.43, "learning_rate": 0.00011461107854099468, "loss": 2.1053, "step": 1380 },
    { "epoch": 1.44, "learning_rate": 0.00011267762434423939, "loss": 2.1033, "step": 1385 },
    { "epoch": 1.44, "learning_rate": 0.00011075683966944673, "loss": 2.095, "step": 1390 },
    { "epoch": 1.45, "learning_rate": 0.00010884885442861251, "loss": 2.094, "step": 1395 },
    { "epoch": 1.45, "learning_rate": 0.00010695379766804407, "loss": 2.0858, "step": 1400 },
    { "epoch": 1.46, "learning_rate": 0.00010507179755963326, "loss": 2.095, "step": 1405 },
    { "epoch": 1.46, "learning_rate": 0.00010320298139218698, "loss": 2.092, "step": 1410 },
    { "epoch": 1.47, "learning_rate": 0.00010134747556281819, "loss": 2.1107, "step": 1415 },
    { "epoch": 1.47, "learning_rate": 9.950540556839719e-05, "loss": 2.0923, "step": 1420 },
    { "epoch": 1.48, "learning_rate": 9.767689599706314e-05, "loss": 2.09, "step": 1425 },
    { "epoch": 1.48, "learning_rate": 9.586207051979835e-05, "loss": 2.0879, "step": 1430 },
    { "epoch": 1.49, "learning_rate": 9.40610518820631e-05, "loss": 2.0912, "step": 1435 },
    { "epoch": 1.49, "learning_rate": 9.22739618954943e-05, "loss": 2.1017, "step": 1440 },
    { "epoch": 1.5, "learning_rate": 9.050092142966632e-05, "loss": 2.1142, "step": 1445 },
    { "epoch": 1.5, "learning_rate": 8.87420504039165e-05, "loss": 2.0917, "step": 1450 },
    { "epoch": 1.51, "learning_rate": 8.69974677792336e-05, "loss": 2.0781, "step": 1455 },
    { "epoch": 1.51, "learning_rate": 8.526729155021318e-05, "loss": 2.09, "step": 1460 },
    { "epoch": 1.52, "learning_rate": 8.355163873707559e-05, "loss": 2.0823, "step": 1465 },
    { "epoch": 1.52, "learning_rate": 8.185062537775248e-05, "loss": 2.0936, "step": 1470 },
    { "epoch": 1.53, "learning_rate": 8.016436652003797e-05, "loss": 2.0889, "step": 1475 },
    { "epoch": 1.53, "learning_rate": 7.849297621380765e-05, "loss": 2.0967, "step": 1480 },
    { "epoch": 1.54, "learning_rate": 7.683656750330468e-05, "loss": 2.0874, "step": 1485 },
    { "epoch": 1.54, "learning_rate": 7.519525241949431e-05, "loss": 2.1044, "step": 1490 },
    { "epoch": 1.55, "learning_rate": 7.356914197248624e-05, "loss": 2.0911, "step": 1495 },
    { "epoch": 1.55, "learning_rate": 7.195834614402702e-05, "loss": 2.1034, "step": 1500 },
    { "epoch": 1.56, "learning_rate": 7.036297388006104e-05, "loss": 2.0972, "step": 1505 },
    { "epoch": 1.56, "learning_rate": 6.878313308336217e-05, "loss": 2.0769, "step": 1510 },
    { "epoch": 1.57, "learning_rate": 6.721893060623574e-05, "loss": 2.0717, "step": 1515 },
    { "epoch": 1.57, "learning_rate": 6.567047224329133e-05, "loss": 2.1128, "step": 1520 },
    { "epoch": 1.58, "learning_rate": 6.413786272428815e-05, "loss": 2.096, "step": 1525 },
    { "epoch": 1.59, "learning_rate": 6.262120570705059e-05, "loss": 2.0823, "step": 1530 },
    { "epoch": 1.59, "learning_rate": 6.112060377045816e-05, "loss": 2.1142, "step": 1535 },
    { "epoch": 1.6, "learning_rate": 5.963615840750731e-05, "loss": 2.0755, "step": 1540 },
    { "epoch": 1.6, "learning_rate": 5.8167970018446596e-05, "loss": 2.1061, "step": 1545 },
    { "epoch": 1.61, "learning_rate": 5.671613790398708e-05, "loss": 2.098, "step": 1550 },
    { "epoch": 1.61, "learning_rate": 5.528076025858519e-05, "loss": 2.0948, "step": 1555 },
    { "epoch": 1.62, "learning_rate": 5.3861934163801576e-05, "loss": 2.0849, "step": 1560 },
    { "epoch": 1.62, "learning_rate": 5.245975558173535e-05, "loss": 2.0689, "step": 1565 },
    { "epoch": 1.63, "learning_rate": 5.107431934853351e-05, "loss": 2.1098, "step": 1570 },
    { "epoch": 1.63, "learning_rate": 4.970571916797655e-05, "loss": 2.0803, "step": 1575 },
    { "epoch": 1.64, "learning_rate": 4.8354047605141154e-05, "loss": 2.0872, "step": 1580 },
    { "epoch": 1.64, "learning_rate": 4.7019396080139124e-05, "loss": 2.095, "step": 1585 },
    { "epoch": 1.65, "learning_rate": 4.570185486193457e-05, "loss": 2.0779, "step": 1590 },
    { "epoch": 1.65, "learning_rate": 4.44015130622387e-05, "loss": 2.093, "step": 1595 },
    { "epoch": 1.66, "learning_rate": 4.311845862948212e-05, "loss": 2.0729, "step": 1600 },
    { "epoch": 1.66, "learning_rate": 4.185277834286706e-05, "loss": 2.0871, "step": 1605 },
    { "epoch": 1.67, "learning_rate": 4.060455780649787e-05, "loss": 2.0792, "step": 1610 },
    { "epoch": 1.67, "learning_rate": 3.9373881443591196e-05, "loss": 2.0971, "step": 1615 },
    { "epoch": 1.68, "learning_rate": 3.816083249076608e-05, "loss": 2.092, "step": 1620 },
    { "epoch": 1.68, "learning_rate": 3.696549299241398e-05, "loss": 2.0963, "step": 1625 },
    { "epoch": 1.69, "learning_rate": 3.578794379515026e-05, "loss": 2.0796, "step": 1630 },
    { "epoch": 1.69, "learning_rate": 3.462826454234563e-05, "loss": 2.082, "step": 1635 },
    { "epoch": 1.7, "learning_rate": 3.348653366873971e-05, "loss": 2.0833, "step": 1640 },
    { "epoch": 1.7, "learning_rate": 3.2362828395136165e-05, "loss": 2.0829, "step": 1645 },
    { "epoch": 1.71, "learning_rate": 3.125722472317973e-05, "loss": 2.103, "step": 1650 },
    { "epoch": 1.71, "learning_rate": 3.0169797430215724e-05, "loss": 2.0883, "step": 1655 },
    { "epoch": 1.72, "learning_rate": 2.9100620064233116e-05, "loss": 2.0958, "step": 1660 },
    { "epoch": 1.73, "learning_rate": 2.8049764938889264e-05, "loss": 2.0925, "step": 1665 },
    { "epoch": 1.73, "learning_rate": 2.7017303128619606e-05, "loss": 2.0865, "step": 1670 },
    { "epoch": 1.74, "learning_rate": 2.6003304463830366e-05, "loss": 2.0671, "step": 1675 },
    { "epoch": 1.74, "learning_rate": 2.500783752617557e-05, "loss": 2.0947, "step": 1680 },
    { "epoch": 1.75, "learning_rate": 2.4030969643918474e-05, "loss": 2.0775, "step": 1685 },
    { "epoch": 1.75, "learning_rate": 2.3072766887377937e-05, "loss": 2.1041, "step": 1690 },
    { "epoch": 1.76, "learning_rate": 2.2133294064459726e-05, "loss": 2.1082, "step": 1695 },
    { "epoch": 1.76, "learning_rate": 2.1212614716273224e-05, "loss": 2.0846, "step": 1700 },
    { "epoch": 1.77, "learning_rate": 2.031079111283387e-05, "loss": 2.101, "step": 1705 },
    { "epoch": 1.77, "learning_rate": 1.942788424885151e-05, "loss": 2.0922, "step": 1710 },
    { "epoch": 1.78, "learning_rate": 1.8563953839605107e-05, "loss": 2.0757, "step": 1715 },
    { "epoch": 1.78, "learning_rate": 1.7719058316903577e-05, "loss": 2.0798, "step": 1720 },
    { "epoch": 1.79, "learning_rate": 1.6893254825134415e-05, "loss": 2.0884, "step": 1725 },
    { "epoch": 1.79, "learning_rate": 1.6086599217397965e-05, "loss": 2.0843, "step": 1730 },
    { "epoch": 1.8, "learning_rate": 1.529914605173045e-05, "loss": 2.0823, "step": 1735 },
    { "epoch": 1.8, "learning_rate": 1.4530948587413583e-05, "loss": 2.0875, "step": 1740 },
    { "epoch": 1.81, "learning_rate": 1.3782058781372396e-05, "loss": 2.0842, "step": 1745 },
    { "epoch": 1.81, "learning_rate": 1.3052527284661407e-05, "loss": 2.0983, "step": 1750 },
    { "epoch": 1.82, "learning_rate": 1.2342403439038629e-05, "loss": 2.0771, "step": 1755 },
    { "epoch": 1.82, "learning_rate": 1.165173527362817e-05, "loss": 2.0831, "step": 1760 },
    { "epoch": 1.83, "learning_rate": 1.098056950167231e-05, "loss": 2.0947, "step": 1765 },
    { "epoch": 1.83, "learning_rate": 1.0328951517371475e-05, "loss": 2.093, "step": 1770 },
    { "epoch": 1.84, "learning_rate": 9.696925392814515e-06, "loss": 2.0803, "step": 1775 },
    { "epoch": 1.84, "learning_rate": 9.084533874997534e-06, "loss": 2.0798, "step": 1780 },
    { "epoch": 1.85, "learning_rate": 8.491818382932824e-06, "loss": 2.0915, "step": 1785 },
    { "epoch": 1.85, "learning_rate": 7.918819004847488e-06, "loss": 2.0742, "step": 1790 },
    { "epoch": 1.86, "learning_rate": 7.3655744954722155e-06, "loss": 2.066, "step": 1795 },
    { "epoch": 1.87, "learning_rate": 6.832122273419849e-06, "loss": 2.1094, "step": 1800 },
    { "epoch": 1.87, "learning_rate": 6.318498418654805e-06, "loss": 2.0924, "step": 1805 },
    { "epoch": 1.88, "learning_rate": 5.824737670052693e-06, "loss": 2.0867, "step": 1810 },
    { "epoch": 1.88, "learning_rate": 5.350873423050828e-06, "loss": 2.0771, "step": 1815 },
    { "epoch": 1.89, "learning_rate": 4.8969377273894385e-06, "loss": 2.0997, "step": 1820 },
    { "epoch": 1.89, "learning_rate": 4.4629612849440996e-06, "loss": 2.0607, "step": 1825 },
    { "epoch": 1.9, "learning_rate": 4.048973447649095e-06, "loss": 2.1042, "step": 1830 },
    { "epoch": 1.9, "learning_rate": 3.6550022155124035e-06, "loss": 2.095, "step": 1835 },
    { "epoch": 1.91, "learning_rate": 3.2810742347216146e-06, "loss": 2.0786, "step": 1840 },
    { "epoch": 1.91, "learning_rate": 2.927214795842037e-06, "loss": 2.0814, "step": 1845 },
    { "epoch": 1.92, "learning_rate": 2.593447832105966e-06, "loss": 2.0981, "step": 1850 },
    { "epoch": 1.92, "learning_rate": 2.279795917794014e-06, "loss": 2.109, "step": 1855 },
    { "epoch": 1.93, "learning_rate": 1.9862802667083954e-06, "loss": 2.0946, "step": 1860 },
    { "epoch": 1.93, "learning_rate": 1.7129207307379433e-06, "loss": 2.0744, "step": 1865 },
    { "epoch": 1.94, "learning_rate": 1.4597357985156154e-06, "loss": 2.0835, "step": 1870 },
    { "epoch": 1.94, "learning_rate": 1.2267425941679287e-06, "loss": 2.0696, "step": 1875 },
    { "epoch": 1.95, "learning_rate": 1.0139568761567852e-06, "loss": 2.0975, "step": 1880 },
    { "epoch": 1.95, "learning_rate": 8.213930362136578e-07, "loss": 2.0809, "step": 1885 },
    { "epoch": 1.96, "learning_rate": 6.490640983661366e-07, "loss": 2.0819, "step": 1890 },
    { "epoch": 1.96, "learning_rate": 4.969817180572321e-07, "loss": 2.105, "step": 1895 },
    { "epoch": 1.97, "learning_rate": 3.6515618135687376e-07, "loss": 2.0889, "step": 1900 },
    { "epoch": 1.97, "learning_rate": 2.5359640426629856e-07, "loss": 2.0978, "step": 1905 },
    { "epoch": 1.98, "learning_rate": 1.623099321149679e-07, "loss": 2.0995, "step": 1910 },
    { "epoch": 1.98, "learning_rate": 9.13029390503417e-08, "loss": 2.0786, "step": 1915 },
    { "epoch": 1.99, "learning_rate": 4.058022762021274e-08, "loss": 2.0808, "step": 1920 },
    { "epoch": 1.99, "learning_rate": 1.0145228447833164e-08, "loss": 2.0796, "step": 1925 },
    { "epoch": 2.0, "learning_rate": 0.0, "loss": 2.0885, "step": 1930 },
    { "epoch": 2.0, "eval_loss": 2.0213370323181152, "eval_runtime": 38.6316, "eval_samples_per_second": 335.839, "eval_steps_per_second": 41.986, "step": 1930 },
    { "epoch": 2.0, "step": 1930, "total_flos": 6.455806557526426e+16, "train_loss": 2.144662865950036, "train_runtime": 4258.8623, "train_samples_per_second": 116.039, "train_steps_per_second": 0.453 }
  ],
  "max_steps": 1930,
  "num_train_epochs": 2,
  "total_flos": 6.455806557526426e+16,
  "trial_name": null,
  "trial_params": null
}