{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.908103916246607,
  "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0001999867761371633,
      "loss": 1.0435,
      "step": 50
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019993306018843102,
      "loss": 0.8918,
      "step": 100
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019983804784290833,
      "loss": 0.8874,
      "step": 150
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019970177836355307,
      "loss": 0.8839,
      "step": 200
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00019961818913082012,
      "loss": 0.8801,
      "step": 225
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00019952430806244534,
      "loss": 0.8753,
      "step": 250
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019942014485754635,
      "loss": 0.8754,
      "step": 275
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00019930571027751713,
      "loss": 0.8751,
      "step": 300
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0001991810161449164,
      "loss": 0.8819,
      "step": 325
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019904607534224612,
      "loss": 0.8744,
      "step": 350
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00019890090181062063,
      "loss": 0.8735,
      "step": 375
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019874551054832625,
      "loss": 0.8703,
      "step": 400
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019857991760927193,
      "loss": 0.8715,
      "step": 425
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00019840414010133045,
      "loss": 0.8714,
      "step": 450
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00019821819618457114,
      "loss": 0.8653,
      "step": 475
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001980221050693837,
      "loss": 0.8716,
      "step": 500
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00019781588701449338,
      "loss": 0.8695,
      "step": 525
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0001975995633248682,
      "loss": 0.8746,
      "step": 550
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00019737315634951762,
      "loss": 0.8731,
      "step": 575
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00019713668947918386,
      "loss": 0.867,
      "step": 600
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0001968901871439252,
      "loss": 0.8706,
      "step": 625
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.000196633674810592,
      "loss": 0.8595,
      "step": 650
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0001963671789801958,
      "loss": 0.8627,
      "step": 675
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.0001960907271851712,
      "loss": 0.8607,
      "step": 700
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.00019580434798653173,
      "loss": 0.858,
      "step": 725
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00019550807097091876,
      "loss": 0.8589,
      "step": 750
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00019520192674754515,
      "loss": 0.8561,
      "step": 775
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00019488594694503264,
      "loss": 0.8576,
      "step": 800
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00019456016420814446,
      "loss": 0.8597,
      "step": 825
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.00019422461219441254,
      "loss": 0.862,
      "step": 850
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.00019387932557066035,
      "loss": 0.8577,
      "step": 875
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00019352434000942127,
      "loss": 0.8632,
      "step": 900
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00019315969218525333,
      "loss": 0.8567,
      "step": 925
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.00019278541977095005,
      "loss": 0.8501,
      "step": 950
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00019240156143364844,
      "loss": 0.8596,
      "step": 975
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.00019200815683083434,
      "loss": 0.8556,
      "step": 1000
    },
    {
      "epoch": 0.39,
      "eval_loss": 0.8521950244903564,
      "eval_runtime": 59.8838,
      "eval_samples_per_second": 12.19,
      "eval_steps_per_second": 0.885,
      "step": 1000
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00019160524660624505,
      "loss": 0.8531,
      "step": 1025
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00019119287238567045,
      "loss": 0.8513,
      "step": 1050
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00019077107677265253,
      "loss": 0.8502,
      "step": 1075
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00019033990334408384,
      "loss": 0.8469,
      "step": 1100
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00018989939664570545,
      "loss": 0.8495,
      "step": 1125
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.00018944960218750484,
      "loss": 0.8485,
      "step": 1150
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00018899056643901404,
      "loss": 0.8534,
      "step": 1175
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00018852233682450893,
      "loss": 0.8531,
      "step": 1200
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00018804496171810948,
      "loss": 0.8509,
      "step": 1225
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.00018755849043878222,
      "loss": 0.8445,
      "step": 1250
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0001870629732452449,
      "loss": 0.8548,
      "step": 1275
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.00018655846133077417,
      "loss": 0.8441,
      "step": 1300
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00018604500681791656,
      "loss": 0.8533,
      "step": 1325
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.00018552266275310373,
      "loss": 0.8505,
      "step": 1350
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0001849914831011719,
      "loss": 0.8544,
      "step": 1375
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.00018445152273978668,
      "loss": 0.845,
      "step": 1400
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.00018390283745377354,
      "loss": 0.8376,
      "step": 1425
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.0001833454839293545,
      "loss": 0.847,
      "step": 1450
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00018277951974829163,
      "loss": 0.8473,
      "step": 1475
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0001822050033819382,
      "loss": 0.8438,
      "step": 1500
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.00018162199418519785,
      "loss": 0.8418,
      "step": 1525
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.00018103055239039243,
      "loss": 0.842,
      "step": 1550
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.0001804307391010393,
      "loss": 0.8435,
      "step": 1575
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.00017982261628553842,
      "loss": 0.8349,
      "step": 1600
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0001792062467707703,
      "loss": 0.8483,
      "step": 1625
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0001785816942356052,
      "loss": 0.8387,
      "step": 1650
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.00017794902320432429,
      "loss": 0.843,
      "step": 1675
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.00017730829903995333,
      "loss": 0.8424,
      "step": 1700
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00017665958793751006,
      "loss": 0.8418,
      "step": 1725
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00017600295691716522,
      "loss": 0.8384,
      "step": 1750
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00017533847381731856,
      "loss": 0.8445,
      "step": 1775
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.00017466620728759033,
      "loss": 0.8446,
      "step": 1800
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.00017398622678172878,
      "loss": 0.838,
      "step": 1825
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0001732986025504348,
      "loss": 0.8415,
      "step": 1850
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.000172603405634104,
      "loss": 0.8357,
      "step": 1875
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00017190070785548755,
      "loss": 0.8311,
      "step": 1900
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0001711905818122717,
      "loss": 0.8333,
      "step": 1925
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0001704731008695777,
      "loss": 0.8387,
      "step": 1950
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0001697483391523821,
      "loss": 0.8442,
      "step": 1975
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00016901637153785885,
      "loss": 0.8399,
      "step": 2000
    },
    {
      "epoch": 0.78,
      "eval_loss": 0.8339959383010864,
      "eval_runtime": 58.5829,
      "eval_samples_per_second": 12.461,
      "eval_steps_per_second": 0.905,
      "step": 2000
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0001682772736476434,
      "loss": 0.8334,
      "step": 2025
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0001675311218400201,
      "loss": 0.835,
      "step": 2050
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00016677799320203332,
      "loss": 0.8368,
      "step": 2075
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00016601796554152344,
      "loss": 0.8278,
      "step": 2100
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00016525111737908827,
      "loss": 0.8334,
      "step": 2125
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00016447752793997096,
      "loss": 0.8416,
      "step": 2150
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00016369727714587483,
      "loss": 0.8297,
      "step": 2175
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001629104456067066,
      "loss": 0.8327,
      "step": 2200
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00016211711461224825,
      "loss": 0.8324,
      "step": 2225
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.0001613173661237589,
      "loss": 0.8313,
      "step": 2250
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.0001605112827655069,
      "loss": 0.8292,
      "step": 2275
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0001596989478162339,
      "loss": 0.8334,
      "step": 2300
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00015888044520055106,
      "loss": 0.8352,
      "step": 2325
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00015805585948026852,
      "loss": 0.823,
      "step": 2350
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.000157225275845659,
      "loss": 0.8293,
      "step": 2375
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00015638878010665672,
      "loss": 0.8289,
      "step": 2400
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00015554645868399205,
      "loss": 0.832,
      "step": 2425
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00015469839860026308,
      "loss": 0.8294,
      "step": 2450
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0001538446874709452,
      "loss": 0.8281,
      "step": 2475
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00015298541349533925,
      "loss": 0.8314,
      "step": 2500
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.00015212066544745926,
      "loss": 0.831,
      "step": 2525
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00015125053266686124,
      "loss": 0.8319,
      "step": 2550
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00015037510504941303,
      "loss": 0.8259,
      "step": 2575
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00014949447303800695,
      "loss": 0.8133,
      "step": 2600
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00014860872761321593,
      "loss": 0.8139,
      "step": 2625
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00014771796028389405,
      "loss": 0.804,
      "step": 2650
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0001468222630777225,
      "loss": 0.8011,
      "step": 2675
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00014592172853170193,
      "loss": 0.8037,
      "step": 2700
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00014501644968259212,
      "loss": 0.8063,
      "step": 2725
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.00014410652005730025,
      "loss": 0.8155,
      "step": 2750
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00014319203366321826,
      "loss": 0.8066,
      "step": 2775
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0001422730849785107,
      "loss": 0.8091,
      "step": 2800
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.0001413497689423539,
      "loss": 0.8067,
      "step": 2825
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00014042218094512755,
      "loss": 0.8046,
      "step": 2850
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00013949041681855985,
      "loss": 0.8053,
      "step": 2875
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0001385545728258264,
      "loss": 0.8075,
      "step": 2900
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001376147456516055,
      "loss": 0.8015,
      "step": 2925
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00013667103239208903,
      "loss": 0.8016,
      "step": 2950
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00013572353054495126,
      "loss": 0.8029,
      "step": 2975
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0001347723379992762,
      "loss": 0.8017,
      "step": 3000
    },
    {
      "epoch": 1.16,
      "eval_loss": 0.8229297995567322,
      "eval_runtime": 59.3398,
      "eval_samples_per_second": 12.302,
      "eval_steps_per_second": 0.893,
      "step": 3000
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.0001338175530254443,
      "loss": 0.8049,
      "step": 3025
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00013285927426497985,
      "loss": 0.8027,
      "step": 3050
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00013189760072036008,
      "loss": 0.8028,
      "step": 3075
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001309326317447869,
      "loss": 0.8021,
      "step": 3100
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00012996446703192257,
      "loss": 0.8033,
      "step": 3125
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00012899320660558986,
      "loss": 0.8016,
      "step": 3150
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00012801895080943846,
      "loss": 0.7995,
      "step": 3175
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001270418002965782,
      "loss": 0.799,
      "step": 3200
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0001260618560191802,
      "loss": 0.8002,
      "step": 3225
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00012507921921804717,
      "loss": 0.8068,
      "step": 3250
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00012409399141215423,
      "loss": 0.8041,
      "step": 3275
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.0001231062743881603,
      "loss": 0.7999,
      "step": 3300
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001221161701898926,
      "loss": 0.7995,
      "step": 3325
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00012112378110780391,
      "loss": 0.7959,
      "step": 3350
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00012012920966840486,
      "loss": 0.7999,
      "step": 3375
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00011913255862367151,
      "loss": 0.8016,
      "step": 3400
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00011813393094042993,
      "loss": 0.7944,
      "step": 3425
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0001171334297897181,
      "loss": 0.8026,
      "step": 3450
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00011613115853612734,
      "loss": 0.8004,
      "step": 3475
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00011512722072712321,
      "loss": 0.7992,
      "step": 3500
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00011412172008234785,
      "loss": 0.8004,
      "step": 3525
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.0001131147604829043,
      "loss": 0.8009,
      "step": 3550
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.00011210644596062439,
      "loss": 0.7993,
      "step": 3575
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00011109688068732081,
      "loss": 0.7965,
      "step": 3600
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00011008616896402482,
      "loss": 0.7991,
      "step": 3625
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00010907441521021072,
      "loss": 0.8026,
      "step": 3650
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00010806172395300789,
      "loss": 0.7941,
      "step": 3675
    },
    {
      "epoch": 1.43,
      "learning_rate": 0.00010704819981640186,
      "loss": 0.7989,
      "step": 3700
    },
    {
      "epoch": 1.44,
      "learning_rate": 0.00010603394751042522,
      "loss": 0.7981,
      "step": 3725
    },
    {
      "epoch": 1.45,
      "learning_rate": 0.00010501907182033979,
      "loss": 0.7985,
      "step": 3750
    },
    {
      "epoch": 1.46,
      "learning_rate": 0.000104003677595811,
      "loss": 0.7921,
      "step": 3775
    },
    {
      "epoch": 1.47,
      "learning_rate": 0.00010298786974007555,
      "loss": 0.8012,
      "step": 3800
    },
    {
      "epoch": 1.48,
      "learning_rate": 0.00010197175319910343,
      "loss": 0.7906,
      "step": 3825
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.00010095543295075593,
      "loss": 0.7928,
      "step": 3850
    },
    {
      "epoch": 1.5,
      "learning_rate": 9.993901399393979e-05,
      "loss": 0.8018,
      "step": 3875
    },
    {
      "epoch": 1.51,
      "learning_rate": 9.892260133775968e-05,
      "loss": 0.7991,
      "step": 3900
    },
    {
      "epoch": 1.52,
      "learning_rate": 9.79062999906693e-05,
      "loss": 0.795,
      "step": 3925
    },
    {
      "epoch": 1.53,
      "learning_rate": 9.68902149496227e-05,
      "loss": 0.7977,
      "step": 3950
    },
    {
      "epoch": 1.54,
      "learning_rate": 9.587445118922674e-05,
      "loss": 0.8013,
      "step": 3975
    },
    {
      "epoch": 1.55,
      "learning_rate": 9.485911365089589e-05,
      "loss": 0.7978,
      "step": 4000
    },
    {
      "epoch": 1.55,
      "eval_loss": 0.8142631649971008,
      "eval_runtime": 59.4108,
      "eval_samples_per_second": 12.287,
      "eval_steps_per_second": 0.892,
      "step": 4000
    },
    {
      "epoch": 1.56,
      "learning_rate": 9.384430723201036e-05,
      "loss": 0.7912,
      "step": 4025
    },
    {
      "epoch": 1.57,
      "learning_rate": 9.283013677507902e-05,
      "loss": 0.7919,
      "step": 4050
    },
    {
      "epoch": 1.58,
      "learning_rate": 9.181670705690761e-05,
      "loss": 0.7919,
      "step": 4075
    },
    {
      "epoch": 1.59,
      "learning_rate": 9.080412277777413e-05,
      "loss": 0.8018,
      "step": 4100
    },
    {
      "epoch": 1.6,
      "learning_rate": 8.979248855061188e-05,
      "loss": 0.7811,
      "step": 4125
    },
    {
      "epoch": 1.61,
      "learning_rate": 8.878190889020159e-05,
      "loss": 0.7919,
      "step": 4150
    },
    {
      "epoch": 1.62,
      "learning_rate": 8.777248820237376e-05,
      "loss": 0.7994,
      "step": 4175
    },
    {
      "epoch": 1.63,
      "learning_rate": 8.676433077322215e-05,
      "loss": 0.7956,
      "step": 4200
    },
    {
      "epoch": 1.64,
      "learning_rate": 8.575754075832973e-05,
      "loss": 0.7968,
      "step": 4225
    },
    {
      "epoch": 1.65,
      "learning_rate": 8.475222217200801e-05,
      "loss": 0.7905,
      "step": 4250
    },
    {
      "epoch": 1.66,
      "learning_rate": 8.374847887655112e-05,
      "loss": 0.7889,
      "step": 4275
    },
    {
      "epoch": 1.67,
      "learning_rate": 8.274641457150543e-05,
      "loss": 0.7988,
      "step": 4300
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.174613278295608e-05,
      "loss": 0.7947,
      "step": 4325
    },
    {
      "epoch": 1.69,
      "learning_rate": 8.074773685283137e-05,
      "loss": 0.7929,
      "step": 4350
    },
    {
      "epoch": 1.7,
      "learning_rate": 7.97513299282264e-05,
      "loss": 0.7949,
      "step": 4375
    },
    {
      "epoch": 1.71,
      "learning_rate": 7.875701495074638e-05,
      "loss": 0.7925,
      "step": 4400
    },
    {
      "epoch": 1.72,
      "learning_rate": 7.776489464587158e-05,
      "loss": 0.7917,
      "step": 4425
    },
    {
      "epoch": 1.73,
      "learning_rate": 7.677507151234448e-05,
      "loss": 0.7905,
      "step": 4450
    },
    {
      "epoch": 1.74,
      "learning_rate": 7.578764781158034e-05,
      "loss": 0.7912,
      "step": 4475
    },
    {
      "epoch": 1.74,
      "learning_rate": 7.480272555710227e-05,
      "loss": 0.8006,
      "step": 4500
    },
    {
      "epoch": 1.75,
      "learning_rate": 7.382040650400185e-05,
      "loss": 0.7937,
      "step": 4525
    },
    {
      "epoch": 1.76,
      "learning_rate": 7.28407921384267e-05,
      "loss": 0.794,
      "step": 4550
    },
    {
      "epoch": 1.77,
      "learning_rate": 7.186398366709545e-05,
      "loss": 0.7931,
      "step": 4575
    },
    {
      "epoch": 1.78,
      "learning_rate": 7.089008200684197e-05,
      "loss": 0.7982,
      "step": 4600
    },
    {
      "epoch": 1.79,
      "learning_rate": 6.991918777418928e-05,
      "loss": 0.7916,
      "step": 4625
    },
    {
      "epoch": 1.8,
      "learning_rate": 6.895140127495455e-05,
      "loss": 0.7919,
      "step": 4650
    },
    {
      "epoch": 1.81,
      "learning_rate": 6.798682249388631e-05,
      "loss": 0.7863,
      "step": 4675
    },
    {
      "epoch": 1.82,
      "learning_rate": 6.702555108433461e-05,
      "loss": 0.789,
      "step": 4700
    },
    {
      "epoch": 1.83,
      "learning_rate": 6.606768635795574e-05,
      "loss": 0.7902,
      "step": 4725
    },
    {
      "epoch": 1.84,
      "learning_rate": 6.511332727445191e-05,
      "loss": 0.7924,
      "step": 4750
    },
    {
      "epoch": 1.85,
      "learning_rate": 6.416257243134747e-05,
      "loss": 0.7957,
      "step": 4775
    },
    {
      "epoch": 1.86,
      "learning_rate": 6.321552005380256e-05,
      "loss": 0.7916,
      "step": 4800
    },
    {
      "epoch": 1.87,
      "learning_rate": 6.22722679844652e-05,
      "loss": 0.7867,
      "step": 4825
    },
    {
      "epoch": 1.88,
      "learning_rate": 6.133291367336284e-05,
      "loss": 0.7944,
      "step": 4850
    },
    {
      "epoch": 1.89,
      "learning_rate": 6.039755416783457e-05,
      "loss": 0.7982,
      "step": 4875
    },
    {
      "epoch": 1.9,
      "learning_rate": 5.946628610250484e-05,
      "loss": 0.7918,
      "step": 4900
    },
    {
      "epoch": 1.91,
      "learning_rate": 5.853920568929996e-05,
      "loss": 0.7921,
      "step": 4925
    },
    {
      "epoch": 1.92,
      "learning_rate": 5.761640870750799e-05,
      "loss": 0.7878,
      "step": 4950
    },
    {
      "epoch": 1.93,
      "learning_rate": 5.669799049388375e-05,
      "loss": 0.7901,
      "step": 4975
    },
    {
      "epoch": 1.94,
      "learning_rate": 5.578404593279911e-05,
      "loss": 0.7858,
      "step": 5000
    },
    {
      "epoch": 1.94,
      "eval_loss": 0.807844877243042,
      "eval_runtime": 59.586,
      "eval_samples_per_second": 12.251,
      "eval_steps_per_second": 0.889,
      "step": 5000
    },
    {
      "epoch": 1.95,
      "learning_rate": 5.487466944644033e-05,
      "loss": 0.7902,
      "step": 5025
    },
    {
      "epoch": 1.96,
      "learning_rate": 5.3969954985052996e-05,
      "loss": 0.7979,
      "step": 5050
    },
    {
      "epoch": 1.97,
      "learning_rate": 5.306999601723579e-05,
      "loss": 0.7931,
      "step": 5075
    },
    {
      "epoch": 1.98,
      "learning_rate": 5.21748855202839e-05,
      "loss": 0.7868,
      "step": 5100
    },
    {
      "epoch": 1.99,
      "learning_rate": 5.128471597058342e-05,
      "loss": 0.7993,
      "step": 5125
    },
    {
      "epoch": 2.0,
      "learning_rate": 5.03995793340572e-05,
      "loss": 0.7892,
      "step": 5150
    },
    {
      "epoch": 2.01,
      "learning_rate": 4.9519567056663694e-05,
      "loss": 0.7788,
      "step": 5175
    },
    {
      "epoch": 2.02,
      "learning_rate": 4.864477005494938e-05,
      "loss": 0.7654,
      "step": 5200
    },
    {
      "epoch": 2.03,
      "learning_rate": 4.777527870665592e-05,
      "loss": 0.7468,
      "step": 5225
    },
    {
      "epoch": 2.04,
      "learning_rate": 4.691118284138296e-05,
      "loss": 0.7359,
      "step": 5250
    },
    {
      "epoch": 2.05,
      "learning_rate": 4.605257173130763e-05,
      "loss": 0.7422,
      "step": 5275
    },
    {
      "epoch": 2.06,
      "learning_rate": 4.519953408196152e-05,
      "loss": 0.7424,
      "step": 5300
    },
    {
      "epoch": 2.06,
      "learning_rate": 4.435215802306635e-05,
      "loss": 0.7521,
      "step": 5325
    },
    {
      "epoch": 2.07,
      "learning_rate": 4.351053109942894e-05,
      "loss": 0.7477,
      "step": 5350
    },
    {
      "epoch": 2.08,
      "learning_rate": 4.2674740261896776e-05,
      "loss": 0.7456,
      "step": 5375
    },
    {
      "epoch": 2.09,
      "learning_rate": 4.1844871858374844e-05,
      "loss": 0.766,
      "step": 5400
    },
    {
      "epoch": 2.1,
      "learning_rate": 4.1021011624904814e-05,
      "loss": 0.7664,
      "step": 5425
    },
    {
      "epoch": 2.11,
      "learning_rate": 4.0203244676807353e-05,
      "loss": 0.7703,
      "step": 5450
    },
    {
      "epoch": 2.12,
      "learning_rate": 3.939165549988873e-05,
      "loss": 0.7674,
      "step": 5475
    },
    {
      "epoch": 2.13,
      "learning_rate": 3.858632794171222e-05,
      "loss": 0.7722,
      "step": 5500
    },
    {
      "epoch": 2.14,
      "learning_rate": 3.778734520293562e-05,
      "loss": 0.7716,
      "step": 5525
    },
    {
      "epoch": 2.15,
      "learning_rate": 3.699478982871561e-05,
      "loss": 0.7795,
      "step": 5550
    },
    {
      "epoch": 2.16,
      "learning_rate": 3.62087437001797e-05,
      "loss": 0.7728,
      "step": 5575
    },
    {
      "epoch": 2.17,
      "learning_rate": 3.5429288025966944e-05,
      "loss": 0.7709,
      "step": 5600
    },
    {
      "epoch": 2.18,
      "learning_rate": 3.4656503333837956e-05,
      "loss": 0.7682,
      "step": 5625
    },
    {
      "epoch": 2.19,
      "learning_rate": 3.389046946235542e-05,
      "loss": 0.7734,
      "step": 5650
    },
    {
      "epoch": 2.2,
      "learning_rate": 3.313126555263576e-05,
      "loss": 0.7716,
      "step": 5675
    },
    {
      "epoch": 2.21,
      "learning_rate": 3.237897004017276e-05,
      "loss": 0.7716,
      "step": 5700
    },
    {
      "epoch": 2.22,
      "learning_rate": 3.163366064673427e-05,
      "loss": 0.7721,
      "step": 5725
    },
    {
      "epoch": 2.23,
      "learning_rate": 3.089541437233252e-05,
      "loss": 0.7658,
      "step": 5750
    },
    {
      "epoch": 2.24,
      "learning_rate": 3.0164307487268996e-05,
      "loss": 0.7716,
      "step": 5775
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.944041552425475e-05,
      "loss": 0.7687,
      "step": 5800
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.8723813270606982e-05,
      "loss": 0.7698,
      "step": 5825
    },
    {
      "epoch": 2.27,
      "learning_rate": 2.8014574760522416e-05,
      "loss": 0.7641,
      "step": 5850
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.731277326742876e-05,
      "loss": 0.7746,
      "step": 5875
    },
    {
      "epoch": 2.29,
      "learning_rate": 2.6618481296414522e-05,
      "loss": 0.7722,
      "step": 5900
    },
    {
      "epoch": 2.3,
      "learning_rate": 2.5931770576738313e-05,
      "loss": 0.7698,
      "step": 5925
    },
    {
      "epoch": 2.31,
      "learning_rate": 2.525271205441837e-05,
      "loss": 0.7751,
      "step": 5950
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.45813758849028e-05,
      "loss": 0.766,
      "step": 5975
    },
    {
      "epoch": 2.33,
      "learning_rate": 2.3917831425821824e-05,
      "loss": 0.7673,
      "step": 6000
    },
    {
      "epoch": 2.33,
      "eval_loss": 0.806900680065155,
      "eval_runtime": 58.3254,
      "eval_samples_per_second": 12.516,
      "eval_steps_per_second": 0.909,
      "step": 6000
    },
    {
      "epoch": 2.34,
      "learning_rate": 2.3262147229821984e-05,
      "loss": 0.7679,
      "step": 6025
    },
    {
      "epoch": 2.35,
      "learning_rate": 2.2614391037483983e-05,
      "loss": 0.7704,
      "step": 6050
    },
    {
      "epoch": 2.36,
      "learning_rate": 2.1974629770324106e-05,
      "loss": 0.7661,
      "step": 6075
    },
    {
      "epoch": 2.37,
      "learning_rate": 2.1342929523880416e-05,
      "loss": 0.7652,
      "step": 6100
    },
    {
      "epoch": 2.37,
      "learning_rate": 2.0719355560884246e-05,
      "loss": 0.765,
      "step": 6125
    },
    {
      "epoch": 2.38,
      "learning_rate": 2.010397230451766e-05,
      "loss": 0.7704,
      "step": 6150
    },
    {
      "epoch": 2.39,
      "learning_rate": 1.9496843331757784e-05,
      "loss": 0.767,
      "step": 6175
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.8898031366808467e-05,
      "loss": 0.7654,
      "step": 6200
    },
    {
      "epoch": 2.41,
      "learning_rate": 1.830759827462004e-05,
      "loss": 0.7753,
      "step": 6225
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.7725605054497906e-05,
      "loss": 0.7725,
      "step": 6250
    },
    {
      "epoch": 2.43,
      "learning_rate": 1.7152111833800522e-05,
      "loss": 0.7698,
      "step": 6275
    },
    {
      "epoch": 2.44,
      "learning_rate": 1.6587177861727454e-05,
      "loss": 0.7703,
      "step": 6300
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.6030861503198204e-05,
      "loss": 0.7658,
      "step": 6325
    },
    {
      "epoch": 2.46,
      "learning_rate": 1.548322023282235e-05,
      "loss": 0.7677,
      "step": 6350
    },
    {
      "epoch": 2.47,
      "learning_rate": 1.4944310628961666e-05,
      "loss": 0.7694,
      "step": 6375
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.4414188367884907e-05,
      "loss": 0.7668,
      "step": 6400
    },
    {
      "epoch": 2.49,
      "learning_rate": 1.3892908218015654e-05,
      "loss": 0.7662,
      "step": 6425
    },
    {
      "epoch": 2.5,
      "learning_rate": 1.3380524034274122e-05,
      "loss": 0.7689,
      "step": 6450
    },
    {
      "epoch": 2.51,
      "learning_rate": 1.2877088752513189e-05,
      "loss": 0.7694,
      "step": 6475
    },
    {
      "epoch": 2.52,
      "learning_rate": 1.2382654384049475e-05,
      "loss": 0.7714,
      "step": 6500
    },
    {
      "epoch": 2.53,
      "learning_rate": 1.1897272010289884e-05,
      "loss": 0.7701,
      "step": 6525
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.1420991777454315e-05,
      "loss": 0.7628,
      "step": 6550
    },
    {
      "epoch": 2.55,
      "learning_rate": 1.0953862891394795e-05,
      "loss": 0.7661,
      "step": 6575
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.0495933612511976e-05,
      "loss": 0.7729,
      "step": 6600
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.0047251250769175e-05,
      "loss": 0.772,
      "step": 6625
    },
    {
      "epoch": 2.58,
      "learning_rate": 9.60786216080466e-06,
      "loss": 0.7702,
      "step": 6650
    },
    {
      "epoch": 2.59,
      "learning_rate": 9.177811737142627e-06,
      "loss": 0.7711,
      "step": 6675
    },
    {
      "epoch": 2.6,
      "learning_rate": 8.757144409503359e-06,
      "loss": 0.765,
      "step": 6700
    },
    {
      "epoch": 2.61,
      "learning_rate": 8.34590363821306e-06,
      "loss": 0.7713,
      "step": 6725
    },
    {
      "epoch": 2.62,
      "learning_rate": 7.944131909713859e-06,
      "loss": 0.7631,
      "step": 6750
    },
    {
      "epoch": 2.63,
      "learning_rate": 7.551870732174416e-06,
      "loss": 0.767,
      "step": 6775
    },
    {
      "epoch": 2.64,
      "learning_rate": 7.169160631201566e-06,
      "loss": 0.7692,
      "step": 6800
    },
    {
      "epoch": 2.65,
      "learning_rate": 6.796041145653553e-06,
      "loss": 0.7677,
      "step": 6825
    },
    {
      "epoch": 2.66,
      "learning_rate": 6.432550823555128e-06,
      "loss": 0.7706,
      "step": 6850
    },
    {
      "epoch": 2.67,
      "learning_rate": 6.078727218115043e-06,
      "loss": 0.7678,
      "step": 6875
    },
    {
      "epoch": 2.68,
      "learning_rate": 5.734606883846338e-06,
      "loss": 0.7717,
      "step": 6900
    },
    {
      "epoch": 2.69,
      "learning_rate": 5.40022537278978e-06,
      "loss": 0.7701,
      "step": 6925
    },
    {
      "epoch": 2.69,
      "learning_rate": 5.07561723084089e-06,
      "loss": 0.7694,
      "step": 6950
    },
    {
      "epoch": 2.7,
      "learning_rate": 4.7608159941809e-06,
      "loss": 0.7659,
      "step": 6975
    },
    {
      "epoch": 2.71,
      "learning_rate": 4.455854185812047e-06,
      "loss": 0.7639,
      "step": 7000
    },
    {
      "epoch": 2.71,
      "eval_loss": 0.8055068850517273,
      "eval_runtime": 58.7443,
      "eval_samples_per_second": 12.427,
      "eval_steps_per_second": 0.902,
      "step": 7000
    },
    {
      "epoch": 2.72,
      "learning_rate": 4.160763312197513e-06,
      "loss": 0.7724,
      "step": 7025
    },
    {
      "epoch": 2.73,
      "learning_rate": 3.875573860006421e-06,
      "loss": 0.7696,
      "step": 7050
    },
    {
      "epoch": 2.74,
      "learning_rate": 3.6003152929641624e-06,
      "loss": 0.7625,
      "step": 7075
    },
    {
      "epoch": 2.75,
      "learning_rate": 3.335016048808437e-06,
      "loss": 0.7733,
      "step": 7100
    },
    {
      "epoch": 2.76,
      "learning_rate": 3.0797035363512193e-06,
      "loss": 0.7685,
      "step": 7125
    },
    {
      "epoch": 2.77,
      "learning_rate": 2.834404132647128e-06,
      "loss": 0.769,
      "step": 7150
    },
    {
      "epoch": 2.78,
      "learning_rate": 2.5991431802683262e-06,
      "loss": 0.7647,
      "step": 7175
    },
    {
      "epoch": 2.79,
      "learning_rate": 2.3739449846862826e-06,
      "loss": 0.7634,
      "step": 7200
    },
    {
      "epoch": 2.8,
      "learning_rate": 2.158832811760736e-06,
      "loss": 0.7617,
      "step": 7225
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.95382888533604e-06,
      "loss": 0.768,
      "step": 7250
    },
    {
      "epoch": 2.82,
      "learning_rate": 1.7589543849450996e-06,
      "loss": 0.7712,
      "step": 7275
    },
    {
      "epoch": 2.83,
      "learning_rate": 1.5742294436213223e-06,
      "loss": 0.7703,
      "step": 7300
    },
    {
      "epoch": 2.84,
      "learning_rate": 1.3996731458185697e-06,
      "loss": 0.7618,
      "step": 7325
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.2353035254395352e-06,
      "loss": 0.7767,
      "step": 7350
    },
    {
      "epoch": 2.86,
      "learning_rate": 1.0811375639725341e-06,
      "loss": 0.7659,
      "step": 7375
    },
    {
      "epoch": 2.87,
      "learning_rate": 9.371911887371965e-07,
      "loss": 0.774,
      "step": 7400
    },
    {
      "epoch": 2.88,
      "learning_rate": 8.034792712388828e-07,
      "loss": 0.7658,
      "step": 7425
    },
    {
      "epoch": 2.89,
      "learning_rate": 6.800156256323243e-07,
      "loss": 0.7626,
      "step": 7450
    },
    {
      "epoch": 2.9,
      "learning_rate": 5.6681300729442e-07,
      "loss": 0.7744,
      "step": 7475
    },
    {
      "epoch": 2.91,
      "learning_rate": 4.6388311150644683e-07,
      "loss": 0.7768,
      "step": 7500
    }
  ],
  "max_steps": 7737,
  "num_train_epochs": 3,
  "total_flos": 3.2299438278321373e+19,
  "trial_name": null,
  "trial_params": null
}