{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.971724787935909,
  "eval_steps": 500,
  "global_step": 792,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03770028275212064,
      "grad_norm": 3.988708734512329,
      "learning_rate": 4.9995083170283816e-05,
      "loss": 4.6192,
      "num_input_tokens_seen": 50400,
      "step": 5
    },
    {
      "epoch": 0.07540056550424128,
      "grad_norm": 2.142688512802124,
      "learning_rate": 4.998033461515242e-05,
      "loss": 3.9149,
      "num_input_tokens_seen": 104016,
      "step": 10
    },
    {
      "epoch": 0.11310084825636192,
      "grad_norm": 1.5928359031677246,
      "learning_rate": 4.9955760135896534e-05,
      "loss": 3.6912,
      "num_input_tokens_seen": 155584,
      "step": 15
    },
    {
      "epoch": 0.15080113100848255,
      "grad_norm": 1.5493167638778687,
      "learning_rate": 4.992136939879856e-05,
      "loss": 3.5556,
      "num_input_tokens_seen": 202672,
      "step": 20
    },
    {
      "epoch": 0.1885014137606032,
      "grad_norm": 1.7764347791671753,
      "learning_rate": 4.9877175931330346e-05,
      "loss": 3.4256,
      "num_input_tokens_seen": 254800,
      "step": 25
    },
    {
      "epoch": 0.22620169651272384,
      "grad_norm": 1.2482728958129883,
      "learning_rate": 4.982319711683221e-05,
      "loss": 3.3128,
      "num_input_tokens_seen": 306352,
      "step": 30
    },
    {
      "epoch": 0.2639019792648445,
      "grad_norm": 1.2829065322875977,
      "learning_rate": 4.975945418767529e-05,
      "loss": 3.2688,
      "num_input_tokens_seen": 356352,
      "step": 35
    },
    {
      "epoch": 0.3016022620169651,
      "grad_norm": 1.513293743133545,
      "learning_rate": 4.968597221690986e-05,
      "loss": 3.297,
      "num_input_tokens_seen": 406672,
      "step": 40
    },
    {
      "epoch": 0.3393025447690858,
      "grad_norm": 1.883090853691101,
      "learning_rate": 4.96027801084029e-05,
      "loss": 3.232,
      "num_input_tokens_seen": 456160,
      "step": 45
    },
    {
      "epoch": 0.3770028275212064,
      "grad_norm": 1.402272343635559,
      "learning_rate": 4.950991058546893e-05,
      "loss": 3.267,
      "num_input_tokens_seen": 509680,
      "step": 50
    },
    {
      "epoch": 0.41470311027332707,
      "grad_norm": 1.5488755702972412,
      "learning_rate": 4.940740017799833e-05,
      "loss": 3.2148,
      "num_input_tokens_seen": 559968,
      "step": 55
    },
    {
      "epoch": 0.4524033930254477,
      "grad_norm": 1.507287859916687,
      "learning_rate": 4.929528920808854e-05,
      "loss": 3.1403,
      "num_input_tokens_seen": 610000,
      "step": 60
    },
    {
      "epoch": 0.49010367577756836,
      "grad_norm": 1.9119170904159546,
      "learning_rate": 4.917362177418342e-05,
      "loss": 3.1515,
      "num_input_tokens_seen": 661280,
      "step": 65
    },
    {
      "epoch": 0.527803958529689,
      "grad_norm": 1.7253235578536987,
      "learning_rate": 4.904244573372733e-05,
      "loss": 3.1468,
      "num_input_tokens_seen": 713264,
      "step": 70
    },
    {
      "epoch": 0.5655042412818096,
      "grad_norm": 1.7201606035232544,
      "learning_rate": 4.8901812684340564e-05,
      "loss": 3.196,
      "num_input_tokens_seen": 762576,
      "step": 75
    },
    {
      "epoch": 0.6032045240339302,
      "grad_norm": 1.6135213375091553,
      "learning_rate": 4.8751777943523634e-05,
      "loss": 3.0593,
      "num_input_tokens_seen": 813392,
      "step": 80
    },
    {
      "epoch": 0.6409048067860509,
      "grad_norm": 1.7381868362426758,
      "learning_rate": 4.8592400526898314e-05,
      "loss": 3.0676,
      "num_input_tokens_seen": 860608,
      "step": 85
    },
    {
      "epoch": 0.6786050895381716,
      "grad_norm": 1.6142843961715698,
      "learning_rate": 4.842374312499405e-05,
      "loss": 3.1061,
      "num_input_tokens_seen": 909104,
      "step": 90
    },
    {
      "epoch": 0.7163053722902922,
      "grad_norm": 2.0389633178710938,
      "learning_rate": 4.824587207858888e-05,
      "loss": 2.9847,
      "num_input_tokens_seen": 959600,
      "step": 95
    },
    {
      "epoch": 0.7540056550424128,
      "grad_norm": 1.923561692237854,
      "learning_rate": 4.805885735261454e-05,
      "loss": 3.0289,
      "num_input_tokens_seen": 1013648,
      "step": 100
    },
    {
      "epoch": 0.7917059377945335,
      "grad_norm": 2.0325896739959717,
      "learning_rate": 4.786277250863599e-05,
      "loss": 2.9474,
      "num_input_tokens_seen": 1065120,
      "step": 105
    },
    {
      "epoch": 0.8294062205466541,
      "grad_norm": 1.6685590744018555,
      "learning_rate": 4.765769467591625e-05,
      "loss": 2.9713,
      "num_input_tokens_seen": 1119424,
      "step": 110
    },
    {
      "epoch": 0.8671065032987747,
      "grad_norm": 2.0325937271118164,
      "learning_rate": 4.744370452107789e-05,
      "loss": 3.0012,
      "num_input_tokens_seen": 1169888,
      "step": 115
    },
    {
      "epoch": 0.9048067860508954,
      "grad_norm": 1.7548010349273682,
      "learning_rate": 4.722088621637309e-05,
      "loss": 3.0399,
      "num_input_tokens_seen": 1218944,
      "step": 120
    },
    {
      "epoch": 0.942507068803016,
      "grad_norm": 1.6709191799163818,
      "learning_rate": 4.698932740657479e-05,
      "loss": 2.9156,
      "num_input_tokens_seen": 1269920,
      "step": 125
    },
    {
      "epoch": 0.9802073515551367,
      "grad_norm": 1.8369653224945068,
      "learning_rate": 4.6749119174501975e-05,
      "loss": 3.0288,
      "num_input_tokens_seen": 1315536,
      "step": 130
    },
    {
      "epoch": 1.0179076343072573,
      "grad_norm": 1.800703525543213,
      "learning_rate": 4.6500356005192514e-05,
      "loss": 2.8911,
      "num_input_tokens_seen": 1360736,
      "step": 135
    },
    {
      "epoch": 1.055607917059378,
      "grad_norm": 1.7134617567062378,
      "learning_rate": 4.6243135748737864e-05,
      "loss": 2.9148,
      "num_input_tokens_seen": 1409808,
      "step": 140
    },
    {
      "epoch": 1.0933081998114986,
      "grad_norm": 1.9385241270065308,
      "learning_rate": 4.597755958179406e-05,
      "loss": 2.868,
      "num_input_tokens_seen": 1460864,
      "step": 145
    },
    {
      "epoch": 1.1310084825636193,
      "grad_norm": 2.1658332347869873,
      "learning_rate": 4.570373196778427e-05,
      "loss": 2.7477,
      "num_input_tokens_seen": 1512640,
      "step": 150
    },
    {
      "epoch": 1.1687087653157398,
      "grad_norm": 2.239896774291992,
      "learning_rate": 4.5421760615808474e-05,
      "loss": 2.932,
      "num_input_tokens_seen": 1556048,
      "step": 155
    },
    {
      "epoch": 1.2064090480678604,
      "grad_norm": 2.0555717945098877,
      "learning_rate": 4.513175643827647e-05,
      "loss": 2.8219,
      "num_input_tokens_seen": 1607232,
      "step": 160
    },
    {
      "epoch": 1.244109330819981,
      "grad_norm": 2.0288779735565186,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 2.8453,
      "num_input_tokens_seen": 1653520,
      "step": 165
    },
    {
      "epoch": 1.2818096135721018,
      "grad_norm": 1.9268651008605957,
      "learning_rate": 4.4528109009727336e-05,
      "loss": 2.7362,
      "num_input_tokens_seen": 1703568,
      "step": 170
    },
    {
      "epoch": 1.3195098963242224,
      "grad_norm": 2.413874387741089,
      "learning_rate": 4.42147032012394e-05,
      "loss": 2.9197,
      "num_input_tokens_seen": 1752944,
      "step": 175
    },
    {
      "epoch": 1.3572101790763431,
      "grad_norm": 2.2018630504608154,
      "learning_rate": 4.389373935885646e-05,
      "loss": 2.8897,
      "num_input_tokens_seen": 1805600,
      "step": 180
    },
    {
      "epoch": 1.3949104618284638,
      "grad_norm": 2.1807219982147217,
      "learning_rate": 4.356534373254316e-05,
      "loss": 2.7946,
      "num_input_tokens_seen": 1860688,
      "step": 185
    },
    {
      "epoch": 1.4326107445805842,
      "grad_norm": 2.2928526401519775,
      "learning_rate": 4.322964549552943e-05,
      "loss": 2.8149,
      "num_input_tokens_seen": 1913056,
      "step": 190
    },
    {
      "epoch": 1.4703110273327051,
      "grad_norm": 2.204533576965332,
      "learning_rate": 4.288677669350066e-05,
      "loss": 2.7811,
      "num_input_tokens_seen": 1961744,
      "step": 195
    },
    {
      "epoch": 1.5080113100848256,
      "grad_norm": 2.925762414932251,
      "learning_rate": 4.2536872192658036e-05,
      "loss": 2.8564,
      "num_input_tokens_seen": 2011248,
      "step": 200
    },
    {
      "epoch": 1.5457115928369463,
      "grad_norm": 2.398651599884033,
      "learning_rate": 4.218006962666934e-05,
      "loss": 2.7966,
      "num_input_tokens_seen": 2060640,
      "step": 205
    },
    {
      "epoch": 1.583411875589067,
      "grad_norm": 2.452263355255127,
      "learning_rate": 4.181650934253132e-05,
      "loss": 2.7674,
      "num_input_tokens_seen": 2113904,
      "step": 210
    },
    {
      "epoch": 1.6211121583411876,
      "grad_norm": 2.5911788940429688,
      "learning_rate": 4.144633434536467e-05,
      "loss": 2.7607,
      "num_input_tokens_seen": 2162608,
      "step": 215
    },
    {
      "epoch": 1.6588124410933083,
      "grad_norm": 2.648517608642578,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 2.8402,
      "num_input_tokens_seen": 2211616,
      "step": 220
    },
    {
      "epoch": 1.6965127238454287,
      "grad_norm": 2.6860735416412354,
      "learning_rate": 4.06867251845213e-05,
      "loss": 2.8019,
      "num_input_tokens_seen": 2269440,
      "step": 225
    },
    {
      "epoch": 1.7342130065975496,
      "grad_norm": 2.5891222953796387,
      "learning_rate": 4.0297589810356165e-05,
      "loss": 2.8311,
      "num_input_tokens_seen": 2321936,
      "step": 230
    },
    {
      "epoch": 1.77191328934967,
      "grad_norm": 2.695114850997925,
      "learning_rate": 3.9902437184657784e-05,
      "loss": 2.7626,
      "num_input_tokens_seen": 2376720,
      "step": 235
    },
    {
      "epoch": 1.8096135721017907,
      "grad_norm": 2.588127374649048,
      "learning_rate": 3.9501422739279956e-05,
      "loss": 2.8052,
      "num_input_tokens_seen": 2429952,
      "step": 240
    },
    {
      "epoch": 1.8473138548539114,
      "grad_norm": 2.1829710006713867,
      "learning_rate": 3.909470421180201e-05,
      "loss": 2.767,
      "num_input_tokens_seen": 2481488,
      "step": 245
    },
    {
      "epoch": 1.885014137606032,
      "grad_norm": 2.606924295425415,
      "learning_rate": 3.8682441583483314e-05,
      "loss": 2.7651,
      "num_input_tokens_seen": 2530768,
      "step": 250
    },
    {
      "epoch": 1.9227144203581528,
      "grad_norm": 2.3635494709014893,
      "learning_rate": 3.8264797016335205e-05,
      "loss": 2.8097,
      "num_input_tokens_seen": 2583088,
      "step": 255
    },
    {
      "epoch": 1.9604147031102732,
      "grad_norm": 2.560624361038208,
      "learning_rate": 3.7841934789335164e-05,
      "loss": 2.7269,
      "num_input_tokens_seen": 2631456,
      "step": 260
    },
    {
      "epoch": 1.998114985862394,
      "grad_norm": 2.7099437713623047,
      "learning_rate": 3.741402123380828e-05,
      "loss": 2.8586,
      "num_input_tokens_seen": 2684848,
      "step": 265
    },
    {
      "epoch": 2.0358152686145146,
      "grad_norm": 2.552143096923828,
      "learning_rate": 3.6981224668001424e-05,
      "loss": 2.6131,
      "num_input_tokens_seen": 2733408,
      "step": 270
    },
    {
      "epoch": 2.0735155513666355,
      "grad_norm": 2.9233176708221436,
      "learning_rate": 3.654371533087586e-05,
      "loss": 2.4891,
      "num_input_tokens_seen": 2786832,
      "step": 275
    },
    {
      "epoch": 2.111215834118756,
      "grad_norm": 2.7649636268615723,
      "learning_rate": 3.610166531514436e-05,
      "loss": 2.5783,
      "num_input_tokens_seen": 2828464,
      "step": 280
    },
    {
      "epoch": 2.1489161168708764,
      "grad_norm": 3.076122522354126,
      "learning_rate": 3.565524849957921e-05,
      "loss": 2.59,
      "num_input_tokens_seen": 2878192,
      "step": 285
    },
    {
      "epoch": 2.1866163996229973,
      "grad_norm": 3.242678642272949,
      "learning_rate": 3.520464048061758e-05,
      "loss": 2.5839,
      "num_input_tokens_seen": 2928304,
      "step": 290
    },
    {
      "epoch": 2.2243166823751177,
      "grad_norm": 3.139089584350586,
      "learning_rate": 3.47500185032913e-05,
      "loss": 2.567,
      "num_input_tokens_seen": 2978144,
      "step": 295
    },
    {
      "epoch": 2.2620169651272386,
      "grad_norm": 3.1967153549194336,
      "learning_rate": 3.4291561391508185e-05,
      "loss": 2.5694,
      "num_input_tokens_seen": 3028240,
      "step": 300
    },
    {
      "epoch": 2.299717247879359,
      "grad_norm": 3.1987555027008057,
      "learning_rate": 3.3829449477712324e-05,
      "loss": 2.4965,
      "num_input_tokens_seen": 3083328,
      "step": 305
    },
    {
      "epoch": 2.3374175306314795,
      "grad_norm": 3.4724180698394775,
      "learning_rate": 3.336386453195088e-05,
      "loss": 2.599,
      "num_input_tokens_seen": 3137072,
      "step": 310
    },
    {
      "epoch": 2.3751178133836004,
      "grad_norm": 3.381075143814087,
      "learning_rate": 3.2894989690375626e-05,
      "loss": 2.524,
      "num_input_tokens_seen": 3191136,
      "step": 315
    },
    {
      "epoch": 2.412818096135721,
      "grad_norm": 3.650747537612915,
      "learning_rate": 3.2423009383206876e-05,
      "loss": 2.5338,
      "num_input_tokens_seen": 3239952,
      "step": 320
    },
    {
      "epoch": 2.4505183788878417,
      "grad_norm": 3.3886971473693848,
      "learning_rate": 3.194810926218861e-05,
      "loss": 2.5096,
      "num_input_tokens_seen": 3291104,
      "step": 325
    },
    {
      "epoch": 2.488218661639962,
      "grad_norm": 3.415850877761841,
      "learning_rate": 3.147047612756302e-05,
      "loss": 2.473,
      "num_input_tokens_seen": 3340592,
      "step": 330
    },
    {
      "epoch": 2.525918944392083,
      "grad_norm": 3.513828754425049,
      "learning_rate": 3.099029785459328e-05,
      "loss": 2.5778,
      "num_input_tokens_seen": 3388224,
      "step": 335
    },
    {
      "epoch": 2.5636192271442035,
      "grad_norm": 3.49721360206604,
      "learning_rate": 3.0507763319663517e-05,
      "loss": 2.5684,
      "num_input_tokens_seen": 3440512,
      "step": 340
    },
    {
      "epoch": 2.6013195098963244,
      "grad_norm": 3.5137672424316406,
      "learning_rate": 3.002306232598497e-05,
      "loss": 2.4923,
      "num_input_tokens_seen": 3491744,
      "step": 345
    },
    {
      "epoch": 2.639019792648445,
      "grad_norm": 3.7216403484344482,
      "learning_rate": 2.9536385528937567e-05,
      "loss": 2.4633,
      "num_input_tokens_seen": 3542368,
      "step": 350
    },
    {
      "epoch": 2.6767200754005653,
      "grad_norm": 3.48529052734375,
      "learning_rate": 2.9047924361076345e-05,
      "loss": 2.5703,
      "num_input_tokens_seen": 3595360,
      "step": 355
    },
    {
      "epoch": 2.7144203581526862,
      "grad_norm": 3.4676520824432373,
      "learning_rate": 2.8557870956832132e-05,
      "loss": 2.4087,
      "num_input_tokens_seen": 3640912,
      "step": 360
    },
    {
      "epoch": 2.7521206409048067,
      "grad_norm": 4.316717147827148,
      "learning_rate": 2.8066418076936167e-05,
      "loss": 2.5007,
      "num_input_tokens_seen": 3690048,
      "step": 365
    },
    {
      "epoch": 2.7898209236569276,
      "grad_norm": 4.2354736328125,
      "learning_rate": 2.7573759032598366e-05,
      "loss": 2.5312,
      "num_input_tokens_seen": 3745104,
      "step": 370
    },
    {
      "epoch": 2.827521206409048,
      "grad_norm": 3.457280397415161,
      "learning_rate": 2.7080087609469062e-05,
      "loss": 2.5333,
      "num_input_tokens_seen": 3794160,
      "step": 375
    },
    {
      "epoch": 2.8652214891611685,
      "grad_norm": 3.417656183242798,
      "learning_rate": 2.6585597991414114e-05,
      "loss": 2.4185,
      "num_input_tokens_seen": 3846576,
      "step": 380
    },
    {
      "epoch": 2.9029217719132894,
      "grad_norm": 3.7148749828338623,
      "learning_rate": 2.6090484684133404e-05,
      "loss": 2.4913,
      "num_input_tokens_seen": 3891744,
      "step": 385
    },
    {
      "epoch": 2.9406220546654103,
      "grad_norm": 3.562427520751953,
      "learning_rate": 2.5594942438652688e-05,
      "loss": 2.5319,
      "num_input_tokens_seen": 3949568,
      "step": 390
    },
    {
      "epoch": 2.9783223374175307,
      "grad_norm": 4.2560505867004395,
      "learning_rate": 2.509916617471903e-05,
      "loss": 2.6441,
      "num_input_tokens_seen": 4002384,
      "step": 395
    },
    {
      "epoch": 3.016022620169651,
      "grad_norm": 3.349701166152954,
      "learning_rate": 2.46033509041298e-05,
      "loss": 2.3576,
      "num_input_tokens_seen": 4052688,
      "step": 400
    },
    {
      "epoch": 3.053722902921772,
      "grad_norm": 3.660886287689209,
      "learning_rate": 2.410769165402549e-05,
      "loss": 2.3032,
      "num_input_tokens_seen": 4107392,
      "step": 405
    },
    {
      "epoch": 3.0914231856738925,
      "grad_norm": 4.248249530792236,
      "learning_rate": 2.3612383390176503e-05,
      "loss": 2.2542,
      "num_input_tokens_seen": 4157984,
      "step": 410
    },
    {
      "epoch": 3.1291234684260134,
      "grad_norm": 4.340310096740723,
      "learning_rate": 2.3117620940294048e-05,
      "loss": 2.2882,
      "num_input_tokens_seen": 4213280,
      "step": 415
    },
    {
      "epoch": 3.166823751178134,
      "grad_norm": 4.137709617614746,
      "learning_rate": 2.2623598917395438e-05,
      "loss": 2.2314,
      "num_input_tokens_seen": 4265792,
      "step": 420
    },
    {
      "epoch": 3.2045240339302543,
      "grad_norm": 4.506406307220459,
      "learning_rate": 2.213051164325366e-05,
      "loss": 2.2679,
      "num_input_tokens_seen": 4310832,
      "step": 425
    },
    {
      "epoch": 3.242224316682375,
      "grad_norm": 4.44052791595459,
      "learning_rate": 2.1638553071961708e-05,
      "loss": 2.2521,
      "num_input_tokens_seen": 4353488,
      "step": 430
    },
    {
      "epoch": 3.2799245994344957,
      "grad_norm": 4.674520015716553,
      "learning_rate": 2.1147916713641367e-05,
      "loss": 2.2071,
      "num_input_tokens_seen": 4404384,
      "step": 435
    },
    {
      "epoch": 3.3176248821866166,
      "grad_norm": 4.979199409484863,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 2.2525,
      "num_input_tokens_seen": 4453232,
      "step": 440
    },
    {
      "epoch": 3.355325164938737,
      "grad_norm": 4.564790725708008,
      "learning_rate": 2.017138200005236e-05,
      "loss": 2.2431,
      "num_input_tokens_seen": 4508640,
      "step": 445
    },
    {
      "epoch": 3.3930254476908575,
      "grad_norm": 4.888641834259033,
      "learning_rate": 1.9685867761175584e-05,
      "loss": 2.3357,
      "num_input_tokens_seen": 4559360,
      "step": 450
    },
    {
      "epoch": 3.4307257304429783,
      "grad_norm": 4.425845623016357,
      "learning_rate": 1.9202443816963425e-05,
      "loss": 2.2875,
      "num_input_tokens_seen": 4609584,
      "step": 455
    },
    {
      "epoch": 3.468426013195099,
      "grad_norm": 5.38726282119751,
      "learning_rate": 1.872130032047302e-05,
      "loss": 2.2136,
      "num_input_tokens_seen": 4665472,
      "step": 460
    },
    {
      "epoch": 3.5061262959472197,
      "grad_norm": 4.473924160003662,
      "learning_rate": 1.824262652775568e-05,
      "loss": 2.294,
      "num_input_tokens_seen": 4719360,
      "step": 465
    },
    {
      "epoch": 3.54382657869934,
      "grad_norm": 5.171916484832764,
      "learning_rate": 1.7766610723413684e-05,
      "loss": 2.2146,
      "num_input_tokens_seen": 4771504,
      "step": 470
    },
    {
      "epoch": 3.581526861451461,
      "grad_norm": 5.492386817932129,
      "learning_rate": 1.7293440146539196e-05,
      "loss": 2.3166,
      "num_input_tokens_seen": 4820432,
      "step": 475
    },
    {
      "epoch": 3.6192271442035815,
      "grad_norm": 4.300539493560791,
      "learning_rate": 1.682330091706446e-05,
      "loss": 2.2775,
      "num_input_tokens_seen": 4877984,
      "step": 480
    },
    {
      "epoch": 3.6569274269557024,
      "grad_norm": 5.470084190368652,
      "learning_rate": 1.6356377962552238e-05,
      "loss": 2.2442,
      "num_input_tokens_seen": 4927712,
      "step": 485
    },
    {
      "epoch": 3.694627709707823,
      "grad_norm": 5.457830429077148,
      "learning_rate": 1.589285494545514e-05,
      "loss": 2.2499,
      "num_input_tokens_seen": 4979520,
      "step": 490
    },
    {
      "epoch": 3.7323279924599433,
      "grad_norm": 4.851473808288574,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 2.214,
      "num_input_tokens_seen": 5030720,
      "step": 495
    },
    {
      "epoch": 3.770028275212064,
      "grad_norm": 4.645096302032471,
      "learning_rate": 1.4976736614834664e-05,
      "loss": 2.1646,
      "num_input_tokens_seen": 5081376,
      "step": 500
    },
    {
      "epoch": 3.8077285579641846,
      "grad_norm": 5.5402512550354,
      "learning_rate": 1.4524501653137787e-05,
      "loss": 2.3151,
      "num_input_tokens_seen": 5127888,
      "step": 505
    },
    {
      "epoch": 3.8454288407163055,
      "grad_norm": 4.753649711608887,
      "learning_rate": 1.4076387190766017e-05,
      "loss": 2.2602,
      "num_input_tokens_seen": 5178720,
      "step": 510
    },
    {
      "epoch": 3.883129123468426,
      "grad_norm": 5.488243579864502,
      "learning_rate": 1.363256949191972e-05,
      "loss": 2.1839,
      "num_input_tokens_seen": 5227120,
      "step": 515
    },
    {
      "epoch": 3.9208294062205464,
      "grad_norm": 5.427800178527832,
      "learning_rate": 1.3193223130682936e-05,
      "loss": 2.2833,
      "num_input_tokens_seen": 5275760,
      "step": 520
    },
    {
      "epoch": 3.9585296889726673,
      "grad_norm": 4.901040077209473,
      "learning_rate": 1.2758520922355226e-05,
      "loss": 2.1802,
      "num_input_tokens_seen": 5319632,
      "step": 525
    },
    {
      "epoch": 3.9962299717247878,
      "grad_norm": 4.977085590362549,
      "learning_rate": 1.2328633855475429e-05,
      "loss": 2.2383,
      "num_input_tokens_seen": 5369936,
      "step": 530
    },
    {
      "epoch": 4.033930254476909,
      "grad_norm": 4.724318027496338,
      "learning_rate": 1.1903731024563966e-05,
      "loss": 2.007,
      "num_input_tokens_seen": 5421440,
      "step": 535
    },
    {
      "epoch": 4.071630537229029,
      "grad_norm": 5.148896217346191,
      "learning_rate": 1.148397956361007e-05,
      "loss": 2.0286,
      "num_input_tokens_seen": 5476736,
      "step": 540
    },
    {
      "epoch": 4.10933081998115,
      "grad_norm": 5.690558433532715,
      "learning_rate": 1.106954458033026e-05,
      "loss": 2.0398,
      "num_input_tokens_seen": 5531328,
      "step": 545
    },
    {
      "epoch": 4.147031102733271,
      "grad_norm": 5.595386505126953,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 2.1157,
      "num_input_tokens_seen": 5579216,
      "step": 550
    },
    {
      "epoch": 4.184731385485391,
      "grad_norm": 6.112159252166748,
      "learning_rate": 1.025727395745095e-05,
      "loss": 2.094,
      "num_input_tokens_seen": 5626208,
      "step": 555
    },
    {
      "epoch": 4.222431668237512,
      "grad_norm": 5.86374568939209,
      "learning_rate": 9.859757821558337e-06,
      "loss": 2.0531,
      "num_input_tokens_seen": 5679360,
      "step": 560
    },
    {
      "epoch": 4.260131950989632,
      "grad_norm": 5.2934699058532715,
      "learning_rate": 9.468197045077976e-06,
      "loss": 1.9652,
      "num_input_tokens_seen": 5724608,
      "step": 565
    },
    {
      "epoch": 4.297832233741753,
      "grad_norm": 6.302525043487549,
      "learning_rate": 9.082745647022797e-06,
      "loss": 2.0592,
      "num_input_tokens_seen": 5779904,
      "step": 570
    },
    {
      "epoch": 4.335532516493874,
      "grad_norm": 6.2651143074035645,
      "learning_rate": 8.703555243303835e-06,
      "loss": 2.0418,
      "num_input_tokens_seen": 5826880,
      "step": 575
    },
    {
      "epoch": 4.3732327992459945,
      "grad_norm": 6.225465774536133,
      "learning_rate": 8.330774987092712e-06,
      "loss": 1.991,
      "num_input_tokens_seen": 5875440,
      "step": 580
    },
    {
      "epoch": 4.410933081998115,
      "grad_norm": 5.812168121337891,
      "learning_rate": 7.96455151015272e-06,
      "loss": 2.0726,
      "num_input_tokens_seen": 5924960,
      "step": 585
    },
    {
      "epoch": 4.448633364750235,
      "grad_norm": 5.528653621673584,
      "learning_rate": 7.605028865161809e-06,
      "loss": 2.069,
      "num_input_tokens_seen": 5976416,
      "step": 590
    },
    {
      "epoch": 4.486333647502356,
      "grad_norm": 5.838290691375732,
      "learning_rate": 7.25234846904993e-06,
      "loss": 2.052,
      "num_input_tokens_seen": 6027088,
      "step": 595
    },
    {
      "epoch": 4.524033930254477,
      "grad_norm": 6.014201641082764,
      "learning_rate": 6.906649047373246e-06,
      "loss": 2.0651,
      "num_input_tokens_seen": 6080528,
      "step": 600
    },
    {
      "epoch": 4.561734213006598,
      "grad_norm": 6.840231895446777,
      "learning_rate": 6.568066579746901e-06,
      "loss": 2.0546,
      "num_input_tokens_seen": 6125904,
      "step": 605
    },
    {
      "epoch": 4.599434495758718,
      "grad_norm": 6.350096702575684,
      "learning_rate": 6.2367342463579475e-06,
      "loss": 2.081,
      "num_input_tokens_seen": 6173744,
      "step": 610
    },
    {
      "epoch": 4.6371347785108386,
      "grad_norm": 6.259740352630615,
      "learning_rate": 5.912782375579412e-06,
      "loss": 2.0395,
      "num_input_tokens_seen": 6222560,
      "step": 615
    },
    {
      "epoch": 4.674835061262959,
      "grad_norm": 6.564173221588135,
      "learning_rate": 5.596338392706077e-06,
      "loss": 2.0659,
      "num_input_tokens_seen": 6272544,
      "step": 620
    },
    {
      "epoch": 4.71253534401508,
      "grad_norm": 5.375278949737549,
      "learning_rate": 5.2875267698322325e-06,
      "loss": 2.0247,
      "num_input_tokens_seen": 6323024,
      "step": 625
    },
    {
      "epoch": 4.750235626767201,
      "grad_norm": 5.922281265258789,
      "learning_rate": 4.986468976890993e-06,
      "loss": 2.0485,
      "num_input_tokens_seen": 6374608,
      "step": 630
    },
    {
      "epoch": 4.787935909519321,
      "grad_norm": 5.62613582611084,
      "learning_rate": 4.693283433874565e-06,
      "loss": 2.0561,
      "num_input_tokens_seen": 6422208,
      "step": 635
    },
    {
      "epoch": 4.825636192271442,
      "grad_norm": 6.259154796600342,
      "learning_rate": 4.408085464254183e-06,
      "loss": 2.1047,
      "num_input_tokens_seen": 6468912,
      "step": 640
    },
    {
      "epoch": 4.863336475023563,
      "grad_norm": 5.757895469665527,
      "learning_rate": 4.130987249617993e-06,
      "loss": 2.0481,
      "num_input_tokens_seen": 6522848,
      "step": 645
    },
    {
      "epoch": 4.9010367577756835,
      "grad_norm": 5.949391841888428,
      "learning_rate": 3.8620977855448935e-06,
      "loss": 2.0637,
      "num_input_tokens_seen": 6578768,
      "step": 650
    },
    {
      "epoch": 4.938737040527804,
      "grad_norm": 6.397491931915283,
      "learning_rate": 3.601522838731461e-06,
      "loss": 2.0429,
      "num_input_tokens_seen": 6631936,
      "step": 655
    },
    {
      "epoch": 4.976437323279924,
      "grad_norm": 6.2142157554626465,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 2.1212,
      "num_input_tokens_seen": 6682992,
      "step": 660
    },
    {
      "epoch": 5.014137606032045,
      "grad_norm": 5.99893856048584,
      "learning_rate": 3.1057231709272077e-06,
      "loss": 2.0205,
      "num_input_tokens_seen": 6735056,
      "step": 665
    },
    {
      "epoch": 5.051837888784166,
      "grad_norm": 6.014187335968018,
      "learning_rate": 2.8706934709395892e-06,
      "loss": 1.9942,
      "num_input_tokens_seen": 6784224,
      "step": 670
    },
    {
      "epoch": 5.089538171536287,
      "grad_norm": 6.134748935699463,
      "learning_rate": 2.6443682535072177e-06,
      "loss": 1.868,
      "num_input_tokens_seen": 6831040,
      "step": 675
    },
    {
      "epoch": 5.127238454288407,
      "grad_norm": 5.91867733001709,
      "learning_rate": 2.4268365428344736e-06,
      "loss": 1.9132,
      "num_input_tokens_seen": 6883552,
      "step": 680
    },
    {
      "epoch": 5.1649387370405275,
      "grad_norm": 7.725922584533691,
      "learning_rate": 2.21818390423168e-06,
      "loss": 1.8698,
      "num_input_tokens_seen": 6928272,
      "step": 685
    },
    {
      "epoch": 5.202639019792649,
      "grad_norm": 5.97230863571167,
      "learning_rate": 2.0184924104583613e-06,
      "loss": 1.8974,
      "num_input_tokens_seen": 6972496,
      "step": 690
    },
    {
      "epoch": 5.240339302544769,
      "grad_norm": 6.879273414611816,
      "learning_rate": 1.8278406094401623e-06,
      "loss": 1.9096,
      "num_input_tokens_seen": 7018496,
      "step": 695
    },
    {
      "epoch": 5.27803958529689,
      "grad_norm": 6.802375793457031,
      "learning_rate": 1.6463034933723337e-06,
      "loss": 2.0098,
      "num_input_tokens_seen": 7066400,
      "step": 700
    },
    {
      "epoch": 5.31573986804901,
      "grad_norm": 6.246311187744141,
      "learning_rate": 1.4739524692218314e-06,
      "loss": 1.9554,
      "num_input_tokens_seen": 7113744,
      "step": 705
    },
    {
      "epoch": 5.353440150801131,
      "grad_norm": 6.855324745178223,
      "learning_rate": 1.3108553306396265e-06,
      "loss": 2.0233,
      "num_input_tokens_seen": 7166848,
      "step": 710
    },
    {
      "epoch": 5.391140433553252,
      "grad_norm": 7.124240398406982,
      "learning_rate": 1.1570762312943295e-06,
      "loss": 1.9629,
      "num_input_tokens_seen": 7220048,
      "step": 715
    },
    {
      "epoch": 5.4288407163053725,
      "grad_norm": 6.546064853668213,
      "learning_rate": 1.0126756596375686e-06,
      "loss": 1.9036,
      "num_input_tokens_seen": 7268064,
      "step": 720
    },
    {
      "epoch": 5.466540999057493,
      "grad_norm": 6.543118953704834,
      "learning_rate": 8.777104151110826e-07,
      "loss": 1.988,
      "num_input_tokens_seen": 7326512,
      "step": 725
    },
    {
      "epoch": 5.504241281809613,
      "grad_norm": 6.955906391143799,
      "learning_rate": 7.522335858048707e-07,
      "loss": 1.9844,
      "num_input_tokens_seen": 7382288,
      "step": 730
    },
    {
      "epoch": 5.541941564561734,
      "grad_norm": 6.836036682128906,
      "learning_rate": 6.362945275751736e-07,
      "loss": 1.9743,
      "num_input_tokens_seen": 7430544,
      "step": 735
    },
    {
      "epoch": 5.579641847313855,
      "grad_norm": 6.253538608551025,
      "learning_rate": 5.299388446305343e-07,
      "loss": 2.007,
      "num_input_tokens_seen": 7479488,
      "step": 740
    },
    {
      "epoch": 5.617342130065976,
      "grad_norm": 5.982280731201172,
      "learning_rate": 4.3320837159353813e-07,
      "loss": 1.9413,
      "num_input_tokens_seen": 7533536,
      "step": 745
    },
    {
      "epoch": 5.655042412818096,
      "grad_norm": 5.737644195556641,
      "learning_rate": 3.4614115704533767e-07,
      "loss": 1.902,
      "num_input_tokens_seen": 7589200,
      "step": 750
    },
    {
      "epoch": 5.6927426955702165,
      "grad_norm": 6.928066730499268,
      "learning_rate": 2.687714485593462e-07,
      "loss": 2.0091,
      "num_input_tokens_seen": 7638928,
      "step": 755
    },
    {
      "epoch": 5.730442978322337,
      "grad_norm": 6.864605903625488,
      "learning_rate": 2.011296792301165e-07,
      "loss": 2.0389,
      "num_input_tokens_seen": 7693680,
      "step": 760
    },
    {
      "epoch": 5.768143261074458,
      "grad_norm": 6.230181694030762,
      "learning_rate": 1.4324245570256633e-07,
      "loss": 2.0012,
      "num_input_tokens_seen": 7743904,
      "step": 765
    },
    {
      "epoch": 5.805843543826579,
      "grad_norm": 6.436938285827637,
      "learning_rate": 9.513254770636137e-08,
      "loss": 2.0127,
      "num_input_tokens_seen": 7790992,
      "step": 770
    },
    {
      "epoch": 5.843543826578699,
      "grad_norm": 6.2262349128723145,
      "learning_rate": 5.681887909952388e-08,
      "loss": 2.0237,
      "num_input_tokens_seen": 7843600,
      "step": 775
    },
    {
      "epoch": 5.88124410933082,
      "grad_norm": 6.8672027587890625,
      "learning_rate": 2.831652042480093e-08,
      "loss": 1.9273,
      "num_input_tokens_seen": 7893968,
      "step": 780
    },
    {
      "epoch": 5.918944392082941,
      "grad_norm": 6.41185188293457,
      "learning_rate": 9.636682981720158e-09,
      "loss": 1.9827,
      "num_input_tokens_seen": 7945856,
      "step": 785
    },
    {
      "epoch": 5.956644674835061,
      "grad_norm": 6.624245643615723,
      "learning_rate": 7.867144166728846e-10,
      "loss": 1.9642,
      "num_input_tokens_seen": 7998560,
      "step": 790
    },
    {
      "epoch": 5.971724787935909,
      "num_input_tokens_seen": 8017392,
      "step": 792,
      "total_flos": 3.6202835979167334e+17,
      "train_loss": 2.4793783682163317,
      "train_runtime": 5283.1732,
      "train_samples_per_second": 4.82,
      "train_steps_per_second": 0.15
    }
  ],
  "logging_steps": 5,
  "max_steps": 792,
  "num_input_tokens_seen": 8017392,
  "num_train_epochs": 6,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.6202835979167334e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}