{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.971724787935909,
  "eval_steps": 500,
  "global_step": 792,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03770028275212064,
      "grad_norm": 2.8582637310028076,
      "learning_rate": 4.9995083170283816e-05,
      "loss": 2.995,
      "num_input_tokens_seen": 57008,
      "step": 5
    },
    {
      "epoch": 0.07540056550424128,
      "grad_norm": 2.4749696254730225,
      "learning_rate": 4.998033461515242e-05,
      "loss": 2.7127,
      "num_input_tokens_seen": 117936,
      "step": 10
    },
    {
      "epoch": 0.11310084825636192,
      "grad_norm": 2.3738160133361816,
      "learning_rate": 4.9955760135896534e-05,
      "loss": 2.6302,
      "num_input_tokens_seen": 176288,
      "step": 15
    },
    {
      "epoch": 0.15080113100848255,
      "grad_norm": 2.2132370471954346,
      "learning_rate": 4.992136939879856e-05,
      "loss": 2.5818,
      "num_input_tokens_seen": 229712,
      "step": 20
    },
    {
      "epoch": 0.1885014137606032,
      "grad_norm": 2.444084882736206,
      "learning_rate": 4.9877175931330346e-05,
      "loss": 2.4913,
      "num_input_tokens_seen": 288784,
      "step": 25
    },
    {
      "epoch": 0.22620169651272384,
      "grad_norm": 2.190948009490967,
      "learning_rate": 4.982319711683221e-05,
      "loss": 2.4694,
      "num_input_tokens_seen": 347712,
      "step": 30
    },
    {
      "epoch": 0.2639019792648445,
      "grad_norm": 1.9827752113342285,
      "learning_rate": 4.975945418767529e-05,
      "loss": 2.4525,
      "num_input_tokens_seen": 404272,
      "step": 35
    },
    {
      "epoch": 0.3016022620169651,
      "grad_norm": 2.3744397163391113,
      "learning_rate": 4.968597221690986e-05,
      "loss": 2.4745,
      "num_input_tokens_seen": 461632,
      "step": 40
    },
    {
      "epoch": 0.3393025447690858,
      "grad_norm": 2.5817763805389404,
      "learning_rate": 4.96027801084029e-05,
      "loss": 2.4637,
      "num_input_tokens_seen": 518016,
      "step": 45
    },
    {
      "epoch": 0.3770028275212064,
      "grad_norm": 2.2218539714813232,
      "learning_rate": 4.950991058546893e-05,
      "loss": 2.5011,
      "num_input_tokens_seen": 579312,
      "step": 50
    },
    {
      "epoch": 0.41470311027332707,
      "grad_norm": 2.5139260292053223,
      "learning_rate": 4.940740017799833e-05,
      "loss": 2.4397,
      "num_input_tokens_seen": 636448,
      "step": 55
    },
    {
      "epoch": 0.4524033930254477,
      "grad_norm": 2.589820623397827,
      "learning_rate": 4.929528920808854e-05,
      "loss": 2.4109,
      "num_input_tokens_seen": 693504,
      "step": 60
    },
    {
      "epoch": 0.49010367577756836,
      "grad_norm": 2.8880276679992676,
      "learning_rate": 4.917362177418342e-05,
      "loss": 2.4228,
      "num_input_tokens_seen": 752160,
      "step": 65
    },
    {
      "epoch": 0.527803958529689,
      "grad_norm": 2.480609178543091,
      "learning_rate": 4.904244573372733e-05,
      "loss": 2.4391,
      "num_input_tokens_seen": 811296,
      "step": 70
    },
    {
      "epoch": 0.5655042412818096,
      "grad_norm": 2.6764094829559326,
      "learning_rate": 4.8901812684340564e-05,
      "loss": 2.4552,
      "num_input_tokens_seen": 867696,
      "step": 75
    },
    {
      "epoch": 0.6032045240339302,
      "grad_norm": 2.666821002960205,
      "learning_rate": 4.8751777943523634e-05,
      "loss": 2.3658,
      "num_input_tokens_seen": 924944,
      "step": 80
    },
    {
      "epoch": 0.6409048067860509,
      "grad_norm": 2.884999990463257,
      "learning_rate": 4.8592400526898314e-05,
      "loss": 2.359,
      "num_input_tokens_seen": 978768,
      "step": 85
    },
    {
      "epoch": 0.6786050895381716,
      "grad_norm": 2.58659291267395,
      "learning_rate": 4.842374312499405e-05,
      "loss": 2.4228,
      "num_input_tokens_seen": 1034544,
      "step": 90
    },
    {
      "epoch": 0.7163053722902922,
      "grad_norm": 2.6272594928741455,
      "learning_rate": 4.824587207858888e-05,
      "loss": 2.32,
      "num_input_tokens_seen": 1091776,
      "step": 95
    },
    {
      "epoch": 0.7540056550424128,
      "grad_norm": 2.6761183738708496,
      "learning_rate": 4.805885735261454e-05,
      "loss": 2.3393,
      "num_input_tokens_seen": 1153056,
      "step": 100
    },
    {
      "epoch": 0.7917059377945335,
      "grad_norm": 2.721721887588501,
      "learning_rate": 4.786277250863599e-05,
      "loss": 2.2595,
      "num_input_tokens_seen": 1211808,
      "step": 105
    },
    {
      "epoch": 0.8294062205466541,
      "grad_norm": 2.718644618988037,
      "learning_rate": 4.765769467591625e-05,
      "loss": 2.3171,
      "num_input_tokens_seen": 1273600,
      "step": 110
    },
    {
      "epoch": 0.8671065032987747,
      "grad_norm": 2.6614394187927246,
      "learning_rate": 4.744370452107789e-05,
      "loss": 2.2949,
      "num_input_tokens_seen": 1331392,
      "step": 115
    },
    {
      "epoch": 0.9048067860508954,
      "grad_norm": 2.851165771484375,
      "learning_rate": 4.722088621637309e-05,
      "loss": 2.3818,
      "num_input_tokens_seen": 1387664,
      "step": 120
    },
    {
      "epoch": 0.942507068803016,
      "grad_norm": 2.692765235900879,
      "learning_rate": 4.698932740657479e-05,
      "loss": 2.2494,
      "num_input_tokens_seen": 1445968,
      "step": 125
    },
    {
      "epoch": 0.9802073515551367,
      "grad_norm": 2.7365992069244385,
      "learning_rate": 4.6749119174501975e-05,
      "loss": 2.3593,
      "num_input_tokens_seen": 1497744,
      "step": 130
    },
    {
      "epoch": 1.0179076343072573,
      "grad_norm": 2.6355655193328857,
      "learning_rate": 4.6500356005192514e-05,
      "loss": 2.1892,
      "num_input_tokens_seen": 1549008,
      "step": 135
    },
    {
      "epoch": 1.055607917059378,
      "grad_norm": 2.8112802505493164,
      "learning_rate": 4.6243135748737864e-05,
      "loss": 2.1753,
      "num_input_tokens_seen": 1605152,
      "step": 140
    },
    {
      "epoch": 1.0933081998114986,
      "grad_norm": 3.1792027950286865,
      "learning_rate": 4.597755958179406e-05,
      "loss": 2.1186,
      "num_input_tokens_seen": 1663456,
      "step": 145
    },
    {
      "epoch": 1.1310084825636193,
      "grad_norm": 3.5244438648223877,
      "learning_rate": 4.570373196778427e-05,
      "loss": 2.0526,
      "num_input_tokens_seen": 1722320,
      "step": 150
    },
    {
      "epoch": 1.1687087653157398,
      "grad_norm": 4.0647783279418945,
      "learning_rate": 4.5421760615808474e-05,
      "loss": 2.1589,
      "num_input_tokens_seen": 1771664,
      "step": 155
    },
    {
      "epoch": 1.2064090480678604,
      "grad_norm": 3.4753596782684326,
      "learning_rate": 4.513175643827647e-05,
      "loss": 2.0505,
      "num_input_tokens_seen": 1829824,
      "step": 160
    },
    {
      "epoch": 1.244109330819981,
      "grad_norm": 3.573631525039673,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 2.0924,
      "num_input_tokens_seen": 1882384,
      "step": 165
    },
    {
      "epoch": 1.2818096135721018,
      "grad_norm": 3.39180588722229,
      "learning_rate": 4.4528109009727336e-05,
      "loss": 2.0184,
      "num_input_tokens_seen": 1939456,
      "step": 170
    },
    {
      "epoch": 1.3195098963242224,
      "grad_norm": 4.136682987213135,
      "learning_rate": 4.42147032012394e-05,
      "loss": 2.1524,
      "num_input_tokens_seen": 1996320,
      "step": 175
    },
    {
      "epoch": 1.3572101790763431,
      "grad_norm": 3.9565956592559814,
      "learning_rate": 4.389373935885646e-05,
      "loss": 2.1253,
      "num_input_tokens_seen": 2056560,
      "step": 180
    },
    {
      "epoch": 1.3949104618284638,
      "grad_norm": 3.7363975048065186,
      "learning_rate": 4.356534373254316e-05,
      "loss": 2.0752,
      "num_input_tokens_seen": 2118496,
      "step": 185
    },
    {
      "epoch": 1.4326107445805842,
      "grad_norm": 4.311358451843262,
      "learning_rate": 4.322964549552943e-05,
      "loss": 2.0574,
      "num_input_tokens_seen": 2178352,
      "step": 190
    },
    {
      "epoch": 1.4703110273327051,
      "grad_norm": 3.684499502182007,
      "learning_rate": 4.288677669350066e-05,
      "loss": 2.0481,
      "num_input_tokens_seen": 2233440,
      "step": 195
    },
    {
      "epoch": 1.5080113100848256,
      "grad_norm": 4.71754264831543,
      "learning_rate": 4.2536872192658036e-05,
      "loss": 2.0859,
      "num_input_tokens_seen": 2289648,
      "step": 200
    },
    {
      "epoch": 1.5457115928369463,
      "grad_norm": 4.160897731781006,
      "learning_rate": 4.218006962666934e-05,
      "loss": 2.0877,
      "num_input_tokens_seen": 2346224,
      "step": 205
    },
    {
      "epoch": 1.583411875589067,
      "grad_norm": 3.895419120788574,
      "learning_rate": 4.181650934253132e-05,
      "loss": 2.0488,
      "num_input_tokens_seen": 2407392,
      "step": 210
    },
    {
      "epoch": 1.6211121583411876,
      "grad_norm": 4.271753787994385,
      "learning_rate": 4.144633434536467e-05,
      "loss": 2.0074,
      "num_input_tokens_seen": 2462400,
      "step": 215
    },
    {
      "epoch": 1.6588124410933083,
      "grad_norm": 4.415102958679199,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 2.0844,
      "num_input_tokens_seen": 2518304,
      "step": 220
    },
    {
      "epoch": 1.6965127238454287,
      "grad_norm": 4.525811672210693,
      "learning_rate": 4.06867251845213e-05,
      "loss": 2.0943,
      "num_input_tokens_seen": 2583168,
      "step": 225
    },
    {
      "epoch": 1.7342130065975496,
      "grad_norm": 4.545309543609619,
      "learning_rate": 4.0297589810356165e-05,
      "loss": 2.0858,
      "num_input_tokens_seen": 2643552,
      "step": 230
    },
    {
      "epoch": 1.77191328934967,
      "grad_norm": 4.468845844268799,
      "learning_rate": 3.9902437184657784e-05,
      "loss": 2.044,
      "num_input_tokens_seen": 2705600,
      "step": 235
    },
    {
      "epoch": 1.8096135721017907,
      "grad_norm": 4.4386138916015625,
      "learning_rate": 3.9501422739279956e-05,
      "loss": 2.1083,
      "num_input_tokens_seen": 2765856,
      "step": 240
    },
    {
      "epoch": 1.8473138548539114,
      "grad_norm": 3.7564873695373535,
      "learning_rate": 3.909470421180201e-05,
      "loss": 2.0431,
      "num_input_tokens_seen": 2824192,
      "step": 245
    },
    {
      "epoch": 1.885014137606032,
      "grad_norm": 4.3798418045043945,
      "learning_rate": 3.8682441583483314e-05,
      "loss": 2.0978,
      "num_input_tokens_seen": 2880208,
      "step": 250
    },
    {
      "epoch": 1.9227144203581528,
      "grad_norm": 4.044529914855957,
      "learning_rate": 3.8264797016335205e-05,
      "loss": 2.096,
      "num_input_tokens_seen": 2939856,
      "step": 255
    },
    {
      "epoch": 1.9604147031102732,
      "grad_norm": 4.19630241394043,
      "learning_rate": 3.7841934789335164e-05,
      "loss": 1.9965,
      "num_input_tokens_seen": 2995552,
      "step": 260
    },
    {
      "epoch": 1.998114985862394,
      "grad_norm": 4.908398151397705,
      "learning_rate": 3.741402123380828e-05,
      "loss": 2.1569,
      "num_input_tokens_seen": 3055504,
      "step": 265
    },
    {
      "epoch": 2.0358152686145146,
      "grad_norm": 4.20371150970459,
      "learning_rate": 3.6981224668001424e-05,
      "loss": 1.7904,
      "num_input_tokens_seen": 3110576,
      "step": 270
    },
    {
      "epoch": 2.0735155513666355,
      "grad_norm": 5.414203643798828,
      "learning_rate": 3.654371533087586e-05,
      "loss": 1.7416,
      "num_input_tokens_seen": 3171440,
      "step": 275
    },
    {
      "epoch": 2.111215834118756,
      "grad_norm": 5.226822853088379,
      "learning_rate": 3.610166531514436e-05,
      "loss": 1.7513,
      "num_input_tokens_seen": 3219792,
      "step": 280
    },
    {
      "epoch": 2.1489161168708764,
      "grad_norm": 5.410066604614258,
      "learning_rate": 3.565524849957921e-05,
      "loss": 1.7565,
      "num_input_tokens_seen": 3275792,
      "step": 285
    },
    {
      "epoch": 2.1866163996229973,
      "grad_norm": 5.878467082977295,
      "learning_rate": 3.520464048061758e-05,
      "loss": 1.7417,
      "num_input_tokens_seen": 3333136,
      "step": 290
    },
    {
      "epoch": 2.2243166823751177,
      "grad_norm": 5.918631553649902,
      "learning_rate": 3.47500185032913e-05,
      "loss": 1.7726,
      "num_input_tokens_seen": 3390080,
      "step": 295
    },
    {
      "epoch": 2.2620169651272386,
      "grad_norm": 6.060715675354004,
      "learning_rate": 3.4291561391508185e-05,
      "loss": 1.7763,
      "num_input_tokens_seen": 3446208,
      "step": 300
    },
    {
      "epoch": 2.299717247879359,
      "grad_norm": 6.188582897186279,
      "learning_rate": 3.3829449477712324e-05,
      "loss": 1.7327,
      "num_input_tokens_seen": 3508240,
      "step": 305
    },
    {
      "epoch": 2.3374175306314795,
      "grad_norm": 6.416080951690674,
      "learning_rate": 3.336386453195088e-05,
      "loss": 1.7441,
      "num_input_tokens_seen": 3569856,
      "step": 310
    },
    {
      "epoch": 2.3751178133836004,
      "grad_norm": 6.098644733428955,
      "learning_rate": 3.2894989690375626e-05,
      "loss": 1.7355,
      "num_input_tokens_seen": 3631984,
      "step": 315
    },
    {
      "epoch": 2.412818096135721,
      "grad_norm": 7.12858772277832,
      "learning_rate": 3.2423009383206876e-05,
      "loss": 1.7182,
      "num_input_tokens_seen": 3688192,
      "step": 320
    },
    {
      "epoch": 2.4505183788878417,
      "grad_norm": 6.329550266265869,
      "learning_rate": 3.194810926218861e-05,
      "loss": 1.7332,
      "num_input_tokens_seen": 3746720,
      "step": 325
    },
    {
      "epoch": 2.488218661639962,
      "grad_norm": 6.534445762634277,
      "learning_rate": 3.147047612756302e-05,
      "loss": 1.6908,
      "num_input_tokens_seen": 3802432,
      "step": 330
    },
    {
      "epoch": 2.525918944392083,
      "grad_norm": 6.304942607879639,
      "learning_rate": 3.099029785459328e-05,
      "loss": 1.7756,
      "num_input_tokens_seen": 3856800,
      "step": 335
    },
    {
      "epoch": 2.5636192271442035,
      "grad_norm": 6.389725208282471,
      "learning_rate": 3.0507763319663517e-05,
      "loss": 1.7737,
      "num_input_tokens_seen": 3916320,
      "step": 340
    },
    {
      "epoch": 2.6013195098963244,
      "grad_norm": 6.7536940574646,
      "learning_rate": 3.002306232598497e-05,
      "loss": 1.7446,
      "num_input_tokens_seen": 3974864,
      "step": 345
    },
    {
      "epoch": 2.639019792648445,
      "grad_norm": 6.574880599975586,
      "learning_rate": 2.9536385528937567e-05,
      "loss": 1.7283,
      "num_input_tokens_seen": 4032656,
      "step": 350
    },
    {
      "epoch": 2.6767200754005653,
      "grad_norm": 5.760074138641357,
      "learning_rate": 2.9047924361076345e-05,
      "loss": 1.7573,
      "num_input_tokens_seen": 4092800,
      "step": 355
    },
    {
      "epoch": 2.7144203581526862,
      "grad_norm": 6.087067604064941,
      "learning_rate": 2.8557870956832132e-05,
      "loss": 1.6758,
      "num_input_tokens_seen": 4144592,
      "step": 360
    },
    {
      "epoch": 2.7521206409048067,
      "grad_norm": 7.485050201416016,
      "learning_rate": 2.8066418076936167e-05,
      "loss": 1.7087,
      "num_input_tokens_seen": 4200000,
      "step": 365
    },
    {
      "epoch": 2.7898209236569276,
      "grad_norm": 7.791069984436035,
      "learning_rate": 2.7573759032598366e-05,
      "loss": 1.7527,
      "num_input_tokens_seen": 4262288,
      "step": 370
    },
    {
      "epoch": 2.827521206409048,
      "grad_norm": 6.225643634796143,
      "learning_rate": 2.7080087609469062e-05,
      "loss": 1.786,
      "num_input_tokens_seen": 4317040,
      "step": 375
    },
    {
      "epoch": 2.8652214891611685,
      "grad_norm": 5.990758895874023,
      "learning_rate": 2.6585597991414114e-05,
      "loss": 1.677,
      "num_input_tokens_seen": 4376576,
      "step": 380
    },
    {
      "epoch": 2.9029217719132894,
      "grad_norm": 6.876400470733643,
      "learning_rate": 2.6090484684133404e-05,
      "loss": 1.7027,
      "num_input_tokens_seen": 4429024,
      "step": 385
    },
    {
      "epoch": 2.9406220546654103,
      "grad_norm": 6.724515914916992,
      "learning_rate": 2.5594942438652688e-05,
      "loss": 1.7717,
      "num_input_tokens_seen": 4494400,
      "step": 390
    },
    {
      "epoch": 2.9783223374175307,
      "grad_norm": 7.128418922424316,
      "learning_rate": 2.509916617471903e-05,
      "loss": 1.8441,
      "num_input_tokens_seen": 4554576,
      "step": 395
    },
    {
      "epoch": 3.016022620169651,
      "grad_norm": 5.73885440826416,
      "learning_rate": 2.46033509041298e-05,
      "loss": 1.604,
      "num_input_tokens_seen": 4612352,
      "step": 400
    },
    {
      "epoch": 3.053722902921772,
      "grad_norm": 6.931432247161865,
      "learning_rate": 2.410769165402549e-05,
      "loss": 1.4357,
      "num_input_tokens_seen": 4674960,
      "step": 405
    },
    {
      "epoch": 3.0914231856738925,
      "grad_norm": 10.487885475158691,
      "learning_rate": 2.3612383390176503e-05,
      "loss": 1.4401,
      "num_input_tokens_seen": 4732736,
      "step": 410
    },
    {
      "epoch": 3.1291234684260134,
      "grad_norm": 7.611717700958252,
      "learning_rate": 2.3117620940294048e-05,
      "loss": 1.4433,
      "num_input_tokens_seen": 4795856,
      "step": 415
    },
    {
      "epoch": 3.166823751178134,
      "grad_norm": 7.033103942871094,
      "learning_rate": 2.2623598917395438e-05,
      "loss": 1.4739,
      "num_input_tokens_seen": 4854976,
      "step": 420
    },
    {
      "epoch": 3.2045240339302543,
      "grad_norm": 7.760231971740723,
      "learning_rate": 2.213051164325366e-05,
      "loss": 1.4327,
      "num_input_tokens_seen": 4906032,
      "step": 425
    },
    {
      "epoch": 3.242224316682375,
      "grad_norm": 9.03538990020752,
      "learning_rate": 2.1638553071961708e-05,
      "loss": 1.4281,
      "num_input_tokens_seen": 4954352,
      "step": 430
    },
    {
      "epoch": 3.2799245994344957,
      "grad_norm": 8.354065895080566,
      "learning_rate": 2.1147916713641367e-05,
      "loss": 1.4006,
      "num_input_tokens_seen": 5012208,
      "step": 435
    },
    {
      "epoch": 3.3176248821866166,
      "grad_norm": 8.750741004943848,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 1.4065,
      "num_input_tokens_seen": 5067616,
      "step": 440
    },
    {
      "epoch": 3.355325164938737,
      "grad_norm": 8.950455665588379,
      "learning_rate": 2.017138200005236e-05,
      "loss": 1.4158,
      "num_input_tokens_seen": 5131488,
      "step": 445
    },
    {
      "epoch": 3.3930254476908575,
      "grad_norm": 9.109482765197754,
      "learning_rate": 1.9685867761175584e-05,
      "loss": 1.5139,
      "num_input_tokens_seen": 5189216,
      "step": 450
    },
    {
      "epoch": 3.4307257304429783,
      "grad_norm": 8.112481117248535,
      "learning_rate": 1.9202443816963425e-05,
      "loss": 1.4692,
      "num_input_tokens_seen": 5246032,
      "step": 455
    },
    {
      "epoch": 3.468426013195099,
      "grad_norm": 9.428106307983398,
      "learning_rate": 1.872130032047302e-05,
      "loss": 1.3873,
      "num_input_tokens_seen": 5309648,
      "step": 460
    },
    {
      "epoch": 3.5061262959472197,
      "grad_norm": 8.657079696655273,
      "learning_rate": 1.824262652775568e-05,
      "loss": 1.5167,
      "num_input_tokens_seen": 5371392,
      "step": 465
    },
    {
      "epoch": 3.54382657869934,
      "grad_norm": 8.85914134979248,
      "learning_rate": 1.7766610723413684e-05,
      "loss": 1.4093,
      "num_input_tokens_seen": 5430528,
      "step": 470
    },
    {
      "epoch": 3.581526861451461,
      "grad_norm": 8.869074821472168,
      "learning_rate": 1.7293440146539196e-05,
      "loss": 1.497,
      "num_input_tokens_seen": 5486048,
      "step": 475
    },
    {
      "epoch": 3.6192271442035815,
      "grad_norm": 7.58018159866333,
      "learning_rate": 1.682330091706446e-05,
      "loss": 1.5024,
      "num_input_tokens_seen": 5551520,
      "step": 480
    },
    {
      "epoch": 3.6569274269557024,
      "grad_norm": 9.567265510559082,
      "learning_rate": 1.6356377962552238e-05,
      "loss": 1.3976,
      "num_input_tokens_seen": 5607968,
      "step": 485
    },
    {
      "epoch": 3.694627709707823,
      "grad_norm": 10.099461555480957,
      "learning_rate": 1.589285494545514e-05,
      "loss": 1.4343,
      "num_input_tokens_seen": 5666240,
      "step": 490
    },
    {
      "epoch": 3.7323279924599433,
      "grad_norm": 8.967694282531738,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 1.4647,
      "num_input_tokens_seen": 5724272,
      "step": 495
    },
    {
      "epoch": 3.770028275212064,
      "grad_norm": 8.292475700378418,
      "learning_rate": 1.4976736614834664e-05,
      "loss": 1.3874,
      "num_input_tokens_seen": 5782144,
      "step": 500
    },
    {
      "epoch": 3.8077285579641846,
      "grad_norm": 9.741270065307617,
      "learning_rate": 1.4524501653137787e-05,
      "loss": 1.4405,
      "num_input_tokens_seen": 5835968,
      "step": 505
    },
    {
      "epoch": 3.8454288407163055,
      "grad_norm": 9.290925025939941,
      "learning_rate": 1.4076387190766017e-05,
      "loss": 1.4689,
      "num_input_tokens_seen": 5894032,
      "step": 510
    },
    {
      "epoch": 3.883129123468426,
      "grad_norm": 9.894857406616211,
      "learning_rate": 1.363256949191972e-05,
      "loss": 1.3515,
      "num_input_tokens_seen": 5948928,
      "step": 515
    },
    {
      "epoch": 3.9208294062205464,
      "grad_norm": 9.370462417602539,
      "learning_rate": 1.3193223130682936e-05,
      "loss": 1.4372,
      "num_input_tokens_seen": 6004016,
      "step": 520
    },
    {
      "epoch": 3.9585296889726673,
      "grad_norm": 8.76706314086914,
      "learning_rate": 1.2758520922355226e-05,
      "loss": 1.4051,
      "num_input_tokens_seen": 6054192,
      "step": 525
    },
    {
      "epoch": 3.9962299717247878,
      "grad_norm": 9.206894874572754,
      "learning_rate": 1.2328633855475429e-05,
      "loss": 1.4304,
      "num_input_tokens_seen": 6111840,
      "step": 530
    },
    {
      "epoch": 4.033930254476909,
      "grad_norm": 8.561943054199219,
      "learning_rate": 1.1903731024563966e-05,
      "loss": 1.1746,
      "num_input_tokens_seen": 6169584,
      "step": 535
    },
    {
      "epoch": 4.071630537229029,
      "grad_norm": 9.77757453918457,
      "learning_rate": 1.148397956361007e-05,
      "loss": 1.172,
      "num_input_tokens_seen": 6232800,
      "step": 540
    },
    {
      "epoch": 4.10933081998115,
      "grad_norm": 11.026362419128418,
      "learning_rate": 1.106954458033026e-05,
      "loss": 1.1709,
      "num_input_tokens_seen": 6294496,
      "step": 545
    },
    {
      "epoch": 4.147031102733271,
      "grad_norm": 9.075161933898926,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 1.247,
      "num_input_tokens_seen": 6348976,
      "step": 550
    },
    {
      "epoch": 4.184731385485391,
      "grad_norm": 10.577301025390625,
      "learning_rate": 1.025727395745095e-05,
      "loss": 1.2236,
      "num_input_tokens_seen": 6402624,
      "step": 555
    },
    {
      "epoch": 4.222431668237512,
      "grad_norm": 10.635506629943848,
      "learning_rate": 9.859757821558337e-06,
      "loss": 1.1751,
      "num_input_tokens_seen": 6462992,
      "step": 560
    },
    {
      "epoch": 4.260131950989632,
      "grad_norm": 10.660634994506836,
      "learning_rate": 9.468197045077976e-06,
      "loss": 1.1056,
      "num_input_tokens_seen": 6514944,
      "step": 565
    },
    {
      "epoch": 4.297832233741753,
      "grad_norm": 11.758869171142578,
      "learning_rate": 9.082745647022797e-06,
      "loss": 1.1612,
      "num_input_tokens_seen": 6577440,
      "step": 570
    },
    {
      "epoch": 4.335532516493874,
      "grad_norm": 10.878021240234375,
      "learning_rate": 8.703555243303835e-06,
      "loss": 1.146,
      "num_input_tokens_seen": 6631296,
      "step": 575
    },
    {
      "epoch": 4.3732327992459945,
      "grad_norm": 11.381221771240234,
      "learning_rate": 8.330774987092712e-06,
      "loss": 1.1462,
      "num_input_tokens_seen": 6686784,
      "step": 580
    },
    {
      "epoch": 4.410933081998115,
      "grad_norm": 10.535365104675293,
      "learning_rate": 7.96455151015272e-06,
      "loss": 1.2129,
      "num_input_tokens_seen": 6743712,
      "step": 585
    },
    {
      "epoch": 4.448633364750235,
      "grad_norm": 10.050437927246094,
      "learning_rate": 7.605028865161809e-06,
      "loss": 1.2495,
      "num_input_tokens_seen": 6802416,
      "step": 590
    },
    {
      "epoch": 4.486333647502356,
      "grad_norm": 10.320448875427246,
      "learning_rate": 7.25234846904993e-06,
      "loss": 1.1615,
      "num_input_tokens_seen": 6860880,
      "step": 595
    },
    {
      "epoch": 4.524033930254477,
      "grad_norm": 10.299652099609375,
      "learning_rate": 6.906649047373246e-06,
      "loss": 1.2619,
      "num_input_tokens_seen": 6921424,
      "step": 600
    },
    {
      "epoch": 4.561734213006598,
      "grad_norm": 11.873618125915527,
      "learning_rate": 6.568066579746901e-06,
      "loss": 1.1949,
      "num_input_tokens_seen": 6973504,
      "step": 605
    },
    {
      "epoch": 4.599434495758718,
      "grad_norm": 11.657974243164062,
      "learning_rate": 6.2367342463579475e-06,
      "loss": 1.1891,
      "num_input_tokens_seen": 7027984,
      "step": 610
    },
    {
      "epoch": 4.6371347785108386,
      "grad_norm": 10.802141189575195,
      "learning_rate": 5.912782375579412e-06,
      "loss": 1.1821,
      "num_input_tokens_seen": 7082720,
      "step": 615
    },
    {
      "epoch": 4.674835061262959,
      "grad_norm": 11.281312942504883,
      "learning_rate": 5.596338392706077e-06,
      "loss": 1.2011,
      "num_input_tokens_seen": 7139760,
      "step": 620
    },
    {
      "epoch": 4.71253534401508,
      "grad_norm": 9.546773910522461,
      "learning_rate": 5.2875267698322325e-06,
      "loss": 1.2479,
      "num_input_tokens_seen": 7196832,
      "step": 625
    },
    {
      "epoch": 4.750235626767201,
      "grad_norm": 10.804587364196777,
      "learning_rate": 4.986468976890993e-06,
      "loss": 1.1982,
      "num_input_tokens_seen": 7255552,
      "step": 630
    },
    {
      "epoch": 4.787935909519321,
      "grad_norm": 11.141725540161133,
      "learning_rate": 4.693283433874565e-06,
      "loss": 1.1955,
      "num_input_tokens_seen": 7310144,
      "step": 635
    },
    {
      "epoch": 4.825636192271442,
      "grad_norm": 11.219179153442383,
      "learning_rate": 4.408085464254183e-06,
      "loss": 1.2429,
      "num_input_tokens_seen": 7363600,
      "step": 640
    },
    {
      "epoch": 4.863336475023563,
      "grad_norm": 9.999290466308594,
      "learning_rate": 4.130987249617993e-06,
      "loss": 1.2139,
      "num_input_tokens_seen": 7425056,
      "step": 645
    },
    {
      "epoch": 4.9010367577756835,
      "grad_norm": 10.23133373260498,
      "learning_rate": 3.8620977855448935e-06,
      "loss": 1.2122,
      "num_input_tokens_seen": 7488432,
      "step": 650
    },
    {
      "epoch": 4.938737040527804,
      "grad_norm": 10.697420120239258,
      "learning_rate": 3.601522838731461e-06,
      "loss": 1.1634,
      "num_input_tokens_seen": 7549248,
      "step": 655
    },
    {
      "epoch": 4.976437323279924,
      "grad_norm": 11.005745887756348,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 1.2289,
      "num_input_tokens_seen": 7607168,
      "step": 660
    },
    {
      "epoch": 5.014137606032045,
      "grad_norm": 10.609835624694824,
      "learning_rate": 3.1057231709272077e-06,
      "loss": 1.1895,
      "num_input_tokens_seen": 7666560,
      "step": 665
    },
    {
      "epoch": 5.051837888784166,
      "grad_norm": 10.383188247680664,
      "learning_rate": 2.8706934709395892e-06,
      "loss": 1.0644,
      "num_input_tokens_seen": 7722432,
      "step": 670
    },
    {
      "epoch": 5.089538171536287,
      "grad_norm": 11.61797046661377,
      "learning_rate": 2.6443682535072177e-06,
      "loss": 0.9969,
      "num_input_tokens_seen": 7775296,
      "step": 675
    },
    {
      "epoch": 5.127238454288407,
      "grad_norm": 10.594493865966797,
      "learning_rate": 2.4268365428344736e-06,
      "loss": 1.0552,
      "num_input_tokens_seen": 7834752,
      "step": 680
    },
    {
      "epoch": 5.1649387370405275,
      "grad_norm": 13.08285140991211,
      "learning_rate": 2.21818390423168e-06,
      "loss": 0.9982,
      "num_input_tokens_seen": 7885680,
      "step": 685
    },
    {
      "epoch": 5.202639019792649,
      "grad_norm": 11.088661193847656,
      "learning_rate": 2.0184924104583613e-06,
      "loss": 1.041,
      "num_input_tokens_seen": 7936032,
      "step": 690
    },
    {
      "epoch": 5.240339302544769,
      "grad_norm": 11.851099967956543,
      "learning_rate": 1.8278406094401623e-06,
      "loss": 1.0186,
      "num_input_tokens_seen": 7988256,
      "step": 695
    },
    {
      "epoch": 5.27803958529689,
      "grad_norm": 11.964996337890625,
      "learning_rate": 1.6463034933723337e-06,
      "loss": 1.0888,
      "num_input_tokens_seen": 8043264,
      "step": 700
    },
    {
      "epoch": 5.31573986804901,
      "grad_norm": 10.263463973999023,
      "learning_rate": 1.4739524692218314e-06,
      "loss": 1.0506,
      "num_input_tokens_seen": 8096480,
      "step": 705
    },
    {
      "epoch": 5.353440150801131,
      "grad_norm": 10.614130020141602,
      "learning_rate": 1.3108553306396265e-06,
      "loss": 1.1435,
      "num_input_tokens_seen": 8156224,
      "step": 710
    },
    {
      "epoch": 5.391140433553252,
      "grad_norm": 12.183756828308105,
      "learning_rate": 1.1570762312943295e-06,
      "loss": 1.0799,
      "num_input_tokens_seen": 8216272,
      "step": 715
    },
    {
      "epoch": 5.4288407163053725,
      "grad_norm": 11.565719604492188,
      "learning_rate": 1.0126756596375686e-06,
      "loss": 1.0618,
      "num_input_tokens_seen": 8270896,
      "step": 720
    },
    {
      "epoch": 5.466540999057493,
      "grad_norm": 10.969958305358887,
      "learning_rate": 8.777104151110826e-07,
      "loss": 1.0932,
      "num_input_tokens_seen": 8337264,
      "step": 725
    },
    {
      "epoch": 5.504241281809613,
      "grad_norm": 12.142751693725586,
      "learning_rate": 7.522335858048707e-07,
      "loss": 1.064,
      "num_input_tokens_seen": 8401616,
      "step": 730
    },
    {
      "epoch": 5.541941564561734,
      "grad_norm": 12.45744800567627,
      "learning_rate": 6.362945275751736e-07,
      "loss": 1.0762,
      "num_input_tokens_seen": 8456976,
      "step": 735
    },
    {
      "epoch": 5.579641847313855,
      "grad_norm": 12.17725944519043,
      "learning_rate": 5.299388446305343e-07,
      "loss": 1.061,
      "num_input_tokens_seen": 8513616,
      "step": 740
    },
    {
      "epoch": 5.617342130065976,
      "grad_norm": 10.246589660644531,
      "learning_rate": 4.3320837159353813e-07,
      "loss": 1.0795,
      "num_input_tokens_seen": 8575136,
      "step": 745
    },
    {
      "epoch": 5.655042412818096,
      "grad_norm": 10.445602416992188,
      "learning_rate": 3.4614115704533767e-07,
      "loss": 1.0228,
      "num_input_tokens_seen": 8637744,
      "step": 750
    },
    {
      "epoch": 5.6927426955702165,
      "grad_norm": 11.284125328063965,
      "learning_rate": 2.687714485593462e-07,
      "loss": 1.0506,
      "num_input_tokens_seen": 8693904,
      "step": 755
    },
    {
      "epoch": 5.730442978322337,
      "grad_norm": 12.569458961486816,
      "learning_rate": 2.011296792301165e-07,
      "loss": 1.1141,
      "num_input_tokens_seen": 8756224,
      "step": 760
    },
    {
      "epoch": 5.768143261074458,
      "grad_norm": 10.610177040100098,
      "learning_rate": 1.4324245570256633e-07,
      "loss": 1.1412,
      "num_input_tokens_seen": 8813120,
      "step": 765
    },
    {
      "epoch": 5.805843543826579,
      "grad_norm": 11.221631050109863,
      "learning_rate": 9.513254770636137e-08,
      "loss": 1.0902,
      "num_input_tokens_seen": 8866160,
      "step": 770
    },
    {
      "epoch": 5.843543826578699,
      "grad_norm": 11.644889831542969,
      "learning_rate": 5.681887909952388e-08,
      "loss": 1.1053,
      "num_input_tokens_seen": 8925888,
      "step": 775
    },
    {
      "epoch": 5.88124410933082,
      "grad_norm": 12.60010814666748,
      "learning_rate": 2.831652042480093e-08,
      "loss": 1.0414,
      "num_input_tokens_seen": 8983424,
      "step": 780
    },
    {
      "epoch": 5.918944392082941,
      "grad_norm": 11.038966178894043,
      "learning_rate": 9.636682981720158e-09,
      "loss": 1.1207,
      "num_input_tokens_seen": 9042928,
      "step": 785
    },
    {
      "epoch": 5.956644674835061,
      "grad_norm": 11.58209228515625,
      "learning_rate": 7.867144166728846e-10,
      "loss": 1.0952,
      "num_input_tokens_seen": 9103184,
      "step": 790
    },
    {
      "epoch": 5.971724787935909,
      "num_input_tokens_seen": 9124912,
      "step": 792,
      "total_flos": 3.904530641814815e+17,
      "train_loss": 1.665087498799719,
      "train_runtime": 5809.5562,
      "train_samples_per_second": 4.383,
      "train_steps_per_second": 0.136
    }
  ],
  "logging_steps": 5,
  "max_steps": 792,
  "num_input_tokens_seen": 9124912,
  "num_train_epochs": 6,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.904530641814815e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}