|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 9.99964924587864, |
|
"global_step": 14250, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 4.982394366197183e-06, |
|
"loss": 1.0614, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"eval_accuracy": 0.4345000088214874, |
|
"eval_loss": 1.019625186920166, |
|
"eval_runtime": 18.8668, |
|
"eval_samples_per_second": 106.006, |
|
"eval_steps_per_second": 6.625, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 4.94718309859155e-06, |
|
"loss": 0.8601, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"eval_accuracy": 0.6460000276565552, |
|
"eval_loss": 0.756058931350708, |
|
"eval_runtime": 19.8113, |
|
"eval_samples_per_second": 100.953, |
|
"eval_steps_per_second": 6.31, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 4.911971830985916e-06, |
|
"loss": 0.734, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"eval_accuracy": 0.6955000162124634, |
|
"eval_loss": 0.6796478629112244, |
|
"eval_runtime": 19.6375, |
|
"eval_samples_per_second": 101.846, |
|
"eval_steps_per_second": 6.365, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 4.876760563380282e-06, |
|
"loss": 0.6753, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_accuracy": 0.699999988079071, |
|
"eval_loss": 0.6520820260047913, |
|
"eval_runtime": 18.2758, |
|
"eval_samples_per_second": 109.434, |
|
"eval_steps_per_second": 6.84, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 4.841549295774649e-06, |
|
"loss": 0.6408, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"eval_accuracy": 0.7440000176429749, |
|
"eval_loss": 0.6119081974029541, |
|
"eval_runtime": 17.8217, |
|
"eval_samples_per_second": 112.223, |
|
"eval_steps_per_second": 7.014, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 4.806338028169015e-06, |
|
"loss": 0.5991, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_accuracy": 0.7369999885559082, |
|
"eval_loss": 0.6033942699432373, |
|
"eval_runtime": 17.3933, |
|
"eval_samples_per_second": 114.986, |
|
"eval_steps_per_second": 7.187, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 4.771126760563381e-06, |
|
"loss": 0.6069, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"eval_accuracy": 0.737500011920929, |
|
"eval_loss": 0.597550630569458, |
|
"eval_runtime": 20.251, |
|
"eval_samples_per_second": 98.76, |
|
"eval_steps_per_second": 6.173, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 4.735915492957747e-06, |
|
"loss": 0.6122, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"eval_accuracy": 0.7425000071525574, |
|
"eval_loss": 0.5870603322982788, |
|
"eval_runtime": 21.2152, |
|
"eval_samples_per_second": 94.272, |
|
"eval_steps_per_second": 5.892, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 4.7007042253521126e-06, |
|
"loss": 0.5908, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"eval_accuracy": 0.7444999814033508, |
|
"eval_loss": 0.5935022234916687, |
|
"eval_runtime": 22.0881, |
|
"eval_samples_per_second": 90.546, |
|
"eval_steps_per_second": 5.659, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 4.665492957746479e-06, |
|
"loss": 0.5884, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"eval_accuracy": 0.7519999742507935, |
|
"eval_loss": 0.5792337656021118, |
|
"eval_runtime": 17.6606, |
|
"eval_samples_per_second": 113.246, |
|
"eval_steps_per_second": 7.078, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 4.630281690140845e-06, |
|
"loss": 0.5839, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"eval_accuracy": 0.7555000185966492, |
|
"eval_loss": 0.578044056892395, |
|
"eval_runtime": 15.9931, |
|
"eval_samples_per_second": 125.054, |
|
"eval_steps_per_second": 7.816, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 4.595070422535211e-06, |
|
"loss": 0.5772, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_accuracy": 0.7570000290870667, |
|
"eval_loss": 0.5727072954177856, |
|
"eval_runtime": 21.5937, |
|
"eval_samples_per_second": 92.62, |
|
"eval_steps_per_second": 5.789, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 4.559859154929578e-06, |
|
"loss": 0.5895, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"eval_accuracy": 0.7549999952316284, |
|
"eval_loss": 0.5601378679275513, |
|
"eval_runtime": 20.1543, |
|
"eval_samples_per_second": 99.234, |
|
"eval_steps_per_second": 6.202, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"learning_rate": 4.524647887323944e-06, |
|
"loss": 0.5757, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"eval_accuracy": 0.7524999976158142, |
|
"eval_loss": 0.561326801776886, |
|
"eval_runtime": 15.0299, |
|
"eval_samples_per_second": 133.068, |
|
"eval_steps_per_second": 8.317, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"learning_rate": 4.489436619718311e-06, |
|
"loss": 0.5121, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.05, |
|
"eval_accuracy": 0.7599999904632568, |
|
"eval_loss": 0.5866703987121582, |
|
"eval_runtime": 18.2735, |
|
"eval_samples_per_second": 109.448, |
|
"eval_steps_per_second": 6.841, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 4.454225352112677e-06, |
|
"loss": 0.5254, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"eval_accuracy": 0.7630000114440918, |
|
"eval_loss": 0.5595362186431885, |
|
"eval_runtime": 21.9878, |
|
"eval_samples_per_second": 90.959, |
|
"eval_steps_per_second": 5.685, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 4.419014084507043e-06, |
|
"loss": 0.5074, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"eval_accuracy": 0.7584999799728394, |
|
"eval_loss": 0.559354841709137, |
|
"eval_runtime": 19.9155, |
|
"eval_samples_per_second": 100.424, |
|
"eval_steps_per_second": 6.277, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 4.383802816901409e-06, |
|
"loss": 0.4947, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"eval_accuracy": 0.7574999928474426, |
|
"eval_loss": 0.5696709156036377, |
|
"eval_runtime": 22.8735, |
|
"eval_samples_per_second": 87.438, |
|
"eval_steps_per_second": 5.465, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 4.3485915492957745e-06, |
|
"loss": 0.5019, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"eval_accuracy": 0.7580000162124634, |
|
"eval_loss": 0.5664528608322144, |
|
"eval_runtime": 22.2002, |
|
"eval_samples_per_second": 90.089, |
|
"eval_steps_per_second": 5.631, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 4.313380281690141e-06, |
|
"loss": 0.5005, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"eval_accuracy": 0.765500009059906, |
|
"eval_loss": 0.5484071969985962, |
|
"eval_runtime": 21.9941, |
|
"eval_samples_per_second": 90.934, |
|
"eval_steps_per_second": 5.683, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"learning_rate": 4.278169014084507e-06, |
|
"loss": 0.5125, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 1.47, |
|
"eval_accuracy": 0.7605000138282776, |
|
"eval_loss": 0.5626400709152222, |
|
"eval_runtime": 17.7868, |
|
"eval_samples_per_second": 112.443, |
|
"eval_steps_per_second": 7.028, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"learning_rate": 4.242957746478873e-06, |
|
"loss": 0.5241, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 1.54, |
|
"eval_accuracy": 0.7559999823570251, |
|
"eval_loss": 0.556066632270813, |
|
"eval_runtime": 18.9121, |
|
"eval_samples_per_second": 105.753, |
|
"eval_steps_per_second": 6.61, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"learning_rate": 4.20774647887324e-06, |
|
"loss": 0.5198, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 1.61, |
|
"eval_accuracy": 0.7599999904632568, |
|
"eval_loss": 0.560243546962738, |
|
"eval_runtime": 20.0908, |
|
"eval_samples_per_second": 99.548, |
|
"eval_steps_per_second": 6.222, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4.172535211267606e-06, |
|
"loss": 0.5124, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"eval_accuracy": 0.7490000128746033, |
|
"eval_loss": 0.5654177665710449, |
|
"eval_runtime": 19.7883, |
|
"eval_samples_per_second": 101.07, |
|
"eval_steps_per_second": 6.317, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"learning_rate": 4.137323943661972e-06, |
|
"loss": 0.5096, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.75, |
|
"eval_accuracy": 0.7515000104904175, |
|
"eval_loss": 0.5803455710411072, |
|
"eval_runtime": 22.0507, |
|
"eval_samples_per_second": 90.7, |
|
"eval_steps_per_second": 5.669, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 4.102112676056339e-06, |
|
"loss": 0.4885, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"eval_accuracy": 0.75, |
|
"eval_loss": 0.5889333486557007, |
|
"eval_runtime": 21.1933, |
|
"eval_samples_per_second": 94.369, |
|
"eval_steps_per_second": 5.898, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"learning_rate": 4.0669014084507045e-06, |
|
"loss": 0.5111, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 1.89, |
|
"eval_accuracy": 0.7664999961853027, |
|
"eval_loss": 0.5507832169532776, |
|
"eval_runtime": 20.2761, |
|
"eval_samples_per_second": 98.638, |
|
"eval_steps_per_second": 6.165, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 4.031690140845071e-06, |
|
"loss": 0.4868, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"eval_accuracy": 0.7634999752044678, |
|
"eval_loss": 0.5621495842933655, |
|
"eval_runtime": 18.2341, |
|
"eval_samples_per_second": 109.685, |
|
"eval_steps_per_second": 6.855, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 3.996478873239437e-06, |
|
"loss": 0.4599, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"eval_accuracy": 0.7615000009536743, |
|
"eval_loss": 0.5994852185249329, |
|
"eval_runtime": 17.787, |
|
"eval_samples_per_second": 112.442, |
|
"eval_steps_per_second": 7.028, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"learning_rate": 3.961267605633803e-06, |
|
"loss": 0.4147, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.11, |
|
"eval_accuracy": 0.753000020980835, |
|
"eval_loss": 0.6202083230018616, |
|
"eval_runtime": 20.5139, |
|
"eval_samples_per_second": 97.495, |
|
"eval_steps_per_second": 6.093, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 3.926056338028169e-06, |
|
"loss": 0.4233, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"eval_accuracy": 0.762499988079071, |
|
"eval_loss": 0.5875486135482788, |
|
"eval_runtime": 19.4349, |
|
"eval_samples_per_second": 102.908, |
|
"eval_steps_per_second": 6.432, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"learning_rate": 3.890845070422535e-06, |
|
"loss": 0.4324, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 2.25, |
|
"eval_accuracy": 0.7609999775886536, |
|
"eval_loss": 0.5794370174407959, |
|
"eval_runtime": 18.3807, |
|
"eval_samples_per_second": 108.81, |
|
"eval_steps_per_second": 6.801, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"learning_rate": 3.855633802816902e-06, |
|
"loss": 0.4141, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"eval_accuracy": 0.7459999918937683, |
|
"eval_loss": 0.5901930928230286, |
|
"eval_runtime": 20.1122, |
|
"eval_samples_per_second": 99.442, |
|
"eval_steps_per_second": 6.215, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 3.820422535211268e-06, |
|
"loss": 0.4306, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"eval_accuracy": 0.7544999718666077, |
|
"eval_loss": 0.6053192019462585, |
|
"eval_runtime": 20.0091, |
|
"eval_samples_per_second": 99.955, |
|
"eval_steps_per_second": 6.247, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"learning_rate": 3.785211267605634e-06, |
|
"loss": 0.4266, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 2.46, |
|
"eval_accuracy": 0.7570000290870667, |
|
"eval_loss": 0.5978769659996033, |
|
"eval_runtime": 19.8772, |
|
"eval_samples_per_second": 100.618, |
|
"eval_steps_per_second": 6.289, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 3.7500000000000005e-06, |
|
"loss": 0.4227, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"eval_accuracy": 0.7649999856948853, |
|
"eval_loss": 0.5919951796531677, |
|
"eval_runtime": 21.1736, |
|
"eval_samples_per_second": 94.457, |
|
"eval_steps_per_second": 5.904, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 3.7147887323943665e-06, |
|
"loss": 0.4226, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"eval_accuracy": 0.7455000281333923, |
|
"eval_loss": 0.6165611743927002, |
|
"eval_runtime": 20.5872, |
|
"eval_samples_per_second": 97.148, |
|
"eval_steps_per_second": 6.072, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 3.679577464788733e-06, |
|
"loss": 0.3978, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"eval_accuracy": 0.7559999823570251, |
|
"eval_loss": 0.6125866770744324, |
|
"eval_runtime": 22.1514, |
|
"eval_samples_per_second": 90.288, |
|
"eval_steps_per_second": 5.643, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 3.644366197183099e-06, |
|
"loss": 0.3954, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"eval_accuracy": 0.7549999952316284, |
|
"eval_loss": 0.615158200263977, |
|
"eval_runtime": 17.1074, |
|
"eval_samples_per_second": 116.908, |
|
"eval_steps_per_second": 7.307, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 3.609154929577465e-06, |
|
"loss": 0.4209, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"eval_accuracy": 0.75, |
|
"eval_loss": 0.597953736782074, |
|
"eval_runtime": 22.6397, |
|
"eval_samples_per_second": 88.34, |
|
"eval_steps_per_second": 5.521, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 3.5739436619718315e-06, |
|
"loss": 0.3982, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"eval_accuracy": 0.7490000128746033, |
|
"eval_loss": 0.6096097230911255, |
|
"eval_runtime": 22.0097, |
|
"eval_samples_per_second": 90.869, |
|
"eval_steps_per_second": 5.679, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 3.538732394366197e-06, |
|
"loss": 0.4016, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"eval_accuracy": 0.7425000071525574, |
|
"eval_loss": 0.6540722846984863, |
|
"eval_runtime": 16.1358, |
|
"eval_samples_per_second": 123.948, |
|
"eval_steps_per_second": 7.747, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"learning_rate": 3.5035211267605634e-06, |
|
"loss": 0.3966, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 3.02, |
|
"eval_accuracy": 0.7544999718666077, |
|
"eval_loss": 0.6377372145652771, |
|
"eval_runtime": 20.4003, |
|
"eval_samples_per_second": 98.038, |
|
"eval_steps_per_second": 6.127, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 3.4683098591549297e-06, |
|
"loss": 0.3074, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"eval_accuracy": 0.75, |
|
"eval_loss": 0.6859884262084961, |
|
"eval_runtime": 20.6366, |
|
"eval_samples_per_second": 96.915, |
|
"eval_steps_per_second": 6.057, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"learning_rate": 3.433098591549296e-06, |
|
"loss": 0.3551, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 3.16, |
|
"eval_accuracy": 0.7549999952316284, |
|
"eval_loss": 0.6160025596618652, |
|
"eval_runtime": 21.368, |
|
"eval_samples_per_second": 93.598, |
|
"eval_steps_per_second": 5.85, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"learning_rate": 3.397887323943662e-06, |
|
"loss": 0.3323, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 3.23, |
|
"eval_accuracy": 0.7519999742507935, |
|
"eval_loss": 0.6714155077934265, |
|
"eval_runtime": 21.1568, |
|
"eval_samples_per_second": 94.532, |
|
"eval_steps_per_second": 5.908, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"learning_rate": 3.3626760563380284e-06, |
|
"loss": 0.3171, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 3.3, |
|
"eval_accuracy": 0.7534999847412109, |
|
"eval_loss": 0.6537904739379883, |
|
"eval_runtime": 17.8954, |
|
"eval_samples_per_second": 111.76, |
|
"eval_steps_per_second": 6.985, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"learning_rate": 3.3274647887323947e-06, |
|
"loss": 0.3403, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 3.37, |
|
"eval_accuracy": 0.7465000152587891, |
|
"eval_loss": 0.677370548248291, |
|
"eval_runtime": 22.2854, |
|
"eval_samples_per_second": 89.745, |
|
"eval_steps_per_second": 5.609, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"learning_rate": 3.292253521126761e-06, |
|
"loss": 0.3396, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 3.44, |
|
"eval_accuracy": 0.7465000152587891, |
|
"eval_loss": 0.6725812554359436, |
|
"eval_runtime": 20.4072, |
|
"eval_samples_per_second": 98.005, |
|
"eval_steps_per_second": 6.125, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"learning_rate": 3.257042253521127e-06, |
|
"loss": 0.3259, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 3.51, |
|
"eval_accuracy": 0.7480000257492065, |
|
"eval_loss": 0.6465049982070923, |
|
"eval_runtime": 16.3957, |
|
"eval_samples_per_second": 121.983, |
|
"eval_steps_per_second": 7.624, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 3.2218309859154934e-06, |
|
"loss": 0.3392, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"eval_accuracy": 0.7459999918937683, |
|
"eval_loss": 0.6860352754592896, |
|
"eval_runtime": 19.1797, |
|
"eval_samples_per_second": 104.277, |
|
"eval_steps_per_second": 6.517, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"learning_rate": 3.1866197183098598e-06, |
|
"loss": 0.3251, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 3.65, |
|
"eval_accuracy": 0.7494999766349792, |
|
"eval_loss": 0.6696720123291016, |
|
"eval_runtime": 17.883, |
|
"eval_samples_per_second": 111.838, |
|
"eval_steps_per_second": 6.99, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"learning_rate": 3.1514084507042257e-06, |
|
"loss": 0.3253, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 3.72, |
|
"eval_accuracy": 0.7429999709129333, |
|
"eval_loss": 0.6769505739212036, |
|
"eval_runtime": 19.8341, |
|
"eval_samples_per_second": 100.836, |
|
"eval_steps_per_second": 6.302, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"learning_rate": 3.1161971830985916e-06, |
|
"loss": 0.3455, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 3.79, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 0.7176979780197144, |
|
"eval_runtime": 17.9226, |
|
"eval_samples_per_second": 111.591, |
|
"eval_steps_per_second": 6.974, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"learning_rate": 3.0809859154929576e-06, |
|
"loss": 0.3323, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 3.86, |
|
"eval_accuracy": 0.7400000095367432, |
|
"eval_loss": 0.6943067908287048, |
|
"eval_runtime": 21.4155, |
|
"eval_samples_per_second": 93.39, |
|
"eval_steps_per_second": 5.837, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 3.045774647887324e-06, |
|
"loss": 0.3335, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"eval_accuracy": 0.7555000185966492, |
|
"eval_loss": 0.6506811380386353, |
|
"eval_runtime": 21.6099, |
|
"eval_samples_per_second": 92.55, |
|
"eval_steps_per_second": 5.784, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 3.0105633802816903e-06, |
|
"loss": 0.3368, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_accuracy": 0.7484999895095825, |
|
"eval_loss": 0.6580154895782471, |
|
"eval_runtime": 21.7515, |
|
"eval_samples_per_second": 91.948, |
|
"eval_steps_per_second": 5.747, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"learning_rate": 2.9753521126760567e-06, |
|
"loss": 0.2479, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 4.07, |
|
"eval_accuracy": 0.7429999709129333, |
|
"eval_loss": 0.7666531801223755, |
|
"eval_runtime": 15.994, |
|
"eval_samples_per_second": 125.047, |
|
"eval_steps_per_second": 7.815, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"learning_rate": 2.9401408450704226e-06, |
|
"loss": 0.2613, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 4.14, |
|
"eval_accuracy": 0.7505000233650208, |
|
"eval_loss": 0.751265823841095, |
|
"eval_runtime": 16.5258, |
|
"eval_samples_per_second": 121.023, |
|
"eval_steps_per_second": 7.564, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 2.904929577464789e-06, |
|
"loss": 0.2557, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"eval_accuracy": 0.7484999895095825, |
|
"eval_loss": 0.7926999926567078, |
|
"eval_runtime": 20.2799, |
|
"eval_samples_per_second": 98.62, |
|
"eval_steps_per_second": 6.164, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"learning_rate": 2.8697183098591553e-06, |
|
"loss": 0.243, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 4.28, |
|
"eval_accuracy": 0.7450000047683716, |
|
"eval_loss": 0.77916020154953, |
|
"eval_runtime": 19.6554, |
|
"eval_samples_per_second": 101.753, |
|
"eval_steps_per_second": 6.36, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"learning_rate": 2.8345070422535217e-06, |
|
"loss": 0.2473, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 4.35, |
|
"eval_accuracy": 0.7354999780654907, |
|
"eval_loss": 0.8106710314750671, |
|
"eval_runtime": 14.7909, |
|
"eval_samples_per_second": 135.218, |
|
"eval_steps_per_second": 8.451, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"learning_rate": 2.7992957746478876e-06, |
|
"loss": 0.2447, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 4.42, |
|
"eval_accuracy": 0.7369999885559082, |
|
"eval_loss": 0.7850819230079651, |
|
"eval_runtime": 15.145, |
|
"eval_samples_per_second": 132.057, |
|
"eval_steps_per_second": 8.254, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 2.764084507042254e-06, |
|
"loss": 0.2515, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"eval_accuracy": 0.7465000152587891, |
|
"eval_loss": 0.7529160380363464, |
|
"eval_runtime": 14.1294, |
|
"eval_samples_per_second": 141.549, |
|
"eval_steps_per_second": 8.847, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"learning_rate": 2.7288732394366203e-06, |
|
"loss": 0.274, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 4.56, |
|
"eval_accuracy": 0.7465000152587891, |
|
"eval_loss": 0.7389978170394897, |
|
"eval_runtime": 18.3936, |
|
"eval_samples_per_second": 108.733, |
|
"eval_steps_per_second": 6.796, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"learning_rate": 2.693661971830986e-06, |
|
"loss": 0.2674, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 4.63, |
|
"eval_accuracy": 0.7459999918937683, |
|
"eval_loss": 0.7657651305198669, |
|
"eval_runtime": 19.2395, |
|
"eval_samples_per_second": 103.953, |
|
"eval_steps_per_second": 6.497, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"learning_rate": 2.6584507042253522e-06, |
|
"loss": 0.2416, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 4.7, |
|
"eval_accuracy": 0.7484999895095825, |
|
"eval_loss": 0.7914510369300842, |
|
"eval_runtime": 17.7833, |
|
"eval_samples_per_second": 112.465, |
|
"eval_steps_per_second": 7.029, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 2.623239436619718e-06, |
|
"loss": 0.2432, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"eval_accuracy": 0.7434999942779541, |
|
"eval_loss": 0.7988595962524414, |
|
"eval_runtime": 16.8516, |
|
"eval_samples_per_second": 118.683, |
|
"eval_steps_per_second": 7.418, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"learning_rate": 2.5880281690140845e-06, |
|
"loss": 0.2595, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 4.84, |
|
"eval_accuracy": 0.7379999756813049, |
|
"eval_loss": 0.7850367426872253, |
|
"eval_runtime": 22.0351, |
|
"eval_samples_per_second": 90.764, |
|
"eval_steps_per_second": 5.673, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 2.552816901408451e-06, |
|
"loss": 0.2736, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"eval_accuracy": 0.7394999861717224, |
|
"eval_loss": 0.7577053308486938, |
|
"eval_runtime": 22.4529, |
|
"eval_samples_per_second": 89.075, |
|
"eval_steps_per_second": 5.567, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"learning_rate": 2.5176056338028172e-06, |
|
"loss": 0.2783, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 4.98, |
|
"eval_accuracy": 0.7404999732971191, |
|
"eval_loss": 0.7649760842323303, |
|
"eval_runtime": 18.1063, |
|
"eval_samples_per_second": 110.459, |
|
"eval_steps_per_second": 6.904, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 2.482394366197183e-06, |
|
"loss": 0.2304, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"eval_accuracy": 0.7384999990463257, |
|
"eval_loss": 0.8541684746742249, |
|
"eval_runtime": 20.2711, |
|
"eval_samples_per_second": 98.663, |
|
"eval_steps_per_second": 6.166, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"learning_rate": 2.4471830985915495e-06, |
|
"loss": 0.1937, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 5.12, |
|
"eval_accuracy": 0.734499990940094, |
|
"eval_loss": 0.8389941453933716, |
|
"eval_runtime": 21.6926, |
|
"eval_samples_per_second": 92.197, |
|
"eval_steps_per_second": 5.762, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"learning_rate": 2.411971830985916e-06, |
|
"loss": 0.1878, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 5.19, |
|
"eval_accuracy": 0.7329999804496765, |
|
"eval_loss": 0.9149684906005859, |
|
"eval_runtime": 19.4205, |
|
"eval_samples_per_second": 102.984, |
|
"eval_steps_per_second": 6.436, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"learning_rate": 2.376760563380282e-06, |
|
"loss": 0.1921, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 5.26, |
|
"eval_accuracy": 0.7404999732971191, |
|
"eval_loss": 0.8792451024055481, |
|
"eval_runtime": 20.264, |
|
"eval_samples_per_second": 98.697, |
|
"eval_steps_per_second": 6.169, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 2.341549295774648e-06, |
|
"loss": 0.1916, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"eval_accuracy": 0.7409999966621399, |
|
"eval_loss": 0.8891890645027161, |
|
"eval_runtime": 20.8102, |
|
"eval_samples_per_second": 96.107, |
|
"eval_steps_per_second": 6.007, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"learning_rate": 2.306338028169014e-06, |
|
"loss": 0.2011, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 5.4, |
|
"eval_accuracy": 0.7325000166893005, |
|
"eval_loss": 0.9012252688407898, |
|
"eval_runtime": 21.4132, |
|
"eval_samples_per_second": 93.4, |
|
"eval_steps_per_second": 5.838, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"learning_rate": 2.2711267605633805e-06, |
|
"loss": 0.211, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 5.47, |
|
"eval_accuracy": 0.7419999837875366, |
|
"eval_loss": 0.8607960343360901, |
|
"eval_runtime": 17.8954, |
|
"eval_samples_per_second": 111.761, |
|
"eval_steps_per_second": 6.985, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 2.235915492957747e-06, |
|
"loss": 0.2194, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"eval_accuracy": 0.7319999933242798, |
|
"eval_loss": 0.8851566314697266, |
|
"eval_runtime": 12.4785, |
|
"eval_samples_per_second": 160.275, |
|
"eval_steps_per_second": 10.017, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 2.200704225352113e-06, |
|
"loss": 0.205, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"eval_accuracy": 0.7384999990463257, |
|
"eval_loss": 0.8803377151489258, |
|
"eval_runtime": 17.7919, |
|
"eval_samples_per_second": 112.41, |
|
"eval_steps_per_second": 7.026, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"learning_rate": 2.1654929577464787e-06, |
|
"loss": 0.1981, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 5.68, |
|
"eval_accuracy": 0.7329999804496765, |
|
"eval_loss": 0.86810302734375, |
|
"eval_runtime": 15.0816, |
|
"eval_samples_per_second": 132.612, |
|
"eval_steps_per_second": 8.288, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"learning_rate": 2.130281690140845e-06, |
|
"loss": 0.1908, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 5.75, |
|
"eval_accuracy": 0.7434999942779541, |
|
"eval_loss": 0.9019960761070251, |
|
"eval_runtime": 16.0702, |
|
"eval_samples_per_second": 124.454, |
|
"eval_steps_per_second": 7.778, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 2.0950704225352115e-06, |
|
"loss": 0.1942, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"eval_accuracy": 0.7409999966621399, |
|
"eval_loss": 0.8780096173286438, |
|
"eval_runtime": 18.9162, |
|
"eval_samples_per_second": 105.73, |
|
"eval_steps_per_second": 6.608, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 2.059859154929578e-06, |
|
"loss": 0.1958, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"eval_accuracy": 0.734499990940094, |
|
"eval_loss": 0.8936640620231628, |
|
"eval_runtime": 19.3712, |
|
"eval_samples_per_second": 103.246, |
|
"eval_steps_per_second": 6.453, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 2.0246478873239438e-06, |
|
"loss": 0.1883, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 0.9120668172836304, |
|
"eval_runtime": 17.1206, |
|
"eval_samples_per_second": 116.818, |
|
"eval_steps_per_second": 7.301, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"learning_rate": 1.98943661971831e-06, |
|
"loss": 0.1819, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 6.04, |
|
"eval_accuracy": 0.7429999709129333, |
|
"eval_loss": 0.94089674949646, |
|
"eval_runtime": 22.6258, |
|
"eval_samples_per_second": 88.395, |
|
"eval_steps_per_second": 5.525, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"learning_rate": 1.954225352112676e-06, |
|
"loss": 0.145, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 6.11, |
|
"eval_accuracy": 0.7264999747276306, |
|
"eval_loss": 1.1389663219451904, |
|
"eval_runtime": 15.9579, |
|
"eval_samples_per_second": 125.33, |
|
"eval_steps_per_second": 7.833, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 1.9190140845070424e-06, |
|
"loss": 0.1696, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"eval_accuracy": 0.7429999709129333, |
|
"eval_loss": 0.9188500046730042, |
|
"eval_runtime": 21.2758, |
|
"eval_samples_per_second": 94.003, |
|
"eval_steps_per_second": 5.875, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"learning_rate": 1.8838028169014086e-06, |
|
"loss": 0.1488, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 6.25, |
|
"eval_accuracy": 0.7400000095367432, |
|
"eval_loss": 0.9717501401901245, |
|
"eval_runtime": 17.3967, |
|
"eval_samples_per_second": 114.964, |
|
"eval_steps_per_second": 7.185, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"learning_rate": 1.848591549295775e-06, |
|
"loss": 0.1637, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 6.32, |
|
"eval_accuracy": 0.7450000047683716, |
|
"eval_loss": 0.9701842069625854, |
|
"eval_runtime": 19.9143, |
|
"eval_samples_per_second": 100.43, |
|
"eval_steps_per_second": 6.277, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 1.813380281690141e-06, |
|
"loss": 0.1547, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"eval_accuracy": 0.7409999966621399, |
|
"eval_loss": 1.0032541751861572, |
|
"eval_runtime": 21.7985, |
|
"eval_samples_per_second": 91.749, |
|
"eval_steps_per_second": 5.734, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"learning_rate": 1.7781690140845072e-06, |
|
"loss": 0.1605, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 6.46, |
|
"eval_accuracy": 0.7354999780654907, |
|
"eval_loss": 0.99726402759552, |
|
"eval_runtime": 21.688, |
|
"eval_samples_per_second": 92.217, |
|
"eval_steps_per_second": 5.764, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"learning_rate": 1.7429577464788734e-06, |
|
"loss": 0.1552, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 6.53, |
|
"eval_accuracy": 0.7289999723434448, |
|
"eval_loss": 1.0491423606872559, |
|
"eval_runtime": 20.7168, |
|
"eval_samples_per_second": 96.54, |
|
"eval_steps_per_second": 6.034, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"learning_rate": 1.7077464788732395e-06, |
|
"loss": 0.1731, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 6.6, |
|
"eval_accuracy": 0.7335000038146973, |
|
"eval_loss": 1.027091145515442, |
|
"eval_runtime": 21.2758, |
|
"eval_samples_per_second": 94.003, |
|
"eval_steps_per_second": 5.875, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 1.6725352112676057e-06, |
|
"loss": 0.1738, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"eval_accuracy": 0.7429999709129333, |
|
"eval_loss": 0.9575192928314209, |
|
"eval_runtime": 18.6703, |
|
"eval_samples_per_second": 107.122, |
|
"eval_steps_per_second": 6.695, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"learning_rate": 1.637323943661972e-06, |
|
"loss": 0.1669, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 6.74, |
|
"eval_accuracy": 0.7350000143051147, |
|
"eval_loss": 0.9613668322563171, |
|
"eval_runtime": 21.3968, |
|
"eval_samples_per_second": 93.472, |
|
"eval_steps_per_second": 5.842, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 1.6021126760563382e-06, |
|
"loss": 0.1347, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"eval_accuracy": 0.7365000247955322, |
|
"eval_loss": 1.0263434648513794, |
|
"eval_runtime": 19.7329, |
|
"eval_samples_per_second": 101.353, |
|
"eval_steps_per_second": 6.335, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"learning_rate": 1.5669014084507045e-06, |
|
"loss": 0.1593, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 6.88, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.017268180847168, |
|
"eval_runtime": 16.8592, |
|
"eval_samples_per_second": 118.63, |
|
"eval_steps_per_second": 7.414, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"learning_rate": 1.5316901408450705e-06, |
|
"loss": 0.1549, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 6.95, |
|
"eval_accuracy": 0.7350000143051147, |
|
"eval_loss": 1.0397531986236572, |
|
"eval_runtime": 18.6472, |
|
"eval_samples_per_second": 107.255, |
|
"eval_steps_per_second": 6.703, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"learning_rate": 1.4964788732394366e-06, |
|
"loss": 0.1675, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 7.02, |
|
"eval_accuracy": 0.7379999756813049, |
|
"eval_loss": 0.9975456595420837, |
|
"eval_runtime": 19.9355, |
|
"eval_samples_per_second": 100.323, |
|
"eval_steps_per_second": 6.27, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 1.461267605633803e-06, |
|
"loss": 0.1182, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"eval_accuracy": 0.7350000143051147, |
|
"eval_loss": 1.105920672416687, |
|
"eval_runtime": 22.1229, |
|
"eval_samples_per_second": 90.404, |
|
"eval_steps_per_second": 5.65, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 1.4260563380281691e-06, |
|
"loss": 0.1351, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"eval_accuracy": 0.7400000095367432, |
|
"eval_loss": 1.093347191810608, |
|
"eval_runtime": 20.9448, |
|
"eval_samples_per_second": 95.489, |
|
"eval_steps_per_second": 5.968, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"learning_rate": 1.3908450704225355e-06, |
|
"loss": 0.1496, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 7.23, |
|
"eval_accuracy": 0.7354999780654907, |
|
"eval_loss": 1.0731019973754883, |
|
"eval_runtime": 17.3295, |
|
"eval_samples_per_second": 115.41, |
|
"eval_steps_per_second": 7.213, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"learning_rate": 1.3556338028169017e-06, |
|
"loss": 0.1197, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 7.3, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.1089140176773071, |
|
"eval_runtime": 19.6723, |
|
"eval_samples_per_second": 101.666, |
|
"eval_steps_per_second": 6.354, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"learning_rate": 1.3204225352112676e-06, |
|
"loss": 0.1111, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 7.37, |
|
"eval_accuracy": 0.7404999732971191, |
|
"eval_loss": 1.1381380558013916, |
|
"eval_runtime": 18.6584, |
|
"eval_samples_per_second": 107.191, |
|
"eval_steps_per_second": 6.699, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"learning_rate": 1.285211267605634e-06, |
|
"loss": 0.1494, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 7.44, |
|
"eval_accuracy": 0.7425000071525574, |
|
"eval_loss": 1.0251615047454834, |
|
"eval_runtime": 20.1427, |
|
"eval_samples_per_second": 99.292, |
|
"eval_steps_per_second": 6.206, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"learning_rate": 1.25e-06, |
|
"loss": 0.1235, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 7.51, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.0906413793563843, |
|
"eval_runtime": 22.5849, |
|
"eval_samples_per_second": 88.555, |
|
"eval_steps_per_second": 5.535, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 1.2147887323943663e-06, |
|
"loss": 0.133, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"eval_accuracy": 0.737500011920929, |
|
"eval_loss": 1.1796296834945679, |
|
"eval_runtime": 12.3686, |
|
"eval_samples_per_second": 161.699, |
|
"eval_steps_per_second": 10.106, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"learning_rate": 1.1795774647887324e-06, |
|
"loss": 0.1248, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 7.65, |
|
"eval_accuracy": 0.7419999837875366, |
|
"eval_loss": 1.1331868171691895, |
|
"eval_runtime": 21.2537, |
|
"eval_samples_per_second": 94.101, |
|
"eval_steps_per_second": 5.881, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"learning_rate": 1.1443661971830988e-06, |
|
"loss": 0.1268, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 7.72, |
|
"eval_accuracy": 0.7415000200271606, |
|
"eval_loss": 1.1304017305374146, |
|
"eval_runtime": 20.2304, |
|
"eval_samples_per_second": 98.861, |
|
"eval_steps_per_second": 6.179, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"learning_rate": 1.109154929577465e-06, |
|
"loss": 0.1368, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 7.79, |
|
"eval_accuracy": 0.7379999756813049, |
|
"eval_loss": 1.1345131397247314, |
|
"eval_runtime": 21.0499, |
|
"eval_samples_per_second": 95.013, |
|
"eval_steps_per_second": 5.938, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"learning_rate": 1.073943661971831e-06, |
|
"loss": 0.1228, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 7.86, |
|
"eval_accuracy": 0.7319999933242798, |
|
"eval_loss": 1.2018308639526367, |
|
"eval_runtime": 21.3555, |
|
"eval_samples_per_second": 93.653, |
|
"eval_steps_per_second": 5.853, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 7.93, |
|
"learning_rate": 1.0387323943661972e-06, |
|
"loss": 0.1281, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 7.93, |
|
"eval_accuracy": 0.7350000143051147, |
|
"eval_loss": 1.1884474754333496, |
|
"eval_runtime": 18.4277, |
|
"eval_samples_per_second": 108.532, |
|
"eval_steps_per_second": 6.783, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 1.0035211267605636e-06, |
|
"loss": 0.1449, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_accuracy": 0.734499990940094, |
|
"eval_loss": 1.157057762145996, |
|
"eval_runtime": 16.3477, |
|
"eval_samples_per_second": 122.341, |
|
"eval_steps_per_second": 7.646, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"learning_rate": 9.683098591549295e-07, |
|
"loss": 0.1025, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 8.07, |
|
"eval_accuracy": 0.734499990940094, |
|
"eval_loss": 1.153812289237976, |
|
"eval_runtime": 15.7861, |
|
"eval_samples_per_second": 126.694, |
|
"eval_steps_per_second": 7.918, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"learning_rate": 9.330985915492959e-07, |
|
"loss": 0.1199, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 8.14, |
|
"eval_accuracy": 0.7390000224113464, |
|
"eval_loss": 1.2113364934921265, |
|
"eval_runtime": 16.1478, |
|
"eval_samples_per_second": 123.856, |
|
"eval_steps_per_second": 7.741, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"learning_rate": 8.978873239436621e-07, |
|
"loss": 0.1016, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 8.21, |
|
"eval_accuracy": 0.7369999885559082, |
|
"eval_loss": 1.2881745100021362, |
|
"eval_runtime": 23.0414, |
|
"eval_samples_per_second": 86.8, |
|
"eval_steps_per_second": 5.425, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 8.28, |
|
"learning_rate": 8.626760563380282e-07, |
|
"loss": 0.114, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 8.28, |
|
"eval_accuracy": 0.7390000224113464, |
|
"eval_loss": 1.287195086479187, |
|
"eval_runtime": 19.2083, |
|
"eval_samples_per_second": 104.122, |
|
"eval_steps_per_second": 6.508, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"learning_rate": 8.274647887323944e-07, |
|
"loss": 0.1019, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"eval_accuracy": 0.7379999756813049, |
|
"eval_loss": 1.287625789642334, |
|
"eval_runtime": 19.2034, |
|
"eval_samples_per_second": 104.148, |
|
"eval_steps_per_second": 6.509, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"learning_rate": 7.922535211267607e-07, |
|
"loss": 0.1142, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 8.42, |
|
"eval_accuracy": 0.7384999990463257, |
|
"eval_loss": 1.2790753841400146, |
|
"eval_runtime": 12.6779, |
|
"eval_samples_per_second": 157.755, |
|
"eval_steps_per_second": 9.86, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 8.49, |
|
"learning_rate": 7.570422535211268e-07, |
|
"loss": 0.1135, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 8.49, |
|
"eval_accuracy": 0.7379999756813049, |
|
"eval_loss": 1.2882863283157349, |
|
"eval_runtime": 22.9048, |
|
"eval_samples_per_second": 87.318, |
|
"eval_steps_per_second": 5.457, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 8.56, |
|
"learning_rate": 7.21830985915493e-07, |
|
"loss": 0.1139, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 8.56, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.2828530073165894, |
|
"eval_runtime": 12.6066, |
|
"eval_samples_per_second": 158.647, |
|
"eval_steps_per_second": 9.915, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"learning_rate": 6.866197183098592e-07, |
|
"loss": 0.1107, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 8.63, |
|
"eval_accuracy": 0.7365000247955322, |
|
"eval_loss": 1.269805669784546, |
|
"eval_runtime": 18.0397, |
|
"eval_samples_per_second": 110.866, |
|
"eval_steps_per_second": 6.929, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"learning_rate": 6.514084507042254e-07, |
|
"loss": 0.1183, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 8.7, |
|
"eval_accuracy": 0.734499990940094, |
|
"eval_loss": 1.266024112701416, |
|
"eval_runtime": 18.8559, |
|
"eval_samples_per_second": 106.068, |
|
"eval_steps_per_second": 6.629, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"learning_rate": 6.161971830985916e-07, |
|
"loss": 0.1064, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"eval_accuracy": 0.7365000247955322, |
|
"eval_loss": 1.288902759552002, |
|
"eval_runtime": 20.1463, |
|
"eval_samples_per_second": 99.274, |
|
"eval_steps_per_second": 6.205, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"learning_rate": 5.809859154929578e-07, |
|
"loss": 0.0895, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 8.84, |
|
"eval_accuracy": 0.7329999804496765, |
|
"eval_loss": 1.3480335474014282, |
|
"eval_runtime": 19.9481, |
|
"eval_samples_per_second": 100.26, |
|
"eval_steps_per_second": 6.266, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 5.457746478873239e-07, |
|
"loss": 0.1244, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"eval_accuracy": 0.7325000166893005, |
|
"eval_loss": 1.2872198820114136, |
|
"eval_runtime": 17.773, |
|
"eval_samples_per_second": 112.53, |
|
"eval_steps_per_second": 7.033, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 8.98, |
|
"learning_rate": 5.105633802816902e-07, |
|
"loss": 0.1209, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 8.98, |
|
"eval_accuracy": 0.737500011920929, |
|
"eval_loss": 1.2680846452713013, |
|
"eval_runtime": 22.2614, |
|
"eval_samples_per_second": 89.842, |
|
"eval_steps_per_second": 5.615, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"learning_rate": 4.7535211267605635e-07, |
|
"loss": 0.1144, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 9.05, |
|
"eval_accuracy": 0.7369999885559082, |
|
"eval_loss": 1.2711447477340698, |
|
"eval_runtime": 18.1651, |
|
"eval_samples_per_second": 110.101, |
|
"eval_steps_per_second": 6.881, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"learning_rate": 4.4014084507042255e-07, |
|
"loss": 0.1034, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 9.12, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.2800805568695068, |
|
"eval_runtime": 20.275, |
|
"eval_samples_per_second": 98.644, |
|
"eval_steps_per_second": 6.165, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 9.19, |
|
"learning_rate": 4.049295774647888e-07, |
|
"loss": 0.113, |
|
"step": 13100 |
|
}, |
|
{ |
|
"epoch": 9.19, |
|
"eval_accuracy": 0.7350000143051147, |
|
"eval_loss": 1.2801427841186523, |
|
"eval_runtime": 19.6919, |
|
"eval_samples_per_second": 101.565, |
|
"eval_steps_per_second": 6.348, |
|
"step": 13100 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"learning_rate": 3.6971830985915495e-07, |
|
"loss": 0.0994, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 9.26, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.2920359373092651, |
|
"eval_runtime": 18.5273, |
|
"eval_samples_per_second": 107.949, |
|
"eval_steps_per_second": 6.747, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 3.345070422535211e-07, |
|
"loss": 0.0966, |
|
"step": 13300 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"eval_accuracy": 0.7335000038146973, |
|
"eval_loss": 1.2760682106018066, |
|
"eval_runtime": 21.2516, |
|
"eval_samples_per_second": 94.11, |
|
"eval_steps_per_second": 5.882, |
|
"step": 13300 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"learning_rate": 2.992957746478873e-07, |
|
"loss": 0.0939, |
|
"step": 13400 |
|
}, |
|
{ |
|
"epoch": 9.4, |
|
"eval_accuracy": 0.7365000247955322, |
|
"eval_loss": 1.2908642292022705, |
|
"eval_runtime": 17.0257, |
|
"eval_samples_per_second": 117.47, |
|
"eval_steps_per_second": 7.342, |
|
"step": 13400 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"learning_rate": 2.6408450704225356e-07, |
|
"loss": 0.0975, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 9.47, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.2952693700790405, |
|
"eval_runtime": 20.8537, |
|
"eval_samples_per_second": 95.906, |
|
"eval_steps_per_second": 5.994, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"learning_rate": 2.2887323943661974e-07, |
|
"loss": 0.0842, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"eval_accuracy": 0.7335000038146973, |
|
"eval_loss": 1.3179150819778442, |
|
"eval_runtime": 20.9509, |
|
"eval_samples_per_second": 95.461, |
|
"eval_steps_per_second": 5.966, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 9.61, |
|
"learning_rate": 1.936619718309859e-07, |
|
"loss": 0.0871, |
|
"step": 13700 |
|
}, |
|
{ |
|
"epoch": 9.61, |
|
"eval_accuracy": 0.7384999990463257, |
|
"eval_loss": 1.314935564994812, |
|
"eval_runtime": 20.6989, |
|
"eval_samples_per_second": 96.623, |
|
"eval_steps_per_second": 6.039, |
|
"step": 13700 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"learning_rate": 1.5845070422535212e-07, |
|
"loss": 0.1162, |
|
"step": 13800 |
|
}, |
|
{ |
|
"epoch": 9.68, |
|
"eval_accuracy": 0.7350000143051147, |
|
"eval_loss": 1.3124284744262695, |
|
"eval_runtime": 21.7736, |
|
"eval_samples_per_second": 91.854, |
|
"eval_steps_per_second": 5.741, |
|
"step": 13800 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"learning_rate": 1.2323943661971832e-07, |
|
"loss": 0.085, |
|
"step": 13900 |
|
}, |
|
{ |
|
"epoch": 9.75, |
|
"eval_accuracy": 0.7354999780654907, |
|
"eval_loss": 1.3206626176834106, |
|
"eval_runtime": 18.1587, |
|
"eval_samples_per_second": 110.14, |
|
"eval_steps_per_second": 6.884, |
|
"step": 13900 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 8.802816901408452e-08, |
|
"loss": 0.0966, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"eval_accuracy": 0.7335000038146973, |
|
"eval_loss": 1.3247543573379517, |
|
"eval_runtime": 16.4512, |
|
"eval_samples_per_second": 121.572, |
|
"eval_steps_per_second": 7.598, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 9.89, |
|
"learning_rate": 5.281690140845071e-08, |
|
"loss": 0.1064, |
|
"step": 14100 |
|
}, |
|
{ |
|
"epoch": 9.89, |
|
"eval_accuracy": 0.7335000038146973, |
|
"eval_loss": 1.3260987997055054, |
|
"eval_runtime": 22.2976, |
|
"eval_samples_per_second": 89.696, |
|
"eval_steps_per_second": 5.606, |
|
"step": 14100 |
|
}, |
|
{ |
|
"epoch": 9.96, |
|
"learning_rate": 1.7605633802816902e-08, |
|
"loss": 0.1046, |
|
"step": 14200 |
|
}, |
|
{ |
|
"epoch": 9.96, |
|
"eval_accuracy": 0.7360000014305115, |
|
"eval_loss": 1.3255339860916138, |
|
"eval_runtime": 19.2834, |
|
"eval_samples_per_second": 103.716, |
|
"eval_steps_per_second": 6.482, |
|
"step": 14200 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"step": 14250, |
|
"total_flos": 2.1254540922139392e+17, |
|
"train_loss": 0.2872312853629129, |
|
"train_runtime": 13159.309, |
|
"train_samples_per_second": 34.664, |
|
"train_steps_per_second": 1.083 |
|
} |
|
], |
|
"max_steps": 14250, |
|
"num_train_epochs": 10, |
|
"total_flos": 2.1254540922139392e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |