{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 25.823111684958036,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21,
      "learning_rate": 1.9875000000000002e-05,
      "loss": 1.9446,
      "step": 20
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.9736111111111115e-05,
      "loss": 1.7941,
      "step": 40
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.9597222222222224e-05,
      "loss": 1.5361,
      "step": 60
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.9458333333333333e-05,
      "loss": 1.1041,
      "step": 80
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.9319444444444446e-05,
      "loss": 0.6053,
      "step": 100
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.918055555555556e-05,
      "loss": 0.4088,
      "step": 120
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.9041666666666668e-05,
      "loss": 0.3813,
      "step": 140
    },
    {
      "epoch": 1.65,
      "learning_rate": 1.890277777777778e-05,
      "loss": 0.3733,
      "step": 160
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.876388888888889e-05,
      "loss": 0.3678,
      "step": 180
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.8625000000000002e-05,
      "loss": 0.3604,
      "step": 200
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.8486111111111115e-05,
      "loss": 0.3368,
      "step": 220
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.8347222222222224e-05,
      "loss": 0.3356,
      "step": 240
    },
    {
      "epoch": 2.69,
      "learning_rate": 1.8208333333333333e-05,
      "loss": 0.3439,
      "step": 260
    },
    {
      "epoch": 2.89,
      "learning_rate": 1.8069444444444446e-05,
      "loss": 0.3277,
      "step": 280
    },
    {
      "epoch": 3.1,
      "learning_rate": 1.793055555555556e-05,
      "loss": 0.3288,
      "step": 300
    },
    {
      "epoch": 3.31,
      "learning_rate": 1.7791666666666668e-05,
      "loss": 0.3227,
      "step": 320
    },
    {
      "epoch": 3.51,
      "learning_rate": 1.765277777777778e-05,
      "loss": 0.3213,
      "step": 340
    },
    {
      "epoch": 3.72,
      "learning_rate": 1.751388888888889e-05,
      "loss": 0.3128,
      "step": 360
    },
    {
      "epoch": 3.93,
      "learning_rate": 1.7375000000000002e-05,
      "loss": 0.3071,
      "step": 380
    },
    {
      "epoch": 4.13,
      "learning_rate": 1.7236111111111115e-05,
      "loss": 0.3022,
      "step": 400
    },
    {
      "epoch": 4.34,
      "learning_rate": 1.7097222222222224e-05,
      "loss": 0.2968,
      "step": 420
    },
    {
      "epoch": 4.54,
      "learning_rate": 1.6958333333333333e-05,
      "loss": 0.3023,
      "step": 440
    },
    {
      "epoch": 4.75,
      "learning_rate": 1.6819444444444446e-05,
      "loss": 0.2889,
      "step": 460
    },
    {
      "epoch": 4.96,
      "learning_rate": 1.668055555555556e-05,
      "loss": 0.2763,
      "step": 480
    },
    {
      "epoch": 5.16,
      "learning_rate": 1.6541666666666668e-05,
      "loss": 0.2778,
      "step": 500
    },
    {
      "epoch": 5.37,
      "learning_rate": 1.6402777777777777e-05,
      "loss": 0.2689,
      "step": 520
    },
    {
      "epoch": 5.58,
      "learning_rate": 1.626388888888889e-05,
      "loss": 0.2732,
      "step": 540
    },
    {
      "epoch": 5.78,
      "learning_rate": 1.6125000000000002e-05,
      "loss": 0.2604,
      "step": 560
    },
    {
      "epoch": 5.99,
      "learning_rate": 1.5986111111111115e-05,
      "loss": 0.2608,
      "step": 580
    },
    {
      "epoch": 6.2,
      "learning_rate": 1.5847222222222224e-05,
      "loss": 0.2356,
      "step": 600
    },
    {
      "epoch": 6.4,
      "learning_rate": 1.5708333333333333e-05,
      "loss": 0.2414,
      "step": 620
    },
    {
      "epoch": 6.61,
      "learning_rate": 1.5569444444444446e-05,
      "loss": 0.2339,
      "step": 640
    },
    {
      "epoch": 6.82,
      "learning_rate": 1.543055555555556e-05,
      "loss": 0.2394,
      "step": 660
    },
    {
      "epoch": 7.02,
      "learning_rate": 1.5291666666666668e-05,
      "loss": 0.223,
      "step": 680
    },
    {
      "epoch": 7.23,
      "learning_rate": 1.5152777777777779e-05,
      "loss": 0.2155,
      "step": 700
    },
    {
      "epoch": 7.44,
      "learning_rate": 1.501388888888889e-05,
      "loss": 0.2099,
      "step": 720
    },
    {
      "epoch": 7.64,
      "learning_rate": 1.4875000000000002e-05,
      "loss": 0.2041,
      "step": 740
    },
    {
      "epoch": 7.85,
      "learning_rate": 1.4736111111111113e-05,
      "loss": 0.2067,
      "step": 760
    },
    {
      "epoch": 8.06,
      "learning_rate": 1.4597222222222223e-05,
      "loss": 0.193,
      "step": 780
    },
    {
      "epoch": 8.26,
      "learning_rate": 1.4458333333333334e-05,
      "loss": 0.1802,
      "step": 800
    },
    {
      "epoch": 8.47,
      "learning_rate": 1.4319444444444446e-05,
      "loss": 0.1828,
      "step": 820
    },
    {
      "epoch": 8.68,
      "learning_rate": 1.4180555555555557e-05,
      "loss": 0.1754,
      "step": 840
    },
    {
      "epoch": 8.88,
      "learning_rate": 1.4041666666666666e-05,
      "loss": 0.1745,
      "step": 860
    },
    {
      "epoch": 9.09,
      "learning_rate": 1.3902777777777779e-05,
      "loss": 0.1692,
      "step": 880
    },
    {
      "epoch": 9.3,
      "learning_rate": 1.376388888888889e-05,
      "loss": 0.1648,
      "step": 900
    },
    {
      "epoch": 9.5,
      "learning_rate": 1.3625e-05,
      "loss": 0.1463,
      "step": 920
    },
    {
      "epoch": 9.71,
      "learning_rate": 1.3486111111111113e-05,
      "loss": 0.1525,
      "step": 940
    },
    {
      "epoch": 9.92,
      "learning_rate": 1.3347222222222223e-05,
      "loss": 0.1458,
      "step": 960
    },
    {
      "epoch": 10.12,
      "learning_rate": 1.3208333333333334e-05,
      "loss": 0.137,
      "step": 980
    },
    {
      "epoch": 10.33,
      "learning_rate": 1.3069444444444446e-05,
      "loss": 0.1318,
      "step": 1000
    },
    {
      "epoch": 10.54,
      "learning_rate": 1.2930555555555557e-05,
      "loss": 0.138,
      "step": 1020
    },
    {
      "epoch": 10.74,
      "learning_rate": 1.2791666666666666e-05,
      "loss": 0.1282,
      "step": 1040
    },
    {
      "epoch": 10.95,
      "learning_rate": 1.2652777777777779e-05,
      "loss": 0.1277,
      "step": 1060
    },
    {
      "epoch": 11.16,
      "learning_rate": 1.251388888888889e-05,
      "loss": 0.1201,
      "step": 1080
    },
    {
      "epoch": 11.36,
      "learning_rate": 1.2375000000000001e-05,
      "loss": 0.1189,
      "step": 1100
    },
    {
      "epoch": 11.57,
      "learning_rate": 1.2236111111111114e-05,
      "loss": 0.1153,
      "step": 1120
    },
    {
      "epoch": 11.78,
      "learning_rate": 1.2097222222222223e-05,
      "loss": 0.1164,
      "step": 1140
    },
    {
      "epoch": 11.98,
      "learning_rate": 1.1958333333333334e-05,
      "loss": 0.1062,
      "step": 1160
    },
    {
      "epoch": 12.19,
      "learning_rate": 1.1819444444444446e-05,
      "loss": 0.107,
      "step": 1180
    },
    {
      "epoch": 12.4,
      "learning_rate": 1.1680555555555557e-05,
      "loss": 0.1,
      "step": 1200
    },
    {
      "epoch": 12.6,
      "learning_rate": 1.1541666666666667e-05,
      "loss": 0.1017,
      "step": 1220
    },
    {
      "epoch": 12.81,
      "learning_rate": 1.1402777777777777e-05,
      "loss": 0.0984,
      "step": 1240
    },
    {
      "epoch": 13.01,
      "learning_rate": 1.126388888888889e-05,
      "loss": 0.0948,
      "step": 1260
    },
    {
      "epoch": 13.22,
      "learning_rate": 1.1125000000000001e-05,
      "loss": 0.0886,
      "step": 1280
    },
    {
      "epoch": 13.43,
      "learning_rate": 1.0986111111111114e-05,
      "loss": 0.0908,
      "step": 1300
    },
    {
      "epoch": 13.63,
      "learning_rate": 1.0847222222222223e-05,
      "loss": 0.0907,
      "step": 1320
    },
    {
      "epoch": 13.84,
      "learning_rate": 1.0708333333333334e-05,
      "loss": 0.0886,
      "step": 1340
    },
    {
      "epoch": 14.05,
      "learning_rate": 1.0569444444444445e-05,
      "loss": 0.0835,
      "step": 1360
    },
    {
      "epoch": 14.25,
      "learning_rate": 1.0430555555555557e-05,
      "loss": 0.0809,
      "step": 1380
    },
    {
      "epoch": 14.46,
      "learning_rate": 1.0291666666666667e-05,
      "loss": 0.0793,
      "step": 1400
    },
    {
      "epoch": 14.67,
      "learning_rate": 1.0152777777777778e-05,
      "loss": 0.0788,
      "step": 1420
    },
    {
      "epoch": 14.87,
      "learning_rate": 1.001388888888889e-05,
      "loss": 0.0769,
      "step": 1440
    },
    {
      "epoch": 15.08,
      "learning_rate": 9.875000000000001e-06,
      "loss": 0.0738,
      "step": 1460
    },
    {
      "epoch": 15.29,
      "learning_rate": 9.736111111111112e-06,
      "loss": 0.0747,
      "step": 1480
    },
    {
      "epoch": 15.49,
      "learning_rate": 9.597222222222223e-06,
      "loss": 0.0656,
      "step": 1500
    },
    {
      "epoch": 15.7,
      "learning_rate": 9.458333333333334e-06,
      "loss": 0.0735,
      "step": 1520
    },
    {
      "epoch": 15.91,
      "learning_rate": 9.319444444444445e-06,
      "loss": 0.0693,
      "step": 1540
    },
    {
      "epoch": 16.11,
      "learning_rate": 9.180555555555556e-06,
      "loss": 0.0646,
      "step": 1560
    },
    {
      "epoch": 16.32,
      "learning_rate": 9.041666666666667e-06,
      "loss": 0.0658,
      "step": 1580
    },
    {
      "epoch": 16.53,
      "learning_rate": 8.902777777777778e-06,
      "loss": 0.063,
      "step": 1600
    },
    {
      "epoch": 16.73,
      "learning_rate": 8.76388888888889e-06,
      "loss": 0.0652,
      "step": 1620
    },
    {
      "epoch": 16.94,
      "learning_rate": 8.625000000000001e-06,
      "loss": 0.0624,
      "step": 1640
    },
    {
      "epoch": 17.15,
      "learning_rate": 8.486111111111112e-06,
      "loss": 0.0597,
      "step": 1660
    },
    {
      "epoch": 17.35,
      "learning_rate": 8.347222222222223e-06,
      "loss": 0.0597,
      "step": 1680
    },
    {
      "epoch": 17.56,
      "learning_rate": 8.208333333333334e-06,
      "loss": 0.0549,
      "step": 1700
    },
    {
      "epoch": 17.77,
      "learning_rate": 8.069444444444445e-06,
      "loss": 0.0574,
      "step": 1720
    },
    {
      "epoch": 17.97,
      "learning_rate": 7.930555555555556e-06,
      "loss": 0.0575,
      "step": 1740
    },
    {
      "epoch": 18.18,
      "learning_rate": 7.791666666666667e-06,
      "loss": 0.0561,
      "step": 1760
    },
    {
      "epoch": 18.39,
      "learning_rate": 7.652777777777778e-06,
      "loss": 0.0541,
      "step": 1780
    },
    {
      "epoch": 18.59,
      "learning_rate": 7.51388888888889e-06,
      "loss": 0.0528,
      "step": 1800
    },
    {
      "epoch": 18.8,
      "learning_rate": 7.375000000000001e-06,
      "loss": 0.0525,
      "step": 1820
    },
    {
      "epoch": 19.01,
      "learning_rate": 7.2361111111111115e-06,
      "loss": 0.0503,
      "step": 1840
    },
    {
      "epoch": 19.21,
      "learning_rate": 7.097222222222223e-06,
      "loss": 0.0485,
      "step": 1860
    },
    {
      "epoch": 19.42,
      "learning_rate": 6.958333333333333e-06,
      "loss": 0.05,
      "step": 1880
    },
    {
      "epoch": 19.63,
      "learning_rate": 6.819444444444445e-06,
      "loss": 0.0501,
      "step": 1900
    },
    {
      "epoch": 19.83,
      "learning_rate": 6.680555555555556e-06,
      "loss": 0.0489,
      "step": 1920
    },
    {
      "epoch": 20.04,
      "learning_rate": 6.541666666666667e-06,
      "loss": 0.0472,
      "step": 1940
    },
    {
      "epoch": 20.25,
      "learning_rate": 6.402777777777778e-06,
      "loss": 0.0442,
      "step": 1960
    },
    {
      "epoch": 20.45,
      "learning_rate": 6.26388888888889e-06,
      "loss": 0.0498,
      "step": 1980
    },
    {
      "epoch": 20.66,
      "learning_rate": 6.125000000000001e-06,
      "loss": 0.0455,
      "step": 2000
    },
    {
      "epoch": 20.87,
      "learning_rate": 5.986111111111112e-06,
      "loss": 0.0443,
      "step": 2020
    },
    {
      "epoch": 21.07,
      "learning_rate": 5.847222222222223e-06,
      "loss": 0.0429,
      "step": 2040
    },
    {
      "epoch": 21.28,
      "learning_rate": 5.7083333333333335e-06,
      "loss": 0.0453,
      "step": 2060
    },
    {
      "epoch": 21.48,
      "learning_rate": 5.569444444444445e-06,
      "loss": 0.0412,
      "step": 2080
    },
    {
      "epoch": 21.69,
      "learning_rate": 5.430555555555555e-06,
      "loss": 0.0434,
      "step": 2100
    },
    {
      "epoch": 21.9,
      "learning_rate": 5.291666666666667e-06,
      "loss": 0.0403,
      "step": 2120
    },
    {
      "epoch": 22.1,
      "learning_rate": 5.152777777777778e-06,
      "loss": 0.0412,
      "step": 2140
    },
    {
      "epoch": 22.31,
      "learning_rate": 5.013888888888889e-06,
      "loss": 0.0408,
      "step": 2160
    },
    {
      "epoch": 22.52,
      "learning_rate": 4.875e-06,
      "loss": 0.0403,
      "step": 2180
    },
    {
      "epoch": 22.72,
      "learning_rate": 4.736111111111112e-06,
      "loss": 0.04,
      "step": 2200
    },
    {
      "epoch": 22.93,
      "learning_rate": 4.597222222222223e-06,
      "loss": 0.0396,
      "step": 2220
    },
    {
      "epoch": 23.14,
      "learning_rate": 4.4583333333333336e-06,
      "loss": 0.0396,
      "step": 2240
    },
    {
      "epoch": 23.34,
      "learning_rate": 4.319444444444445e-06,
      "loss": 0.038,
      "step": 2260
    },
    {
      "epoch": 23.55,
      "learning_rate": 4.180555555555556e-06,
      "loss": 0.0388,
      "step": 2280
    },
    {
      "epoch": 23.76,
      "learning_rate": 4.041666666666667e-06,
      "loss": 0.0388,
      "step": 2300
    },
    {
      "epoch": 23.96,
      "learning_rate": 3.902777777777778e-06,
      "loss": 0.0368,
      "step": 2320
    },
    {
      "epoch": 24.17,
      "learning_rate": 3.763888888888889e-06,
      "loss": 0.0357,
      "step": 2340
    },
    {
      "epoch": 24.38,
      "learning_rate": 3.625e-06,
      "loss": 0.0373,
      "step": 2360
    },
    {
      "epoch": 24.58,
      "learning_rate": 3.4861111111111114e-06,
      "loss": 0.0373,
      "step": 2380
    },
    {
      "epoch": 24.79,
      "learning_rate": 3.3472222222222223e-06,
      "loss": 0.0366,
      "step": 2400
    },
    {
      "epoch": 25.0,
      "learning_rate": 3.2083333333333337e-06,
      "loss": 0.0343,
      "step": 2420
    },
    {
      "epoch": 25.2,
      "learning_rate": 3.069444444444445e-06,
      "loss": 0.0365,
      "step": 2440
    },
    {
      "epoch": 25.41,
      "learning_rate": 2.930555555555556e-06,
      "loss": 0.0356,
      "step": 2460
    },
    {
      "epoch": 25.62,
      "learning_rate": 2.791666666666667e-06,
      "loss": 0.0359,
      "step": 2480
    },
    {
      "epoch": 25.82,
      "learning_rate": 2.652777777777778e-06,
      "loss": 0.0341,
      "step": 2500
    }
  ],
  "logging_steps": 20,
  "max_steps": 2880,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "total_flos": 7.921287813336269e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}