{
  "best_metric": 0.35982879996299744,
  "best_model_checkpoint": "./exper_batch_16_e4/checkpoint-2500",
  "epoch": 4.0,
  "global_step": 2556,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.02, "learning_rate": 0.0001992175273865415, "loss": 4.9483, "step": 10 },
    { "epoch": 0.03, "learning_rate": 0.00019843505477308294, "loss": 4.7934, "step": 20 },
    { "epoch": 0.05, "learning_rate": 0.00019765258215962445, "loss": 4.6354, "step": 30 },
    { "epoch": 0.06, "learning_rate": 0.0001968701095461659, "loss": 4.4364, "step": 40 },
    { "epoch": 0.08, "learning_rate": 0.00019608763693270738, "loss": 4.3212, "step": 50 },
    { "epoch": 0.09, "learning_rate": 0.00019530516431924883, "loss": 4.2134, "step": 60 },
    { "epoch": 0.11, "learning_rate": 0.0001945226917057903, "loss": 4.0868, "step": 70 },
    { "epoch": 0.13, "learning_rate": 0.00019374021909233179, "loss": 3.992, "step": 80 },
    { "epoch": 0.14, "learning_rate": 0.00019295774647887326, "loss": 3.9904, "step": 90 },
    { "epoch": 0.16, "learning_rate": 0.00019217527386541472, "loss": 3.7606, "step": 100 },
    { "epoch": 0.16, "eval_accuracy": 0.19892058596761758, "eval_loss": 3.783871650695801, "eval_runtime": 41.3855, "eval_samples_per_second": 62.679, "eval_steps_per_second": 7.853, "step": 100 },
    { "epoch": 0.17, "learning_rate": 0.0001913928012519562, "loss": 3.8002, "step": 110 },
    { "epoch": 0.19, "learning_rate": 0.00019061032863849765, "loss": 3.6512, "step": 120 },
    { "epoch": 0.2, "learning_rate": 0.00018982785602503915, "loss": 3.5586, "step": 130 },
    { "epoch": 0.22, "learning_rate": 0.0001890453834115806, "loss": 3.3856, "step": 140 },
    { "epoch": 0.23, "learning_rate": 0.00018826291079812208, "loss": 3.4324, "step": 150 },
    { "epoch": 0.25, "learning_rate": 0.00018748043818466354, "loss": 3.3126, "step": 160 },
    { "epoch": 0.27, "learning_rate": 0.00018669796557120501, "loss": 3.1905, "step": 170 },
    { "epoch": 0.28, "learning_rate": 0.0001859154929577465, "loss": 3.1417, "step": 180 },
    { "epoch": 0.3, "learning_rate": 0.00018513302034428797, "loss": 3.0441, "step": 190 },
    { "epoch": 0.31, "learning_rate": 0.00018435054773082942, "loss": 3.1072, "step": 200 },
    { "epoch": 0.31, "eval_accuracy": 0.3284502698535081, "eval_loss": 3.0251049995422363, "eval_runtime": 40.3565, "eval_samples_per_second": 64.277, "eval_steps_per_second": 8.053, "step": 200 },
    { "epoch": 0.33, "learning_rate": 0.0001835680751173709, "loss": 2.8646, "step": 210 },
    { "epoch": 0.34, "learning_rate": 0.00018278560250391235, "loss": 2.9163, "step": 220 },
    { "epoch": 0.36, "learning_rate": 0.00018200312989045386, "loss": 2.9385, "step": 230 },
    { "epoch": 0.38, "learning_rate": 0.0001812206572769953, "loss": 2.7414, "step": 240 },
    { "epoch": 0.39, "learning_rate": 0.0001804381846635368, "loss": 2.628, "step": 250 },
    { "epoch": 0.41, "learning_rate": 0.00017965571205007824, "loss": 2.7957, "step": 260 },
    { "epoch": 0.42, "learning_rate": 0.00017887323943661972, "loss": 2.6525, "step": 270 },
    { "epoch": 0.44, "learning_rate": 0.0001780907668231612, "loss": 2.7478, "step": 280 },
    { "epoch": 0.45, "learning_rate": 0.00017730829420970268, "loss": 2.4893, "step": 290 },
    { "epoch": 0.47, "learning_rate": 0.00017652582159624413, "loss": 2.4068, "step": 300 },
    { "epoch": 0.47, "eval_accuracy": 0.471858134155744, "eval_loss": 2.4379851818084717, "eval_runtime": 40.2783, "eval_samples_per_second": 64.402, "eval_steps_per_second": 8.069, "step": 300 },
    { "epoch": 0.49, "learning_rate": 0.0001757433489827856, "loss": 2.5014, "step": 310 },
    { "epoch": 0.5, "learning_rate": 0.0001749608763693271, "loss": 2.3865, "step": 320 },
    { "epoch": 0.52, "learning_rate": 0.00017417840375586857, "loss": 2.3867, "step": 330 },
    { "epoch": 0.53, "learning_rate": 0.00017339593114241002, "loss": 2.4403, "step": 340 },
    { "epoch": 0.55, "learning_rate": 0.0001726134585289515, "loss": 2.2411, "step": 350 },
    { "epoch": 0.56, "learning_rate": 0.00017183098591549295, "loss": 2.0845, "step": 360 },
    { "epoch": 0.58, "learning_rate": 0.00017104851330203443, "loss": 2.1195, "step": 370 },
    { "epoch": 0.59, "learning_rate": 0.0001702660406885759, "loss": 2.0963, "step": 380 },
    { "epoch": 0.61, "learning_rate": 0.00016948356807511739, "loss": 2.1239, "step": 390 },
    { "epoch": 0.63, "learning_rate": 0.00016870109546165884, "loss": 2.0881, "step": 400 },
    { "epoch": 0.63, "eval_accuracy": 0.5412490362374711, "eval_loss": 2.048940658569336, "eval_runtime": 40.084, "eval_samples_per_second": 64.714, "eval_steps_per_second": 8.108, "step": 400 },
    { "epoch": 0.64, "learning_rate": 0.00016791862284820032, "loss": 2.0617, "step": 410 },
    { "epoch": 0.66, "learning_rate": 0.0001671361502347418, "loss": 2.0724, "step": 420 },
    { "epoch": 0.67, "learning_rate": 0.00016635367762128327, "loss": 1.8695, "step": 430 },
    { "epoch": 0.69, "learning_rate": 0.00016557120500782473, "loss": 1.8601, "step": 440 },
    { "epoch": 0.7, "learning_rate": 0.0001647887323943662, "loss": 1.8267, "step": 450 },
    { "epoch": 0.72, "learning_rate": 0.00016400625978090766, "loss": 1.9212, "step": 460 },
    { "epoch": 0.74, "learning_rate": 0.00016322378716744916, "loss": 1.881, "step": 470 },
    { "epoch": 0.75, "learning_rate": 0.00016244131455399061, "loss": 1.8141, "step": 480 },
    { "epoch": 0.77, "learning_rate": 0.0001616588419405321, "loss": 1.8193, "step": 490 },
    { "epoch": 0.78, "learning_rate": 0.00016087636932707354, "loss": 1.6817, "step": 500 },
    { "epoch": 0.78, "eval_accuracy": 0.60254433307633, "eval_loss": 1.7968339920043945, "eval_runtime": 40.4492, "eval_samples_per_second": 64.13, "eval_steps_per_second": 8.035, "step": 500 },
    { "epoch": 0.8, "learning_rate": 0.00016009389671361502, "loss": 1.5543, "step": 510 },
    { "epoch": 0.81, "learning_rate": 0.0001593114241001565, "loss": 1.638, "step": 520 },
    { "epoch": 0.83, "learning_rate": 0.00015852895148669798, "loss": 1.6943, "step": 530 },
    { "epoch": 0.85, "learning_rate": 0.00015774647887323943, "loss": 1.5593, "step": 540 },
    { "epoch": 0.86, "learning_rate": 0.0001569640062597809, "loss": 1.5751, "step": 550 },
    { "epoch": 0.88, "learning_rate": 0.00015618153364632236, "loss": 1.4714, "step": 560 },
    { "epoch": 0.89, "learning_rate": 0.00015539906103286387, "loss": 1.3496, "step": 570 },
    { "epoch": 0.91, "learning_rate": 0.00015461658841940532, "loss": 1.458, "step": 580 },
    { "epoch": 0.92, "learning_rate": 0.0001538341158059468, "loss": 1.291, "step": 590 },
    { "epoch": 0.94, "learning_rate": 0.00015305164319248828, "loss": 1.342, "step": 600 },
    { "epoch": 0.94, "eval_accuracy": 0.6249036237471087, "eval_loss": 1.5044325590133667, "eval_runtime": 40.4134, "eval_samples_per_second": 64.187, "eval_steps_per_second": 8.042, "step": 600 },
    { "epoch": 0.95, "learning_rate": 0.00015226917057902973, "loss": 1.519, "step": 610 },
    { "epoch": 0.97, "learning_rate": 0.0001514866979655712, "loss": 1.4489, "step": 620 },
    { "epoch": 0.99, "learning_rate": 0.0001507042253521127, "loss": 1.5582, "step": 630 },
    { "epoch": 1.0, "learning_rate": 0.00014992175273865417, "loss": 1.1882, "step": 640 },
    { "epoch": 1.02, "learning_rate": 0.00014913928012519562, "loss": 1.0538, "step": 650 },
    { "epoch": 1.03, "learning_rate": 0.0001483568075117371, "loss": 1.0577, "step": 660 },
    { "epoch": 1.05, "learning_rate": 0.00014757433489827858, "loss": 1.0974, "step": 670 },
    { "epoch": 1.06, "learning_rate": 0.00014679186228482006, "loss": 0.9575, "step": 680 },
    { "epoch": 1.08, "learning_rate": 0.0001460093896713615, "loss": 1.0651, "step": 690 },
    { "epoch": 1.1, "learning_rate": 0.00014522691705790299, "loss": 0.9343, "step": 700 },
    { "epoch": 1.1, "eval_accuracy": 0.7131842713955281, "eval_loss": 1.1881427764892578, "eval_runtime": 40.2414, "eval_samples_per_second": 64.461, "eval_steps_per_second": 8.076, "step": 700 },
    { "epoch": 1.11, "learning_rate": 0.00014444444444444444, "loss": 0.8771, "step": 710 },
    { "epoch": 1.13, "learning_rate": 0.00014366197183098594, "loss": 0.936, "step": 720 },
    { "epoch": 1.14, "learning_rate": 0.0001428794992175274, "loss": 0.9483, "step": 730 },
    { "epoch": 1.16, "learning_rate": 0.00014209702660406887, "loss": 0.8231, "step": 740 },
    { "epoch": 1.17, "learning_rate": 0.00014131455399061033, "loss": 0.8502, "step": 750 },
    { "epoch": 1.19, "learning_rate": 0.0001405320813771518, "loss": 0.9962, "step": 760 },
    { "epoch": 1.21, "learning_rate": 0.00013974960876369328, "loss": 0.7861, "step": 770 },
    { "epoch": 1.22, "learning_rate": 0.00013896713615023476, "loss": 1.121, "step": 780 },
    { "epoch": 1.24, "learning_rate": 0.00013818466353677621, "loss": 0.8718, "step": 790 },
    { "epoch": 1.25, "learning_rate": 0.0001374021909233177, "loss": 0.9552, "step": 800 },
    { "epoch": 1.25, "eval_accuracy": 0.7224363916730917, "eval_loss": 1.1064492464065552, "eval_runtime": 40.4181, "eval_samples_per_second": 64.179, "eval_steps_per_second": 8.041, "step": 800 },
    { "epoch": 1.27, "learning_rate": 0.00013661971830985914, "loss": 0.999, "step": 810 },
    { "epoch": 1.28, "learning_rate": 0.00013583724569640065, "loss": 0.9067, "step": 820 },
    { "epoch": 1.3, "learning_rate": 0.0001350547730829421, "loss": 0.9772, "step": 830 },
    { "epoch": 1.31, "learning_rate": 0.00013427230046948358, "loss": 0.7953, "step": 840 },
    { "epoch": 1.33, "learning_rate": 0.00013348982785602503, "loss": 0.9132, "step": 850 },
    { "epoch": 1.35, "learning_rate": 0.0001327073552425665, "loss": 0.8452, "step": 860 },
    { "epoch": 1.36, "learning_rate": 0.000131924882629108, "loss": 0.8477, "step": 870 },
    { "epoch": 1.38, "learning_rate": 0.00013114241001564947, "loss": 0.7612, "step": 880 },
    { "epoch": 1.39, "learning_rate": 0.00013035993740219092, "loss": 0.8274, "step": 890 },
    { "epoch": 1.41, "learning_rate": 0.0001295774647887324, "loss": 0.7265, "step": 900 },
    { "epoch": 1.41, "eval_accuracy": 0.776792598303778, "eval_loss": 0.9189362525939941, "eval_runtime": 40.6855, "eval_samples_per_second": 63.757, "eval_steps_per_second": 7.988, "step": 900 },
    { "epoch": 1.42, "learning_rate": 0.00012879499217527385, "loss": 0.7843, "step": 910 },
    { "epoch": 1.44, "learning_rate": 0.00012801251956181536, "loss": 0.6628, "step": 920 },
    { "epoch": 1.46, "learning_rate": 0.0001272300469483568, "loss": 0.7944, "step": 930 },
    { "epoch": 1.47, "learning_rate": 0.0001264475743348983, "loss": 0.8215, "step": 940 },
    { "epoch": 1.49, "learning_rate": 0.00012566510172143974, "loss": 0.7672, "step": 950 },
    { "epoch": 1.5, "learning_rate": 0.00012488262910798122, "loss": 0.7514, "step": 960 },
    { "epoch": 1.52, "learning_rate": 0.0001241001564945227, "loss": 0.5692, "step": 970 },
    { "epoch": 1.53, "learning_rate": 0.00012331768388106418, "loss": 0.6487, "step": 980 },
    { "epoch": 1.55, "learning_rate": 0.00012253521126760563, "loss": 0.7988, "step": 990 },
    { "epoch": 1.56, "learning_rate": 0.00012175273865414711, "loss": 0.6732, "step": 1000 },
    { "epoch": 1.56, "eval_accuracy": 0.7606013878180417, "eval_loss": 0.9226632118225098, "eval_runtime": 40.3502, "eval_samples_per_second": 64.287, "eval_steps_per_second": 8.054, "step": 1000 },
    { "epoch": 1.58, "learning_rate": 0.00012097026604068857, "loss": 0.6441, "step": 1010 },
    { "epoch": 1.6, "learning_rate": 0.00012018779342723005, "loss": 0.6007, "step": 1020 },
    { "epoch": 1.61, "learning_rate": 0.00011940532081377152, "loss": 0.5589, "step": 1030 },
    { "epoch": 1.63, "learning_rate": 0.000118622848200313, "loss": 0.6297, "step": 1040 },
    { "epoch": 1.64, "learning_rate": 0.00011784037558685446, "loss": 0.6216, "step": 1050 },
    { "epoch": 1.66, "learning_rate": 0.00011705790297339594, "loss": 0.5161, "step": 1060 },
    { "epoch": 1.67, "learning_rate": 0.0001162754303599374, "loss": 0.5566, "step": 1070 },
    { "epoch": 1.69, "learning_rate": 0.00011549295774647888, "loss": 0.7403, "step": 1080 },
    { "epoch": 1.71, "learning_rate": 0.00011471048513302034, "loss": 0.6499, "step": 1090 },
    { "epoch": 1.72, "learning_rate": 0.00011392801251956183, "loss": 0.5587, "step": 1100 },
    { "epoch": 1.72, "eval_accuracy": 0.7902852737085582, "eval_loss": 0.7912017703056335, "eval_runtime": 40.6432, "eval_samples_per_second": 63.824, "eval_steps_per_second": 7.996, "step": 1100 },
    { "epoch": 1.74, "learning_rate": 0.00011314553990610328, "loss": 0.6908, "step": 1110 },
    { "epoch": 1.75, "learning_rate": 0.00011236306729264476, "loss": 0.4689, "step": 1120 },
    { "epoch": 1.77, "learning_rate": 0.00011158059467918622, "loss": 0.6043, "step": 1130 },
    { "epoch": 1.78, "learning_rate": 0.0001107981220657277, "loss": 0.507, "step": 1140 },
    { "epoch": 1.8, "learning_rate": 0.00011001564945226917, "loss": 0.4835, "step": 1150 },
    { "epoch": 1.82, "learning_rate": 0.00010923317683881065, "loss": 0.6085, "step": 1160 },
    { "epoch": 1.83, "learning_rate": 0.00010845070422535213, "loss": 0.6501, "step": 1170 },
    { "epoch": 1.85, "learning_rate": 0.00010766823161189359, "loss": 0.3569, "step": 1180 },
    { "epoch": 1.86, "learning_rate": 0.00010688575899843507, "loss": 0.531, "step": 1190 },
    { "epoch": 1.88, "learning_rate": 0.00010610328638497653, "loss": 0.6332, "step": 1200 },
    { "epoch": 1.88, "eval_accuracy": 0.7945258288357748, "eval_loss": 0.76058030128479, "eval_runtime": 40.0299, "eval_samples_per_second": 64.801, "eval_steps_per_second": 8.119, "step": 1200 },
    { "epoch": 1.89, "learning_rate": 0.00010532081377151801, "loss": 0.6965, "step": 1210 },
    { "epoch": 1.91, "learning_rate": 0.00010453834115805947, "loss": 0.598, "step": 1220 },
    { "epoch": 1.92, "learning_rate": 0.00010375586854460096, "loss": 0.719, "step": 1230 },
    { "epoch": 1.94, "learning_rate": 0.00010297339593114241, "loss": 0.5609, "step": 1240 },
    { "epoch": 1.96, "learning_rate": 0.0001021909233176839, "loss": 0.5989, "step": 1250 },
    { "epoch": 1.97, "learning_rate": 0.00010140845070422535, "loss": 0.5327, "step": 1260 },
    { "epoch": 1.99, "learning_rate": 0.00010062597809076683, "loss": 0.5099, "step": 1270 },
    { "epoch": 2.0, "learning_rate": 9.98435054773083e-05, "loss": 0.4551, "step": 1280 },
    { "epoch": 2.02, "learning_rate": 9.906103286384976e-05, "loss": 0.2834, "step": 1290 },
    { "epoch": 2.03, "learning_rate": 9.827856025039124e-05, "loss": 0.3188, "step": 1300 },
    { "epoch": 2.03, "eval_accuracy": 0.8288357748650732, "eval_loss": 0.6534976363182068, "eval_runtime": 40.5835, "eval_samples_per_second": 63.918, "eval_steps_per_second": 8.008, "step": 1300 },
    { "epoch": 2.05, "learning_rate": 9.749608763693271e-05, "loss": 0.2455, "step": 1310 },
    { "epoch": 2.07, "learning_rate": 9.671361502347419e-05, "loss": 0.3339, "step": 1320 },
    { "epoch": 2.08, "learning_rate": 9.593114241001565e-05, "loss": 0.2413, "step": 1330 },
    { "epoch": 2.1, "learning_rate": 9.514866979655712e-05, "loss": 0.3398, "step": 1340 },
    { "epoch": 2.11, "learning_rate": 9.43661971830986e-05, "loss": 0.2706, "step": 1350 },
    { "epoch": 2.13, "learning_rate": 9.358372456964006e-05, "loss": 0.3681, "step": 1360 },
    { "epoch": 2.14, "learning_rate": 9.280125195618154e-05, "loss": 0.2871, "step": 1370 },
    { "epoch": 2.16, "learning_rate": 9.2018779342723e-05, "loss": 0.2859, "step": 1380 },
    { "epoch": 2.18, "learning_rate": 9.123630672926447e-05, "loss": 0.1933, "step": 1390 },
    { "epoch": 2.19, "learning_rate": 9.045383411580595e-05, "loss": 0.3079, "step": 1400 },
    { "epoch": 2.19, "eval_accuracy": 0.8577486507324595, "eval_loss": 0.5686159133911133, "eval_runtime": 40.5767, "eval_samples_per_second": 63.928, "eval_steps_per_second": 8.01, "step": 1400 },
    { "epoch": 2.21, "learning_rate": 8.967136150234741e-05, "loss": 0.222, "step": 1410 },
    { "epoch": 2.22, "learning_rate": 8.888888888888889e-05, "loss": 0.326, "step": 1420 },
    { "epoch": 2.24, "learning_rate": 8.810641627543036e-05, "loss": 0.3696, "step": 1430 },
    { "epoch": 2.25, "learning_rate": 8.732394366197182e-05, "loss": 0.2816, "step": 1440 },
    { "epoch": 2.27, "learning_rate": 8.65414710485133e-05, "loss": 0.2642, "step": 1450 },
    { "epoch": 2.28, "learning_rate": 8.575899843505478e-05, "loss": 0.26, "step": 1460 },
    { "epoch": 2.3, "learning_rate": 8.497652582159625e-05, "loss": 0.2538, "step": 1470 },
    { "epoch": 2.32, "learning_rate": 8.419405320813773e-05, "loss": 0.305, "step": 1480 },
    { "epoch": 2.33, "learning_rate": 8.341158059467919e-05, "loss": 0.3137, "step": 1490 },
    { "epoch": 2.35, "learning_rate": 8.262910798122067e-05, "loss": 0.2518, "step": 1500 },
    { "epoch": 2.35, "eval_accuracy": 0.8577486507324595, "eval_loss": 0.5517102479934692, "eval_runtime": 40.6225, "eval_samples_per_second": 63.856, "eval_steps_per_second": 8.0, "step": 1500 },
    { "epoch": 2.36, "learning_rate": 8.184663536776213e-05, "loss": 0.2775, "step": 1510 },
    { "epoch": 2.38, "learning_rate": 8.106416275430361e-05, "loss": 0.2882, "step": 1520 },
    { "epoch": 2.39, "learning_rate": 8.028169014084508e-05, "loss": 0.2506, "step": 1530 },
    { "epoch": 2.41, "learning_rate": 7.949921752738654e-05, "loss": 0.2111, "step": 1540 },
    { "epoch": 2.43, "learning_rate": 7.871674491392802e-05, "loss": 0.2025, "step": 1550 },
    { "epoch": 2.44, "learning_rate": 7.793427230046949e-05, "loss": 0.2137, "step": 1560 },
    { "epoch": 2.46, "learning_rate": 7.715179968701097e-05, "loss": 0.1643, "step": 1570 },
    { "epoch": 2.47, "learning_rate": 7.636932707355243e-05, "loss": 0.1827, "step": 1580 },
    { "epoch": 2.49, "learning_rate": 7.55868544600939e-05, "loss": 0.1774, "step": 1590 },
    { "epoch": 2.5, "learning_rate": 7.480438184663538e-05, "loss": 0.2, "step": 1600 },
    { "epoch": 2.5, "eval_accuracy": 0.8631457208943716, "eval_loss": 0.5276668667793274, "eval_runtime": 40.6494, "eval_samples_per_second": 63.814, "eval_steps_per_second": 7.995, "step": 1600 },
    { "epoch": 2.52, "learning_rate": 7.402190923317684e-05, "loss": 0.1489, "step": 1610 },
    { "epoch": 2.54, "learning_rate": 7.323943661971832e-05, "loss": 0.1583, "step": 1620 },
    { "epoch": 2.55, "learning_rate": 7.245696400625979e-05, "loss": 0.194, "step": 1630 },
    { "epoch": 2.57, "learning_rate": 7.167449139280125e-05, "loss": 0.3158, "step": 1640 },
    { "epoch": 2.58, "learning_rate": 7.089201877934273e-05, "loss": 0.2098, "step": 1650 },
    { "epoch": 2.6, "learning_rate": 7.01095461658842e-05, "loss": 0.2711, "step": 1660 },
    { "epoch": 2.61, "learning_rate": 6.932707355242567e-05, "loss": 0.2274, "step": 1670 },
    { "epoch": 2.63, "learning_rate": 6.854460093896714e-05, "loss": 0.1626, "step": 1680 },
    { "epoch": 2.64, "learning_rate": 6.77621283255086e-05, "loss": 0.1899, "step": 1690 },
    { "epoch": 2.66, "learning_rate": 6.697965571205008e-05, "loss": 0.2032, "step": 1700 },
    { "epoch": 2.66, "eval_accuracy": 0.8700848111025443, "eval_loss": 0.4840567409992218, "eval_runtime": 40.6495, "eval_samples_per_second": 63.814, "eval_steps_per_second": 7.995, "step": 1700 },
    { "epoch": 2.68, "learning_rate": 6.619718309859155e-05, "loss": 0.1504, "step": 1710 },
    { "epoch": 2.69, "learning_rate": 6.541471048513303e-05, "loss": 0.2503, "step": 1720 },
    { "epoch": 2.71, "learning_rate": 6.463223787167449e-05, "loss": 0.2004, "step": 1730 },
    { "epoch": 2.72, "learning_rate": 6.384976525821597e-05, "loss": 0.2705, "step": 1740 },
    { "epoch": 2.74, "learning_rate": 6.306729264475744e-05, "loss": 0.1809, "step": 1750 },
    { "epoch": 2.75, "learning_rate": 6.22848200312989e-05, "loss": 0.2257, "step": 1760 },
    { "epoch": 2.77, "learning_rate": 6.150234741784038e-05, "loss": 0.1847, "step": 1770 },
    { "epoch": 2.79, "learning_rate": 6.071987480438185e-05, "loss": 0.1488, "step": 1780 },
    { "epoch": 2.8, "learning_rate": 5.993740219092332e-05, "loss": 0.2469, "step": 1790 },
    { "epoch": 2.82, "learning_rate": 5.915492957746479e-05, "loss": 0.1555, "step": 1800 },
    { "epoch": 2.82, "eval_accuracy": 0.8793369313801079, "eval_loss": 0.4578264057636261, "eval_runtime": 40.536, "eval_samples_per_second": 63.992, "eval_steps_per_second": 8.018, "step": 1800 },
    { "epoch": 2.83, "learning_rate": 5.837245696400626e-05, "loss": 0.2625, "step": 1810 },
    { "epoch": 2.85, "learning_rate": 5.7589984350547735e-05, "loss": 0.1896, "step": 1820 },
    { "epoch": 2.86, "learning_rate": 5.68075117370892e-05, "loss": 0.1443, "step": 1830 },
    { "epoch": 2.88, "learning_rate": 5.602503912363067e-05, "loss": 0.205, "step": 1840 },
    { "epoch": 2.9, "learning_rate": 5.5242566510172144e-05, "loss": 0.135, "step": 1850 },
    { "epoch": 2.91, "learning_rate": 5.4460093896713616e-05, "loss": 0.1833, "step": 1860 },
    { "epoch": 2.93, "learning_rate": 5.367762128325509e-05, "loss": 0.1534, "step": 1870 },
    { "epoch": 2.94, "learning_rate": 5.2895148669796554e-05, "loss": 0.2702, "step": 1880 },
    { "epoch": 2.96, "learning_rate": 5.2112676056338026e-05, "loss": 0.2304, "step": 1890 },
    { "epoch": 2.97, "learning_rate": 5.13302034428795e-05, "loss": 0.145, "step": 1900 },
    { "epoch": 2.97, "eval_accuracy": 0.8754818812644565, "eval_loss": 0.4465731978416443, "eval_runtime": 39.9438, "eval_samples_per_second": 64.941, "eval_steps_per_second": 8.136, "step": 1900 },
    { "epoch": 2.99, "learning_rate": 5.054773082942097e-05, "loss": 0.1804, "step": 1910 },
    { "epoch": 3.0, "learning_rate": 4.976525821596245e-05, "loss": 0.1421, "step": 1920 },
    { "epoch": 3.02, "learning_rate": 4.8982785602503914e-05, "loss": 0.0721, "step": 1930 },
    { "epoch": 3.04, "learning_rate": 4.8200312989045386e-05, "loss": 0.0716, "step": 1940 },
    { "epoch": 3.05, "learning_rate": 4.741784037558686e-05, "loss": 0.1382, "step": 1950 },
    { "epoch": 3.07, "learning_rate": 4.663536776212833e-05, "loss": 0.103, "step": 1960 },
    { "epoch": 3.08, "learning_rate": 4.58528951486698e-05, "loss": 0.0979, "step": 1970 },
    { "epoch": 3.1, "learning_rate": 4.507042253521127e-05, "loss": 0.0934, "step": 1980 },
    { "epoch": 3.11, "learning_rate": 4.428794992175274e-05, "loss": 0.0561, "step": 1990 },
    { "epoch": 3.13, "learning_rate": 4.350547730829421e-05, "loss": 0.0985, "step": 2000 },
    { "epoch": 3.13, "eval_accuracy": 0.8866615265998458, "eval_loss": 0.4248804450035095, "eval_runtime": 39.986, "eval_samples_per_second": 64.873, "eval_steps_per_second": 8.128, "step": 2000 },
    { "epoch": 3.15, "learning_rate": 4.2723004694835684e-05, "loss": 0.0915, "step": 2010 },
    { "epoch": 3.16, "learning_rate": 4.1940532081377156e-05, "loss": 0.1015, "step": 2020 },
    { "epoch": 3.18, "learning_rate": 4.115805946791863e-05, "loss": 0.1064, "step": 2030 },
    { "epoch": 3.19, "learning_rate": 4.037558685446009e-05, "loss": 0.0545, "step": 2040 },
    { "epoch": 3.21, "learning_rate": 3.9593114241001565e-05, "loss": 0.0653, "step": 2050 },
    { "epoch": 3.22, "learning_rate": 3.881064162754304e-05, "loss": 0.0589, "step": 2060 },
    { "epoch": 3.24, "learning_rate": 3.802816901408451e-05, "loss": 0.073, "step": 2070 },
    { "epoch": 3.26, "learning_rate": 3.724569640062598e-05, "loss": 0.0783, "step": 2080 },
    { "epoch": 3.27, "learning_rate": 3.646322378716745e-05, "loss": 0.0746, "step": 2090 },
    { "epoch": 3.29, "learning_rate": 3.568075117370892e-05, "loss": 0.0955, "step": 2100 },
    { "epoch": 3.29, "eval_accuracy": 0.8932151117964534, "eval_loss": 0.3977428078651428, "eval_runtime": 40.7031, "eval_samples_per_second": 63.73, "eval_steps_per_second": 7.985, "step": 2100 },
    { "epoch": 3.3, "learning_rate": 3.489827856025039e-05, "loss": 0.0682, "step": 2110 },
    { "epoch": 3.32, "learning_rate": 3.411580594679186e-05, "loss": 0.05, "step": 2120 },
    { "epoch": 3.33, "learning_rate": 3.3333333333333335e-05, "loss": 0.0565, "step": 2130 },
    { "epoch": 3.35, "learning_rate": 3.255086071987481e-05, "loss": 0.1286, "step": 2140 },
    { "epoch": 3.36, "learning_rate": 3.176838810641627e-05, "loss": 0.0797, "step": 2150 },
    { "epoch": 3.38, "learning_rate": 3.0985915492957744e-05, "loss": 0.0585, "step": 2160 },
    { "epoch": 3.4, "learning_rate": 3.0203442879499216e-05, "loss": 0.0914, "step": 2170 },
    { "epoch": 3.41, "learning_rate": 2.9420970266040692e-05, "loss": 0.1152, "step": 2180 },
    { "epoch": 3.43, "learning_rate": 2.8638497652582164e-05, "loss": 0.035, "step": 2190 },
    { "epoch": 3.44, "learning_rate": 2.7856025039123636e-05, "loss": 0.0438, "step": 2200 },
    { "epoch": 3.44, "eval_accuracy": 0.9036237471087124, "eval_loss": 0.37847575545310974, "eval_runtime": 40.8463, "eval_samples_per_second": 63.506, "eval_steps_per_second": 7.957, "step": 2200 },
    { "epoch": 3.46, "learning_rate": 2.7073552425665105e-05, "loss": 0.0348, "step": 2210 },
    { "epoch": 3.47, "learning_rate": 2.6291079812206577e-05, "loss": 0.0641, "step": 2220 },
    { "epoch": 3.49, "learning_rate": 2.5508607198748045e-05, "loss": 0.062, "step": 2230 },
    { "epoch": 3.51, "learning_rate": 2.4726134585289514e-05, "loss": 0.0845, "step": 2240 },
    { "epoch": 3.52, "learning_rate": 2.3943661971830986e-05, "loss": 0.05, "step": 2250 },
    { "epoch": 3.54, "learning_rate": 2.3161189358372458e-05, "loss": 0.0964, "step": 2260 },
    { "epoch": 3.55, "learning_rate": 2.237871674491393e-05, "loss": 0.0641, "step": 2270 },
    { "epoch": 3.57, "learning_rate": 2.1596244131455402e-05, "loss": 0.0484, "step": 2280 },
    { "epoch": 3.58, "learning_rate": 2.081377151799687e-05, "loss": 0.0393, "step": 2290 },
    { "epoch": 3.6, "learning_rate": 2.0031298904538343e-05, "loss": 0.0589, "step": 2300 },
    { "epoch": 3.6, "eval_accuracy": 0.9016962220508866, "eval_loss": 0.37170541286468506, "eval_runtime": 40.4294, "eval_samples_per_second": 64.161, "eval_steps_per_second": 8.039, "step": 2300 },
    { "epoch": 3.62, "learning_rate": 1.9248826291079812e-05, "loss": 0.0749, "step": 2310 },
    { "epoch": 3.63, "learning_rate": 1.8466353677621284e-05, "loss": 0.0486, "step": 2320 },
    { "epoch": 3.65, "learning_rate": 1.7683881064162756e-05, "loss": 0.0492, "step": 2330 },
    { "epoch": 3.66, "learning_rate": 1.6901408450704224e-05, "loss": 0.0822, "step": 2340 },
    { "epoch": 3.68, "learning_rate": 1.6118935837245697e-05, "loss": 0.0316, "step": 2350 },
    { "epoch": 3.69, "learning_rate": 1.533646322378717e-05, "loss": 0.0388, "step": 2360 },
    { "epoch": 3.71, "learning_rate": 1.4553990610328639e-05, "loss": 0.033, "step": 2370 },
    { "epoch": 3.72, "learning_rate": 1.377151799687011e-05, "loss": 0.0351, "step": 2380 },
    { "epoch": 3.74, "learning_rate": 1.298904538341158e-05, "loss": 0.0724, "step": 2390 },
    { "epoch": 3.76, "learning_rate": 1.2206572769953052e-05, "loss": 0.0709, "step": 2400 },
    { "epoch": 3.76, "eval_accuracy": 0.905165767154973, "eval_loss": 0.3609260618686676, "eval_runtime": 40.4534, "eval_samples_per_second": 64.123, "eval_steps_per_second": 8.034, "step": 2400 },
    { "epoch": 3.77, "learning_rate": 1.1424100156494522e-05, "loss": 0.0492, "step": 2410 },
    { "epoch": 3.79, "learning_rate": 1.0641627543035994e-05, "loss": 0.0414, "step": 2420 },
    { "epoch": 3.8, "learning_rate": 9.859154929577465e-06, "loss": 0.0646, "step": 2430 },
    { "epoch": 3.82, "learning_rate": 9.076682316118937e-06, "loss": 0.0489, "step": 2440 },
    { "epoch": 3.83, "learning_rate": 8.294209702660407e-06, "loss": 0.0339, "step": 2450 },
    { "epoch": 3.85, "learning_rate": 7.511737089201878e-06, "loss": 0.0662, "step": 2460 },
    { "epoch": 3.87, "learning_rate": 6.7292644757433494e-06, "loss": 0.0369, "step": 2470 },
    { "epoch": 3.88, "learning_rate": 5.94679186228482e-06, "loss": 0.1006, "step": 2480 },
    { "epoch": 3.9, "learning_rate": 5.164319248826292e-06, "loss": 0.0295, "step": 2490 },
    { "epoch": 3.91, "learning_rate": 4.381846635367762e-06, "loss": 0.0706, "step": 2500 },
    { "epoch": 3.91, "eval_accuracy": 0.9059367771781033, "eval_loss": 0.35982879996299744, "eval_runtime": 40.5931, "eval_samples_per_second": 63.903, "eval_steps_per_second": 8.006, "step": 2500 },
    { "epoch": 3.93, "learning_rate": 3.5993740219092334e-06, "loss": 0.0756, "step": 2510 },
    { "epoch": 3.94, "learning_rate": 2.8169014084507042e-06, "loss": 0.0523, "step": 2520 },
    { "epoch": 3.96, "learning_rate": 2.0344287949921754e-06, "loss": 0.0368, "step": 2530 },
    { "epoch": 3.97, "learning_rate": 1.2519561815336464e-06, "loss": 0.0417, "step": 2540 },
    { "epoch": 3.99, "learning_rate": 4.694835680751174e-07, "loss": 0.0598, "step": 2550 },
    { "epoch": 4.0, "step": 2556, "total_flos": 3.1689424899978854e+18, "train_loss": 0.9094190793184794, "train_runtime": 2613.3405, "train_samples_per_second": 15.628, "train_steps_per_second": 0.978 }
  ],
  "max_steps": 2556,
  "num_train_epochs": 4,
  "total_flos": 3.1689424899978854e+18,
  "trial_name": null,
  "trial_params": null
}