{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 11.0, |
|
"global_step": 12914, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9914821124361162e-05, |
|
"loss": 4.8491, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_loss": 4.696099281311035, |
|
"eval_runtime": 907.3534, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.982964224872232e-05, |
|
"loss": 4.88, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"eval_loss": 4.71148681640625, |
|
"eval_runtime": 901.0601, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9744463373083477e-05, |
|
"loss": 4.6409, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_loss": 4.671314239501953, |
|
"eval_runtime": 906.18, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.9659284497444635e-05, |
|
"loss": 4.7222, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"eval_loss": 4.637036323547363, |
|
"eval_runtime": 903.4161, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.9574105621805795e-05, |
|
"loss": 4.6357, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"eval_loss": 4.6376471519470215, |
|
"eval_runtime": 912.9746, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.9488926746166953e-05, |
|
"loss": 4.6993, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"eval_loss": 4.662974834442139, |
|
"eval_runtime": 908.9893, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.940374787052811e-05, |
|
"loss": 4.7104, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"eval_loss": 4.587018966674805, |
|
"eval_runtime": 913.6781, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.04, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.9318568994889268e-05, |
|
"loss": 4.5864, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"eval_loss": 4.565457344055176, |
|
"eval_runtime": 900.1349, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.923339011925043e-05, |
|
"loss": 4.6003, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"eval_loss": 4.560632705688477, |
|
"eval_runtime": 913.1928, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.9148211243611586e-05, |
|
"loss": 4.6166, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"eval_loss": 4.5305914878845215, |
|
"eval_runtime": 898.0505, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"learning_rate": 1.9063032367972743e-05, |
|
"loss": 4.6019, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"eval_loss": 4.547730922698975, |
|
"eval_runtime": 916.6743, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"learning_rate": 1.89778534923339e-05, |
|
"loss": 4.4833, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.02, |
|
"eval_loss": 4.527022838592529, |
|
"eval_runtime": 905.1447, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"learning_rate": 1.889267461669506e-05, |
|
"loss": 4.4664, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.11, |
|
"eval_loss": 4.533600807189941, |
|
"eval_runtime": 900.1154, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"learning_rate": 1.880749574105622e-05, |
|
"loss": 4.4533, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.19, |
|
"eval_loss": 4.512392044067383, |
|
"eval_runtime": 897.9913, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"learning_rate": 1.872231686541738e-05, |
|
"loss": 4.2969, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"eval_loss": 4.5288238525390625, |
|
"eval_runtime": 902.8396, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 1.8637137989778534e-05, |
|
"loss": 4.4203, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"eval_loss": 4.510149002075195, |
|
"eval_runtime": 896.5776, |
|
"eval_samples_per_second": 0.327, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 1.8551959114139694e-05, |
|
"loss": 4.4828, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"eval_loss": 4.4747633934021, |
|
"eval_runtime": 900.0595, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"learning_rate": 1.8466780238500855e-05, |
|
"loss": 4.5045, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 1.53, |
|
"eval_loss": 4.477636337280273, |
|
"eval_runtime": 930.3322, |
|
"eval_samples_per_second": 0.315, |
|
"eval_steps_per_second": 0.04, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"learning_rate": 1.8381601362862013e-05, |
|
"loss": 4.2977, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 1.62, |
|
"eval_loss": 4.475375175476074, |
|
"eval_runtime": 897.0451, |
|
"eval_samples_per_second": 0.327, |
|
"eval_steps_per_second": 0.041, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"learning_rate": 1.829642248722317e-05, |
|
"loss": 4.4022, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 1.7, |
|
"eval_loss": 4.451622486114502, |
|
"eval_runtime": 924.228, |
|
"eval_samples_per_second": 0.317, |
|
"eval_steps_per_second": 0.04, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"learning_rate": 1.8211243611584328e-05, |
|
"loss": 4.3429, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 1.79, |
|
"eval_loss": 4.43123722076416, |
|
"eval_runtime": 903.4516, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 1.812606473594549e-05, |
|
"loss": 4.3748, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"eval_loss": 4.475191116333008, |
|
"eval_runtime": 901.3458, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 1.8040885860306646e-05, |
|
"loss": 4.343, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"eval_loss": 4.455069541931152, |
|
"eval_runtime": 919.5227, |
|
"eval_samples_per_second": 0.319, |
|
"eval_steps_per_second": 0.04, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"learning_rate": 1.7955706984667803e-05, |
|
"loss": 4.1796, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"eval_loss": 4.424516201019287, |
|
"eval_runtime": 912.0297, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"learning_rate": 1.787052810902896e-05, |
|
"loss": 4.2325, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 2.13, |
|
"eval_loss": 4.425054550170898, |
|
"eval_runtime": 900.4656, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"learning_rate": 1.778534923339012e-05, |
|
"loss": 4.2419, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 2.21, |
|
"eval_loss": 4.426344871520996, |
|
"eval_runtime": 907.7136, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"learning_rate": 1.770017035775128e-05, |
|
"loss": 4.2556, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 2.3, |
|
"eval_loss": 4.392487525939941, |
|
"eval_runtime": 904.6208, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"learning_rate": 1.761499148211244e-05, |
|
"loss": 4.2501, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 2.39, |
|
"eval_loss": 4.414584636688232, |
|
"eval_runtime": 904.1877, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 1.7529812606473594e-05, |
|
"loss": 4.2199, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 2.47, |
|
"eval_loss": 4.391025543212891, |
|
"eval_runtime": 904.8782, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 1.7444633730834754e-05, |
|
"loss": 4.1842, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"eval_loss": 4.3928704261779785, |
|
"eval_runtime": 911.7639, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 1.7359454855195912e-05, |
|
"loss": 4.1789, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"eval_loss": 4.373032569885254, |
|
"eval_runtime": 903.6693, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 1.7274275979557073e-05, |
|
"loss": 4.1993, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"eval_loss": 4.401707172393799, |
|
"eval_runtime": 906.6931, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 1.718909710391823e-05, |
|
"loss": 4.1348, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"eval_loss": 4.366961479187012, |
|
"eval_runtime": 909.5878, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 1.7103918228279387e-05, |
|
"loss": 4.196, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"eval_loss": 4.354998588562012, |
|
"eval_runtime": 914.5578, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"learning_rate": 1.7018739352640548e-05, |
|
"loss": 4.1906, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 2.98, |
|
"eval_loss": 4.349732875823975, |
|
"eval_runtime": 898.3438, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"learning_rate": 1.6933560477001706e-05, |
|
"loss": 4.0583, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 3.07, |
|
"eval_loss": 4.3627471923828125, |
|
"eval_runtime": 897.909, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"learning_rate": 1.6848381601362863e-05, |
|
"loss": 4.1298, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 3.15, |
|
"eval_loss": 4.362400531768799, |
|
"eval_runtime": 912.2819, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"learning_rate": 1.676320272572402e-05, |
|
"loss": 4.0244, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 3.24, |
|
"eval_loss": 4.338736534118652, |
|
"eval_runtime": 896.0396, |
|
"eval_samples_per_second": 0.327, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"learning_rate": 1.667802385008518e-05, |
|
"loss": 4.0549, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 3.32, |
|
"eval_loss": 4.341971397399902, |
|
"eval_runtime": 901.737, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 3900 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"learning_rate": 1.659284497444634e-05, |
|
"loss": 4.0006, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 3.41, |
|
"eval_loss": 4.326377868652344, |
|
"eval_runtime": 898.1346, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"learning_rate": 1.65076660988075e-05, |
|
"loss": 4.0435, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 3.49, |
|
"eval_loss": 4.331975936889648, |
|
"eval_runtime": 901.1995, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4100 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"learning_rate": 1.6422487223168653e-05, |
|
"loss": 4.0948, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 3.58, |
|
"eval_loss": 4.32388973236084, |
|
"eval_runtime": 904.6488, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4200 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"learning_rate": 1.6337308347529814e-05, |
|
"loss": 3.933, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 3.66, |
|
"eval_loss": 4.314174175262451, |
|
"eval_runtime": 902.3829, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4300 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"learning_rate": 1.625212947189097e-05, |
|
"loss": 3.9773, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 3.75, |
|
"eval_loss": 4.353682041168213, |
|
"eval_runtime": 901.0754, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 1.6166950596252132e-05, |
|
"loss": 4.1143, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"eval_loss": 4.3104963302612305, |
|
"eval_runtime": 903.7164, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"learning_rate": 1.608177172061329e-05, |
|
"loss": 3.961, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 3.92, |
|
"eval_loss": 4.292612552642822, |
|
"eval_runtime": 900.6329, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4600 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 1.5996592844974447e-05, |
|
"loss": 4.0047, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"eval_loss": 4.3063883781433105, |
|
"eval_runtime": 900.2789, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4700 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 1.5911413969335605e-05, |
|
"loss": 3.9058, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"eval_loss": 4.303318023681641, |
|
"eval_runtime": 906.0598, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"learning_rate": 1.5826235093696765e-05, |
|
"loss": 3.7936, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 4.17, |
|
"eval_loss": 4.301506996154785, |
|
"eval_runtime": 906.1238, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 4900 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"learning_rate": 1.5741056218057923e-05, |
|
"loss": 3.8733, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 4.26, |
|
"eval_loss": 4.344919681549072, |
|
"eval_runtime": 916.0111, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"learning_rate": 1.565587734241908e-05, |
|
"loss": 3.8625, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 4.34, |
|
"eval_loss": 4.274160385131836, |
|
"eval_runtime": 906.5215, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5100 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"learning_rate": 1.557069846678024e-05, |
|
"loss": 4.0015, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 4.43, |
|
"eval_loss": 4.2957258224487305, |
|
"eval_runtime": 920.9763, |
|
"eval_samples_per_second": 0.318, |
|
"eval_steps_per_second": 0.04, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"learning_rate": 1.54855195911414e-05, |
|
"loss": 4.0173, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 4.51, |
|
"eval_loss": 4.30204439163208, |
|
"eval_runtime": 900.2302, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5300 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"learning_rate": 1.5400340715502556e-05, |
|
"loss": 3.8079, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 4.6, |
|
"eval_loss": 4.274052619934082, |
|
"eval_runtime": 902.0347, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5400 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"learning_rate": 1.5315161839863713e-05, |
|
"loss": 4.0081, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 4.68, |
|
"eval_loss": 4.2950968742370605, |
|
"eval_runtime": 910.5366, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 1.5229982964224874e-05, |
|
"loss": 3.8823, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"eval_loss": 4.277224540710449, |
|
"eval_runtime": 901.0907, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 1.5144804088586031e-05, |
|
"loss": 3.9056, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"eval_loss": 4.272303581237793, |
|
"eval_runtime": 911.2022, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5700 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"learning_rate": 1.505962521294719e-05, |
|
"loss": 3.813, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 4.94, |
|
"eval_loss": 4.275490760803223, |
|
"eval_runtime": 913.2487, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 5800 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"learning_rate": 1.4974446337308348e-05, |
|
"loss": 3.9263, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 5.03, |
|
"eval_loss": 4.2567267417907715, |
|
"eval_runtime": 922.3278, |
|
"eval_samples_per_second": 0.318, |
|
"eval_steps_per_second": 0.04, |
|
"step": 5900 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"learning_rate": 1.4889267461669507e-05, |
|
"loss": 3.7405, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 5.11, |
|
"eval_loss": 4.279594421386719, |
|
"eval_runtime": 902.112, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"learning_rate": 1.4804088586030664e-05, |
|
"loss": 3.8432, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 5.2, |
|
"eval_loss": 4.277362823486328, |
|
"eval_runtime": 909.7644, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6100 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"learning_rate": 1.4718909710391824e-05, |
|
"loss": 3.797, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 5.28, |
|
"eval_loss": 4.2573161125183105, |
|
"eval_runtime": 900.2164, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6200 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"learning_rate": 1.4633730834752981e-05, |
|
"loss": 3.7348, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 5.37, |
|
"eval_loss": 4.261054515838623, |
|
"eval_runtime": 899.5894, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6300 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 1.4548551959114142e-05, |
|
"loss": 3.7363, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"eval_loss": 4.2909698486328125, |
|
"eval_runtime": 914.0387, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.04, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"learning_rate": 1.44633730834753e-05, |
|
"loss": 3.8448, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 5.54, |
|
"eval_loss": 4.257021903991699, |
|
"eval_runtime": 906.6331, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"learning_rate": 1.4378194207836458e-05, |
|
"loss": 3.8023, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 5.62, |
|
"eval_loss": 4.263571262359619, |
|
"eval_runtime": 903.3527, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6600 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"learning_rate": 1.4293015332197616e-05, |
|
"loss": 3.8202, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 5.71, |
|
"eval_loss": 4.226749897003174, |
|
"eval_runtime": 913.4737, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6700 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 1.4207836456558775e-05, |
|
"loss": 3.731, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"eval_loss": 4.232919216156006, |
|
"eval_runtime": 905.4134, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 1.4122657580919934e-05, |
|
"loss": 3.7507, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"eval_loss": 4.2412004470825195, |
|
"eval_runtime": 917.4159, |
|
"eval_samples_per_second": 0.319, |
|
"eval_steps_per_second": 0.04, |
|
"step": 6900 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"learning_rate": 1.4037478705281091e-05, |
|
"loss": 3.8812, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 5.96, |
|
"eval_loss": 4.239901542663574, |
|
"eval_runtime": 904.0296, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"learning_rate": 1.395229982964225e-05, |
|
"loss": 3.6882, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 6.05, |
|
"eval_loss": 4.252227306365967, |
|
"eval_runtime": 909.685, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 7100 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"learning_rate": 1.3867120954003408e-05, |
|
"loss": 3.6485, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 6.13, |
|
"eval_loss": 4.240085601806641, |
|
"eval_runtime": 917.8845, |
|
"eval_samples_per_second": 0.319, |
|
"eval_steps_per_second": 0.04, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"learning_rate": 1.3781942078364567e-05, |
|
"loss": 3.6561, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 6.22, |
|
"eval_loss": 4.261867046356201, |
|
"eval_runtime": 912.6991, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 7300 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 1.3696763202725724e-05, |
|
"loss": 3.6851, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"eval_loss": 4.274002552032471, |
|
"eval_runtime": 911.5799, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 7400 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"learning_rate": 1.3611584327086883e-05, |
|
"loss": 3.7842, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 6.39, |
|
"eval_loss": 4.243954658508301, |
|
"eval_runtime": 906.9325, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"learning_rate": 1.352640545144804e-05, |
|
"loss": 3.6484, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 6.47, |
|
"eval_loss": 4.238761901855469, |
|
"eval_runtime": 910.5622, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"learning_rate": 1.3441226575809202e-05, |
|
"loss": 3.7509, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 6.56, |
|
"eval_loss": 4.240095138549805, |
|
"eval_runtime": 935.7714, |
|
"eval_samples_per_second": 0.313, |
|
"eval_steps_per_second": 0.04, |
|
"step": 7700 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 1.3356047700170357e-05, |
|
"loss": 3.7597, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"eval_loss": 4.256958484649658, |
|
"eval_runtime": 915.2208, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 7800 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 1.3270868824531518e-05, |
|
"loss": 3.7253, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"eval_loss": 4.291894435882568, |
|
"eval_runtime": 915.8862, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 7900 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"learning_rate": 1.3185689948892676e-05, |
|
"loss": 3.7335, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 6.81, |
|
"eval_loss": 4.206986427307129, |
|
"eval_runtime": 916.8897, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"learning_rate": 1.3100511073253835e-05, |
|
"loss": 3.6213, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 6.9, |
|
"eval_loss": 4.203628063201904, |
|
"eval_runtime": 910.5741, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8100 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"learning_rate": 1.3015332197614992e-05, |
|
"loss": 3.588, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 6.98, |
|
"eval_loss": 4.219061851501465, |
|
"eval_runtime": 910.3103, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8200 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"learning_rate": 1.2930153321976151e-05, |
|
"loss": 3.6381, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 7.07, |
|
"eval_loss": 4.232492923736572, |
|
"eval_runtime": 903.8691, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8300 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"learning_rate": 1.284497444633731e-05, |
|
"loss": 3.6023, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 7.16, |
|
"eval_loss": 4.247186183929443, |
|
"eval_runtime": 910.7714, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"learning_rate": 1.2759795570698468e-05, |
|
"loss": 3.5601, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 7.24, |
|
"eval_loss": 4.2428107261657715, |
|
"eval_runtime": 926.441, |
|
"eval_samples_per_second": 0.316, |
|
"eval_steps_per_second": 0.04, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 1.2674616695059627e-05, |
|
"loss": 3.6893, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"eval_loss": 4.235583305358887, |
|
"eval_runtime": 905.7471, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8600 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"learning_rate": 1.2589437819420784e-05, |
|
"loss": 3.4977, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 7.41, |
|
"eval_loss": 4.210697650909424, |
|
"eval_runtime": 909.4116, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8700 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"learning_rate": 1.2504258943781943e-05, |
|
"loss": 3.5489, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"eval_loss": 4.2333455085754395, |
|
"eval_runtime": 899.4646, |
|
"eval_samples_per_second": 0.326, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"learning_rate": 1.24190800681431e-05, |
|
"loss": 3.5786, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 7.58, |
|
"eval_loss": 4.2256317138671875, |
|
"eval_runtime": 907.6718, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 8900 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 1.2333901192504261e-05, |
|
"loss": 3.5991, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"eval_loss": 4.206646919250488, |
|
"eval_runtime": 928.2897, |
|
"eval_samples_per_second": 0.316, |
|
"eval_steps_per_second": 0.04, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"learning_rate": 1.2248722316865417e-05, |
|
"loss": 3.6571, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 7.75, |
|
"eval_loss": 4.213482856750488, |
|
"eval_runtime": 913.2222, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 9100 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"learning_rate": 1.2163543441226578e-05, |
|
"loss": 3.5465, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 7.84, |
|
"eval_loss": 4.197275638580322, |
|
"eval_runtime": 906.8727, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"learning_rate": 1.2078364565587735e-05, |
|
"loss": 3.6476, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 7.92, |
|
"eval_loss": 4.191103935241699, |
|
"eval_runtime": 910.3991, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 9300 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"learning_rate": 1.1993185689948894e-05, |
|
"loss": 3.6669, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 8.01, |
|
"eval_loss": 4.2001953125, |
|
"eval_runtime": 908.1025, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 9400 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 1.1908006814310052e-05, |
|
"loss": 3.4399, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"eval_loss": 4.230240345001221, |
|
"eval_runtime": 914.9264, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"learning_rate": 1.1822827938671211e-05, |
|
"loss": 3.5381, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"eval_loss": 4.2084150314331055, |
|
"eval_runtime": 928.1244, |
|
"eval_samples_per_second": 0.316, |
|
"eval_steps_per_second": 0.04, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 8.26, |
|
"learning_rate": 1.1737649063032368e-05, |
|
"loss": 3.5008, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 8.26, |
|
"eval_loss": 4.216128826141357, |
|
"eval_runtime": 913.9443, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.04, |
|
"step": 9700 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"learning_rate": 1.1652470187393527e-05, |
|
"loss": 3.6199, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 8.35, |
|
"eval_loss": 4.218649387359619, |
|
"eval_runtime": 916.6151, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 9800 |
|
}, |
|
{ |
|
"epoch": 8.43, |
|
"learning_rate": 1.1567291311754685e-05, |
|
"loss": 3.5997, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 8.43, |
|
"eval_loss": 4.269123554229736, |
|
"eval_runtime": 902.9645, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 9900 |
|
}, |
|
{ |
|
"epoch": 8.52, |
|
"learning_rate": 1.1482112436115844e-05, |
|
"loss": 3.5075, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 8.52, |
|
"eval_loss": 4.2026848793029785, |
|
"eval_runtime": 903.7139, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 1.1396933560477003e-05, |
|
"loss": 3.5163, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"eval_loss": 4.2565717697143555, |
|
"eval_runtime": 909.1873, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10100 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"learning_rate": 1.131175468483816e-05, |
|
"loss": 3.4902, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"eval_loss": 4.208179473876953, |
|
"eval_runtime": 901.8843, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10200 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"learning_rate": 1.1226575809199321e-05, |
|
"loss": 3.4829, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 8.77, |
|
"eval_loss": 4.217052459716797, |
|
"eval_runtime": 913.5755, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10300 |
|
}, |
|
{ |
|
"epoch": 8.86, |
|
"learning_rate": 1.1141396933560477e-05, |
|
"loss": 3.599, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 8.86, |
|
"eval_loss": 4.216422080993652, |
|
"eval_runtime": 904.6206, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"learning_rate": 1.1056218057921638e-05, |
|
"loss": 3.5058, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 8.94, |
|
"eval_loss": 4.200262069702148, |
|
"eval_runtime": 908.4763, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 9.03, |
|
"learning_rate": 1.0971039182282795e-05, |
|
"loss": 3.4622, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 9.03, |
|
"eval_loss": 4.201136112213135, |
|
"eval_runtime": 918.5088, |
|
"eval_samples_per_second": 0.319, |
|
"eval_steps_per_second": 0.04, |
|
"step": 10600 |
|
}, |
|
{ |
|
"epoch": 9.11, |
|
"learning_rate": 1.0885860306643954e-05, |
|
"loss": 3.4836, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 9.11, |
|
"eval_loss": 4.196112632751465, |
|
"eval_runtime": 907.3086, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10700 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"learning_rate": 1.0800681431005112e-05, |
|
"loss": 3.4177, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 9.2, |
|
"eval_loss": 4.2310028076171875, |
|
"eval_runtime": 904.8795, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"learning_rate": 1.071550255536627e-05, |
|
"loss": 3.4407, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 9.28, |
|
"eval_loss": 4.2216362953186035, |
|
"eval_runtime": 910.613, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 10900 |
|
}, |
|
{ |
|
"epoch": 9.37, |
|
"learning_rate": 1.0630323679727428e-05, |
|
"loss": 3.44, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 9.37, |
|
"eval_loss": 4.2364935874938965, |
|
"eval_runtime": 907.4387, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 1.0545144804088587e-05, |
|
"loss": 3.5116, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"eval_loss": 4.201255798339844, |
|
"eval_runtime": 905.3716, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11100 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"learning_rate": 1.0459965928449745e-05, |
|
"loss": 3.4793, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 9.54, |
|
"eval_loss": 4.203760147094727, |
|
"eval_runtime": 903.5367, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"learning_rate": 1.0374787052810904e-05, |
|
"loss": 3.4414, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"eval_loss": 4.177506923675537, |
|
"eval_runtime": 904.4055, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11300 |
|
}, |
|
{ |
|
"epoch": 9.71, |
|
"learning_rate": 1.0289608177172061e-05, |
|
"loss": 3.509, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 9.71, |
|
"eval_loss": 4.204129695892334, |
|
"eval_runtime": 904.4565, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11400 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"learning_rate": 1.020442930153322e-05, |
|
"loss": 3.464, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 9.8, |
|
"eval_loss": 4.2182393074035645, |
|
"eval_runtime": 915.8218, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"learning_rate": 1.0119250425894378e-05, |
|
"loss": 3.3574, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 9.88, |
|
"eval_loss": 4.222841262817383, |
|
"eval_runtime": 916.715, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 9.97, |
|
"learning_rate": 1.0034071550255537e-05, |
|
"loss": 3.4134, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 9.97, |
|
"eval_loss": 4.198949813842773, |
|
"eval_runtime": 911.1975, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11700 |
|
}, |
|
{ |
|
"epoch": 10.05, |
|
"learning_rate": 9.948892674616696e-06, |
|
"loss": 3.4876, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 10.05, |
|
"eval_loss": 4.1779093742370605, |
|
"eval_runtime": 905.3949, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11800 |
|
}, |
|
{ |
|
"epoch": 10.14, |
|
"learning_rate": 9.863713798977853e-06, |
|
"loss": 3.3188, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 10.14, |
|
"eval_loss": 4.199155330657959, |
|
"eval_runtime": 906.4403, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 11900 |
|
}, |
|
{ |
|
"epoch": 10.22, |
|
"learning_rate": 9.778534923339012e-06, |
|
"loss": 3.4336, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 10.22, |
|
"eval_loss": 4.215114593505859, |
|
"eval_runtime": 918.8261, |
|
"eval_samples_per_second": 0.319, |
|
"eval_steps_per_second": 0.04, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 10.31, |
|
"learning_rate": 9.693356047700172e-06, |
|
"loss": 3.3774, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 10.31, |
|
"eval_loss": 4.194676876068115, |
|
"eval_runtime": 912.8562, |
|
"eval_samples_per_second": 0.321, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12100 |
|
}, |
|
{ |
|
"epoch": 10.39, |
|
"learning_rate": 9.608177172061329e-06, |
|
"loss": 3.2942, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 10.39, |
|
"eval_loss": 4.212912082672119, |
|
"eval_runtime": 905.2405, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12200 |
|
}, |
|
{ |
|
"epoch": 10.48, |
|
"learning_rate": 9.522998296422488e-06, |
|
"loss": 3.4431, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 10.48, |
|
"eval_loss": 4.1929144859313965, |
|
"eval_runtime": 908.9828, |
|
"eval_samples_per_second": 0.322, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12300 |
|
}, |
|
{ |
|
"epoch": 10.56, |
|
"learning_rate": 9.437819420783645e-06, |
|
"loss": 3.3895, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 10.56, |
|
"eval_loss": 4.221463203430176, |
|
"eval_runtime": 905.8419, |
|
"eval_samples_per_second": 0.323, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"learning_rate": 9.352640545144805e-06, |
|
"loss": 3.4624, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"eval_loss": 4.192135334014893, |
|
"eval_runtime": 915.7075, |
|
"eval_samples_per_second": 0.32, |
|
"eval_steps_per_second": 0.04, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 10.73, |
|
"learning_rate": 9.267461669505964e-06, |
|
"loss": 3.3823, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 10.73, |
|
"eval_loss": 4.197402477264404, |
|
"eval_runtime": 902.8153, |
|
"eval_samples_per_second": 0.325, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12600 |
|
}, |
|
{ |
|
"epoch": 10.82, |
|
"learning_rate": 9.182282793867123e-06, |
|
"loss": 3.3671, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 10.82, |
|
"eval_loss": 4.152112007141113, |
|
"eval_runtime": 903.9495, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12700 |
|
}, |
|
{ |
|
"epoch": 10.9, |
|
"learning_rate": 9.09710391822828e-06, |
|
"loss": 3.2883, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 10.9, |
|
"eval_loss": 4.178409576416016, |
|
"eval_runtime": 904.0493, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 10.99, |
|
"learning_rate": 9.01192504258944e-06, |
|
"loss": 3.4145, |
|
"step": 12900 |
|
}, |
|
{ |
|
"epoch": 10.99, |
|
"eval_loss": 4.208373546600342, |
|
"eval_runtime": 903.9191, |
|
"eval_samples_per_second": 0.324, |
|
"eval_steps_per_second": 0.041, |
|
"step": 12900 |
|
} |
|
], |
|
"max_steps": 23480, |
|
"num_train_epochs": 20, |
|
"total_flos": 4.542855419394294e+19, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |