{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9999667608442746,
  "eval_steps": 500,
  "global_step": 7521,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 1.6993074923464342e-05,
      "loss": 0.7944,
      "r_loss": 0.0,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.9549282408750108e-05,
      "loss": 0.8137,
      "r_loss": 0.0,
      "step": 200
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.9799962988348183e-05,
      "loss": 0.8198,
      "r_loss": 0.0,
      "step": 300
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.952593968471556e-05,
      "loss": 0.8145,
      "r_loss": 0.0,
      "step": 400
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.9251916381082937e-05,
      "loss": 0.811,
      "r_loss": 0.0,
      "step": 500
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.8977893077450312e-05,
      "loss": 0.8089,
      "r_loss": 0.0,
      "step": 600
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.8703869773817683e-05,
      "loss": 0.8054,
      "r_loss": 0.0,
      "step": 700
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.842984647018506e-05,
      "loss": 0.8005,
      "r_loss": 0.0,
      "step": 800
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.8155823166552433e-05,
      "loss": 0.7925,
      "r_loss": 0.0,
      "step": 900
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.788179986291981e-05,
      "loss": 0.7915,
      "r_loss": 0.0,
      "step": 1000
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.7607776559287187e-05,
      "loss": 0.7807,
      "r_loss": 0.0,
      "step": 1100
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.733375325565456e-05,
      "loss": 0.7767,
      "r_loss": 0.0,
      "step": 1200
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.7059729952021934e-05,
      "loss": 0.771,
      "r_loss": 0.0,
      "step": 1300
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.678570664838931e-05,
      "loss": 0.7706,
      "r_loss": 0.0,
      "step": 1400
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.6511683344756684e-05,
      "loss": 0.7632,
      "r_loss": 0.0,
      "step": 1500
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.623766004112406e-05,
      "loss": 0.7611,
      "r_loss": 0.0,
      "step": 1600
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.5963636737491434e-05,
      "loss": 0.7516,
      "r_loss": 0.0,
      "step": 1700
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.568961343385881e-05,
      "loss": 0.7506,
      "r_loss": 0.0,
      "step": 1800
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.5415590130226184e-05,
      "loss": 0.7481,
      "r_loss": 0.0,
      "step": 1900
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.5141566826593557e-05,
      "loss": 0.7518,
      "r_loss": 0.0,
      "step": 2000
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.4867543522960932e-05,
      "loss": 0.7441,
      "r_loss": 0.0,
      "step": 2100
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.4593520219328307e-05,
      "loss": 0.7415,
      "r_loss": 0.0,
      "step": 2200
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.431949691569568e-05,
      "loss": 0.7405,
      "r_loss": 0.0,
      "step": 2300
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.4045473612063056e-05,
      "loss": 0.7319,
      "r_loss": 0.0,
      "step": 2400
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.3771450308430432e-05,
      "loss": 0.7332,
      "r_loss": 0.0,
      "step": 2500
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.3497427004797807e-05,
      "loss": 0.7263,
      "r_loss": 0.0,
      "step": 2600
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.3223403701165183e-05,
      "loss": 0.7231,
      "r_loss": 0.0,
      "step": 2700
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.2949380397532556e-05,
      "loss": 0.7196,
      "r_loss": 0.0,
      "step": 2800
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.2675357093899931e-05,
      "loss": 0.7188,
      "r_loss": 0.0,
      "step": 2900
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.2401333790267306e-05,
      "loss": 0.7116,
      "r_loss": 0.0,
      "step": 3000
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.2127310486634681e-05,
      "loss": 0.7105,
      "r_loss": 0.0,
      "step": 3100
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.1853287183002058e-05,
      "loss": 0.7084,
      "r_loss": 0.0,
      "step": 3200
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.1579263879369431e-05,
      "loss": 0.7047,
      "r_loss": 0.0,
      "step": 3300
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.1305240575736806e-05,
      "loss": 0.7025,
      "r_loss": 0.0,
      "step": 3400
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.1031217272104181e-05,
      "loss": 0.6981,
      "r_loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.0757193968471556e-05,
      "loss": 0.6983,
      "r_loss": 0.0,
      "step": 3600
    },
    {
      "epoch": 0.49,
      "learning_rate": 1.048317066483893e-05,
      "loss": 0.6922,
      "r_loss": 0.0,
      "step": 3700
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.0209147361206305e-05,
      "loss": 0.6925,
      "r_loss": 0.0,
      "step": 3800
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.93512405757368e-06,
      "loss": 0.696,
      "r_loss": 0.0,
      "step": 3900
    },
    {
      "epoch": 0.53,
      "learning_rate": 9.661100753941055e-06,
      "loss": 0.6857,
      "r_loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 0.55,
      "learning_rate": 9.387077450308431e-06,
      "loss": 0.6823,
      "r_loss": 0.0,
      "step": 4100
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.113054146675805e-06,
      "loss": 0.6834,
      "r_loss": 0.0,
      "step": 4200
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.83903084304318e-06,
      "loss": 0.6789,
      "r_loss": 0.0,
      "step": 4300
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.565007539410555e-06,
      "loss": 0.6775,
      "r_loss": 0.0,
      "step": 4400
    },
    {
      "epoch": 0.6,
      "learning_rate": 8.29098423577793e-06,
      "loss": 0.678,
      "r_loss": 0.0,
      "step": 4500
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.016960932145305e-06,
      "loss": 0.6696,
      "r_loss": 0.0,
      "step": 4600
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.74293762851268e-06,
      "loss": 0.6698,
      "r_loss": 0.0,
      "step": 4700
    },
    {
      "epoch": 0.64,
      "learning_rate": 7.468914324880055e-06,
      "loss": 0.6671,
      "r_loss": 0.0,
      "step": 4800
    },
    {
      "epoch": 0.65,
      "learning_rate": 7.19489102124743e-06,
      "loss": 0.6615,
      "r_loss": 0.0,
      "step": 4900
    },
    {
      "epoch": 0.66,
      "learning_rate": 6.920867717614804e-06,
      "loss": 0.6569,
      "r_loss": 0.0,
      "step": 5000
    },
    {
      "epoch": 0.68,
      "learning_rate": 6.64684441398218e-06,
      "loss": 0.659,
      "r_loss": 0.0,
      "step": 5100
    },
    {
      "epoch": 0.69,
      "learning_rate": 6.372821110349554e-06,
      "loss": 0.6586,
      "r_loss": 0.0,
      "step": 5200
    },
    {
      "epoch": 0.7,
      "learning_rate": 6.098797806716929e-06,
      "loss": 0.654,
      "r_loss": 0.0,
      "step": 5300
    },
    {
      "epoch": 0.72,
      "learning_rate": 5.8247745030843036e-06,
      "loss": 0.6541,
      "r_loss": 0.0,
      "step": 5400
    },
    {
      "epoch": 0.73,
      "learning_rate": 5.5507511994516794e-06,
      "loss": 0.6445,
      "r_loss": 0.0,
      "step": 5500
    },
    {
      "epoch": 0.74,
      "learning_rate": 5.2767278958190545e-06,
      "loss": 0.6507,
      "r_loss": 0.0,
      "step": 5600
    },
    {
      "epoch": 0.76,
      "learning_rate": 5.002704592186429e-06,
      "loss": 0.6492,
      "r_loss": 0.0,
      "step": 5700
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.728681288553804e-06,
      "loss": 0.6435,
      "r_loss": 0.0,
      "step": 5800
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.454657984921179e-06,
      "loss": 0.6453,
      "r_loss": 0.0,
      "step": 5900
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.180634681288554e-06,
      "loss": 0.6363,
      "r_loss": 0.0,
      "step": 6000
    },
    {
      "epoch": 0.81,
      "learning_rate": 3.906611377655929e-06,
      "loss": 0.6379,
      "r_loss": 0.0,
      "step": 6100
    },
    {
      "epoch": 0.82,
      "learning_rate": 3.6325880740233035e-06,
      "loss": 0.635,
      "r_loss": 0.0,
      "step": 6200
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.3585647703906785e-06,
      "loss": 0.6318,
      "r_loss": 0.0,
      "step": 6300
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.084541466758053e-06,
      "loss": 0.6287,
      "r_loss": 0.0,
      "step": 6400
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.810518163125428e-06,
      "loss": 0.6301,
      "r_loss": 0.0,
      "step": 6500
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.5364948594928036e-06,
      "loss": 0.6298,
      "r_loss": 0.0,
      "step": 6600
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.2624715558601782e-06,
      "loss": 0.6285,
      "r_loss": 0.0,
      "step": 6700
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.988448252227553e-06,
      "loss": 0.6256,
      "r_loss": 0.0,
      "step": 6800
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.714424948594928e-06,
      "loss": 0.627,
      "r_loss": 0.0,
      "step": 6900
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.440401644962303e-06,
      "loss": 0.6246,
      "r_loss": 0.0,
      "step": 7000
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.1663783413296778e-06,
      "loss": 0.6197,
      "r_loss": 0.0,
      "step": 7100
    },
    {
      "epoch": 0.96,
      "learning_rate": 8.923550376970528e-07,
      "loss": 0.6222,
      "r_loss": 0.0,
      "step": 7200
    },
    {
      "epoch": 0.97,
      "learning_rate": 6.183317340644278e-07,
      "loss": 0.6236,
      "r_loss": 0.0,
      "step": 7300
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.443084304318026e-07,
      "loss": 0.6166,
      "r_loss": 0.0,
      "step": 7400
    },
    {
      "epoch": 1.0,
      "learning_rate": 7.028512679917751e-08,
      "loss": 0.6157,
      "r_loss": 0.0,
      "step": 7500
    },
    {
      "epoch": 1.0,
      "step": 7521,
      "total_flos": 2.2194896440188207e+19,
      "train_loss": 0.7007184202977593,
      "train_runtime": 59240.1132,
      "train_samples_per_second": 16.251,
      "train_steps_per_second": 0.127
    }
  ],
  "logging_steps": 100,
  "max_steps": 7521,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "total_flos": 2.2194896440188207e+19,
  "trial_name": null,
  "trial_params": null
}