{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.0,
  "global_step": 10452,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.57,
      "learning_rate": 4.760811327975507e-05,
      "loss": 2.2326,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.150918960571289,
      "eval_runtime": 10.3767,
      "eval_samples_per_second": 282.652,
      "eval_steps_per_second": 35.368,
      "step": 871
    },
    {
      "epoch": 1.15,
      "learning_rate": 4.521622655951014e-05,
      "loss": 2.1995,
      "step": 1000
    },
    {
      "epoch": 1.72,
      "learning_rate": 4.282433983926522e-05,
      "loss": 2.1375,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.1088852882385254,
      "eval_runtime": 10.4009,
      "eval_samples_per_second": 281.994,
      "eval_steps_per_second": 35.285,
      "step": 1742
    },
    {
      "epoch": 2.3,
      "learning_rate": 4.043245311902029e-05,
      "loss": 2.0858,
      "step": 2000
    },
    {
      "epoch": 2.87,
      "learning_rate": 3.8040566398775356e-05,
      "loss": 2.0442,
      "step": 2500
    },
    {
      "epoch": 3.0,
      "eval_loss": 2.0654592514038086,
      "eval_runtime": 10.4234,
      "eval_samples_per_second": 281.386,
      "eval_steps_per_second": 35.209,
      "step": 2613
    },
    {
      "epoch": 3.44,
      "learning_rate": 3.5648679678530425e-05,
      "loss": 2.0116,
      "step": 3000
    },
    {
      "epoch": 4.0,
      "eval_loss": 2.0432863235473633,
      "eval_runtime": 10.4039,
      "eval_samples_per_second": 281.913,
      "eval_steps_per_second": 35.275,
      "step": 3484
    },
    {
      "epoch": 4.02,
      "learning_rate": 3.32567929582855e-05,
      "loss": 2.0013,
      "step": 3500
    },
    {
      "epoch": 4.59,
      "learning_rate": 3.086490623804057e-05,
      "loss": 1.9346,
      "step": 4000
    },
    {
      "epoch": 5.0,
      "eval_loss": 2.0133845806121826,
      "eval_runtime": 10.3456,
      "eval_samples_per_second": 283.503,
      "eval_steps_per_second": 35.474,
      "step": 4355
    },
    {
      "epoch": 5.17,
      "learning_rate": 2.847301951779564e-05,
      "loss": 1.9208,
      "step": 4500
    },
    {
      "epoch": 5.74,
      "learning_rate": 2.608113279755071e-05,
      "loss": 1.9056,
      "step": 5000
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.9956320524215698,
      "eval_runtime": 10.2631,
      "eval_samples_per_second": 285.782,
      "eval_steps_per_second": 35.759,
      "step": 5226
    },
    {
      "epoch": 6.31,
      "learning_rate": 2.368924607730578e-05,
      "loss": 1.866,
      "step": 5500
    },
    {
      "epoch": 6.89,
      "learning_rate": 2.1297359357060852e-05,
      "loss": 1.8295,
      "step": 6000
    },
    {
      "epoch": 7.0,
      "eval_loss": 2.028700828552246,
      "eval_runtime": 10.2135,
      "eval_samples_per_second": 287.168,
      "eval_steps_per_second": 35.933,
      "step": 6097
    },
    {
      "epoch": 7.46,
      "learning_rate": 1.890547263681592e-05,
      "loss": 1.8204,
      "step": 6500
    },
    {
      "epoch": 8.0,
      "eval_loss": 2.0173206329345703,
      "eval_runtime": 10.2204,
      "eval_samples_per_second": 286.974,
      "eval_steps_per_second": 35.909,
      "step": 6968
    },
    {
      "epoch": 8.04,
      "learning_rate": 1.651358591657099e-05,
      "loss": 1.7944,
      "step": 7000
    },
    {
      "epoch": 8.61,
      "learning_rate": 1.4121699196326064e-05,
      "loss": 1.7928,
      "step": 7500
    },
    {
      "epoch": 9.0,
      "eval_loss": 2.0250914096832275,
      "eval_runtime": 10.2198,
      "eval_samples_per_second": 286.991,
      "eval_steps_per_second": 35.911,
      "step": 7839
    },
    {
      "epoch": 9.18,
      "learning_rate": 1.1729812476081135e-05,
      "loss": 1.7555,
      "step": 8000
    },
    {
      "epoch": 9.76,
      "learning_rate": 9.337925755836204e-06,
      "loss": 1.7357,
      "step": 8500
    },
    {
      "epoch": 10.0,
      "eval_loss": 2.014782667160034,
      "eval_runtime": 10.2205,
      "eval_samples_per_second": 286.972,
      "eval_steps_per_second": 35.908,
      "step": 8710
    },
    {
      "epoch": 10.33,
      "learning_rate": 6.946039035591274e-06,
      "loss": 1.7191,
      "step": 9000
    },
    {
      "epoch": 10.91,
      "learning_rate": 4.554152315346345e-06,
      "loss": 1.7318,
      "step": 9500
    },
    {
      "epoch": 11.0,
      "eval_loss": 1.9273912906646729,
      "eval_runtime": 10.2143,
      "eval_samples_per_second": 287.148,
      "eval_steps_per_second": 35.93,
      "step": 9581
    },
    {
      "epoch": 11.48,
      "learning_rate": 2.1622655951014164e-06,
      "loss": 1.7311,
      "step": 10000
    },
    {
      "epoch": 12.0,
      "eval_loss": 1.931414246559143,
      "eval_runtime": 10.2191,
      "eval_samples_per_second": 287.011,
      "eval_steps_per_second": 35.913,
      "step": 10452
    },
    {
      "epoch": 12.0,
      "step": 10452,
      "total_flos": 4.400971970398618e+16,
      "train_loss": 1.9038402734019955,
      "train_runtime": 9444.1472,
      "train_samples_per_second": 70.82,
      "train_steps_per_second": 1.107
    }
  ],
  "max_steps": 10452,
  "num_train_epochs": 12,
  "total_flos": 4.400971970398618e+16,
  "trial_name": null,
  "trial_params": null
}