{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "global_step": 19510,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 4.8718605843157357e-05,
      "loss": 0.5024,
      "step": 500
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.743721168631472e-05,
      "loss": 0.3004,
      "step": 1000
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.6155817529472065e-05,
      "loss": 0.2771,
      "step": 1500
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.487442337262942e-05,
      "loss": 0.2631,
      "step": 2000
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.359302921578678e-05,
      "loss": 0.2525,
      "step": 2500
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.2311635058944134e-05,
      "loss": 0.246,
      "step": 3000
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.103024090210149e-05,
      "loss": 0.2379,
      "step": 3500
    },
    {
      "epoch": 0.41,
      "learning_rate": 3.974884674525884e-05,
      "loss": 0.2329,
      "step": 4000
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.8467452588416197e-05,
      "loss": 0.2266,
      "step": 4500
    },
    {
      "epoch": 0.51,
      "learning_rate": 3.718605843157356e-05,
      "loss": 0.224,
      "step": 5000
    },
    {
      "epoch": 0.56,
      "learning_rate": 3.590466427473091e-05,
      "loss": 0.219,
      "step": 5500
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.462327011788826e-05,
      "loss": 0.2148,
      "step": 6000
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.334187596104562e-05,
      "loss": 0.2128,
      "step": 6500
    },
    {
      "epoch": 0.72,
      "learning_rate": 3.2060481804202974e-05,
      "loss": 0.2118,
      "step": 7000
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.077908764736033e-05,
      "loss": 0.2088,
      "step": 7500
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.9497693490517686e-05,
      "loss": 0.2042,
      "step": 8000
    },
    {
      "epoch": 0.87,
      "learning_rate": 2.821629933367504e-05,
      "loss": 0.2031,
      "step": 8500
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.693490517683239e-05,
      "loss": 0.2031,
      "step": 9000
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.565351101998975e-05,
      "loss": 0.1993,
      "step": 9500
    },
    {
      "epoch": 1.0,
      "eval_bleu": 0.0,
      "eval_gen_len": 19.0,
      "eval_loss": 0.17119529843330383,
      "eval_runtime": 1684.1894,
      "eval_samples_per_second": 46.336,
      "eval_steps_per_second": 0.724,
      "step": 9755
    },
    {
      "epoch": 1.03,
      "learning_rate": 2.4372116863147106e-05,
      "loss": 0.1977,
      "step": 10000
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.309072270630446e-05,
      "loss": 0.1966,
      "step": 10500
    },
    {
      "epoch": 1.13,
      "learning_rate": 2.1809328549461817e-05,
      "loss": 0.1957,
      "step": 11000
    },
    {
      "epoch": 1.18,
      "learning_rate": 2.052793439261917e-05,
      "loss": 0.1938,
      "step": 11500
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.9246540235776526e-05,
      "loss": 0.1927,
      "step": 12000
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.796514607893388e-05,
      "loss": 0.1926,
      "step": 12500
    },
    {
      "epoch": 1.33,
      "learning_rate": 1.6683751922091237e-05,
      "loss": 0.1908,
      "step": 13000
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.540235776524859e-05,
      "loss": 0.1901,
      "step": 13500
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.4120963608405946e-05,
      "loss": 0.1892,
      "step": 14000
    },
    {
      "epoch": 1.49,
      "learning_rate": 1.2839569451563302e-05,
      "loss": 0.1886,
      "step": 14500
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.1558175294720656e-05,
      "loss": 0.1893,
      "step": 15000
    },
    {
      "epoch": 1.59,
      "learning_rate": 1.0276781137878012e-05,
      "loss": 0.1872,
      "step": 15500
    },
    {
      "epoch": 1.64,
      "learning_rate": 8.995386981035367e-06,
      "loss": 0.1879,
      "step": 16000
    },
    {
      "epoch": 1.69,
      "learning_rate": 7.713992824192722e-06,
      "loss": 0.1862,
      "step": 16500
    },
    {
      "epoch": 1.74,
      "learning_rate": 6.432598667350077e-06,
      "loss": 0.1862,
      "step": 17000
    },
    {
      "epoch": 1.79,
      "learning_rate": 5.151204510507432e-06,
      "loss": 0.1861,
      "step": 17500
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.869810353664787e-06,
      "loss": 0.185,
      "step": 18000
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.588416196822143e-06,
      "loss": 0.1848,
      "step": 18500
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.3070220399794978e-06,
      "loss": 0.186,
      "step": 19000
    },
    {
      "epoch": 2.0,
      "learning_rate": 2.5627883136852895e-08,
      "loss": 0.1863,
      "step": 19500
    },
    {
      "epoch": 2.0,
      "eval_bleu": 0.0,
      "eval_gen_len": 19.0,
      "eval_loss": 0.163666233420372,
      "eval_runtime": 1684.6721,
      "eval_samples_per_second": 46.323,
      "eval_steps_per_second": 0.724,
      "step": 19510
    },
    {
      "epoch": 2.0,
      "step": 19510,
      "total_flos": 3.801797311667896e+17,
      "train_loss": 0.21620578862409723,
      "train_runtime": 45769.0844,
      "train_samples_per_second": 27.281,
      "train_steps_per_second": 0.426
    }
  ],
  "max_steps": 19510,
  "num_train_epochs": 2,
  "total_flos": 3.801797311667896e+17,
  "trial_name": null,
  "trial_params": null
}