{
  "best_metric": 2.608954906463623,
  "best_model_checkpoint": "./models/final_bart/checkpoint-2000",
  "epoch": 5.0,
  "global_step": 3310,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "learning_rate": 9.063444108761328e-06,
      "loss": 5.5998,
      "step": 100
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.8126888217522657e-05,
      "loss": 2.7748,
      "step": 200
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.719033232628399e-05,
      "loss": 2.5362,
      "step": 300
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.9305135951661632e-05,
      "loss": 2.4214,
      "step": 400
    },
    {
      "epoch": 0.76,
      "learning_rate": 2.8298086606243708e-05,
      "loss": 2.3583,
      "step": 500
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.729103726082578e-05,
      "loss": 2.3157,
      "step": 600
    },
    {
      "epoch": 1.06,
      "learning_rate": 2.6283987915407858e-05,
      "loss": 2.2639,
      "step": 700
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.527693856998993e-05,
      "loss": 2.1867,
      "step": 800
    },
    {
      "epoch": 1.36,
      "learning_rate": 2.4269889224572005e-05,
      "loss": 2.155,
      "step": 900
    },
    {
      "epoch": 1.51,
      "learning_rate": 2.3262839879154077e-05,
      "loss": 2.1622,
      "step": 1000
    },
    {
      "epoch": 1.51,
      "eval_bleu1": 29.4018,
      "eval_bleu2": 17.2004,
      "eval_bleu3": 10.3744,
      "eval_bleu4": 6.052,
      "eval_gen_len": 49.4266,
      "eval_loss": 2.6686596870422363,
      "eval_rdass": 0.6378999948501587,
      "eval_rouge1": 35.4366,
      "eval_rouge2": 12.8631,
      "eval_rougeL": 23.1588,
      "eval_runtime": 119.7752,
      "eval_samples_per_second": 3.582,
      "eval_steps_per_second": 0.058,
      "step": 1000
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.2255790533736152e-05,
      "loss": 2.1548,
      "step": 1100
    },
    {
      "epoch": 1.81,
      "learning_rate": 2.124874118831823e-05,
      "loss": 2.1442,
      "step": 1200
    },
    {
      "epoch": 1.96,
      "learning_rate": 2.0241691842900302e-05,
      "loss": 2.1273,
      "step": 1300
    },
    {
      "epoch": 2.11,
      "learning_rate": 1.9234642497482378e-05,
      "loss": 2.0555,
      "step": 1400
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.822759315206445e-05,
      "loss": 2.0243,
      "step": 1500
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.7220543806646525e-05,
      "loss": 2.0054,
      "step": 1600
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.6213494461228603e-05,
      "loss": 2.0066,
      "step": 1700
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.5206445115810675e-05,
      "loss": 2.009,
      "step": 1800
    },
    {
      "epoch": 2.87,
      "learning_rate": 1.419939577039275e-05,
      "loss": 2.0079,
      "step": 1900
    },
    {
      "epoch": 3.02,
      "learning_rate": 1.3192346424974824e-05,
      "loss": 2.0114,
      "step": 2000
    },
    {
      "epoch": 3.02,
      "eval_bleu1": 28.8917,
      "eval_bleu2": 17.0965,
      "eval_bleu3": 10.1873,
      "eval_bleu4": 5.896,
      "eval_gen_len": 46.1096,
      "eval_loss": 2.608954906463623,
      "eval_rdass": 0.6388999819755554,
      "eval_rouge1": 35.1436,
      "eval_rouge2": 13.0347,
      "eval_rougeL": 23.4682,
      "eval_runtime": 105.5108,
      "eval_samples_per_second": 4.066,
      "eval_steps_per_second": 0.066,
      "step": 2000
    },
    {
      "epoch": 3.17,
      "learning_rate": 1.2185297079556899e-05,
      "loss": 1.9198,
      "step": 2100
    },
    {
      "epoch": 3.32,
      "learning_rate": 1.1178247734138972e-05,
      "loss": 1.9285,
      "step": 2200
    },
    {
      "epoch": 3.47,
      "learning_rate": 1.0171198388721048e-05,
      "loss": 1.9271,
      "step": 2300
    },
    {
      "epoch": 3.63,
      "learning_rate": 9.164149043303123e-06,
      "loss": 1.9449,
      "step": 2400
    },
    {
      "epoch": 3.78,
      "learning_rate": 8.157099697885196e-06,
      "loss": 1.9189,
      "step": 2500
    },
    {
      "epoch": 3.93,
      "learning_rate": 7.1500503524672715e-06,
      "loss": 1.9269,
      "step": 2600
    },
    {
      "epoch": 4.08,
      "learning_rate": 6.143001007049345e-06,
      "loss": 1.8913,
      "step": 2700
    },
    {
      "epoch": 4.23,
      "learning_rate": 5.13595166163142e-06,
      "loss": 1.8721,
      "step": 2800
    },
    {
      "epoch": 4.38,
      "learning_rate": 4.1289023162134946e-06,
      "loss": 1.8779,
      "step": 2900
    },
    {
      "epoch": 4.53,
      "learning_rate": 3.121852970795569e-06,
      "loss": 1.8758,
      "step": 3000
    },
    {
      "epoch": 4.53,
      "eval_bleu1": 29.5206,
      "eval_bleu2": 17.3914,
      "eval_bleu3": 10.5577,
      "eval_bleu4": 6.1502,
      "eval_gen_len": 49.7389,
      "eval_loss": 2.6099660396575928,
      "eval_rdass": 0.6449000239372253,
      "eval_rouge1": 35.5593,
      "eval_rouge2": 13.0497,
      "eval_rougeL": 23.5672,
      "eval_runtime": 115.4788,
      "eval_samples_per_second": 3.715,
      "eval_steps_per_second": 0.061,
      "step": 3000
    },
    {
      "epoch": 4.68,
      "learning_rate": 2.1148036253776437e-06,
      "loss": 1.8709,
      "step": 3100
    },
    {
      "epoch": 4.83,
      "learning_rate": 1.107754279959718e-06,
      "loss": 1.8656,
      "step": 3200
    },
    {
      "epoch": 4.98,
      "learning_rate": 1.0070493454179255e-07,
      "loss": 1.8666,
      "step": 3300
    },
    {
      "epoch": 5.0,
      "step": 3310,
      "total_flos": 5.527100997669888e+16,
      "train_loss": 2.181030597859639,
      "train_runtime": 4138.7205,
      "train_samples_per_second": 51.184,
      "train_steps_per_second": 0.8
    }
  ],
  "max_steps": 3310,
  "num_train_epochs": 5,
  "total_flos": 5.527100997669888e+16,
  "trial_name": null,
  "trial_params": null
}