{
  "best_metric": 0.4631750285625458,
  "best_model_checkpoint": "./Vit-GPT2-COCO2017Flickr-85k-11/checkpoint-2500",
  "epoch": 0.4665484743864888,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09330969487729775,
      "grad_norm": 1.14389169216156,
      "learning_rate": 4.8444693293517484e-05,
      "loss": 0.378,
      "step": 500
    },
    {
      "epoch": 0.09330969487729775,
      "eval_gen_len": 11.7725,
      "eval_loss": 0.469292551279068,
      "eval_rouge1": 40.2274,
      "eval_rouge2": 15.0119,
      "eval_rougeL": 36.4563,
      "eval_rougeLsum": 36.4656,
      "eval_runtime": 450.3853,
      "eval_samples_per_second": 8.881,
      "eval_steps_per_second": 2.22,
      "step": 500
    },
    {
      "epoch": 0.1866193897545955,
      "grad_norm": 0.8436835408210754,
      "learning_rate": 4.6889386587034965e-05,
      "loss": 0.3748,
      "step": 1000
    },
    {
      "epoch": 0.1866193897545955,
      "eval_gen_len": 12.16675,
      "eval_loss": 0.46400758624076843,
      "eval_rouge1": 40.199,
      "eval_rouge2": 15.321,
      "eval_rougeL": 36.4279,
      "eval_rougeLsum": 36.4457,
      "eval_runtime": 431.1423,
      "eval_samples_per_second": 9.278,
      "eval_steps_per_second": 2.319,
      "step": 1000
    },
    {
      "epoch": 0.27992908463189325,
      "grad_norm": 0.8730084896087646,
      "learning_rate": 4.5334079880552446e-05,
      "loss": 0.374,
      "step": 1500
    },
    {
      "epoch": 0.27992908463189325,
      "eval_gen_len": 11.8,
      "eval_loss": 0.4668748080730438,
      "eval_rouge1": 39.9523,
      "eval_rouge2": 15.0587,
      "eval_rougeL": 36.3639,
      "eval_rougeLsum": 36.375,
      "eval_runtime": 424.8986,
      "eval_samples_per_second": 9.414,
      "eval_steps_per_second": 2.354,
      "step": 1500
    },
    {
      "epoch": 0.373238779509191,
      "grad_norm": 0.8020262718200684,
      "learning_rate": 4.3778773174069934e-05,
      "loss": 0.3721,
      "step": 2000
    },
    {
      "epoch": 0.373238779509191,
      "eval_gen_len": 11.2095,
      "eval_loss": 0.46446970105171204,
      "eval_rouge1": 40.3597,
      "eval_rouge2": 15.2173,
      "eval_rougeL": 36.6938,
      "eval_rougeLsum": 36.705,
      "eval_runtime": 410.7504,
      "eval_samples_per_second": 9.738,
      "eval_steps_per_second": 2.435,
      "step": 2000
    },
    {
      "epoch": 0.4665484743864888,
      "grad_norm": 0.8208432793617249,
      "learning_rate": 4.222346646758741e-05,
      "loss": 0.3673,
      "step": 2500
    },
    {
      "epoch": 0.4665484743864888,
      "eval_gen_len": 11.93425,
      "eval_loss": 0.4631750285625458,
      "eval_rouge1": 40.3875,
      "eval_rouge2": 15.2532,
      "eval_rougeL": 36.5923,
      "eval_rougeLsum": 36.6182,
      "eval_runtime": 441.242,
      "eval_samples_per_second": 9.065,
      "eval_steps_per_second": 2.266,
      "step": 2500
    }
  ],
  "logging_steps": 500,
  "max_steps": 16074,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.21855564480512e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}