|
{
  "best_metric": 0.47202983498573303,
  "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_yem/checkpoint-918",
  "epoch": 16.0,
  "eval_steps": 500,
  "global_step": 2448,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "grad_norm": 43.000205993652344,
      "learning_rate": 1.53e-05,
      "loss": 7.8314,
      "step": 153
    },
    {
      "epoch": 1.0,
      "eval_bleu": 0.010356123410675664,
      "eval_loss": 2.359743595123291,
      "eval_rouge1": 0.09828786255795381,
      "eval_rouge2": 0.0018177296310952823,
      "eval_rougeL": 0.09639405964210626,
      "eval_runtime": 1.1837,
      "eval_samples_per_second": 256.818,
      "eval_steps_per_second": 32.102,
      "step": 153
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.3453600406646729,
      "learning_rate": 3.06e-05,
      "loss": 0.9995,
      "step": 306
    },
    {
      "epoch": 2.0,
      "eval_bleu": 0.11609954154539694,
      "eval_loss": 0.5537932515144348,
      "eval_rouge1": 0.47363801342590006,
      "eval_rouge2": 0.18538347478382655,
      "eval_rougeL": 0.4727595492611425,
      "eval_runtime": 9.2767,
      "eval_samples_per_second": 32.77,
      "eval_steps_per_second": 4.096,
      "step": 306
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.2846729755401611,
      "learning_rate": 4.5900000000000004e-05,
      "loss": 0.4848,
      "step": 459
    },
    {
      "epoch": 3.0,
      "eval_bleu": 0.14721751205352876,
      "eval_loss": 0.5034472942352295,
      "eval_rouge1": 0.500839694175635,
      "eval_rouge2": 0.21735831386751384,
      "eval_rougeL": 0.5001625449149274,
      "eval_runtime": 10.2581,
      "eval_samples_per_second": 29.635,
      "eval_steps_per_second": 3.704,
      "step": 459
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.2576143741607666,
      "learning_rate": 4.7812500000000003e-05,
      "loss": 0.3823,
      "step": 612
    },
    {
      "epoch": 4.0,
      "eval_bleu": 0.1911807573581778,
      "eval_loss": 0.4827471375465393,
      "eval_rouge1": 0.5330693017126298,
      "eval_rouge2": 0.27439720992270494,
      "eval_rougeL": 0.531424425624079,
      "eval_runtime": 5.4191,
      "eval_samples_per_second": 56.098,
      "eval_steps_per_second": 7.012,
      "step": 612
    },
    {
      "epoch": 5.0,
      "grad_norm": 1.7599295377731323,
      "learning_rate": 4.482421875e-05,
      "loss": 0.293,
      "step": 765
    },
    {
      "epoch": 5.0,
      "eval_bleu": 0.20959557277319146,
      "eval_loss": 0.4732062816619873,
      "eval_rouge1": 0.5619100572084703,
      "eval_rouge2": 0.30991934212678557,
      "eval_rougeL": 0.5587085347101918,
      "eval_runtime": 2.8763,
      "eval_samples_per_second": 105.69,
      "eval_steps_per_second": 13.211,
      "step": 765
    },
    {
      "epoch": 6.0,
      "grad_norm": 1.4758834838867188,
      "learning_rate": 4.18359375e-05,
      "loss": 0.2239,
      "step": 918
    },
    {
      "epoch": 6.0,
      "eval_bleu": 0.22970619705356748,
      "eval_loss": 0.47202983498573303,
      "eval_rouge1": 0.5777164933812552,
      "eval_rouge2": 0.33405816844574837,
      "eval_rougeL": 0.5758449342460217,
      "eval_runtime": 1.7018,
      "eval_samples_per_second": 178.629,
      "eval_steps_per_second": 22.329,
      "step": 918
    },
    {
      "epoch": 7.0,
      "grad_norm": 1.1557574272155762,
      "learning_rate": 3.884765625e-05,
      "loss": 0.1766,
      "step": 1071
    },
    {
      "epoch": 7.0,
      "eval_bleu": 0.23015149161791434,
      "eval_loss": 0.47373390197753906,
      "eval_rouge1": 0.588487328464713,
      "eval_rouge2": 0.3467647604638442,
      "eval_rougeL": 0.5872191002290259,
      "eval_runtime": 19.3271,
      "eval_samples_per_second": 15.729,
      "eval_steps_per_second": 1.966,
      "step": 1071
    },
    {
      "epoch": 8.0,
      "grad_norm": 1.236377477645874,
      "learning_rate": 3.5859375e-05,
      "loss": 0.1434,
      "step": 1224
    },
    {
      "epoch": 8.0,
      "eval_bleu": 0.24506232565608202,
      "eval_loss": 0.47583380341529846,
      "eval_rouge1": 0.5938395263674476,
      "eval_rouge2": 0.3647516376509473,
      "eval_rougeL": 0.5903133777624114,
      "eval_runtime": 12.6597,
      "eval_samples_per_second": 24.013,
      "eval_steps_per_second": 3.002,
      "step": 1224
    },
    {
      "epoch": 9.0,
      "grad_norm": 1.2275965213775635,
      "learning_rate": 3.287109375e-05,
      "loss": 0.1202,
      "step": 1377
    },
    {
      "epoch": 9.0,
      "eval_bleu": 0.2508515461433435,
      "eval_loss": 0.4827924072742462,
      "eval_rouge1": 0.6046956358537867,
      "eval_rouge2": 0.3682856780453533,
      "eval_rougeL": 0.6015063720236549,
      "eval_runtime": 30.4628,
      "eval_samples_per_second": 9.979,
      "eval_steps_per_second": 1.247,
      "step": 1377
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.9903411865234375,
      "learning_rate": 2.9882812500000002e-05,
      "loss": 0.1042,
      "step": 1530
    },
    {
      "epoch": 10.0,
      "eval_bleu": 0.24386094085391116,
      "eval_loss": 0.4852388799190521,
      "eval_rouge1": 0.5980129828644593,
      "eval_rouge2": 0.3703776134768108,
      "eval_rougeL": 0.5951273362785527,
      "eval_runtime": 23.4553,
      "eval_samples_per_second": 12.961,
      "eval_steps_per_second": 1.62,
      "step": 1530
    },
    {
      "epoch": 11.0,
      "grad_norm": 1.104041337966919,
      "learning_rate": 2.689453125e-05,
      "loss": 0.0955,
      "step": 1683
    },
    {
      "epoch": 11.0,
      "eval_bleu": 0.25597221159625966,
      "eval_loss": 0.4884573221206665,
      "eval_rouge1": 0.6115922153776887,
      "eval_rouge2": 0.37952408975600893,
      "eval_rougeL": 0.6085537919616412,
      "eval_runtime": 1.297,
      "eval_samples_per_second": 234.387,
      "eval_steps_per_second": 29.298,
      "step": 1683
    },
    {
      "epoch": 12.0,
      "grad_norm": 0.6220578551292419,
      "learning_rate": 2.3906250000000002e-05,
      "loss": 0.0875,
      "step": 1836
    },
    {
      "epoch": 12.0,
      "eval_bleu": 0.25506926105311084,
      "eval_loss": 0.4913596212863922,
      "eval_rouge1": 0.6097696683692526,
      "eval_rouge2": 0.38019766139150524,
      "eval_rougeL": 0.6055686891522601,
      "eval_runtime": 2.0886,
      "eval_samples_per_second": 145.549,
      "eval_steps_per_second": 18.194,
      "step": 1836
    },
    {
      "epoch": 13.0,
      "grad_norm": 0.7318098545074463,
      "learning_rate": 2.091796875e-05,
      "loss": 0.0825,
      "step": 1989
    },
    {
      "epoch": 13.0,
      "eval_bleu": 0.26272086017465046,
      "eval_loss": 0.4981193542480469,
      "eval_rouge1": 0.6144877321856528,
      "eval_rouge2": 0.3872195934378641,
      "eval_rougeL": 0.6108120100650027,
      "eval_runtime": 1.1422,
      "eval_samples_per_second": 266.148,
      "eval_steps_per_second": 33.268,
      "step": 1989
    },
    {
      "epoch": 14.0,
      "grad_norm": 0.794402539730072,
      "learning_rate": 1.79296875e-05,
      "loss": 0.0787,
      "step": 2142
    },
    {
      "epoch": 14.0,
      "eval_bleu": 0.25895474712357536,
      "eval_loss": 0.5004830360412598,
      "eval_rouge1": 0.6138305476317043,
      "eval_rouge2": 0.3796550798534031,
      "eval_rougeL": 0.6100541920692137,
      "eval_runtime": 1.1033,
      "eval_samples_per_second": 275.529,
      "eval_steps_per_second": 34.441,
      "step": 2142
    },
    {
      "epoch": 15.0,
      "grad_norm": 0.7674385905265808,
      "learning_rate": 1.4941406250000001e-05,
      "loss": 0.0757,
      "step": 2295
    },
    {
      "epoch": 15.0,
      "eval_bleu": 0.2632177488955002,
      "eval_loss": 0.5019999146461487,
      "eval_rouge1": 0.6170590027553351,
      "eval_rouge2": 0.3850023586523127,
      "eval_rougeL": 0.6140574753716048,
      "eval_runtime": 4.8266,
      "eval_samples_per_second": 62.985,
      "eval_steps_per_second": 7.873,
      "step": 2295
    },
    {
      "epoch": 16.0,
      "grad_norm": 0.7136771082878113,
      "learning_rate": 1.1953125000000001e-05,
      "loss": 0.074,
      "step": 2448
    },
    {
      "epoch": 16.0,
      "eval_bleu": 0.25943804422988964,
      "eval_loss": 0.5049206018447876,
      "eval_rouge1": 0.6161012779292924,
      "eval_rouge2": 0.3828890396119353,
      "eval_rougeL": 0.6124852816095869,
      "eval_runtime": 1.0975,
      "eval_samples_per_second": 276.998,
      "eval_steps_per_second": 34.625,
      "step": 2448
    },
    {
      "epoch": 16.0,
      "step": 2448,
      "total_flos": 1275105116160000.0,
      "train_loss": 0.024901744976542354,
      "train_runtime": 209.6658,
      "train_samples_per_second": 116.376,
      "train_steps_per_second": 14.595
    }
  ],
  "logging_steps": 500,
  "max_steps": 3060,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1275105116160000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}