{
  "best_metric": 1.9793106317520142,
  "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/results_fixed/checkpoint-6915",
  "epoch": 20.0,
  "eval_steps": 500,
  "global_step": 8840,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0845986984815619,
      "grad_norm": 1.1301698684692383,
      "learning_rate": 5e-05,
      "loss": 3.9816,
      "step": 500
    },
    {
      "epoch": 1.0845986984815619,
      "eval_bleu": 0.22704779726518312,
      "eval_loss": 2.8213491439819336,
      "eval_rouge1": 0.3850822060355352,
      "eval_rouge2": 0.1625206163604567,
      "eval_rougeL": 0.31976194879885783,
      "eval_runtime": 89.8358,
      "eval_samples_per_second": 10.297,
      "eval_steps_per_second": 1.291,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_bleu": 0.2480969312402941,
      "eval_loss": 2.4793343544006348,
      "eval_rouge1": 0.4405430075781575,
      "eval_rouge2": 0.20302695232764206,
      "eval_rougeL": 0.3844289432538539,
      "eval_runtime": 29.4641,
      "eval_samples_per_second": 31.394,
      "eval_steps_per_second": 3.937,
      "step": 922
    },
    {
      "epoch": 2.1691973969631237,
      "grad_norm": 1.0731440782546997,
      "learning_rate": 4.713302752293578e-05,
      "loss": 2.6514,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_bleu": 0.2658003323042654,
      "eval_loss": 2.2930543422698975,
      "eval_rouge1": 0.48284806373571365,
      "eval_rouge2": 0.2384861670767343,
      "eval_rougeL": 0.43274525400088326,
      "eval_runtime": 91.1451,
      "eval_samples_per_second": 10.149,
      "eval_steps_per_second": 1.273,
      "step": 1383
    },
    {
      "epoch": 3.2537960954446854,
      "grad_norm": 1.1006942987442017,
      "learning_rate": 4.426605504587156e-05,
      "loss": 2.3308,
      "step": 1500
    },
    {
      "epoch": 4.0,
      "eval_bleu": 0.27970338794806143,
      "eval_loss": 2.180091619491577,
      "eval_rouge1": 0.5114337163243292,
      "eval_rouge2": 0.267870161729887,
      "eval_rougeL": 0.4658699841467887,
      "eval_runtime": 29.4867,
      "eval_samples_per_second": 31.37,
      "eval_steps_per_second": 3.934,
      "step": 1844
    },
    {
      "epoch": 4.3383947939262475,
      "grad_norm": 1.0734232664108276,
      "learning_rate": 4.139908256880734e-05,
      "loss": 2.1322,
      "step": 2000
    },
    {
      "epoch": 5.0,
      "eval_bleu": 0.2886420174009109,
      "eval_loss": 2.1132538318634033,
      "eval_rouge1": 0.5263880712269413,
      "eval_rouge2": 0.28674622405060524,
      "eval_rougeL": 0.4851858911276635,
      "eval_runtime": 29.5338,
      "eval_samples_per_second": 31.32,
      "eval_steps_per_second": 3.928,
      "step": 2305
    },
    {
      "epoch": 5.422993492407809,
      "grad_norm": 1.058827519416809,
      "learning_rate": 3.8532110091743125e-05,
      "loss": 1.9942,
      "step": 2500
    },
    {
      "epoch": 6.0,
      "eval_bleu": 0.2925900760553107,
      "eval_loss": 2.064920663833618,
      "eval_rouge1": 0.5417489504784114,
      "eval_rouge2": 0.2993448660857305,
      "eval_rougeL": 0.5002749085104436,
      "eval_runtime": 29.6386,
      "eval_samples_per_second": 31.209,
      "eval_steps_per_second": 3.914,
      "step": 2766
    },
    {
      "epoch": 6.507592190889371,
      "grad_norm": 1.0469533205032349,
      "learning_rate": 3.56651376146789e-05,
      "loss": 1.8884,
      "step": 3000
    },
    {
      "epoch": 7.0,
      "eval_bleu": 0.2966637771816971,
      "eval_loss": 2.03544545173645,
      "eval_rouge1": 0.5528641921007253,
      "eval_rouge2": 0.3107950360794697,
      "eval_rougeL": 0.5117019175789275,
      "eval_runtime": 29.7995,
      "eval_samples_per_second": 31.041,
      "eval_steps_per_second": 3.893,
      "step": 3227
    },
    {
      "epoch": 7.592190889370933,
      "grad_norm": 1.1290621757507324,
      "learning_rate": 3.2798165137614676e-05,
      "loss": 1.8003,
      "step": 3500
    },
    {
      "epoch": 8.0,
      "eval_bleu": 0.297270373541541,
      "eval_loss": 2.016411304473877,
      "eval_rouge1": 0.5596603132781475,
      "eval_rouge2": 0.3199340438705295,
      "eval_rougeL": 0.520436776425443,
      "eval_runtime": 29.6516,
      "eval_samples_per_second": 31.196,
      "eval_steps_per_second": 3.912,
      "step": 3688
    },
    {
      "epoch": 8.676789587852495,
      "grad_norm": 1.0918660163879395,
      "learning_rate": 2.9931192660550462e-05,
      "loss": 1.7305,
      "step": 4000
    },
    {
      "epoch": 9.0,
      "eval_bleu": 0.30530297795149647,
      "eval_loss": 2.0036492347717285,
      "eval_rouge1": 0.5659695387091741,
      "eval_rouge2": 0.3261535326554196,
      "eval_rougeL": 0.5265625742800235,
      "eval_runtime": 29.7893,
      "eval_samples_per_second": 31.051,
      "eval_steps_per_second": 3.894,
      "step": 4149
    },
    {
      "epoch": 9.761388286334057,
      "grad_norm": 1.1562738418579102,
      "learning_rate": 2.7064220183486238e-05,
      "loss": 1.6672,
      "step": 4500
    },
    {
      "epoch": 10.0,
      "eval_bleu": 0.3071835871794814,
      "eval_loss": 1.993295669555664,
      "eval_rouge1": 0.5704281178813693,
      "eval_rouge2": 0.3318532927873396,
      "eval_rougeL": 0.5325234479218677,
      "eval_runtime": 29.7553,
      "eval_samples_per_second": 31.087,
      "eval_steps_per_second": 3.898,
      "step": 4610
    },
    {
      "epoch": 10.845986984815617,
      "grad_norm": 1.1015173196792603,
      "learning_rate": 2.419724770642202e-05,
      "loss": 1.6132,
      "step": 5000
    },
    {
      "epoch": 11.0,
      "eval_bleu": 0.3093132521096642,
      "eval_loss": 1.9886106252670288,
      "eval_rouge1": 0.5737300860653806,
      "eval_rouge2": 0.33663827794301615,
      "eval_rougeL": 0.536346207031389,
      "eval_runtime": 29.743,
      "eval_samples_per_second": 31.1,
      "eval_steps_per_second": 3.9,
      "step": 5071
    },
    {
      "epoch": 11.93058568329718,
      "grad_norm": 1.0905733108520508,
      "learning_rate": 2.13302752293578e-05,
      "loss": 1.5659,
      "step": 5500
    },
    {
      "epoch": 12.0,
      "eval_bleu": 0.3098881020021761,
      "eval_loss": 1.9834030866622925,
      "eval_rouge1": 0.5777003074459022,
      "eval_rouge2": 0.3397382155739319,
      "eval_rougeL": 0.5395519937552808,
      "eval_runtime": 29.7562,
      "eval_samples_per_second": 31.086,
      "eval_steps_per_second": 3.898,
      "step": 5532
    },
    {
      "epoch": 13.0,
      "eval_bleu": 0.3117110664590574,
      "eval_loss": 1.981902003288269,
      "eval_rouge1": 0.5796236336727056,
      "eval_rouge2": 0.34230164752840053,
      "eval_rougeL": 0.5417627454602093,
      "eval_runtime": 29.7814,
      "eval_samples_per_second": 31.06,
      "eval_steps_per_second": 3.895,
      "step": 5993
    },
    {
      "epoch": 13.015184381778742,
      "grad_norm": 1.0530316829681396,
      "learning_rate": 1.8463302752293578e-05,
      "loss": 1.5244,
      "step": 6000
    },
    {
      "epoch": 14.0,
      "eval_bleu": 0.3126312196492616,
      "eval_loss": 1.9798119068145752,
      "eval_rouge1": 0.5833425546826478,
      "eval_rouge2": 0.34524587884448443,
      "eval_rougeL": 0.5450520234727538,
      "eval_runtime": 29.8075,
      "eval_samples_per_second": 31.032,
      "eval_steps_per_second": 3.892,
      "step": 6454
    },
    {
      "epoch": 14.099783080260304,
      "grad_norm": 1.1455042362213135,
      "learning_rate": 1.559633027522936e-05,
      "loss": 1.4884,
      "step": 6500
    },
    {
      "epoch": 15.0,
      "eval_bleu": 0.31303204778954513,
      "eval_loss": 1.9793106317520142,
      "eval_rouge1": 0.5832240471437244,
      "eval_rouge2": 0.34612838297153165,
      "eval_rougeL": 0.545438606875621,
      "eval_runtime": 29.7921,
      "eval_samples_per_second": 31.048,
      "eval_steps_per_second": 3.894,
      "step": 6915
    },
    {
      "epoch": 15.184381778741866,
      "grad_norm": 1.1202692985534668,
      "learning_rate": 1.2729357798165138e-05,
      "loss": 1.4594,
      "step": 7000
    },
    {
      "epoch": 16.0,
      "eval_bleu": 0.3133487171249794,
      "eval_loss": 1.9800282716751099,
      "eval_rouge1": 0.584603146586737,
      "eval_rouge2": 0.34685127859250275,
      "eval_rougeL": 0.5465551086322522,
      "eval_runtime": 30.1539,
      "eval_samples_per_second": 30.676,
      "eval_steps_per_second": 3.847,
      "step": 7376
    },
    {
      "epoch": 16.268980477223426,
      "grad_norm": 1.1474549770355225,
      "learning_rate": 9.862385321100918e-06,
      "loss": 1.4361,
      "step": 7500
    },
    {
      "epoch": 17.0,
      "eval_bleu": 0.3150747205411821,
      "eval_loss": 1.979936957359314,
      "eval_rouge1": 0.5865159051716312,
      "eval_rouge2": 0.34931764934488574,
      "eval_rougeL": 0.5485201965669062,
      "eval_runtime": 29.8082,
      "eval_samples_per_second": 31.032,
      "eval_steps_per_second": 3.892,
      "step": 7837
    },
    {
      "epoch": 17.35357917570499,
      "grad_norm": 1.0869196653366089,
      "learning_rate": 6.995412844036697e-06,
      "loss": 1.4159,
      "step": 8000
    },
    {
      "epoch": 18.0,
      "eval_bleu": 0.31486980402021325,
      "eval_loss": 1.9808851480484009,
      "eval_rouge1": 0.5865213969000711,
      "eval_rouge2": 0.3495173970652049,
      "eval_rougeL": 0.5486068806911688,
      "eval_runtime": 29.8468,
      "eval_samples_per_second": 30.992,
      "eval_steps_per_second": 3.887,
      "step": 8298
    },
    {
      "epoch": 19.0,
      "eval_bleu": 0.3098880521487277,
      "eval_loss": 2.0766849517822266,
      "eval_rouge1": 0.5857850781975715,
      "eval_rouge2": 0.3476439342876294,
      "eval_rougeL": 0.5470857900583828,
      "eval_runtime": 29.9167,
      "eval_samples_per_second": 29.549,
      "eval_steps_per_second": 3.71,
      "step": 8398
    },
    {
      "epoch": 19.23076923076923,
      "grad_norm": 1.234198808670044,
      "learning_rate": 2.0383693045563552e-06,
      "loss": 1.6189,
      "step": 8500
    },
    {
      "epoch": 20.0,
      "eval_bleu": 0.31187064199371684,
      "eval_loss": 2.06535267829895,
      "eval_rouge1": 0.5861980524100046,
      "eval_rouge2": 0.3488620525891558,
      "eval_rougeL": 0.5478910457529194,
      "eval_runtime": 29.8775,
      "eval_samples_per_second": 29.588,
      "eval_steps_per_second": 3.715,
      "step": 8840
    },
    {
      "epoch": 20.0,
      "step": 8840,
      "total_flos": 2.7664555180032e+16,
      "train_loss": 0.0,
      "train_runtime": 0.0941,
      "train_samples_per_second": 112613.962,
      "train_steps_per_second": 14096.678
    }
  ],
  "logging_steps": 500,
  "max_steps": 1326,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7664555180032e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}