{ "best_metric": 0.5335076451301575, "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_dj_03/checkpoint-5358", "epoch": 7.0, "eval_steps": 500, "global_step": 18753, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "grad_norm": 1.8902978897094727, "learning_rate": 4.7947437829691034e-05, "loss": 0.8829, "step": 2679 }, { "epoch": 1.0, "eval_bleu": 0.40331287333676363, "eval_loss": 0.5615599751472473, "eval_rouge1": 0.6238739270954698, "eval_rouge2": 0.3695879500967286, "eval_rougeL": 0.6222094852350151, "eval_runtime": 224.9918, "eval_samples_per_second": 23.912, "eval_steps_per_second": 2.991, "step": 2679 }, { "epoch": 2.0, "grad_norm": 2.3653810024261475, "learning_rate": 4.542388847023361e-05, "loss": 0.4337, "step": 5358 }, { "epoch": 2.0, "eval_bleu": 0.42251895839624176, "eval_loss": 0.5335076451301575, "eval_rouge1": 0.6607626579317042, "eval_rouge2": 0.4163462041346451, "eval_rougeL": 0.6589800378443118, "eval_runtime": 163.0544, "eval_samples_per_second": 32.995, "eval_steps_per_second": 4.127, "step": 5358 }, { "epoch": 3.0, "grad_norm": 1.8291383981704712, "learning_rate": 4.290033911077619e-05, "loss": 0.2951, "step": 8037 }, { "epoch": 3.0, "eval_bleu": 0.4308510587099172, "eval_loss": 0.5472562909126282, "eval_rouge1": 0.6702713862651033, "eval_rouge2": 0.4320237158236569, "eval_rougeL": 0.6688182372945262, "eval_runtime": 162.9515, "eval_samples_per_second": 33.016, "eval_steps_per_second": 4.13, "step": 8037 }, { "epoch": 4.0, "grad_norm": 2.728090524673462, "learning_rate": 4.0376789751318766e-05, "loss": 0.2054, "step": 10716 }, { "epoch": 4.0, "eval_bleu": 0.4276970379095206, "eval_loss": 0.5758230686187744, "eval_rouge1": 0.6763953460411876, "eval_rouge2": 0.43998373859116113, "eval_rougeL": 0.6748324672744146, "eval_runtime": 40.4598, "eval_samples_per_second": 132.971, "eval_steps_per_second": 16.634, "step": 10716 }, { "epoch": 5.0, "grad_norm": 1.6252306699752808, "learning_rate": 3.785324039186134e-05, "loss": 0.1557, "step": 13395 }, { "epoch": 5.0, "eval_bleu": 0.43093263057964937, "eval_loss": 0.6072628498077393, "eval_rouge1": 0.6728345471617314, "eval_rouge2": 0.4398083411751642, "eval_rougeL": 0.6716233259289648, "eval_runtime": 76.1433, "eval_samples_per_second": 70.656, "eval_steps_per_second": 8.839, "step": 13395 }, { "epoch": 6.0, "grad_norm": 1.810899019241333, "learning_rate": 3.532969103240392e-05, "loss": 0.1308, "step": 16074 }, { "epoch": 6.0, "eval_bleu": 0.4314626379100602, "eval_loss": 0.6262651681900024, "eval_rouge1": 0.6767217787616953, "eval_rouge2": 0.44252126541905024, "eval_rougeL": 0.675198694040748, "eval_runtime": 42.2219, "eval_samples_per_second": 127.422, "eval_steps_per_second": 15.94, "step": 16074 }, { "epoch": 7.0, "grad_norm": 1.121442198753357, "learning_rate": 3.28061416729465e-05, "loss": 0.1183, "step": 18753 }, { "epoch": 7.0, "eval_bleu": 0.42802429549225074, "eval_loss": 0.6490646600723267, "eval_rouge1": 0.6730562948760737, "eval_rouge2": 0.43834978582096723, "eval_rougeL": 0.6715407272843534, "eval_runtime": 174.5563, "eval_samples_per_second": 30.821, "eval_steps_per_second": 3.855, "step": 18753 }, { "epoch": 7.0, "step": 18753, "total_flos": 3.4826972570320896e+16, "train_loss": 0.31742249039632825, "train_runtime": 6981.9193, "train_samples_per_second": 61.384, "train_steps_per_second": 7.674 } ], "logging_steps": 500, "max_steps": 53580, "num_input_tokens_seen": 0, "num_train_epochs": 20, "save_steps": 500, 
"stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 3.4826972570320896e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }