{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0432900432900434,
  "eval_steps": 15,
  "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.017316017316017316,
      "eval_loss": 0.8123326897621155,
      "eval_runtime": 21.4215,
      "eval_samples_per_second": 4.528,
      "eval_steps_per_second": 0.607,
      "step": 1
    },
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 1.44769287109375,
      "learning_rate": 3e-05,
      "loss": 3.0463,
      "step": 3
    },
    {
      "epoch": 0.1038961038961039,
      "grad_norm": 0.9673851728439331,
      "learning_rate": 6e-05,
      "loss": 2.883,
      "step": 6
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 1.2237026691436768,
      "learning_rate": 9e-05,
      "loss": 3.0048,
      "step": 9
    },
    {
      "epoch": 0.2077922077922078,
      "grad_norm": 1.0807669162750244,
      "learning_rate": 9.9962857531815e-05,
      "loss": 2.8812,
      "step": 12
    },
    {
      "epoch": 0.2597402597402597,
      "grad_norm": 0.8111446499824524,
      "learning_rate": 9.976801044672608e-05,
      "loss": 2.8254,
      "step": 15
    },
    {
      "epoch": 0.2597402597402597,
      "eval_loss": 0.7221285700798035,
      "eval_runtime": 21.8495,
      "eval_samples_per_second": 4.439,
      "eval_steps_per_second": 0.595,
      "step": 15
    },
    {
      "epoch": 0.3116883116883117,
      "grad_norm": 0.7993184924125671,
      "learning_rate": 9.940682350363912e-05,
      "loss": 2.5783,
      "step": 18
    },
    {
      "epoch": 0.36363636363636365,
      "grad_norm": 0.88717120885849,
      "learning_rate": 9.888050389939172e-05,
      "loss": 3.0288,
      "step": 21
    },
    {
      "epoch": 0.4155844155844156,
      "grad_norm": 0.9446913003921509,
      "learning_rate": 9.819081075450014e-05,
      "loss": 2.6752,
      "step": 24
    },
    {
      "epoch": 0.4675324675324675,
      "grad_norm": 0.7598215937614441,
      "learning_rate": 9.734004923364257e-05,
      "loss": 2.6636,
      "step": 27
    },
    {
      "epoch": 0.5194805194805194,
      "grad_norm": 0.9138664603233337,
      "learning_rate": 9.63310628410961e-05,
      "loss": 2.6451,
      "step": 30
    },
    {
      "epoch": 0.5194805194805194,
      "eval_loss": 0.6947051286697388,
      "eval_runtime": 21.8569,
      "eval_samples_per_second": 4.438,
      "eval_steps_per_second": 0.595,
      "step": 30
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.7083372473716736,
      "learning_rate": 9.516722391687902e-05,
      "loss": 2.5942,
      "step": 33
    },
    {
      "epoch": 0.6233766233766234,
      "grad_norm": 0.6996327042579651,
      "learning_rate": 9.38524223653626e-05,
      "loss": 2.648,
      "step": 36
    },
    {
      "epoch": 0.6753246753246753,
      "grad_norm": 0.7574095726013184,
      "learning_rate": 9.239105265402525e-05,
      "loss": 2.5124,
      "step": 39
    },
    {
      "epoch": 0.7272727272727273,
      "grad_norm": 0.7147637605667114,
      "learning_rate": 9.078799912580304e-05,
      "loss": 2.652,
      "step": 42
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 0.8316633701324463,
      "learning_rate": 8.904861967412703e-05,
      "loss": 2.5992,
      "step": 45
    },
    {
      "epoch": 0.7792207792207793,
      "eval_loss": 0.6810405254364014,
      "eval_runtime": 21.865,
      "eval_samples_per_second": 4.436,
      "eval_steps_per_second": 0.595,
      "step": 45
    },
    {
      "epoch": 0.8311688311688312,
      "grad_norm": 0.7336857318878174,
      "learning_rate": 8.717872783521047e-05,
      "loss": 2.4763,
      "step": 48
    },
    {
      "epoch": 0.8831168831168831,
      "grad_norm": 0.6515845060348511,
      "learning_rate": 8.518457335743926e-05,
      "loss": 2.5329,
      "step": 51
    },
    {
      "epoch": 0.935064935064935,
      "grad_norm": 0.7598447203636169,
      "learning_rate": 8.307282131280804e-05,
      "loss": 2.6017,
      "step": 54
    },
    {
      "epoch": 0.987012987012987,
      "grad_norm": 0.6881215572357178,
      "learning_rate": 8.085052982021847e-05,
      "loss": 2.5885,
      "step": 57
    },
    {
      "epoch": 1.0432900432900434,
      "grad_norm": 0.6885347962379456,
      "learning_rate": 7.85251264550948e-05,
      "loss": 2.3153,
      "step": 60
    },
    {
      "epoch": 1.0432900432900434,
      "eval_loss": 0.6708703637123108,
      "eval_runtime": 21.8845,
      "eval_samples_per_second": 4.432,
      "eval_steps_per_second": 0.594,
      "step": 60
    }
  ],
  "logging_steps": 3,
  "max_steps": 173,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 15,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.091237347262464e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}