{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.03537944454272068,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007075888908544136,
      "eval_loss": 10.552796363830566,
      "eval_runtime": 47.6423,
      "eval_samples_per_second": 12.489,
      "eval_steps_per_second": 6.255,
      "step": 1
    },
    {
      "epoch": 0.0035379444542720678,
      "grad_norm": 26.04566192626953,
      "learning_rate": 5e-05,
      "loss": 10.6868,
      "step": 5
    },
    {
      "epoch": 0.0070758889085441356,
      "grad_norm": 25.12905502319336,
      "learning_rate": 0.0001,
      "loss": 7.7325,
      "step": 10
    },
    {
      "epoch": 0.0070758889085441356,
      "eval_loss": 4.372666835784912,
      "eval_runtime": 47.9791,
      "eval_samples_per_second": 12.401,
      "eval_steps_per_second": 6.211,
      "step": 10
    },
    {
      "epoch": 0.010613833362816204,
      "grad_norm": 26.672901153564453,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.7413,
      "step": 15
    },
    {
      "epoch": 0.014151777817088271,
      "grad_norm": 11.739270210266113,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.4716,
      "step": 20
    },
    {
      "epoch": 0.014151777817088271,
      "eval_loss": 0.4289584755897522,
      "eval_runtime": 47.8845,
      "eval_samples_per_second": 12.426,
      "eval_steps_per_second": 6.223,
      "step": 20
    },
    {
      "epoch": 0.01768972227136034,
      "grad_norm": 5.6261467933654785,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.3967,
      "step": 25
    },
    {
      "epoch": 0.021227666725632408,
      "grad_norm": 7.217609405517578,
      "learning_rate": 5e-05,
      "loss": 0.2675,
      "step": 30
    },
    {
      "epoch": 0.021227666725632408,
      "eval_loss": 0.19041143357753754,
      "eval_runtime": 47.9386,
      "eval_samples_per_second": 12.412,
      "eval_steps_per_second": 6.216,
      "step": 30
    },
    {
      "epoch": 0.024765611179904477,
      "grad_norm": 2.8961856365203857,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.2384,
      "step": 35
    },
    {
      "epoch": 0.028303555634176542,
      "grad_norm": 12.801621437072754,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.2437,
      "step": 40
    },
    {
      "epoch": 0.028303555634176542,
      "eval_loss": 0.1584031879901886,
      "eval_runtime": 48.0401,
      "eval_samples_per_second": 12.385,
      "eval_steps_per_second": 6.203,
      "step": 40
    },
    {
      "epoch": 0.03184150008844861,
      "grad_norm": 5.449904441833496,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.1927,
      "step": 45
    },
    {
      "epoch": 0.03537944454272068,
      "grad_norm": 7.277150630950928,
      "learning_rate": 0.0,
      "loss": 0.2981,
      "step": 50
    },
    {
      "epoch": 0.03537944454272068,
      "eval_loss": 0.13875603675842285,
      "eval_runtime": 47.7713,
      "eval_samples_per_second": 12.455,
      "eval_steps_per_second": 6.238,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9555457081344000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}