{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04849660523763336,
  "eval_steps": 9,
  "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0019398642095053346,
      "grad_norm": 5.092199802398682,
      "learning_rate": 1e-05,
      "loss": 4.7383,
      "step": 1
    },
    {
      "epoch": 0.0019398642095053346,
      "eval_loss": 4.821082592010498,
      "eval_runtime": 9.658,
      "eval_samples_per_second": 44.937,
      "eval_steps_per_second": 5.695,
      "step": 1
    },
    {
      "epoch": 0.0038797284190106693,
      "grad_norm": 4.449008464813232,
      "learning_rate": 2e-05,
      "loss": 4.6036,
      "step": 2
    },
    {
      "epoch": 0.005819592628516004,
      "grad_norm": 4.542291164398193,
      "learning_rate": 3e-05,
      "loss": 4.7654,
      "step": 3
    },
    {
      "epoch": 0.007759456838021339,
      "grad_norm": 4.831502437591553,
      "learning_rate": 4e-05,
      "loss": 4.6307,
      "step": 4
    },
    {
      "epoch": 0.009699321047526674,
      "grad_norm": 4.798445224761963,
      "learning_rate": 5e-05,
      "loss": 4.689,
      "step": 5
    },
    {
      "epoch": 0.011639185257032008,
      "grad_norm": 4.600784778594971,
      "learning_rate": 6e-05,
      "loss": 4.4962,
      "step": 6
    },
    {
      "epoch": 0.013579049466537343,
      "grad_norm": 4.6975178718566895,
      "learning_rate": 7e-05,
      "loss": 4.6526,
      "step": 7
    },
    {
      "epoch": 0.015518913676042677,
      "grad_norm": 4.297379970550537,
      "learning_rate": 8e-05,
      "loss": 4.4603,
      "step": 8
    },
    {
      "epoch": 0.01745877788554801,
      "grad_norm": 4.602691650390625,
      "learning_rate": 9e-05,
      "loss": 4.0801,
      "step": 9
    },
    {
      "epoch": 0.01745877788554801,
      "eval_loss": 3.935520887374878,
      "eval_runtime": 7.7589,
      "eval_samples_per_second": 55.935,
      "eval_steps_per_second": 7.089,
      "step": 9
    },
    {
      "epoch": 0.019398642095053348,
      "grad_norm": 3.76698899269104,
      "learning_rate": 0.0001,
      "loss": 3.9645,
      "step": 10
    },
    {
      "epoch": 0.02133850630455868,
      "grad_norm": 3.087214708328247,
      "learning_rate": 9.99695413509548e-05,
      "loss": 3.9473,
      "step": 11
    },
    {
      "epoch": 0.023278370514064017,
      "grad_norm": 3.322976589202881,
      "learning_rate": 9.987820251299122e-05,
      "loss": 3.652,
      "step": 12
    },
    {
      "epoch": 0.02521823472356935,
      "grad_norm": 3.5539023876190186,
      "learning_rate": 9.972609476841367e-05,
      "loss": 3.3627,
      "step": 13
    },
    {
      "epoch": 0.027158098933074686,
      "grad_norm": 3.190157413482666,
      "learning_rate": 9.951340343707852e-05,
      "loss": 3.1173,
      "step": 14
    },
    {
      "epoch": 0.029097963142580018,
      "grad_norm": 3.779177188873291,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.8321,
      "step": 15
    },
    {
      "epoch": 0.031037827352085354,
      "grad_norm": 3.7224929332733154,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.504,
      "step": 16
    },
    {
      "epoch": 0.03297769156159069,
      "grad_norm": 3.4633424282073975,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.2944,
      "step": 17
    },
    {
      "epoch": 0.03491755577109602,
      "grad_norm": 2.928765296936035,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.2625,
      "step": 18
    },
    {
      "epoch": 0.03491755577109602,
      "eval_loss": 2.196943759918213,
      "eval_runtime": 7.7522,
      "eval_samples_per_second": 55.984,
      "eval_steps_per_second": 7.095,
      "step": 18
    },
    {
      "epoch": 0.03685741998060136,
      "grad_norm": 3.9546890258789062,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.3878,
      "step": 19
    },
    {
      "epoch": 0.038797284190106696,
      "grad_norm": 4.105186462402344,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.1788,
      "step": 20
    },
    {
      "epoch": 0.040737148399612025,
      "grad_norm": 3.8948333263397217,
      "learning_rate": 9.635919272833938e-05,
      "loss": 1.8275,
      "step": 21
    },
    {
      "epoch": 0.04267701260911736,
      "grad_norm": 3.057760238647461,
      "learning_rate": 9.567727288213005e-05,
      "loss": 1.621,
      "step": 22
    },
    {
      "epoch": 0.0446168768186227,
      "grad_norm": 2.3399696350097656,
      "learning_rate": 9.493970231495835e-05,
      "loss": 1.655,
      "step": 23
    },
    {
      "epoch": 0.04655674102812803,
      "grad_norm": 2.517136335372925,
      "learning_rate": 9.414737964294636e-05,
      "loss": 1.7654,
      "step": 24
    },
    {
      "epoch": 0.04849660523763336,
      "grad_norm": 2.6656148433685303,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.7639,
      "step": 25
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 645594775289856.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}