{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 243,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.12345679012345678,
      "grad_norm": 6.615536361844481,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.0007,
      "step": 10
    },
    {
      "epoch": 0.24691358024691357,
      "grad_norm": 1.9517030276543728,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.5003,
      "step": 20
    },
    {
      "epoch": 0.37037037037037035,
      "grad_norm": 1.2616137032361001,
      "learning_rate": 9.98702585145264e-06,
      "loss": 0.3963,
      "step": 30
    },
    {
      "epoch": 0.49382716049382713,
      "grad_norm": 1.1703778892793937,
      "learning_rate": 9.883636302119911e-06,
      "loss": 0.3598,
      "step": 40
    },
    {
      "epoch": 0.6172839506172839,
      "grad_norm": 0.9308014595748675,
      "learning_rate": 9.679000645102771e-06,
      "loss": 0.3459,
      "step": 50
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 0.9239080870941112,
      "learning_rate": 9.377361326497673e-06,
      "loss": 0.3467,
      "step": 60
    },
    {
      "epoch": 0.8641975308641975,
      "grad_norm": 0.9292175392332163,
      "learning_rate": 8.984971843707787e-06,
      "loss": 0.3371,
      "step": 70
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.9423895888927237,
      "learning_rate": 8.509967099778934e-06,
      "loss": 0.2955,
      "step": 80
    },
    {
      "epoch": 1.1111111111111112,
      "grad_norm": 1.0014547154310927,
      "learning_rate": 7.962194752988519e-06,
      "loss": 0.2394,
      "step": 90
    },
    {
      "epoch": 1.2345679012345678,
      "grad_norm": 0.9750900984803165,
      "learning_rate": 7.353011058098104e-06,
      "loss": 0.2803,
      "step": 100
    },
    {
      "epoch": 1.3580246913580247,
      "grad_norm": 0.9897175591002245,
      "learning_rate": 6.695045431828524e-06,
      "loss": 0.2512,
      "step": 110
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 1.018954236882003,
      "learning_rate": 6.0019386235167055e-06,
      "loss": 0.2652,
      "step": 120
    },
    {
      "epoch": 1.6049382716049383,
      "grad_norm": 1.1220883965697188,
      "learning_rate": 5.288059919122922e-06,
      "loss": 0.2907,
      "step": 130
    },
    {
      "epoch": 1.7283950617283952,
      "grad_norm": 0.9390825986027425,
      "learning_rate": 4.568209241431615e-06,
      "loss": 0.2387,
      "step": 140
    },
    {
      "epoch": 1.8518518518518519,
      "grad_norm": 1.1100888683091072,
      "learning_rate": 3.857310322416555e-06,
      "loss": 0.2614,
      "step": 150
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 1.1933595819251464,
      "learning_rate": 3.170101308830421e-06,
      "loss": 0.2313,
      "step": 160
    },
    {
      "epoch": 2.0987654320987654,
      "grad_norm": 1.2310270632991422,
      "learning_rate": 2.520829215292426e-06,
      "loss": 0.2175,
      "step": 170
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 1.0112810436386979,
      "learning_rate": 1.9229545593825367e-06,
      "loss": 0.1842,
      "step": 180
    },
    {
      "epoch": 2.3456790123456788,
      "grad_norm": 1.0391882674412423,
      "learning_rate": 1.388872302160353e-06,
      "loss": 0.1915,
      "step": 190
    },
    {
      "epoch": 2.4691358024691357,
      "grad_norm": 1.0330142263823023,
      "learning_rate": 9.296548794875659e-07,
      "loss": 0.204,
      "step": 200
    },
    {
      "epoch": 2.5925925925925926,
      "grad_norm": 0.8921694870341355,
      "learning_rate": 5.548226515528133e-07,
      "loss": 0.1984,
      "step": 210
    },
    {
      "epoch": 2.7160493827160495,
      "grad_norm": 0.8915381430289088,
      "learning_rate": 2.721465295716996e-07,
      "loss": 0.1975,
      "step": 220
    },
    {
      "epoch": 2.8395061728395063,
      "grad_norm": 1.0350358381925495,
      "learning_rate": 8.748687154702673e-08,
      "loss": 0.1757,
      "step": 230
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.9562619814768147,
      "learning_rate": 4.671987054842842e-09,
      "loss": 0.2137,
      "step": 240
    }
  ],
  "logging_steps": 10,
  "max_steps": 243,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9395391430656.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}