{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8257638315441783,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.055050922102945224,
      "grad_norm": 3.223727226257324,
      "learning_rate": 5.494505494505495e-05,
      "loss": 2.0113,
      "step": 100
    },
    {
      "epoch": 0.11010184420589045,
      "grad_norm": 2.7249491214752197,
      "learning_rate": 9.997006099836617e-05,
      "loss": 1.7306,
      "step": 200
    },
    {
      "epoch": 0.16515276630883569,
      "grad_norm": 2.2732579708099365,
      "learning_rate": 9.871874350577997e-05,
      "loss": 1.6522,
      "step": 300
    },
    {
      "epoch": 0.2202036884117809,
      "grad_norm": 2.5243077278137207,
      "learning_rate": 9.567205799555621e-05,
      "loss": 1.5667,
      "step": 400
    },
    {
      "epoch": 0.2752546105147261,
      "grad_norm": 2.2532739639282227,
      "learning_rate": 9.09422799766929e-05,
      "loss": 1.5134,
      "step": 500
    },
    {
      "epoch": 0.33030553261767137,
      "grad_norm": 2.3742384910583496,
      "learning_rate": 8.47037097610317e-05,
      "loss": 1.4548,
      "step": 600
    },
    {
      "epoch": 0.3853564547206166,
      "grad_norm": 2.96374773979187,
      "learning_rate": 7.718624920225358e-05,
      "loss": 1.4309,
      "step": 700
    },
    {
      "epoch": 0.4404073768235618,
      "grad_norm": 2.140960693359375,
      "learning_rate": 6.866692942410824e-05,
      "loss": 1.4181,
      "step": 800
    },
    {
      "epoch": 0.495458298926507,
      "grad_norm": 1.9198336601257324,
      "learning_rate": 5.9459701755408125e-05,
      "loss": 1.3413,
      "step": 900
    },
    {
      "epoch": 0.5505092210294522,
      "grad_norm": 1.8933072090148926,
      "learning_rate": 4.9903868093081854e-05,
      "loss": 1.3367,
      "step": 1000
    },
    {
      "epoch": 0.6055601431323975,
      "grad_norm": 1.8014689683914185,
      "learning_rate": 4.0351577053958396e-05,
      "loss": 1.2852,
      "step": 1100
    },
    {
      "epoch": 0.6606110652353427,
      "grad_norm": 1.8801296949386597,
      "learning_rate": 3.1154846703215746e-05,
      "loss": 1.229,
      "step": 1200
    },
    {
      "epoch": 0.7156619873382879,
      "grad_norm": 1.738985538482666,
      "learning_rate": 2.2652592093878666e-05,
      "loss": 1.2434,
      "step": 1300
    },
    {
      "epoch": 0.7707129094412332,
      "grad_norm": 2.303053379058838,
      "learning_rate": 1.5158135674454127e-05,
      "loss": 1.2021,
      "step": 1400
    },
    {
      "epoch": 0.8257638315441783,
      "grad_norm": 1.6559442281723022,
      "learning_rate": 8.947660827286925e-06,
      "loss": 1.1831,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 1816,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.96786646670508e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}