{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 59.927739863508634,
  "eval_steps": 1866,
  "global_step": 18660,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.992773986350863,
      "grad_norm": 4.623592853546143,
      "learning_rate": 9.001071811361202e-06,
      "loss": 2.0366,
      "step": 1866
    },
    {
      "epoch": 5.992773986350863,
      "eval_accuracy": 0.32132862878695706,
      "eval_loss": 2.3591790199279785,
      "eval_runtime": 26.4016,
      "eval_samples_per_second": 311.307,
      "eval_steps_per_second": 15.567,
      "step": 1866
    },
    {
      "epoch": 11.985547972701726,
      "grad_norm": 4.165753364562988,
      "learning_rate": 8.001607717041802e-06,
      "loss": 1.8711,
      "step": 3732
    },
    {
      "epoch": 11.985547972701726,
      "eval_accuracy": 0.31104757269740846,
      "eval_loss": 2.4432082176208496,
      "eval_runtime": 26.6876,
      "eval_samples_per_second": 307.971,
      "eval_steps_per_second": 15.4,
      "step": 3732
    },
    {
      "epoch": 17.97832195905259,
      "grad_norm": 3.7397663593292236,
      "learning_rate": 7.002143622722402e-06,
      "loss": 1.7987,
      "step": 5598
    },
    {
      "epoch": 17.97832195905259,
      "eval_accuracy": 0.3014154195563126,
      "eval_loss": 2.5181965827941895,
      "eval_runtime": 26.5667,
      "eval_samples_per_second": 309.372,
      "eval_steps_per_second": 15.47,
      "step": 5598
    },
    {
      "epoch": 23.97109594540345,
      "grad_norm": 3.9918863773345947,
      "learning_rate": 6.002679528403002e-06,
      "loss": 1.7585,
      "step": 7464
    },
    {
      "epoch": 23.97109594540345,
      "eval_accuracy": 0.2936792797177272,
      "eval_loss": 2.556520938873291,
      "eval_runtime": 26.5054,
      "eval_samples_per_second": 310.088,
      "eval_steps_per_second": 15.506,
      "step": 7464
    },
    {
      "epoch": 29.963869931754317,
      "grad_norm": 2.9927406311035156,
      "learning_rate": 5.003215434083602e-06,
      "loss": 1.7381,
      "step": 9330
    },
    {
      "epoch": 29.963869931754317,
      "eval_accuracy": 0.2894756053047816,
      "eval_loss": 2.597092628479004,
      "eval_runtime": 26.5051,
      "eval_samples_per_second": 310.092,
      "eval_steps_per_second": 15.506,
      "step": 9330
    },
    {
      "epoch": 35.95664391810518,
      "grad_norm": 2.9211699962615967,
      "learning_rate": 4.003751339764202e-06,
      "loss": 1.7139,
      "step": 11196
    },
    {
      "epoch": 35.95664391810518,
      "eval_accuracy": 0.28486839437076694,
      "eval_loss": 2.6405980587005615,
      "eval_runtime": 26.4167,
      "eval_samples_per_second": 311.128,
      "eval_steps_per_second": 15.558,
      "step": 11196
    },
    {
      "epoch": 41.94941790445604,
      "grad_norm": 3.3739333152770996,
      "learning_rate": 3.004287245444802e-06,
      "loss": 1.7102,
      "step": 13062
    },
    {
      "epoch": 41.94941790445604,
      "eval_accuracy": 0.2814732414440408,
      "eval_loss": 2.67031192779541,
      "eval_runtime": 26.3808,
      "eval_samples_per_second": 311.552,
      "eval_steps_per_second": 15.579,
      "step": 13062
    },
    {
      "epoch": 47.9421918908069,
      "grad_norm": 3.284862995147705,
      "learning_rate": 2.0053590568060023e-06,
      "loss": 1.6951,
      "step": 14928
    },
    {
      "epoch": 47.9421918908069,
      "eval_accuracy": 0.27834894756053047,
      "eval_loss": 2.6752700805664062,
      "eval_runtime": 26.5225,
      "eval_samples_per_second": 309.888,
      "eval_steps_per_second": 15.496,
      "step": 14928
    },
    {
      "epoch": 53.934965877157765,
      "grad_norm": 2.573415994644165,
      "learning_rate": 1.005359056806002e-06,
      "loss": 1.6954,
      "step": 16794
    },
    {
      "epoch": 53.934965877157765,
      "eval_accuracy": 0.2760811669438023,
      "eval_loss": 2.6846840381622314,
      "eval_runtime": 26.5482,
      "eval_samples_per_second": 309.588,
      "eval_steps_per_second": 15.481,
      "step": 16794
    },
    {
      "epoch": 59.927739863508634,
      "grad_norm": 5.332968235015869,
      "learning_rate": 5.359056806002144e-09,
      "loss": 1.6888,
      "step": 18660
    },
    {
      "epoch": 59.927739863508634,
      "eval_accuracy": 0.27407227156588393,
      "eval_loss": 2.6999714374542236,
      "eval_runtime": 26.3708,
      "eval_samples_per_second": 311.67,
      "eval_steps_per_second": 15.585,
      "step": 18660
    },
    {
      "epoch": 59.927739863508634,
      "step": 18660,
      "total_flos": 9.793402242125722e+17,
      "train_loss": 1.7706359928699424,
      "train_runtime": 26526.4476,
      "train_samples_per_second": 140.814,
      "train_steps_per_second": 0.703
    }
  ],
  "logging_steps": 1866,
  "max_steps": 18660,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 60,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.793402242125722e+17,
  "train_batch_size": 25,
  "trial_name": null,
  "trial_params": null
}