{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.13966480446927373,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002793296089385475,
      "eval_loss": 2.4049742221832275,
      "eval_runtime": 13.2901,
      "eval_samples_per_second": 11.362,
      "eval_steps_per_second": 5.719,
      "step": 1
    },
    {
      "epoch": 0.013966480446927373,
      "grad_norm": 2.8598062992095947,
      "learning_rate": 5e-05,
      "loss": 9.1267,
      "step": 5
    },
    {
      "epoch": 0.027932960893854747,
      "grad_norm": 4.437131881713867,
      "learning_rate": 0.0001,
      "loss": 8.7871,
      "step": 10
    },
    {
      "epoch": 0.027932960893854747,
      "eval_loss": 2.1106879711151123,
      "eval_runtime": 12.4419,
      "eval_samples_per_second": 12.136,
      "eval_steps_per_second": 6.108,
      "step": 10
    },
    {
      "epoch": 0.04189944134078212,
      "grad_norm": 3.954702854156494,
      "learning_rate": 9.619397662556435e-05,
      "loss": 7.7161,
      "step": 15
    },
    {
      "epoch": 0.055865921787709494,
      "grad_norm": 4.341027736663818,
      "learning_rate": 8.535533905932738e-05,
      "loss": 6.666,
      "step": 20
    },
    {
      "epoch": 0.055865921787709494,
      "eval_loss": 1.68687903881073,
      "eval_runtime": 12.5323,
      "eval_samples_per_second": 12.049,
      "eval_steps_per_second": 6.064,
      "step": 20
    },
    {
      "epoch": 0.06983240223463687,
      "grad_norm": 4.4329705238342285,
      "learning_rate": 6.91341716182545e-05,
      "loss": 6.7389,
      "step": 25
    },
    {
      "epoch": 0.08379888268156424,
      "grad_norm": 4.825051307678223,
      "learning_rate": 5e-05,
      "loss": 6.4153,
      "step": 30
    },
    {
      "epoch": 0.08379888268156424,
      "eval_loss": 1.5849660634994507,
      "eval_runtime": 12.6209,
      "eval_samples_per_second": 11.964,
      "eval_steps_per_second": 6.022,
      "step": 30
    },
    {
      "epoch": 0.09776536312849161,
      "grad_norm": 4.308661937713623,
      "learning_rate": 3.086582838174551e-05,
      "loss": 6.0258,
      "step": 35
    },
    {
      "epoch": 0.11173184357541899,
      "grad_norm": 4.541212558746338,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 5.8698,
      "step": 40
    },
    {
      "epoch": 0.11173184357541899,
      "eval_loss": 1.5316410064697266,
      "eval_runtime": 12.7421,
      "eval_samples_per_second": 11.85,
      "eval_steps_per_second": 5.964,
      "step": 40
    },
    {
      "epoch": 0.12569832402234637,
      "grad_norm": 5.563971996307373,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 6.0079,
      "step": 45
    },
    {
      "epoch": 0.13966480446927373,
      "grad_norm": 4.291133403778076,
      "learning_rate": 0.0,
      "loss": 6.0674,
      "step": 50
    },
    {
      "epoch": 0.13966480446927373,
      "eval_loss": 1.521949052810669,
      "eval_runtime": 12.6138,
      "eval_samples_per_second": 11.971,
      "eval_steps_per_second": 6.025,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.4264026791936e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}