{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0024489296136813534,
  "eval_steps": 5,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00016326197424542356,
      "grad_norm": 0.6254855394363403,
      "learning_rate": 1e-05,
      "loss": 1.7351,
      "step": 1
    },
    {
      "epoch": 0.00016326197424542356,
      "eval_loss": 1.7825391292572021,
      "eval_runtime": 350.7469,
      "eval_samples_per_second": 29.412,
      "eval_steps_per_second": 14.706,
      "step": 1
    },
    {
      "epoch": 0.0003265239484908471,
      "grad_norm": 0.6790631413459778,
      "learning_rate": 2e-05,
      "loss": 1.8095,
      "step": 2
    },
    {
      "epoch": 0.0004897859227362707,
      "grad_norm": 0.5952022075653076,
      "learning_rate": 3e-05,
      "loss": 1.6803,
      "step": 3
    },
    {
      "epoch": 0.0006530478969816942,
      "grad_norm": 0.677971363067627,
      "learning_rate": 4e-05,
      "loss": 1.9548,
      "step": 4
    },
    {
      "epoch": 0.0008163098712271178,
      "grad_norm": 0.5969003438949585,
      "learning_rate": 5e-05,
      "loss": 1.418,
      "step": 5
    },
    {
      "epoch": 0.0008163098712271178,
      "eval_loss": 1.7585334777832031,
      "eval_runtime": 287.1049,
      "eval_samples_per_second": 35.931,
      "eval_steps_per_second": 17.966,
      "step": 5
    },
    {
      "epoch": 0.0009795718454725414,
      "grad_norm": 0.6082297563552856,
      "learning_rate": 6e-05,
      "loss": 1.7165,
      "step": 6
    },
    {
      "epoch": 0.001142833819717965,
      "grad_norm": 0.6354915499687195,
      "learning_rate": 7e-05,
      "loss": 1.768,
      "step": 7
    },
    {
      "epoch": 0.0013060957939633885,
      "grad_norm": 0.7055122256278992,
      "learning_rate": 8e-05,
      "loss": 1.7597,
      "step": 8
    },
    {
      "epoch": 0.001469357768208812,
      "grad_norm": 0.685647189617157,
      "learning_rate": 9e-05,
      "loss": 1.5083,
      "step": 9
    },
    {
      "epoch": 0.0016326197424542355,
      "grad_norm": 0.616553008556366,
      "learning_rate": 0.0001,
      "loss": 1.6264,
      "step": 10
    },
    {
      "epoch": 0.0016326197424542355,
      "eval_loss": 1.694636583328247,
      "eval_runtime": 464.5717,
      "eval_samples_per_second": 22.205,
      "eval_steps_per_second": 11.103,
      "step": 10
    },
    {
      "epoch": 0.0017958817166996593,
      "grad_norm": 0.7647472023963928,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.7135,
      "step": 11
    },
    {
      "epoch": 0.001959143690945083,
      "grad_norm": 0.7802902460098267,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.5259,
      "step": 12
    },
    {
      "epoch": 0.0021224056651905064,
      "grad_norm": 0.7555536031723022,
      "learning_rate": 7.938926261462366e-05,
      "loss": 1.7871,
      "step": 13
    },
    {
      "epoch": 0.00228566763943593,
      "grad_norm": 0.8001874685287476,
      "learning_rate": 6.545084971874738e-05,
      "loss": 1.5847,
      "step": 14
    },
    {
      "epoch": 0.0024489296136813534,
      "grad_norm": 0.6708170771598816,
      "learning_rate": 5e-05,
      "loss": 1.5489,
      "step": 15
    },
    {
      "epoch": 0.0024489296136813534,
      "eval_loss": 1.675947666168213,
      "eval_runtime": 504.0522,
      "eval_samples_per_second": 20.466,
      "eval_steps_per_second": 10.233,
      "step": 15
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.227876234952704e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}