{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.012244897959183673,
  "eval_steps": 8,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00040816326530612246,
      "eval_loss": 1.3330155611038208,
      "eval_runtime": 72.1822,
      "eval_samples_per_second": 14.297,
      "eval_steps_per_second": 7.149,
      "step": 1
    },
    {
      "epoch": 0.0012244897959183673,
      "grad_norm": 2.0126116275787354,
      "learning_rate": 6e-05,
      "loss": 4.883,
      "step": 3
    },
    {
      "epoch": 0.0024489795918367346,
      "grad_norm": 1.7460933923721313,
      "learning_rate": 0.00012,
      "loss": 5.1913,
      "step": 6
    },
    {
      "epoch": 0.0032653061224489797,
      "eval_loss": 1.1873412132263184,
      "eval_runtime": 71.2572,
      "eval_samples_per_second": 14.483,
      "eval_steps_per_second": 7.241,
      "step": 8
    },
    {
      "epoch": 0.003673469387755102,
      "grad_norm": 1.6684772968292236,
      "learning_rate": 0.00018,
      "loss": 4.5746,
      "step": 9
    },
    {
      "epoch": 0.004897959183673469,
      "grad_norm": 2.312330961227417,
      "learning_rate": 0.00019510565162951537,
      "loss": 4.6686,
      "step": 12
    },
    {
      "epoch": 0.006122448979591836,
      "grad_norm": 2.092863082885742,
      "learning_rate": 0.00017071067811865476,
      "loss": 4.0921,
      "step": 15
    },
    {
      "epoch": 0.006530612244897959,
      "eval_loss": 1.0457439422607422,
      "eval_runtime": 71.3543,
      "eval_samples_per_second": 14.463,
      "eval_steps_per_second": 7.232,
      "step": 16
    },
    {
      "epoch": 0.007346938775510204,
      "grad_norm": 1.790899395942688,
      "learning_rate": 0.00013090169943749476,
      "loss": 4.1099,
      "step": 18
    },
    {
      "epoch": 0.008571428571428572,
      "grad_norm": 1.987960696220398,
      "learning_rate": 8.435655349597689e-05,
      "loss": 4.4062,
      "step": 21
    },
    {
      "epoch": 0.009795918367346938,
      "grad_norm": 1.5989941358566284,
      "learning_rate": 4.12214747707527e-05,
      "loss": 3.8699,
      "step": 24
    },
    {
      "epoch": 0.009795918367346938,
      "eval_loss": 0.9915894269943237,
      "eval_runtime": 71.3937,
      "eval_samples_per_second": 14.455,
      "eval_steps_per_second": 7.228,
      "step": 24
    },
    {
      "epoch": 0.011020408163265306,
      "grad_norm": 2.049525499343872,
      "learning_rate": 1.0899347581163221e-05,
      "loss": 4.1971,
      "step": 27
    },
    {
      "epoch": 0.012244897959183673,
      "grad_norm": 2.069650411605835,
      "learning_rate": 0.0,
      "loss": 3.7882,
      "step": 30
    }
  ],
  "logging_steps": 3,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.772334209826816e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}