{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "global_step": 25000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "learning_rate": 1.9600000000000002e-05,
      "loss": 1.558,
      "step": 500
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9200000000000003e-05,
      "loss": 1.4467,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.88e-05,
      "loss": 1.4187,
      "step": 1500
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.8400000000000003e-05,
      "loss": 1.4271,
      "step": 2000
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.8e-05,
      "loss": 1.4207,
      "step": 2500
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.76e-05,
      "loss": 1.4212,
      "step": 3000
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.72e-05,
      "loss": 1.4041,
      "step": 3500
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.6800000000000002e-05,
      "loss": 1.4028,
      "step": 4000
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.64e-05,
      "loss": 1.3807,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.3853,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.2580437660217285,
      "eval_runtime": 314.0588,
      "eval_samples_per_second": 31.841,
      "eval_steps_per_second": 3.98,
      "step": 5000
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.5600000000000003e-05,
      "loss": 1.3783,
      "step": 5500
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.5200000000000002e-05,
      "loss": 1.3674,
      "step": 6000
    },
    {
      "epoch": 1.3,
      "learning_rate": 1.48e-05,
      "loss": 1.369,
      "step": 6500
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.4400000000000001e-05,
      "loss": 1.3729,
      "step": 7000
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.4e-05,
      "loss": 1.3774,
      "step": 7500
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.3600000000000002e-05,
      "loss": 1.3641,
      "step": 8000
    },
    {
      "epoch": 1.7,
      "learning_rate": 1.3200000000000002e-05,
      "loss": 1.3798,
      "step": 8500
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.2800000000000001e-05,
      "loss": 1.3602,
      "step": 9000
    },
    {
      "epoch": 1.9,
      "learning_rate": 1.2400000000000002e-05,
      "loss": 1.3641,
      "step": 9500
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.2e-05,
      "loss": 1.3537,
      "step": 10000
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.234650731086731,
      "eval_runtime": 312.8974,
      "eval_samples_per_second": 31.959,
      "eval_steps_per_second": 3.995,
      "step": 10000
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.16e-05,
      "loss": 1.3352,
      "step": 10500
    },
    {
      "epoch": 2.2,
      "learning_rate": 1.1200000000000001e-05,
      "loss": 1.3482,
      "step": 11000
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.0800000000000002e-05,
      "loss": 1.3437,
      "step": 11500
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.04e-05,
      "loss": 1.3391,
      "step": 12000
    },
    {
      "epoch": 2.5,
      "learning_rate": 1e-05,
      "loss": 1.3408,
      "step": 12500
    },
    {
      "epoch": 2.6,
      "learning_rate": 9.600000000000001e-06,
      "loss": 1.335,
      "step": 13000
    },
    {
      "epoch": 2.7,
      "learning_rate": 9.200000000000002e-06,
      "loss": 1.3334,
      "step": 13500
    },
    {
      "epoch": 2.8,
      "learning_rate": 8.8e-06,
      "loss": 1.3466,
      "step": 14000
    },
    {
      "epoch": 2.9,
      "learning_rate": 8.400000000000001e-06,
      "loss": 1.3279,
      "step": 14500
    },
    {
      "epoch": 3.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.3286,
      "step": 15000
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.2182681560516357,
      "eval_runtime": 313.6417,
      "eval_samples_per_second": 31.884,
      "eval_steps_per_second": 3.985,
      "step": 15000
    },
    {
      "epoch": 3.1,
      "learning_rate": 7.600000000000001e-06,
      "loss": 1.3196,
      "step": 15500
    },
    {
      "epoch": 3.2,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 1.3267,
      "step": 16000
    },
    {
      "epoch": 3.3,
      "learning_rate": 6.800000000000001e-06,
      "loss": 1.3224,
      "step": 16500
    },
    {
      "epoch": 3.4,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 1.3191,
      "step": 17000
    },
    {
      "epoch": 3.5,
      "learning_rate": 6e-06,
      "loss": 1.3106,
      "step": 17500
    },
    {
      "epoch": 3.6,
      "learning_rate": 5.600000000000001e-06,
      "loss": 1.3268,
      "step": 18000
    },
    {
      "epoch": 3.7,
      "learning_rate": 5.2e-06,
      "loss": 1.3112,
      "step": 18500
    },
    {
      "epoch": 3.8,
      "learning_rate": 4.800000000000001e-06,
      "loss": 1.3199,
      "step": 19000
    },
    {
      "epoch": 3.9,
      "learning_rate": 4.4e-06,
      "loss": 1.3137,
      "step": 19500
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.3216,
      "step": 20000
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.1937603950500488,
      "eval_runtime": 291.0578,
      "eval_samples_per_second": 34.357,
      "eval_steps_per_second": 4.295,
      "step": 20000
    },
    {
      "epoch": 4.1,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 1.3143,
      "step": 20500
    },
    {
      "epoch": 4.2,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 1.327,
      "step": 21000
    },
    {
      "epoch": 4.3,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 1.3079,
      "step": 21500
    },
    {
      "epoch": 4.4,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 1.3193,
      "step": 22000
    },
    {
      "epoch": 4.5,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.3018,
      "step": 22500
    },
    {
      "epoch": 4.6,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 1.3092,
      "step": 23000
    },
    {
      "epoch": 4.7,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 1.3076,
      "step": 23500
    },
    {
      "epoch": 4.8,
      "learning_rate": 8.000000000000001e-07,
      "loss": 1.3124,
      "step": 24000
    },
    {
      "epoch": 4.9,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 1.3079,
      "step": 24500
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.0,
      "loss": 1.3194,
      "step": 25000
    }
  ],
  "max_steps": 25000,
  "num_train_epochs": 5,
  "total_flos": 2.65243613184e+16,
  "trial_name": null,
  "trial_params": null
}