{
  "best_metric": 0.36799120903015137,
  "best_model_checkpoint": "/tmp/model/checkpoint-4000",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 5e-06,
      "loss": 0.581,
      "step": 100
    },
    {
      "epoch": 0.1,
      "learning_rate": 1e-05,
      "loss": 0.5379,
      "step": 200
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.5e-05,
      "loss": 0.4945,
      "step": 300
    },
    {
      "epoch": 0.2,
      "learning_rate": 2e-05,
      "loss": 0.4066,
      "step": 400
    },
    {
      "epoch": 0.25,
      "learning_rate": 2.5e-05,
      "loss": 0.4182,
      "step": 500
    },
    {
      "epoch": 0.3,
      "learning_rate": 3e-05,
      "loss": 0.4407,
      "step": 600
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.5e-05,
      "loss": 0.418,
      "step": 700
    },
    {
      "epoch": 0.4,
      "learning_rate": 4e-05,
      "loss": 0.4258,
      "step": 800
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.5e-05,
      "loss": 0.3885,
      "step": 900
    },
    {
      "epoch": 0.5,
      "learning_rate": 5e-05,
      "loss": 0.4283,
      "step": 1000
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.9444444444444446e-05,
      "loss": 0.4148,
      "step": 1100
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.3951,
      "step": 1200
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.8333333333333334e-05,
      "loss": 0.4255,
      "step": 1300
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.7777777777777784e-05,
      "loss": 0.3842,
      "step": 1400
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.722222222222222e-05,
      "loss": 0.4461,
      "step": 1500
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.3789,
      "step": 1600
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.6111111111111115e-05,
      "loss": 0.3893,
      "step": 1700
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.555555555555556e-05,
      "loss": 0.3833,
      "step": 1800
    },
    {
      "epoch": 0.95,
      "learning_rate": 4.5e-05,
      "loss": 0.3817,
      "step": 1900
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.3654,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.84475,
      "eval_auc": 0.8835736098526796,
      "eval_f1": 0.689344672336168,
      "eval_loss": 0.40313366055488586,
      "eval_precision": 0.6828543111992071,
      "eval_recall": 0.695959595959596,
      "eval_runtime": 738.7835,
      "eval_samples_per_second": 5.414,
      "eval_steps_per_second": 0.338,
      "step": 2000
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.388888888888889e-05,
      "loss": 0.3236,
      "step": 2100
    },
    {
      "epoch": 1.1,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.3882,
      "step": 2200
    },
    {
      "epoch": 1.15,
      "learning_rate": 4.277777777777778e-05,
      "loss": 0.3413,
      "step": 2300
    },
    {
      "epoch": 1.2,
      "learning_rate": 4.222222222222222e-05,
      "loss": 0.303,
      "step": 2400
    },
    {
      "epoch": 1.25,
      "learning_rate": 4.166666666666667e-05,
      "loss": 0.352,
      "step": 2500
    },
    {
      "epoch": 1.3,
      "learning_rate": 4.111111111111111e-05,
      "loss": 0.323,
      "step": 2600
    },
    {
      "epoch": 1.35,
      "learning_rate": 4.055555555555556e-05,
      "loss": 0.3494,
      "step": 2700
    },
    {
      "epoch": 1.4,
      "learning_rate": 4e-05,
      "loss": 0.3125,
      "step": 2800
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.944444444444445e-05,
      "loss": 0.314,
      "step": 2900
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.394,
      "step": 3000
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.8333333333333334e-05,
      "loss": 0.3142,
      "step": 3100
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.777777777777778e-05,
      "loss": 0.3179,
      "step": 3200
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.722222222222222e-05,
      "loss": 0.2756,
      "step": 3300
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.6666666666666666e-05,
      "loss": 0.3178,
      "step": 3400
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.611111111111111e-05,
      "loss": 0.3101,
      "step": 3500
    },
    {
      "epoch": 1.8,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.3615,
      "step": 3600
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.5e-05,
      "loss": 0.3084,
      "step": 3700
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.444444444444445e-05,
      "loss": 0.308,
      "step": 3800
    },
    {
      "epoch": 1.95,
      "learning_rate": 3.388888888888889e-05,
      "loss": 0.3686,
      "step": 3900
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.3642,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.85775,
      "eval_auc": 0.8958072418537535,
      "eval_f1": 0.7044155844155844,
      "eval_loss": 0.36799120903015137,
      "eval_precision": 0.7251336898395722,
      "eval_recall": 0.6848484848484848,
      "eval_runtime": 782.6408,
      "eval_samples_per_second": 5.111,
      "eval_steps_per_second": 0.319,
      "step": 4000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 2104888442880000.0,
  "trial_name": null,
  "trial_params": null
}