{
  "best_metric": 0.9863776360625434,
  "best_model_checkpoint": "distilbert-base-uncased_finetuned_text_2_disease_cel/checkpoint-501",
  "epoch": 3.0,
  "global_step": 501,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 1.9600798403193614e-05,
      "loss": 3.2148,
      "step": 10
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.920159680638723e-05,
      "loss": 3.1884,
      "step": 20
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8802395209580838e-05,
      "loss": 3.1211,
      "step": 30
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.8403193612774454e-05,
      "loss": 3.0229,
      "step": 40
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.8003992015968066e-05,
      "loss": 2.86,
      "step": 50
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.7604790419161678e-05,
      "loss": 2.7413,
      "step": 60
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.720558882235529e-05,
      "loss": 2.5771,
      "step": 70
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.6806387225548902e-05,
      "loss": 2.4409,
      "step": 80
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.6407185628742518e-05,
      "loss": 2.252,
      "step": 90
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.600798403193613e-05,
      "loss": 2.1139,
      "step": 100
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5608782435129742e-05,
      "loss": 2.0014,
      "step": 110
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.5209580838323354e-05,
      "loss": 1.8311,
      "step": 120
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.4810379241516968e-05,
      "loss": 1.7216,
      "step": 130
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.4411177644710582e-05,
      "loss": 1.6513,
      "step": 140
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.4011976047904192e-05,
      "loss": 1.4706,
      "step": 150
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.3612774451097806e-05,
      "loss": 1.3868,
      "step": 160
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8648648648648649,
      "eval_f1": 0.8458007872306277,
      "eval_loss": 1.1692062616348267,
      "eval_precision": 0.8572972892114447,
      "eval_recall": 0.8648648648648649,
      "eval_runtime": 3.7382,
      "eval_samples_per_second": 178.16,
      "eval_steps_per_second": 5.618,
      "step": 167
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.3213572854291418e-05,
      "loss": 1.2438,
      "step": 170
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.2814371257485032e-05,
      "loss": 1.1916,
      "step": 180
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.2415169660678643e-05,
      "loss": 1.0778,
      "step": 190
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.2015968063872256e-05,
      "loss": 1.0055,
      "step": 200
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.161676646706587e-05,
      "loss": 1.0151,
      "step": 210
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.1217564870259482e-05,
      "loss": 0.9616,
      "step": 220
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.0818363273453095e-05,
      "loss": 0.9831,
      "step": 230
    },
    {
      "epoch": 1.44,
      "learning_rate": 1.0419161676646707e-05,
      "loss": 0.866,
      "step": 240
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.001996007984032e-05,
      "loss": 0.8548,
      "step": 250
    },
    {
      "epoch": 1.56,
      "learning_rate": 9.620758483033933e-06,
      "loss": 0.7455,
      "step": 260
    },
    {
      "epoch": 1.62,
      "learning_rate": 9.221556886227547e-06,
      "loss": 0.693,
      "step": 270
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.822355289421159e-06,
      "loss": 0.6751,
      "step": 280
    },
    {
      "epoch": 1.74,
      "learning_rate": 8.42315369261477e-06,
      "loss": 0.6852,
      "step": 290
    },
    {
      "epoch": 1.8,
      "learning_rate": 8.023952095808385e-06,
      "loss": 0.5647,
      "step": 300
    },
    {
      "epoch": 1.86,
      "learning_rate": 7.624750499001997e-06,
      "loss": 0.5876,
      "step": 310
    },
    {
      "epoch": 1.92,
      "learning_rate": 7.225548902195609e-06,
      "loss": 0.5605,
      "step": 320
    },
    {
      "epoch": 1.98,
      "learning_rate": 6.826347305389223e-06,
      "loss": 0.5345,
      "step": 330
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9744744744744744,
      "eval_f1": 0.973584165402074,
      "eval_loss": 0.42139312624931335,
      "eval_precision": 0.976877941401268,
      "eval_recall": 0.9744744744744744,
      "eval_runtime": 3.9585,
      "eval_samples_per_second": 168.247,
      "eval_steps_per_second": 5.305,
      "step": 334
    },
    {
      "epoch": 2.04,
      "learning_rate": 6.427145708582835e-06,
      "loss": 0.5248,
      "step": 340
    },
    {
      "epoch": 2.1,
      "learning_rate": 6.027944111776448e-06,
      "loss": 0.4671,
      "step": 350
    },
    {
      "epoch": 2.16,
      "learning_rate": 5.62874251497006e-06,
      "loss": 0.4804,
      "step": 360
    },
    {
      "epoch": 2.22,
      "learning_rate": 5.229540918163674e-06,
      "loss": 0.4354,
      "step": 370
    },
    {
      "epoch": 2.28,
      "learning_rate": 4.830339321357286e-06,
      "loss": 0.4218,
      "step": 380
    },
    {
      "epoch": 2.34,
      "learning_rate": 4.431137724550898e-06,
      "loss": 0.4162,
      "step": 390
    },
    {
      "epoch": 2.4,
      "learning_rate": 4.031936127744511e-06,
      "loss": 0.4194,
      "step": 400
    },
    {
      "epoch": 2.46,
      "learning_rate": 3.6327345309381242e-06,
      "loss": 0.3967,
      "step": 410
    },
    {
      "epoch": 2.51,
      "learning_rate": 3.2335329341317368e-06,
      "loss": 0.3876,
      "step": 420
    },
    {
      "epoch": 2.57,
      "learning_rate": 2.8343313373253494e-06,
      "loss": 0.3711,
      "step": 430
    },
    {
      "epoch": 2.63,
      "learning_rate": 2.4351297405189623e-06,
      "loss": 0.4061,
      "step": 440
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.035928143712575e-06,
      "loss": 0.3654,
      "step": 450
    },
    {
      "epoch": 2.75,
      "learning_rate": 1.6367265469061877e-06,
      "loss": 0.379,
      "step": 460
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.2375249500998005e-06,
      "loss": 0.3622,
      "step": 470
    },
    {
      "epoch": 2.87,
      "learning_rate": 8.383233532934132e-07,
      "loss": 0.3553,
      "step": 480
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.39121756487026e-07,
      "loss": 0.3665,
      "step": 490
    },
    {
      "epoch": 2.99,
      "learning_rate": 3.992015968063872e-08,
      "loss": 0.3472,
      "step": 500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9864864864864865,
      "eval_f1": 0.9863776360625434,
      "eval_loss": 0.27318987250328064,
      "eval_precision": 0.987865864189957,
      "eval_recall": 0.9864864864864865,
      "eval_runtime": 3.7863,
      "eval_samples_per_second": 175.899,
      "eval_steps_per_second": 5.546,
      "step": 501
    },
    {
      "epoch": 3.0,
      "step": 501,
      "total_flos": 2118227382190080.0,
      "train_loss": 1.1732605536302882,
      "train_runtime": 338.4848,
      "train_samples_per_second": 47.222,
      "train_steps_per_second": 1.48
    }
  ],
  "max_steps": 501,
  "num_train_epochs": 3,
  "total_flos": 2118227382190080.0,
  "trial_name": null,
  "trial_params": null
}