{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 150.0,
  "global_step": 5400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.78,
      "learning_rate": 7.2e-06,
      "loss": 22.7336,
      "step": 100
    },
    {
      "epoch": 5.56,
      "learning_rate": 1.47e-05,
      "loss": 8.3017,
      "step": 200
    },
    {
      "epoch": 8.33,
      "learning_rate": 2.2199999999999998e-05,
      "loss": 5.4371,
      "step": 300
    },
    {
      "epoch": 11.11,
      "learning_rate": 2.97e-05,
      "loss": 4.1583,
      "step": 400
    },
    {
      "epoch": 13.89,
      "learning_rate": 3.7199999999999996e-05,
      "loss": 3.6082,
      "step": 500
    },
    {
      "epoch": 13.89,
      "eval_loss": 3.501119613647461,
      "eval_runtime": 2.3778,
      "eval_samples_per_second": 146.354,
      "eval_steps_per_second": 4.626,
      "eval_wer": 0.9825350621857635,
      "step": 500
    },
    {
      "epoch": 16.67,
      "learning_rate": 4.4699999999999996e-05,
      "loss": 3.4894,
      "step": 600
    },
    {
      "epoch": 19.44,
      "learning_rate": 5.2199999999999995e-05,
      "loss": 3.448,
      "step": 700
    },
    {
      "epoch": 22.22,
      "learning_rate": 5.97e-05,
      "loss": 3.4155,
      "step": 800
    },
    {
      "epoch": 25.0,
      "learning_rate": 6.72e-05,
      "loss": 3.3702,
      "step": 900
    },
    {
      "epoch": 27.78,
      "learning_rate": 7.47e-05,
      "loss": 3.2566,
      "step": 1000
    },
    {
      "epoch": 27.78,
      "eval_loss": 2.9146299362182617,
      "eval_runtime": 2.224,
      "eval_samples_per_second": 156.477,
      "eval_steps_per_second": 4.946,
      "eval_wer": 0.9367557554908706,
      "step": 1000
    },
    {
      "epoch": 30.56,
      "learning_rate": 7.336363636363636e-05,
      "loss": 2.832,
      "step": 1100
    },
    {
      "epoch": 33.33,
      "learning_rate": 7.16590909090909e-05,
      "loss": 2.2848,
      "step": 1200
    },
    {
      "epoch": 36.11,
      "learning_rate": 6.995454545454545e-05,
      "loss": 1.9336,
      "step": 1300
    },
    {
      "epoch": 38.89,
      "learning_rate": 6.824999999999999e-05,
      "loss": 1.7366,
      "step": 1400
    },
    {
      "epoch": 41.67,
      "learning_rate": 6.654545454545455e-05,
      "loss": 1.5744,
      "step": 1500
    },
    {
      "epoch": 41.67,
      "eval_loss": 0.6705893278121948,
      "eval_runtime": 2.2526,
      "eval_samples_per_second": 154.49,
      "eval_steps_per_second": 4.883,
      "eval_wer": 0.20706536120666844,
      "step": 1500
    },
    {
      "epoch": 44.44,
      "learning_rate": 6.484090909090909e-05,
      "loss": 1.4626,
      "step": 1600
    },
    {
      "epoch": 47.22,
      "learning_rate": 6.313636363636363e-05,
      "loss": 1.4203,
      "step": 1700
    },
    {
      "epoch": 50.0,
      "learning_rate": 6.143181818181818e-05,
      "loss": 1.3699,
      "step": 1800
    },
    {
      "epoch": 52.78,
      "learning_rate": 5.9727272727272724e-05,
      "loss": 1.3123,
      "step": 1900
    },
    {
      "epoch": 55.56,
      "learning_rate": 5.802272727272727e-05,
      "loss": 1.2744,
      "step": 2000
    },
    {
      "epoch": 55.56,
      "eval_loss": 0.540650486946106,
      "eval_runtime": 2.2596,
      "eval_samples_per_second": 154.01,
      "eval_steps_per_second": 4.868,
      "eval_wer": 0.1749139984122784,
      "step": 2000
    },
    {
      "epoch": 58.33,
      "learning_rate": 5.631818181818181e-05,
      "loss": 1.2163,
      "step": 2100
    },
    {
      "epoch": 61.11,
      "learning_rate": 5.463068181818181e-05,
      "loss": 1.187,
      "step": 2200
    },
    {
      "epoch": 63.89,
      "learning_rate": 5.292613636363636e-05,
      "loss": 1.1699,
      "step": 2300
    },
    {
      "epoch": 66.67,
      "learning_rate": 5.122159090909091e-05,
      "loss": 1.1208,
      "step": 2400
    },
    {
      "epoch": 69.44,
      "learning_rate": 4.9517045454545456e-05,
      "loss": 1.0734,
      "step": 2500
    },
    {
      "epoch": 69.44,
      "eval_loss": 0.527560830116272,
      "eval_runtime": 2.3157,
      "eval_samples_per_second": 150.278,
      "eval_steps_per_second": 4.75,
      "eval_wer": 0.17610478962688542,
      "step": 2500
    },
    {
      "epoch": 72.22,
      "learning_rate": 4.781249999999999e-05,
      "loss": 1.0427,
      "step": 2600
    },
    {
      "epoch": 75.0,
      "learning_rate": 4.610795454545454e-05,
      "loss": 1.0227,
      "step": 2700
    },
    {
      "epoch": 77.78,
      "learning_rate": 4.4420454545454544e-05,
      "loss": 0.9966,
      "step": 2800
    },
    {
      "epoch": 80.56,
      "learning_rate": 4.271590909090909e-05,
      "loss": 0.9804,
      "step": 2900
    },
    {
      "epoch": 83.33,
      "learning_rate": 4.101136363636364e-05,
      "loss": 0.9451,
      "step": 3000
    },
    {
      "epoch": 83.33,
      "eval_loss": 0.4845634400844574,
      "eval_runtime": 2.2522,
      "eval_samples_per_second": 154.518,
      "eval_steps_per_second": 4.884,
      "eval_wer": 0.17068007409367558,
      "step": 3000
    },
    {
      "epoch": 86.11,
      "learning_rate": 3.9306818181818174e-05,
      "loss": 0.9217,
      "step": 3100
    },
    {
      "epoch": 88.89,
      "learning_rate": 3.760227272727272e-05,
      "loss": 0.904,
      "step": 3200
    },
    {
      "epoch": 91.67,
      "learning_rate": 3.591477272727273e-05,
      "loss": 0.8857,
      "step": 3300
    },
    {
      "epoch": 94.44,
      "learning_rate": 3.421022727272727e-05,
      "loss": 0.88,
      "step": 3400
    },
    {
      "epoch": 97.22,
      "learning_rate": 3.250568181818182e-05,
      "loss": 0.8559,
      "step": 3500
    },
    {
      "epoch": 97.22,
      "eval_loss": 0.494262158870697,
      "eval_runtime": 2.0455,
      "eval_samples_per_second": 170.13,
      "eval_steps_per_second": 5.378,
      "eval_wer": 0.1672400105848108,
      "step": 3500
    },
    {
      "epoch": 100.0,
      "learning_rate": 3.0801136363636364e-05,
      "loss": 0.819,
      "step": 3600
    },
    {
      "epoch": 102.78,
      "learning_rate": 2.9096590909090905e-05,
      "loss": 0.8242,
      "step": 3700
    },
    {
      "epoch": 105.56,
      "learning_rate": 2.7392045454545453e-05,
      "loss": 0.7983,
      "step": 3800
    },
    {
      "epoch": 108.33,
      "learning_rate": 2.56875e-05,
      "loss": 0.7911,
      "step": 3900
    },
    {
      "epoch": 111.11,
      "learning_rate": 2.3982954545454542e-05,
      "loss": 0.7725,
      "step": 4000
    },
    {
      "epoch": 111.11,
      "eval_loss": 0.4919077455997467,
      "eval_runtime": 2.1172,
      "eval_samples_per_second": 164.365,
      "eval_steps_per_second": 5.195,
      "eval_wer": 0.1672400105848108,
      "step": 4000
    },
    {
      "epoch": 113.89,
      "learning_rate": 2.227840909090909e-05,
      "loss": 0.7691,
      "step": 4100
    },
    {
      "epoch": 116.67,
      "learning_rate": 2.0573863636363634e-05,
      "loss": 0.739,
      "step": 4200
    },
    {
      "epoch": 119.44,
      "learning_rate": 1.886931818181818e-05,
      "loss": 0.7452,
      "step": 4300
    },
    {
      "epoch": 122.22,
      "learning_rate": 1.7164772727272726e-05,
      "loss": 0.7218,
      "step": 4400
    },
    {
      "epoch": 125.0,
      "learning_rate": 1.546022727272727e-05,
      "loss": 0.7171,
      "step": 4500
    },
    {
      "epoch": 125.0,
      "eval_loss": 0.5054941773414612,
      "eval_runtime": 2.2494,
      "eval_samples_per_second": 154.711,
      "eval_steps_per_second": 4.89,
      "eval_wer": 0.16115374437681926,
      "step": 4500
    },
    {
      "epoch": 127.78,
      "learning_rate": 1.3755681818181817e-05,
      "loss": 0.7135,
      "step": 4600
    },
    {
      "epoch": 130.56,
      "learning_rate": 1.2051136363636363e-05,
      "loss": 0.6951,
      "step": 4700
    },
    {
      "epoch": 133.33,
      "learning_rate": 1.0346590909090907e-05,
      "loss": 0.698,
      "step": 4800
    },
    {
      "epoch": 136.11,
      "learning_rate": 8.642045454545455e-06,
      "loss": 0.6711,
      "step": 4900
    },
    {
      "epoch": 138.89,
      "learning_rate": 6.937499999999999e-06,
      "loss": 0.6749,
      "step": 5000
    },
    {
      "epoch": 138.89,
      "eval_loss": 0.48401492834091187,
      "eval_runtime": 2.2496,
      "eval_samples_per_second": 154.697,
      "eval_steps_per_second": 4.89,
      "eval_wer": 0.16260915586133898,
      "step": 5000
    },
    {
      "epoch": 141.67,
      "learning_rate": 5.232954545454545e-06,
      "loss": 0.6719,
      "step": 5100
    },
    {
      "epoch": 144.44,
      "learning_rate": 3.545454545454545e-06,
      "loss": 0.6746,
      "step": 5200
    },
    {
      "epoch": 147.22,
      "learning_rate": 1.8409090909090906e-06,
      "loss": 0.6625,
      "step": 5300
    },
    {
      "epoch": 150.0,
      "learning_rate": 1.3636363636363635e-07,
      "loss": 0.6488,
      "step": 5400
    },
    {
      "epoch": 150.0,
      "step": 5400,
      "total_flos": 1.8350456782448493e+19,
      "train_loss": 1.9894317994294344,
      "train_runtime": 2069.4343,
      "train_samples_per_second": 83.356,
      "train_steps_per_second": 2.609
    }
  ],
  "max_steps": 5400,
  "num_train_epochs": 150,
  "total_flos": 1.8350456782448493e+19,
  "trial_name": null,
  "trial_params": null
}