{
  "best_metric": 0.06361353581250748,
  "best_model_checkpoint": "./wav2vec2-xls-r-300m-ar/checkpoint-10000",
  "epoch": 88.49557522123894,
  "global_step": 10000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.22,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 9.1248,
      "step": 25
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 6.6739,
      "step": 50
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 4.632,
      "step": 75
    },
    {
      "epoch": 0.88,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 3.917,
      "step": 100
    },
    {
      "epoch": 1.11,
      "learning_rate": 7.5e-05,
      "loss": 3.3449,
      "step": 125
    },
    {
      "epoch": 1.33,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.8486,
      "step": 150
    },
    {
      "epoch": 1.55,
      "learning_rate": 0.00010499999999999999,
      "loss": 2.5717,
      "step": 175
    },
    {
      "epoch": 1.77,
      "learning_rate": 0.00011999999999999999,
      "loss": 2.3746,
      "step": 200
    },
    {
      "epoch": 1.99,
      "learning_rate": 0.000135,
      "loss": 2.2744,
      "step": 225
    },
    {
      "epoch": 2.21,
      "learning_rate": 0.00015,
      "loss": 2.0759,
      "step": 250
    },
    {
      "epoch": 2.43,
      "learning_rate": 0.000165,
      "loss": 1.7813,
      "step": 275
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00017999999999999998,
      "loss": 1.2002,
      "step": 300
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.000195,
      "loss": 0.7684,
      "step": 325
    },
    {
      "epoch": 3.1,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.527,
      "step": 350
    },
    {
      "epoch": 3.32,
      "learning_rate": 0.000225,
      "loss": 0.3734,
      "step": 375
    },
    {
      "epoch": 3.54,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.3184,
      "step": 400
    },
    {
      "epoch": 3.76,
      "learning_rate": 0.00025499999999999996,
      "loss": 0.2822,
      "step": 425
    },
    {
      "epoch": 3.98,
      "learning_rate": 0.00027,
      "loss": 0.2074,
      "step": 450
    },
    {
      "epoch": 4.2,
      "learning_rate": 0.000285,
      "loss": 0.1985,
      "step": 475
    },
    {
      "epoch": 4.42,
      "learning_rate": 0.0003,
      "loss": 0.1917,
      "step": 500
    },
    {
      "epoch": 4.65,
      "learning_rate": 0.00029921052631578946,
      "loss": 0.1853,
      "step": 525
    },
    {
      "epoch": 4.87,
      "learning_rate": 0.00029842105263157894,
      "loss": 0.1828,
      "step": 550
    },
    {
      "epoch": 5.09,
      "learning_rate": 0.00029763157894736837,
      "loss": 0.1715,
      "step": 575
    },
    {
      "epoch": 5.31,
      "learning_rate": 0.00029684210526315785,
      "loss": 0.1457,
      "step": 600
    },
    {
      "epoch": 5.53,
      "learning_rate": 0.00029605263157894733,
      "loss": 0.1247,
      "step": 625
    },
    {
      "epoch": 5.75,
      "learning_rate": 0.0002952631578947368,
      "loss": 0.1266,
      "step": 650
    },
    {
      "epoch": 5.97,
      "learning_rate": 0.0002944736842105263,
      "loss": 0.117,
      "step": 675
    },
    {
      "epoch": 6.19,
      "learning_rate": 0.0002936842105263158,
      "loss": 0.117,
      "step": 700
    },
    {
      "epoch": 6.42,
      "learning_rate": 0.00029289473684210527,
      "loss": 0.125,
      "step": 725
    },
    {
      "epoch": 6.64,
      "learning_rate": 0.0002921052631578947,
      "loss": 0.1084,
      "step": 750
    },
    {
      "epoch": 6.86,
      "learning_rate": 0.0002913157894736842,
      "loss": 0.1178,
      "step": 775
    },
    {
      "epoch": 7.08,
      "learning_rate": 0.00029052631578947366,
      "loss": 0.1193,
      "step": 800
    },
    {
      "epoch": 7.3,
      "learning_rate": 0.00028973684210526314,
      "loss": 0.0925,
      "step": 825
    },
    {
      "epoch": 7.52,
      "learning_rate": 0.00028894736842105263,
      "loss": 0.085,
      "step": 850
    },
    {
      "epoch": 7.74,
      "learning_rate": 0.00028815789473684206,
      "loss": 0.0865,
      "step": 875
    },
    {
      "epoch": 7.96,
      "learning_rate": 0.00028736842105263154,
      "loss": 0.0811,
      "step": 900
    },
    {
      "epoch": 8.19,
      "learning_rate": 0.000286578947368421,
      "loss": 0.0835,
      "step": 925
    },
    {
      "epoch": 8.41,
      "learning_rate": 0.0002857894736842105,
      "loss": 0.0886,
      "step": 950
    },
    {
      "epoch": 8.63,
      "learning_rate": 0.000285,
      "loss": 0.0991,
      "step": 975
    },
    {
      "epoch": 8.85,
      "learning_rate": 0.0002842105263157894,
      "loss": 0.0793,
      "step": 1000
    },
    {
      "epoch": 8.85,
      "eval_loss": 0.16262491047382355,
      "eval_runtime": 11.6481,
      "eval_samples_per_second": 8.585,
      "eval_steps_per_second": 1.116,
      "eval_wer": 0.07856032524213799,
      "step": 1000
    },
    {
      "epoch": 9.07,
      "learning_rate": 0.00028342105263157895,
      "loss": 0.0863,
      "step": 1025
    },
    {
      "epoch": 9.29,
      "learning_rate": 0.0002826315789473684,
      "loss": 0.0638,
      "step": 1050
    },
    {
      "epoch": 9.51,
      "learning_rate": 0.00028184210526315787,
      "loss": 0.0604,
      "step": 1075
    },
    {
      "epoch": 9.73,
      "learning_rate": 0.00028105263157894735,
      "loss": 0.0621,
      "step": 1100
    },
    {
      "epoch": 9.96,
      "learning_rate": 0.00028026315789473683,
      "loss": 0.0707,
      "step": 1125
    },
    {
      "epoch": 10.18,
      "learning_rate": 0.0002794736842105263,
      "loss": 0.0799,
      "step": 1150
    },
    {
      "epoch": 10.4,
      "learning_rate": 0.00027868421052631574,
      "loss": 0.0694,
      "step": 1175
    },
    {
      "epoch": 10.62,
      "learning_rate": 0.00027789473684210523,
      "loss": 0.0655,
      "step": 1200
    },
    {
      "epoch": 10.84,
      "learning_rate": 0.0002771052631578947,
      "loss": 0.0776,
      "step": 1225
    },
    {
      "epoch": 11.06,
      "learning_rate": 0.0002763157894736842,
      "loss": 0.0582,
      "step": 1250
    },
    {
      "epoch": 11.28,
      "learning_rate": 0.0002755263157894737,
      "loss": 0.0547,
      "step": 1275
    },
    {
      "epoch": 11.5,
      "learning_rate": 0.0002747368421052631,
      "loss": 0.0597,
      "step": 1300
    },
    {
      "epoch": 11.73,
      "learning_rate": 0.0002739473684210526,
      "loss": 0.0492,
      "step": 1325
    },
    {
      "epoch": 11.95,
      "learning_rate": 0.00027315789473684207,
      "loss": 0.0699,
      "step": 1350
    },
    {
      "epoch": 12.17,
      "learning_rate": 0.00027236842105263155,
      "loss": 0.0594,
      "step": 1375
    },
    {
      "epoch": 12.39,
      "learning_rate": 0.00027157894736842104,
      "loss": 0.0541,
      "step": 1400
    },
    {
      "epoch": 12.61,
      "learning_rate": 0.0002707894736842105,
      "loss": 0.0522,
      "step": 1425
    },
    {
      "epoch": 12.83,
      "learning_rate": 0.00027,
      "loss": 0.0645,
      "step": 1450
    },
    {
      "epoch": 13.05,
      "learning_rate": 0.00026921052631578943,
      "loss": 0.0583,
      "step": 1475
    },
    {
      "epoch": 13.27,
      "learning_rate": 0.0002684210526315789,
      "loss": 0.0443,
      "step": 1500
    },
    {
      "epoch": 13.5,
      "learning_rate": 0.0002676315789473684,
      "loss": 0.0548,
      "step": 1525
    },
    {
      "epoch": 13.72,
      "learning_rate": 0.0002668421052631579,
      "loss": 0.0484,
      "step": 1550
    },
    {
      "epoch": 13.94,
      "learning_rate": 0.00026605263157894736,
      "loss": 0.0775,
      "step": 1575
    },
    {
      "epoch": 14.16,
      "learning_rate": 0.0002652631578947368,
      "loss": 0.0554,
      "step": 1600
    },
    {
      "epoch": 14.38,
      "learning_rate": 0.0002644736842105263,
      "loss": 0.0456,
      "step": 1625
    },
    {
      "epoch": 14.6,
      "learning_rate": 0.00026368421052631576,
      "loss": 0.0563,
      "step": 1650
    },
    {
      "epoch": 14.82,
      "learning_rate": 0.00026289473684210524,
      "loss": 0.0493,
      "step": 1675
    },
    {
      "epoch": 15.04,
      "learning_rate": 0.0002621052631578947,
      "loss": 0.0452,
      "step": 1700
    },
    {
      "epoch": 15.27,
      "learning_rate": 0.0002613157894736842,
      "loss": 0.0389,
      "step": 1725
    },
    {
      "epoch": 15.49,
      "learning_rate": 0.0002605263157894737,
      "loss": 0.0436,
      "step": 1750
    },
    {
      "epoch": 15.71,
      "learning_rate": 0.0002597368421052631,
      "loss": 0.0407,
      "step": 1775
    },
    {
      "epoch": 15.93,
      "learning_rate": 0.0002589473684210526,
      "loss": 0.0448,
      "step": 1800
    },
    {
      "epoch": 16.15,
      "learning_rate": 0.0002581578947368421,
      "loss": 0.0457,
      "step": 1825
    },
    {
      "epoch": 16.37,
      "learning_rate": 0.00025736842105263157,
      "loss": 0.0461,
      "step": 1850
    },
    {
      "epoch": 16.59,
      "learning_rate": 0.00025657894736842105,
      "loss": 0.0452,
      "step": 1875
    },
    {
      "epoch": 16.81,
      "learning_rate": 0.0002557894736842105,
      "loss": 0.0484,
      "step": 1900
    },
    {
      "epoch": 17.04,
      "learning_rate": 0.00025499999999999996,
      "loss": 0.0376,
      "step": 1925
    },
    {
      "epoch": 17.26,
      "learning_rate": 0.00025421052631578945,
      "loss": 0.0409,
      "step": 1950
    },
    {
      "epoch": 17.48,
      "learning_rate": 0.00025342105263157893,
      "loss": 0.0315,
      "step": 1975
    },
    {
      "epoch": 17.7,
      "learning_rate": 0.00025263157894736836,
      "loss": 0.0396,
      "step": 2000
    },
    {
      "epoch": 17.7,
      "eval_loss": 0.21985749900341034,
      "eval_runtime": 11.0577,
      "eval_samples_per_second": 9.043,
      "eval_steps_per_second": 1.176,
      "eval_wer": 0.08071266292000479,
      "step": 2000
    },
    {
      "epoch": 17.92,
      "learning_rate": 0.00025184210526315784,
      "loss": 0.0406,
      "step": 2025
    },
    {
      "epoch": 18.14,
      "learning_rate": 0.0002510526315789474,
      "loss": 0.0322,
      "step": 2050
    },
    {
      "epoch": 18.36,
      "learning_rate": 0.0002502631578947368,
      "loss": 0.0393,
      "step": 2075
    },
    {
      "epoch": 18.58,
      "learning_rate": 0.0002494736842105263,
      "loss": 0.0438,
      "step": 2100
    },
    {
      "epoch": 18.81,
      "learning_rate": 0.0002486842105263158,
      "loss": 0.0372,
      "step": 2125
    },
    {
      "epoch": 19.03,
      "learning_rate": 0.00024789473684210526,
      "loss": 0.0335,
      "step": 2150
    },
    {
      "epoch": 19.25,
      "learning_rate": 0.00024710526315789474,
      "loss": 0.0327,
      "step": 2175
    },
    {
      "epoch": 19.47,
      "learning_rate": 0.00024631578947368417,
      "loss": 0.0307,
      "step": 2200
    },
    {
      "epoch": 19.69,
      "learning_rate": 0.00024552631578947365,
      "loss": 0.0374,
      "step": 2225
    },
    {
      "epoch": 19.91,
      "learning_rate": 0.00024473684210526314,
      "loss": 0.035,
      "step": 2250
    },
    {
      "epoch": 20.13,
      "learning_rate": 0.0002439473684210526,
      "loss": 0.0336,
      "step": 2275
    },
    {
      "epoch": 20.35,
      "learning_rate": 0.00024315789473684207,
      "loss": 0.0426,
      "step": 2300
    },
    {
      "epoch": 20.58,
      "learning_rate": 0.00024236842105263156,
      "loss": 0.0375,
      "step": 2325
    },
    {
      "epoch": 20.8,
      "learning_rate": 0.000241578947368421,
      "loss": 0.0352,
      "step": 2350
    },
    {
      "epoch": 21.02,
      "learning_rate": 0.00024078947368421052,
      "loss": 0.0319,
      "step": 2375
    },
    {
      "epoch": 21.24,
      "learning_rate": 0.00023999999999999998,
      "loss": 0.0339,
      "step": 2400
    },
    {
      "epoch": 21.46,
      "learning_rate": 0.00023921052631578946,
      "loss": 0.0335,
      "step": 2425
    },
    {
      "epoch": 21.68,
      "learning_rate": 0.00023842105263157895,
      "loss": 0.0329,
      "step": 2450
    },
    {
      "epoch": 21.9,
      "learning_rate": 0.0002376315789473684,
      "loss": 0.0373,
      "step": 2475
    },
    {
      "epoch": 22.12,
      "learning_rate": 0.00023684210526315788,
      "loss": 0.0371,
      "step": 2500
    },
    {
      "epoch": 22.35,
      "learning_rate": 0.00023605263157894734,
      "loss": 0.0273,
      "step": 2525
    },
    {
      "epoch": 22.57,
      "learning_rate": 0.00023526315789473682,
      "loss": 0.0361,
      "step": 2550
    },
    {
      "epoch": 22.79,
      "learning_rate": 0.00023447368421052628,
      "loss": 0.0367,
      "step": 2575
    },
    {
      "epoch": 23.01,
      "learning_rate": 0.00023368421052631576,
      "loss": 0.0374,
      "step": 2600
    },
    {
      "epoch": 23.23,
      "learning_rate": 0.00023289473684210524,
      "loss": 0.0264,
      "step": 2625
    },
    {
      "epoch": 23.45,
      "learning_rate": 0.0002321052631578947,
      "loss": 0.0331,
      "step": 2650
    },
    {
      "epoch": 23.67,
      "learning_rate": 0.00023131578947368418,
      "loss": 0.0407,
      "step": 2675
    },
    {
      "epoch": 23.89,
      "learning_rate": 0.00023052631578947364,
      "loss": 0.031,
      "step": 2700
    },
    {
      "epoch": 24.12,
      "learning_rate": 0.00022973684210526315,
      "loss": 0.0294,
      "step": 2725
    },
    {
      "epoch": 24.34,
      "learning_rate": 0.00022894736842105263,
      "loss": 0.0345,
      "step": 2750
    },
    {
      "epoch": 24.56,
      "learning_rate": 0.0002281578947368421,
      "loss": 0.027,
      "step": 2775
    },
    {
      "epoch": 24.78,
      "learning_rate": 0.00022736842105263157,
      "loss": 0.0241,
      "step": 2800
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.00022657894736842103,
      "loss": 0.0304,
      "step": 2825
    },
    {
      "epoch": 25.22,
      "learning_rate": 0.0002257894736842105,
      "loss": 0.026,
      "step": 2850
    },
    {
      "epoch": 25.44,
      "learning_rate": 0.000225,
      "loss": 0.0325,
      "step": 2875
    },
    {
      "epoch": 25.66,
      "learning_rate": 0.00022421052631578945,
      "loss": 0.0374,
      "step": 2900
    },
    {
      "epoch": 25.88,
      "learning_rate": 0.00022342105263157893,
      "loss": 0.036,
      "step": 2925
    },
    {
      "epoch": 26.11,
      "learning_rate": 0.0002226315789473684,
      "loss": 0.0339,
      "step": 2950
    },
    {
      "epoch": 26.33,
      "learning_rate": 0.00022184210526315787,
      "loss": 0.0312,
      "step": 2975
    },
    {
      "epoch": 26.55,
      "learning_rate": 0.00022105263157894733,
      "loss": 0.0285,
      "step": 3000
    },
    {
      "epoch": 26.55,
      "eval_loss": 0.22891539335250854,
      "eval_runtime": 10.8646,
      "eval_samples_per_second": 9.204,
      "eval_steps_per_second": 1.197,
      "eval_wer": 0.0693531029534856,
      "step": 3000
    },
    {
      "epoch": 26.77,
      "learning_rate": 0.0002202631578947368,
      "loss": 0.0238,
      "step": 3025
    },
    {
      "epoch": 26.99,
      "learning_rate": 0.00021947368421052632,
      "loss": 0.0226,
      "step": 3050
    },
    {
      "epoch": 27.21,
      "learning_rate": 0.00021868421052631578,
      "loss": 0.0255,
      "step": 3075
    },
    {
      "epoch": 27.43,
      "learning_rate": 0.00021789473684210526,
      "loss": 0.0326,
      "step": 3100
    },
    {
      "epoch": 27.65,
      "learning_rate": 0.00021710526315789472,
      "loss": 0.0629,
      "step": 3125
    },
    {
      "epoch": 27.88,
      "learning_rate": 0.0002163157894736842,
      "loss": 0.0348,
      "step": 3150
    },
    {
      "epoch": 28.1,
      "learning_rate": 0.00021552631578947368,
      "loss": 0.0311,
      "step": 3175
    },
    {
      "epoch": 28.32,
      "learning_rate": 0.00021473684210526314,
      "loss": 0.023,
      "step": 3200
    },
    {
      "epoch": 28.54,
      "learning_rate": 0.00021394736842105262,
      "loss": 0.0214,
      "step": 3225
    },
    {
      "epoch": 28.76,
      "learning_rate": 0.00021315789473684208,
      "loss": 0.0201,
      "step": 3250
    },
    {
      "epoch": 28.98,
      "learning_rate": 0.00021236842105263156,
      "loss": 0.0202,
      "step": 3275
    },
    {
      "epoch": 29.2,
      "learning_rate": 0.00021157894736842102,
      "loss": 0.0231,
      "step": 3300
    },
    {
      "epoch": 29.42,
      "learning_rate": 0.0002107894736842105,
      "loss": 0.0291,
      "step": 3325
    },
    {
      "epoch": 29.65,
      "learning_rate": 0.00020999999999999998,
      "loss": 0.0241,
      "step": 3350
    },
    {
      "epoch": 29.87,
      "learning_rate": 0.00020921052631578944,
      "loss": 0.0261,
      "step": 3375
    },
    {
      "epoch": 30.09,
      "learning_rate": 0.00020842105263157895,
      "loss": 0.0281,
      "step": 3400
    },
    {
      "epoch": 30.31,
      "learning_rate": 0.0002076315789473684,
      "loss": 0.0237,
      "step": 3425
    },
    {
      "epoch": 30.53,
      "learning_rate": 0.0002068421052631579,
      "loss": 0.0199,
      "step": 3450
    },
    {
      "epoch": 30.75,
      "learning_rate": 0.00020605263157894737,
      "loss": 0.0199,
      "step": 3475
    },
    {
      "epoch": 30.97,
      "learning_rate": 0.00020526315789473683,
      "loss": 0.0207,
      "step": 3500
    },
    {
      "epoch": 31.19,
      "learning_rate": 0.0002044736842105263,
      "loss": 0.0221,
      "step": 3525
    },
    {
      "epoch": 31.42,
      "learning_rate": 0.00020368421052631576,
      "loss": 0.0234,
      "step": 3550
    },
    {
      "epoch": 31.64,
      "learning_rate": 0.00020289473684210525,
      "loss": 0.025,
      "step": 3575
    },
    {
      "epoch": 31.86,
      "learning_rate": 0.0002021052631578947,
      "loss": 0.0251,
      "step": 3600
    },
    {
      "epoch": 32.08,
      "learning_rate": 0.0002013157894736842,
      "loss": 0.0232,
      "step": 3625
    },
    {
      "epoch": 32.3,
      "learning_rate": 0.00020052631578947367,
      "loss": 0.0215,
      "step": 3650
    },
    {
      "epoch": 32.52,
      "learning_rate": 0.00019973684210526313,
      "loss": 0.019,
      "step": 3675
    },
    {
      "epoch": 32.74,
      "learning_rate": 0.0001989473684210526,
      "loss": 0.0215,
      "step": 3700
    },
    {
      "epoch": 32.96,
      "learning_rate": 0.0001981578947368421,
      "loss": 0.0209,
      "step": 3725
    },
    {
      "epoch": 33.19,
      "learning_rate": 0.00019736842105263157,
      "loss": 0.0258,
      "step": 3750
    },
    {
      "epoch": 33.41,
      "learning_rate": 0.00019657894736842106,
      "loss": 0.0206,
      "step": 3775
    },
    {
      "epoch": 33.63,
      "learning_rate": 0.0001957894736842105,
      "loss": 0.019,
      "step": 3800
    },
    {
      "epoch": 33.85,
      "learning_rate": 0.000195,
      "loss": 0.0205,
      "step": 3825
    },
    {
      "epoch": 34.07,
      "learning_rate": 0.00019421052631578945,
      "loss": 0.0243,
      "step": 3850
    },
    {
      "epoch": 34.29,
      "learning_rate": 0.00019342105263157894,
      "loss": 0.0166,
      "step": 3875
    },
    {
      "epoch": 34.51,
      "learning_rate": 0.0001926315789473684,
      "loss": 0.0159,
      "step": 3900
    },
    {
      "epoch": 34.73,
      "learning_rate": 0.00019184210526315787,
      "loss": 0.0194,
      "step": 3925
    },
    {
      "epoch": 34.96,
      "learning_rate": 0.00019105263157894736,
      "loss": 0.0207,
      "step": 3950
    },
    {
      "epoch": 35.18,
      "learning_rate": 0.0001902631578947368,
      "loss": 0.0212,
      "step": 3975
    },
    {
      "epoch": 35.4,
      "learning_rate": 0.0001894736842105263,
      "loss": 0.021,
      "step": 4000
    },
    {
      "epoch": 35.4,
      "eval_loss": 0.26615452766418457,
      "eval_runtime": 10.7513,
      "eval_samples_per_second": 9.301,
      "eval_steps_per_second": 1.209,
      "eval_wer": 0.07222288652397466,
      "step": 4000
    },
    {
      "epoch": 35.62,
      "learning_rate": 0.00018868421052631575,
      "loss": 0.0222,
      "step": 4025
    },
    {
      "epoch": 35.84,
      "learning_rate": 0.00018789473684210524,
      "loss": 0.0206,
      "step": 4050
    },
    {
      "epoch": 36.06,
      "learning_rate": 0.00018710526315789475,
      "loss": 0.0196,
      "step": 4075
    },
    {
      "epoch": 36.28,
      "learning_rate": 0.0001863157894736842,
      "loss": 0.0186,
      "step": 4100
    },
    {
      "epoch": 36.5,
      "learning_rate": 0.00018552631578947368,
      "loss": 0.0178,
      "step": 4125
    },
    {
      "epoch": 36.73,
      "learning_rate": 0.00018473684210526314,
      "loss": 0.0159,
      "step": 4150
    },
    {
      "epoch": 36.95,
      "learning_rate": 0.00018394736842105262,
      "loss": 0.0249,
      "step": 4175
    },
    {
      "epoch": 37.17,
      "learning_rate": 0.00018315789473684208,
      "loss": 0.0258,
      "step": 4200
    },
    {
      "epoch": 37.39,
      "learning_rate": 0.00018236842105263156,
      "loss": 0.0202,
      "step": 4225
    },
    {
      "epoch": 37.61,
      "learning_rate": 0.00018157894736842105,
      "loss": 0.022,
      "step": 4250
    },
    {
      "epoch": 37.83,
      "learning_rate": 0.0001807894736842105,
      "loss": 0.0266,
      "step": 4275
    },
    {
      "epoch": 38.05,
      "learning_rate": 0.00017999999999999998,
      "loss": 0.0183,
      "step": 4300
    },
    {
      "epoch": 38.27,
      "learning_rate": 0.00017921052631578944,
      "loss": 0.0159,
      "step": 4325
    },
    {
      "epoch": 38.5,
      "learning_rate": 0.00017842105263157892,
      "loss": 0.0167,
      "step": 4350
    },
    {
      "epoch": 38.72,
      "learning_rate": 0.00017763157894736838,
      "loss": 0.0163,
      "step": 4375
    },
    {
      "epoch": 38.94,
      "learning_rate": 0.00017684210526315786,
      "loss": 0.0151,
      "step": 4400
    },
    {
      "epoch": 39.16,
      "learning_rate": 0.00017605263157894737,
      "loss": 0.0172,
      "step": 4425
    },
    {
      "epoch": 39.38,
      "learning_rate": 0.00017526315789473683,
      "loss": 0.0175,
      "step": 4450
    },
    {
      "epoch": 39.6,
      "learning_rate": 0.0001744736842105263,
      "loss": 0.0193,
      "step": 4475
    },
    {
      "epoch": 39.82,
      "learning_rate": 0.0001736842105263158,
      "loss": 0.0241,
      "step": 4500
    },
    {
      "epoch": 40.04,
      "learning_rate": 0.00017289473684210525,
      "loss": 0.0192,
      "step": 4525
    },
    {
      "epoch": 40.27,
      "learning_rate": 0.00017210526315789473,
      "loss": 0.019,
      "step": 4550
    },
    {
      "epoch": 40.49,
      "learning_rate": 0.0001713157894736842,
      "loss": 0.0138,
      "step": 4575
    },
    {
      "epoch": 40.71,
      "learning_rate": 0.00017052631578947367,
      "loss": 0.0158,
      "step": 4600
    },
    {
      "epoch": 40.93,
      "learning_rate": 0.00016973684210526313,
      "loss": 0.0181,
      "step": 4625
    },
    {
      "epoch": 41.15,
      "learning_rate": 0.0001689473684210526,
      "loss": 0.0167,
      "step": 4650
    },
    {
      "epoch": 41.37,
      "learning_rate": 0.00016815789473684207,
      "loss": 0.0214,
      "step": 4675
    },
    {
      "epoch": 41.59,
      "learning_rate": 0.00016736842105263155,
      "loss": 0.0184,
      "step": 4700
    },
    {
      "epoch": 41.81,
      "learning_rate": 0.00016657894736842103,
      "loss": 0.0166,
      "step": 4725
    },
    {
      "epoch": 42.04,
      "learning_rate": 0.00016578947368421052,
      "loss": 0.0128,
      "step": 4750
    },
    {
      "epoch": 42.26,
      "learning_rate": 0.000165,
      "loss": 0.0156,
      "step": 4775
    },
    {
      "epoch": 42.48,
      "learning_rate": 0.00016421052631578948,
      "loss": 0.0131,
      "step": 4800
    },
    {
      "epoch": 42.7,
      "learning_rate": 0.00016342105263157894,
      "loss": 0.0146,
      "step": 4825
    },
    {
      "epoch": 42.92,
      "learning_rate": 0.00016263157894736842,
      "loss": 0.0157,
      "step": 4850
    },
    {
      "epoch": 43.14,
      "learning_rate": 0.00016184210526315788,
      "loss": 0.0158,
      "step": 4875
    },
    {
      "epoch": 43.36,
      "learning_rate": 0.00016105263157894736,
      "loss": 0.0157,
      "step": 4900
    },
    {
      "epoch": 43.58,
      "learning_rate": 0.00016026315789473682,
      "loss": 0.0159,
      "step": 4925
    },
    {
      "epoch": 43.81,
      "learning_rate": 0.0001594736842105263,
      "loss": 0.0217,
      "step": 4950
    },
    {
      "epoch": 44.03,
      "learning_rate": 0.00015868421052631578,
      "loss": 0.0178,
      "step": 4975
    },
    {
      "epoch": 44.25,
      "learning_rate": 0.00015789473684210524,
      "loss": 0.0177,
      "step": 5000
    },
    {
      "epoch": 44.25,
      "eval_loss": 0.24588559567928314,
      "eval_runtime": 10.7371,
      "eval_samples_per_second": 9.313,
      "eval_steps_per_second": 1.211,
      "eval_wer": 0.07437522420184145,
      "step": 5000
    },
    {
      "epoch": 44.47,
      "learning_rate": 0.00015710526315789472,
      "loss": 0.0163,
      "step": 5025
    },
    {
      "epoch": 44.69,
      "learning_rate": 0.00015631578947368418,
      "loss": 0.0196,
      "step": 5050
    },
    {
      "epoch": 44.91,
      "learning_rate": 0.00015552631578947366,
      "loss": 0.0161,
      "step": 5075
    },
    {
      "epoch": 45.13,
      "learning_rate": 0.00015473684210526317,
      "loss": 0.0158,
      "step": 5100
    },
    {
      "epoch": 45.35,
      "learning_rate": 0.00015394736842105263,
      "loss": 0.0159,
      "step": 5125
    },
    {
      "epoch": 45.58,
      "learning_rate": 0.0001531578947368421,
      "loss": 0.0163,
      "step": 5150
    },
    {
      "epoch": 45.8,
      "learning_rate": 0.00015236842105263156,
      "loss": 0.0137,
      "step": 5175
    },
    {
      "epoch": 46.02,
      "learning_rate": 0.00015157894736842105,
      "loss": 0.0131,
      "step": 5200
    },
    {
      "epoch": 46.24,
      "learning_rate": 0.0001507894736842105,
      "loss": 0.0125,
      "step": 5225
    },
    {
      "epoch": 46.46,
      "learning_rate": 0.00015,
      "loss": 0.0138,
      "step": 5250
    },
    {
      "epoch": 46.68,
      "learning_rate": 0.00014921052631578947,
      "loss": 0.0179,
      "step": 5275
    },
    {
      "epoch": 46.9,
      "learning_rate": 0.00014842105263157893,
      "loss": 0.0169,
      "step": 5300
    },
    {
      "epoch": 47.12,
      "learning_rate": 0.0001476315789473684,
      "loss": 0.0152,
      "step": 5325
    },
    {
      "epoch": 47.35,
      "learning_rate": 0.0001468421052631579,
      "loss": 0.0146,
      "step": 5350
    },
    {
      "epoch": 47.57,
      "learning_rate": 0.00014605263157894735,
      "loss": 0.0143,
      "step": 5375
    },
    {
      "epoch": 47.79,
      "learning_rate": 0.00014526315789473683,
      "loss": 0.0116,
      "step": 5400
    },
    {
      "epoch": 48.01,
      "learning_rate": 0.00014447368421052631,
      "loss": 0.0162,
      "step": 5425
    },
    {
      "epoch": 48.23,
      "learning_rate": 0.00014368421052631577,
      "loss": 0.0115,
      "step": 5450
    },
    {
      "epoch": 48.45,
      "learning_rate": 0.00014289473684210525,
      "loss": 0.0156,
      "step": 5475
    },
    {
      "epoch": 48.67,
      "learning_rate": 0.0001421052631578947,
      "loss": 0.0144,
      "step": 5500
    },
    {
      "epoch": 48.89,
      "learning_rate": 0.0001413157894736842,
      "loss": 0.0155,
      "step": 5525
    },
    {
      "epoch": 49.12,
      "learning_rate": 0.00014052631578947367,
      "loss": 0.0146,
      "step": 5550
    },
    {
      "epoch": 49.34,
      "learning_rate": 0.00013973684210526316,
      "loss": 0.0164,
      "step": 5575
    },
    {
      "epoch": 49.56,
      "learning_rate": 0.00013894736842105261,
      "loss": 0.0128,
      "step": 5600
    },
    {
      "epoch": 49.78,
      "learning_rate": 0.0001381578947368421,
      "loss": 0.0133,
      "step": 5625
    },
    {
      "epoch": 50.0,
      "learning_rate": 0.00013736842105263155,
      "loss": 0.0126,
      "step": 5650
    },
    {
      "epoch": 50.22,
      "learning_rate": 0.00013657894736842104,
      "loss": 0.011,
      "step": 5675
    },
    {
      "epoch": 50.44,
      "learning_rate": 0.00013578947368421052,
      "loss": 0.0154,
      "step": 5700
    },
    {
      "epoch": 50.66,
      "learning_rate": 0.000135,
      "loss": 0.0154,
      "step": 5725
    },
    {
      "epoch": 50.88,
      "learning_rate": 0.00013421052631578946,
      "loss": 0.0131,
      "step": 5750
    },
    {
      "epoch": 51.11,
      "learning_rate": 0.00013342105263157894,
      "loss": 0.0161,
      "step": 5775
    },
    {
      "epoch": 51.33,
      "learning_rate": 0.0001326315789473684,
      "loss": 0.0135,
      "step": 5800
    },
    {
      "epoch": 51.55,
      "learning_rate": 0.00013184210526315788,
      "loss": 0.0125,
      "step": 5825
    },
    {
      "epoch": 51.77,
      "learning_rate": 0.00013105263157894736,
      "loss": 0.0114,
      "step": 5850
    },
    {
      "epoch": 51.99,
      "learning_rate": 0.00013026315789473685,
      "loss": 0.0106,
      "step": 5875
    },
    {
      "epoch": 52.21,
      "learning_rate": 0.0001294736842105263,
      "loss": 0.0119,
      "step": 5900
    },
    {
      "epoch": 52.43,
      "learning_rate": 0.00012868421052631578,
      "loss": 0.0123,
      "step": 5925
    },
    {
      "epoch": 52.65,
      "learning_rate": 0.00012789473684210524,
      "loss": 0.0111,
      "step": 5950
    },
    {
      "epoch": 52.88,
      "learning_rate": 0.00012710526315789472,
      "loss": 0.0139,
      "step": 5975
    },
    {
      "epoch": 53.1,
      "learning_rate": 0.00012631578947368418,
      "loss": 0.0155,
      "step": 6000
    },
    {
      "epoch": 53.1,
      "eval_loss": 0.2688598036766052,
      "eval_runtime": 10.7568,
      "eval_samples_per_second": 9.296,
      "eval_steps_per_second": 1.209,
      "eval_wer": 0.06791821116824107,
      "step": 6000
    },
    {
      "epoch": 53.32,
      "learning_rate": 0.0001255263157894737,
      "loss": 0.0153,
      "step": 6025
    },
    {
      "epoch": 53.54,
      "learning_rate": 0.00012473684210526315,
      "loss": 0.013,
      "step": 6050
    },
    {
      "epoch": 53.76,
      "learning_rate": 0.00012394736842105263,
      "loss": 0.0093,
      "step": 6075
    },
    {
      "epoch": 53.98,
      "learning_rate": 0.00012315789473684208,
      "loss": 0.0125,
      "step": 6100
    },
    {
      "epoch": 54.2,
      "learning_rate": 0.00012236842105263157,
      "loss": 0.0122,
      "step": 6125
    },
    {
      "epoch": 54.42,
      "learning_rate": 0.00012157894736842104,
      "loss": 0.0131,
      "step": 6150
    },
    {
      "epoch": 54.65,
      "learning_rate": 0.0001207894736842105,
      "loss": 0.0129,
      "step": 6175
    },
    {
      "epoch": 54.87,
      "learning_rate": 0.00011999999999999999,
      "loss": 0.0119,
      "step": 6200
    },
    {
      "epoch": 55.09,
      "learning_rate": 0.00011921052631578947,
      "loss": 0.012,
      "step": 6225
    },
    {
      "epoch": 55.31,
      "learning_rate": 0.00011842105263157894,
      "loss": 0.0106,
      "step": 6250
    },
    {
      "epoch": 55.53,
      "learning_rate": 0.00011763157894736841,
      "loss": 0.0077,
      "step": 6275
    },
    {
      "epoch": 55.75,
      "learning_rate": 0.00011684210526315788,
      "loss": 0.0093,
      "step": 6300
    },
    {
      "epoch": 55.97,
      "learning_rate": 0.00011605263157894735,
      "loss": 0.0103,
      "step": 6325
    },
    {
      "epoch": 56.19,
      "learning_rate": 0.00011526315789473682,
      "loss": 0.0148,
      "step": 6350
    },
    {
      "epoch": 56.42,
      "learning_rate": 0.00011447368421052632,
      "loss": 0.0158,
      "step": 6375
    },
    {
      "epoch": 56.64,
      "learning_rate": 0.00011368421052631579,
      "loss": 0.0108,
      "step": 6400
    },
    {
      "epoch": 56.86,
      "learning_rate": 0.00011289473684210526,
      "loss": 0.0135,
      "step": 6425
    },
    {
      "epoch": 57.08,
      "learning_rate": 0.00011210526315789472,
      "loss": 0.0116,
      "step": 6450
    },
    {
      "epoch": 57.3,
      "learning_rate": 0.0001113157894736842,
      "loss": 0.0105,
      "step": 6475
    },
    {
      "epoch": 57.52,
      "learning_rate": 0.00011052631578947366,
      "loss": 0.0101,
      "step": 6500
    },
    {
      "epoch": 57.74,
      "learning_rate": 0.00010973684210526316,
      "loss": 0.0089,
      "step": 6525
    },
    {
      "epoch": 57.96,
      "learning_rate": 0.00010894736842105263,
      "loss": 0.0098,
      "step": 6550
    },
    {
      "epoch": 58.19,
      "learning_rate": 0.0001081578947368421,
      "loss": 0.0121,
      "step": 6575
    },
    {
      "epoch": 58.41,
      "learning_rate": 0.00010736842105263157,
      "loss": 0.013,
      "step": 6600
    },
    {
      "epoch": 58.63,
      "learning_rate": 0.00010657894736842104,
      "loss": 0.012,
      "step": 6625
    },
    {
      "epoch": 58.85,
      "learning_rate": 0.00010578947368421051,
      "loss": 0.0089,
      "step": 6650
    },
    {
      "epoch": 59.07,
      "learning_rate": 0.00010499999999999999,
      "loss": 0.0111,
      "step": 6675
    },
    {
      "epoch": 59.29,
      "learning_rate": 0.00010421052631578947,
      "loss": 0.0085,
      "step": 6700
    },
    {
      "epoch": 59.51,
      "learning_rate": 0.00010342105263157894,
      "loss": 0.0094,
      "step": 6725
    },
    {
      "epoch": 59.73,
      "learning_rate": 0.00010263157894736841,
      "loss": 0.0098,
      "step": 6750
    },
    {
      "epoch": 59.96,
      "learning_rate": 0.00010184210526315788,
      "loss": 0.0109,
      "step": 6775
    },
    {
      "epoch": 60.18,
      "learning_rate": 0.00010105263157894735,
      "loss": 0.0129,
      "step": 6800
    },
    {
      "epoch": 60.4,
      "learning_rate": 0.00010026315789473683,
      "loss": 0.01,
      "step": 6825
    },
    {
      "epoch": 60.62,
      "learning_rate": 9.94736842105263e-05,
      "loss": 0.0089,
      "step": 6850
    },
    {
      "epoch": 60.84,
      "learning_rate": 9.868421052631579e-05,
      "loss": 0.0122,
      "step": 6875
    },
    {
      "epoch": 61.06,
      "learning_rate": 9.789473684210526e-05,
      "loss": 0.0167,
      "step": 6900
    },
    {
      "epoch": 61.28,
      "learning_rate": 9.710526315789473e-05,
      "loss": 0.0105,
      "step": 6925
    },
    {
      "epoch": 61.5,
      "learning_rate": 9.63157894736842e-05,
      "loss": 0.0089,
      "step": 6950
    },
    {
      "epoch": 61.73,
      "learning_rate": 9.552631578947368e-05,
      "loss": 0.0102,
      "step": 6975
    },
    {
      "epoch": 61.95,
      "learning_rate": 9.473684210526315e-05,
      "loss": 0.0149,
      "step": 7000
    },
    {
      "epoch": 61.95,
      "eval_loss": 0.2759546935558319,
      "eval_runtime": 10.7835,
      "eval_samples_per_second": 9.273,
      "eval_steps_per_second": 1.206,
      "eval_wer": 0.07174458926222647,
      "step": 7000
    },
    {
      "epoch": 62.17,
      "learning_rate": 9.394736842105262e-05,
      "loss": 0.0116,
      "step": 7025
    },
    {
      "epoch": 62.39,
      "learning_rate": 9.31578947368421e-05,
      "loss": 0.0102,
      "step": 7050
    },
    {
      "epoch": 62.61,
      "learning_rate": 9.236842105263157e-05,
      "loss": 0.0118,
      "step": 7075
    },
    {
      "epoch": 62.83,
      "learning_rate": 9.157894736842104e-05,
      "loss": 0.0093,
      "step": 7100
    },
    {
      "epoch": 63.05,
      "learning_rate": 9.078947368421052e-05,
      "loss": 0.0074,
      "step": 7125
    },
    {
      "epoch": 63.27,
      "learning_rate": 8.999999999999999e-05,
      "loss": 0.0079,
      "step": 7150
    },
    {
      "epoch": 63.5,
      "learning_rate": 8.921052631578946e-05,
      "loss": 0.0075,
      "step": 7175
    },
    {
      "epoch": 63.72,
      "learning_rate": 8.842105263157893e-05,
      "loss": 0.0099,
      "step": 7200
    },
    {
      "epoch": 63.94,
      "learning_rate": 8.763157894736841e-05,
      "loss": 0.0098,
      "step": 7225
    },
    {
      "epoch": 64.16,
      "learning_rate": 8.68421052631579e-05,
      "loss": 0.0109,
      "step": 7250
    },
    {
      "epoch": 64.38,
      "learning_rate": 8.605263157894737e-05,
      "loss": 0.0103,
      "step": 7275
    },
    {
      "epoch": 64.6,
      "learning_rate": 8.526315789473684e-05,
      "loss": 0.0074,
      "step": 7300
    },
    {
      "epoch": 64.82,
      "learning_rate": 8.44736842105263e-05,
      "loss": 0.0101,
      "step": 7325
    },
    {
      "epoch": 65.04,
      "learning_rate": 8.368421052631578e-05,
      "loss": 0.009,
      "step": 7350
    },
    {
      "epoch": 65.27,
      "learning_rate": 8.289473684210526e-05,
      "loss": 0.0077,
      "step": 7375
    },
    {
      "epoch": 65.49,
      "learning_rate": 8.210526315789474e-05,
      "loss": 0.009,
      "step": 7400
    },
    {
      "epoch": 65.71,
      "learning_rate": 8.131578947368421e-05,
      "loss": 0.0098,
      "step": 7425
    },
    {
      "epoch": 65.93,
      "learning_rate": 8.052631578947368e-05,
      "loss": 0.0096,
      "step": 7450
    },
    {
      "epoch": 66.15,
      "learning_rate": 7.973684210526315e-05,
      "loss": 0.0136,
      "step": 7475
    },
    {
      "epoch": 66.37,
      "learning_rate": 7.894736842105262e-05,
      "loss": 0.0082,
      "step": 7500
    },
    {
      "epoch": 66.59,
      "learning_rate": 7.815789473684209e-05,
      "loss": 0.0089,
      "step": 7525
    },
    {
      "epoch": 66.81,
      "learning_rate": 7.736842105263159e-05,
      "loss": 0.0089,
      "step": 7550
    },
    {
      "epoch": 67.04,
      "learning_rate": 7.657894736842105e-05,
      "loss": 0.0084,
      "step": 7575
    },
    {
      "epoch": 67.26,
      "learning_rate": 7.578947368421052e-05,
      "loss": 0.0073,
      "step": 7600
    },
    {
      "epoch": 67.48,
      "learning_rate": 7.5e-05,
      "loss": 0.008,
      "step": 7625
    },
    {
      "epoch": 67.7,
      "learning_rate": 7.421052631578946e-05,
      "loss": 0.0083,
      "step": 7650
    },
    {
      "epoch": 67.92,
      "learning_rate": 7.342105263157895e-05,
      "loss": 0.0068,
      "step": 7675
    },
    {
      "epoch": 68.14,
      "learning_rate": 7.263157894736842e-05,
      "loss": 0.007,
      "step": 7700
    },
    {
      "epoch": 68.36,
      "learning_rate": 7.184210526315788e-05,
      "loss": 0.0086,
      "step": 7725
    },
    {
      "epoch": 68.58,
      "learning_rate": 7.105263157894735e-05,
      "loss": 0.0077,
      "step": 7750
    },
    {
      "epoch": 68.81,
      "learning_rate": 7.026315789473684e-05,
      "loss": 0.0076,
      "step": 7775
    },
    {
      "epoch": 69.03,
      "learning_rate": 6.947368421052631e-05,
      "loss": 0.006,
      "step": 7800
    },
    {
      "epoch": 69.25,
      "learning_rate": 6.868421052631578e-05,
      "loss": 0.006,
      "step": 7825
    },
    {
      "epoch": 69.47,
      "learning_rate": 6.789473684210526e-05,
      "loss": 0.0077,
      "step": 7850
    },
    {
      "epoch": 69.69,
      "learning_rate": 6.710526315789473e-05,
      "loss": 0.009,
      "step": 7875
    },
    {
      "epoch": 69.91,
      "learning_rate": 6.63157894736842e-05,
      "loss": 0.0081,
      "step": 7900
    },
    {
      "epoch": 70.13,
      "learning_rate": 6.552631578947368e-05,
      "loss": 0.0085,
      "step": 7925
    },
    {
      "epoch": 70.35,
      "learning_rate": 6.473684210526315e-05,
      "loss": 0.007,
      "step": 7950
    },
    {
      "epoch": 70.58,
      "learning_rate": 6.394736842105262e-05,
      "loss": 0.0088,
      "step": 7975
    },
    {
      "epoch": 70.8,
      "learning_rate": 6.315789473684209e-05,
      "loss": 0.0074,
      "step": 8000
    },
    {
      "epoch": 70.8,
      "eval_loss": 0.30043184757232666,
      "eval_runtime": 10.795,
      "eval_samples_per_second": 9.264,
      "eval_steps_per_second": 1.204,
      "eval_wer": 0.0680377854836781,
      "step": 8000
    },
    {
      "epoch": 71.02,
      "learning_rate": 6.236842105263157e-05,
      "loss": 0.0061,
      "step": 8025
    },
    {
      "epoch": 71.24,
      "learning_rate": 6.157894736842104e-05,
      "loss": 0.0059,
      "step": 8050
    },
    {
      "epoch": 71.46,
      "learning_rate": 6.078947368421052e-05,
      "loss": 0.008,
      "step": 8075
    },
    {
      "epoch": 71.68,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 0.0105,
      "step": 8100
    },
    {
      "epoch": 71.9,
      "learning_rate": 5.921052631578947e-05,
      "loss": 0.0066,
      "step": 8125
    },
    {
      "epoch": 72.12,
      "learning_rate": 5.842105263157894e-05,
      "loss": 0.0075,
      "step": 8150
    },
    {
      "epoch": 72.35,
      "learning_rate": 5.763157894736841e-05,
      "loss": 0.01,
      "step": 8175
    },
    {
      "epoch": 72.57,
      "learning_rate": 5.684210526315789e-05,
      "loss": 0.0097,
      "step": 8200
    },
    {
      "epoch": 72.79,
      "learning_rate": 5.605263157894736e-05,
      "loss": 0.007,
      "step": 8225
    },
    {
      "epoch": 73.01,
      "learning_rate": 5.526315789473683e-05,
      "loss": 0.0047,
      "step": 8250
    },
    {
      "epoch": 73.23,
      "learning_rate": 5.4473684210526315e-05,
      "loss": 0.006,
      "step": 8275
    },
    {
      "epoch": 73.45,
      "learning_rate": 5.3684210526315784e-05,
      "loss": 0.0078,
      "step": 8300
    },
    {
      "epoch": 73.67,
      "learning_rate": 5.2894736842105254e-05,
      "loss": 0.0085,
      "step": 8325
    },
    {
      "epoch": 73.89,
      "learning_rate": 5.210526315789474e-05,
      "loss": 0.0075,
      "step": 8350
    },
    {
      "epoch": 74.12,
      "learning_rate": 5.1315789473684206e-05,
      "loss": 0.0062,
      "step": 8375
    },
    {
      "epoch": 74.34,
      "learning_rate": 5.0526315789473676e-05,
      "loss": 0.0073,
      "step": 8400
    },
    {
      "epoch": 74.56,
      "learning_rate": 4.973684210526315e-05,
      "loss": 0.0063,
      "step": 8425
    },
    {
      "epoch": 74.78,
      "learning_rate": 4.894736842105263e-05,
      "loss": 0.006,
      "step": 8450
    },
    {
      "epoch": 75.0,
      "learning_rate": 4.81578947368421e-05,
      "loss": 0.0064,
      "step": 8475
    },
    {
      "epoch": 75.22,
      "learning_rate": 4.7368421052631574e-05,
      "loss": 0.0058,
      "step": 8500
    },
    {
      "epoch": 75.44,
      "learning_rate": 4.657894736842105e-05,
      "loss": 0.0059,
      "step": 8525
    },
    {
      "epoch": 75.66,
      "learning_rate": 4.578947368421052e-05,
      "loss": 0.0064,
      "step": 8550
    },
    {
      "epoch": 75.88,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 0.0085,
      "step": 8575
    },
    {
      "epoch": 76.11,
      "learning_rate": 4.4210526315789466e-05,
      "loss": 0.0065,
      "step": 8600
    },
    {
      "epoch": 76.33,
      "learning_rate": 4.342105263157895e-05,
      "loss": 0.0068,
      "step": 8625
    },
    {
      "epoch": 76.55,
      "learning_rate": 4.263157894736842e-05,
      "loss": 0.0084,
      "step": 8650
    },
    {
      "epoch": 76.77,
      "learning_rate": 4.184210526315789e-05,
      "loss": 0.0049,
      "step": 8675
    },
    {
      "epoch": 76.99,
      "learning_rate": 4.105263157894737e-05,
      "loss": 0.0055,
      "step": 8700
    },
    {
      "epoch": 77.21,
      "learning_rate": 4.026315789473684e-05,
      "loss": 0.0077,
      "step": 8725
    },
    {
      "epoch": 77.43,
      "learning_rate": 3.947368421052631e-05,
      "loss": 0.0071,
      "step": 8750
    },
    {
      "epoch": 77.65,
      "learning_rate": 3.868421052631579e-05,
      "loss": 0.005,
      "step": 8775
    },
    {
      "epoch": 77.88,
      "learning_rate": 3.789473684210526e-05,
      "loss": 0.0066,
      "step": 8800
    },
    {
      "epoch": 78.1,
      "learning_rate": 3.710526315789473e-05,
      "loss": 0.0067,
      "step": 8825
    },
    {
      "epoch": 78.32,
      "learning_rate": 3.631578947368421e-05,
      "loss": 0.0058,
      "step": 8850
    },
    {
      "epoch": 78.54,
      "learning_rate": 3.552631578947368e-05,
      "loss": 0.004,
      "step": 8875
    },
    {
      "epoch": 78.76,
      "learning_rate": 3.4736842105263153e-05,
      "loss": 0.0044,
      "step": 8900
    },
    {
      "epoch": 78.98,
      "learning_rate": 3.394736842105263e-05,
      "loss": 0.0052,
      "step": 8925
    },
    {
      "epoch": 79.2,
      "learning_rate": 3.31578947368421e-05,
      "loss": 0.0058,
      "step": 8950
    },
    {
      "epoch": 79.42,
      "learning_rate": 3.2368421052631575e-05,
      "loss": 0.0063,
      "step": 8975
    },
    {
      "epoch": 79.65,
      "learning_rate": 3.1578947368421045e-05,
      "loss": 0.0058,
      "step": 9000
    },
    {
      "epoch": 79.65,
      "eval_loss": 0.31133854389190674,
      "eval_runtime": 10.8234,
      "eval_samples_per_second": 9.239,
      "eval_steps_per_second": 1.201,
      "eval_wer": 0.065048427597752,
      "step": 9000
    },
    {
      "epoch": 79.87,
      "learning_rate": 3.078947368421052e-05,
      "loss": 0.0052,
      "step": 9025
    },
    {
      "epoch": 80.09,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 0.0053,
      "step": 9050
    },
    {
      "epoch": 80.31,
      "learning_rate": 2.921052631578947e-05,
      "loss": 0.0049,
      "step": 9075
    },
    {
      "epoch": 80.53,
      "learning_rate": 2.8421052631578946e-05,
      "loss": 0.0049,
      "step": 9100
    },
    {
      "epoch": 80.75,
      "learning_rate": 2.7631578947368416e-05,
      "loss": 0.0045,
      "step": 9125
    },
    {
      "epoch": 80.97,
      "learning_rate": 2.6842105263157892e-05,
      "loss": 0.0053,
      "step": 9150
    },
    {
      "epoch": 81.19,
      "learning_rate": 2.605263157894737e-05,
      "loss": 0.0051,
      "step": 9175
    },
    {
      "epoch": 81.42,
      "learning_rate": 2.5263157894736838e-05,
      "loss": 0.0056,
      "step": 9200
    },
    {
      "epoch": 81.64,
      "learning_rate": 2.4473684210526314e-05,
      "loss": 0.0056,
      "step": 9225
    },
    {
      "epoch": 81.86,
      "learning_rate": 2.3684210526315787e-05,
      "loss": 0.0047,
      "step": 9250
    },
    {
      "epoch": 82.08,
      "learning_rate": 2.289473684210526e-05,
      "loss": 0.0072,
      "step": 9275
    },
    {
      "epoch": 82.3,
      "learning_rate": 2.2105263157894733e-05,
      "loss": 0.0055,
      "step": 9300
    },
    {
      "epoch": 82.52,
      "learning_rate": 2.131578947368421e-05,
      "loss": 0.005,
      "step": 9325
    },
    {
      "epoch": 82.74,
      "learning_rate": 2.0526315789473685e-05,
      "loss": 0.0035,
      "step": 9350
    },
    {
      "epoch": 82.96,
      "learning_rate": 1.9736842105263155e-05,
      "loss": 0.0052,
      "step": 9375
    },
    {
      "epoch": 83.19,
      "learning_rate": 1.894736842105263e-05,
      "loss": 0.0046,
      "step": 9400
    },
    {
      "epoch": 83.41,
      "learning_rate": 1.8157894736842104e-05,
      "loss": 0.0056,
      "step": 9425
    },
    {
      "epoch": 83.63,
      "learning_rate": 1.7368421052631577e-05,
      "loss": 0.005,
      "step": 9450
    },
    {
      "epoch": 83.85,
      "learning_rate": 1.657894736842105e-05,
      "loss": 0.0046,
      "step": 9475
    },
    {
      "epoch": 84.07,
      "learning_rate": 1.5789473684210522e-05,
      "loss": 0.0061,
      "step": 9500
    },
    {
      "epoch": 84.29,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 0.0051,
      "step": 9525
    },
    {
      "epoch": 84.51,
      "learning_rate": 1.4210526315789473e-05,
      "loss": 0.0033,
      "step": 9550
    },
    {
      "epoch": 84.73,
      "learning_rate": 1.3421052631578946e-05,
      "loss": 0.0035,
      "step": 9575
    },
    {
      "epoch": 84.96,
      "learning_rate": 1.2631578947368419e-05,
      "loss": 0.0054,
      "step": 9600
    },
    {
      "epoch": 85.18,
      "learning_rate": 1.1842105263157894e-05,
      "loss": 0.005,
      "step": 9625
    },
    {
      "epoch": 85.4,
      "learning_rate": 1.1052631578947366e-05,
      "loss": 0.0039,
      "step": 9650
    },
    {
      "epoch": 85.62,
      "learning_rate": 1.0263157894736843e-05,
      "loss": 0.0044,
      "step": 9675
    },
    {
      "epoch": 85.84,
      "learning_rate": 9.473684210526315e-06,
      "loss": 0.0049,
      "step": 9700
    },
    {
      "epoch": 86.06,
      "learning_rate": 8.684210526315788e-06,
      "loss": 0.0042,
      "step": 9725
    },
    {
      "epoch": 86.28,
      "learning_rate": 7.894736842105261e-06,
      "loss": 0.0041,
      "step": 9750
    },
    {
      "epoch": 86.5,
      "learning_rate": 7.105263157894737e-06,
      "loss": 0.0036,
      "step": 9775
    },
    {
      "epoch": 86.73,
      "learning_rate": 6.3157894736842095e-06,
      "loss": 0.0037,
      "step": 9800
    },
    {
      "epoch": 86.95,
      "learning_rate": 5.526315789473683e-06,
      "loss": 0.005,
      "step": 9825
    },
    {
      "epoch": 87.17,
      "learning_rate": 4.736842105263158e-06,
      "loss": 0.0047,
      "step": 9850
    },
    {
      "epoch": 87.39,
      "learning_rate": 3.947368421052631e-06,
      "loss": 0.0039,
      "step": 9875
    },
    {
      "epoch": 87.61,
      "learning_rate": 3.1578947368421047e-06,
      "loss": 0.0041,
      "step": 9900
    },
    {
      "epoch": 87.83,
      "learning_rate": 2.368421052631579e-06,
      "loss": 0.0052,
      "step": 9925
    },
    {
      "epoch": 88.05,
      "learning_rate": 1.5789473684210524e-06,
      "loss": 0.0041,
      "step": 9950
    },
    {
      "epoch": 88.27,
      "learning_rate": 7.894736842105262e-07,
      "loss": 0.004,
      "step": 9975
    },
    {
      "epoch": 88.5,
      "learning_rate": 0.0,
      "loss": 0.0033,
      "step": 10000
    },
    {
      "epoch": 88.5,
      "eval_loss": 0.32122763991355896,
      "eval_runtime": 10.9251,
      "eval_samples_per_second": 9.153,
      "eval_steps_per_second": 1.19,
      "eval_wer": 0.06361353581250748,
      "step": 10000
    },
    {
      "epoch": 88.5,
      "step": 10000,
      "total_flos": 3.937659402470073e+19,
      "train_loss": 0.13773754513785244,
      "train_runtime": 20842.6742,
      "train_samples_per_second": 7.677,
      "train_steps_per_second": 0.48
    }
  ],
  "max_steps": 10000,
  "num_train_epochs": 89,
  "total_flos": 3.937659402470073e+19,
  "trial_name": null,
  "trial_params": null
}