{
  "best_metric": 78.86568308105001,
  "best_model_checkpoint": "./whisper-small-ha-adam-v4/checkpoint-2000",
  "epoch": 15.0,
  "eval_steps": 500,
  "global_step": 2355,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 21.07309341430664,
      "learning_rate": 2.5e-05,
      "loss": 4.4409,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 20.01163673400879,
      "learning_rate": 5e-05,
      "loss": 2.732,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 20.838537216186523,
      "learning_rate": 5e-05,
      "loss": 2.1765,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 13.77207088470459,
      "learning_rate": 5e-05,
      "loss": 1.908,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 13.532313346862793,
      "learning_rate": 5e-05,
      "loss": 1.745,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 13.651493072509766,
      "learning_rate": 5e-05,
      "loss": 1.6779,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 9.819719314575195,
      "learning_rate": 5e-05,
      "loss": 1.0426,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 9.96407699584961,
      "learning_rate": 5e-05,
      "loss": 0.7765,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 11.112582206726074,
      "learning_rate": 5e-05,
      "loss": 0.7978,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 10.045536041259766,
      "learning_rate": 5e-05,
      "loss": 0.8032,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 11.228167533874512,
      "learning_rate": 5e-05,
      "loss": 0.7916,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 10.779911994934082,
      "learning_rate": 5e-05,
      "loss": 0.8241,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 6.224853038787842,
      "learning_rate": 5e-05,
      "loss": 0.5693,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 6.078026294708252,
      "learning_rate": 5e-05,
      "loss": 0.2365,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 5.989273548126221,
      "learning_rate": 5e-05,
      "loss": 0.2271,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 8.927431106567383,
      "learning_rate": 5e-05,
      "loss": 0.2478,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 7.507767200469971,
      "learning_rate": 5e-05,
      "loss": 0.2846,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 6.9506330490112305,
      "learning_rate": 5e-05,
      "loss": 0.2722,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 4.77834415435791,
      "learning_rate": 5e-05,
      "loss": 0.2588,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 4.616095066070557,
      "learning_rate": 5e-05,
      "loss": 0.0995,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 1.7910184860229492,
      "eval_runtime": 264.8902,
      "eval_samples_per_second": 2.492,
      "eval_steps_per_second": 0.159,
      "eval_wer": 88.17781184134891,
      "eval_wer_ortho": 90.4296875,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 4.324921607971191,
      "learning_rate": 5e-05,
      "loss": 0.1136,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 3.88926100730896,
      "learning_rate": 5e-05,
      "loss": 0.117,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 4.270689964294434,
      "learning_rate": 5e-05,
      "loss": 0.1093,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 5.100738048553467,
      "learning_rate": 5e-05,
      "loss": 0.1185,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 5.05330753326416,
      "learning_rate": 5e-05,
      "loss": 0.1377,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 3.5940771102905273,
      "learning_rate": 5e-05,
      "loss": 0.0823,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 3.304124355316162,
      "learning_rate": 5e-05,
      "loss": 0.0763,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 3.3643264770507812,
      "learning_rate": 5e-05,
      "loss": 0.0758,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 3.0554709434509277,
      "learning_rate": 5e-05,
      "loss": 0.084,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 5.5209174156188965,
      "learning_rate": 5e-05,
      "loss": 0.0816,
      "step": 750
    },
    {
      "epoch": 4.936305732484076,
      "grad_norm": 5.73225736618042,
      "learning_rate": 5e-05,
      "loss": 0.1064,
      "step": 775
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 2.312955379486084,
      "learning_rate": 5e-05,
      "loss": 0.0697,
      "step": 800
    },
    {
      "epoch": 5.254777070063694,
      "grad_norm": 5.729402542114258,
      "learning_rate": 5e-05,
      "loss": 0.0639,
      "step": 825
    },
    {
      "epoch": 5.414012738853503,
      "grad_norm": 3.784353733062744,
      "learning_rate": 5e-05,
      "loss": 0.0733,
      "step": 850
    },
    {
      "epoch": 5.573248407643312,
      "grad_norm": 2.1651360988616943,
      "learning_rate": 5e-05,
      "loss": 0.0774,
      "step": 875
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 4.008174419403076,
      "learning_rate": 5e-05,
      "loss": 0.0653,
      "step": 900
    },
    {
      "epoch": 5.89171974522293,
      "grad_norm": 4.1395978927612305,
      "learning_rate": 5e-05,
      "loss": 0.0892,
      "step": 925
    },
    {
      "epoch": 6.050955414012739,
      "grad_norm": 3.262603998184204,
      "learning_rate": 5e-05,
      "loss": 0.0573,
      "step": 950
    },
    {
      "epoch": 6.210191082802548,
      "grad_norm": 2.6815080642700195,
      "learning_rate": 5e-05,
      "loss": 0.0646,
      "step": 975
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 1.4305006265640259,
      "learning_rate": 5e-05,
      "loss": 0.0468,
      "step": 1000
    },
    {
      "epoch": 6.369426751592357,
      "eval_loss": 1.959425926208496,
      "eval_runtime": 259.4756,
      "eval_samples_per_second": 2.544,
      "eval_steps_per_second": 0.162,
      "eval_wer": 81.14581337420962,
      "eval_wer_ortho": 82.83203125,
      "step": 1000
    },
    {
      "epoch": 6.528662420382165,
      "grad_norm": 6.745048522949219,
      "learning_rate": 5e-05,
      "loss": 0.0515,
      "step": 1025
    },
    {
      "epoch": 6.687898089171974,
      "grad_norm": 2.1589980125427246,
      "learning_rate": 5e-05,
      "loss": 0.0507,
      "step": 1050
    },
    {
      "epoch": 6.8471337579617835,
      "grad_norm": 3.8626465797424316,
      "learning_rate": 5e-05,
      "loss": 0.0456,
      "step": 1075
    },
    {
      "epoch": 7.006369426751593,
      "grad_norm": 2.852226734161377,
      "learning_rate": 5e-05,
      "loss": 0.0507,
      "step": 1100
    },
    {
      "epoch": 7.165605095541402,
      "grad_norm": 1.2226496934890747,
      "learning_rate": 5e-05,
      "loss": 0.0606,
      "step": 1125
    },
    {
      "epoch": 7.32484076433121,
      "grad_norm": 2.6874589920043945,
      "learning_rate": 5e-05,
      "loss": 0.0488,
      "step": 1150
    },
    {
      "epoch": 7.484076433121019,
      "grad_norm": 0.9016306400299072,
      "learning_rate": 5e-05,
      "loss": 0.0526,
      "step": 1175
    },
    {
      "epoch": 7.643312101910828,
      "grad_norm": 2.1149990558624268,
      "learning_rate": 5e-05,
      "loss": 0.0484,
      "step": 1200
    },
    {
      "epoch": 7.802547770700637,
      "grad_norm": 2.9689130783081055,
      "learning_rate": 5e-05,
      "loss": 0.0466,
      "step": 1225
    },
    {
      "epoch": 7.961783439490446,
      "grad_norm": 2.814160108566284,
      "learning_rate": 5e-05,
      "loss": 0.0443,
      "step": 1250
    },
    {
      "epoch": 8.121019108280255,
      "grad_norm": 4.792329788208008,
      "learning_rate": 5e-05,
      "loss": 0.0374,
      "step": 1275
    },
    {
      "epoch": 8.280254777070065,
      "grad_norm": 3.2325730323791504,
      "learning_rate": 5e-05,
      "loss": 0.0347,
      "step": 1300
    },
    {
      "epoch": 8.439490445859873,
      "grad_norm": 3.5987560749053955,
      "learning_rate": 5e-05,
      "loss": 0.0421,
      "step": 1325
    },
    {
      "epoch": 8.598726114649681,
      "grad_norm": 2.2664973735809326,
      "learning_rate": 5e-05,
      "loss": 0.0337,
      "step": 1350
    },
    {
      "epoch": 8.757961783439491,
      "grad_norm": 2.501434326171875,
      "learning_rate": 5e-05,
      "loss": 0.0363,
      "step": 1375
    },
    {
      "epoch": 8.9171974522293,
      "grad_norm": 3.5731797218322754,
      "learning_rate": 5e-05,
      "loss": 0.0511,
      "step": 1400
    },
    {
      "epoch": 9.07643312101911,
      "grad_norm": 2.30594539642334,
      "learning_rate": 5e-05,
      "loss": 0.0535,
      "step": 1425
    },
    {
      "epoch": 9.235668789808917,
      "grad_norm": 1.6527944803237915,
      "learning_rate": 5e-05,
      "loss": 0.04,
      "step": 1450
    },
    {
      "epoch": 9.394904458598726,
      "grad_norm": 2.126922130584717,
      "learning_rate": 5e-05,
      "loss": 0.0299,
      "step": 1475
    },
    {
      "epoch": 9.554140127388536,
      "grad_norm": 2.613511323928833,
      "learning_rate": 5e-05,
      "loss": 0.0394,
      "step": 1500
    },
    {
      "epoch": 9.554140127388536,
      "eval_loss": 2.0775890350341797,
      "eval_runtime": 268.0164,
      "eval_samples_per_second": 2.463,
      "eval_steps_per_second": 0.157,
      "eval_wer": 87.75627514849587,
      "eval_wer_ortho": 89.84375,
      "step": 1500
    },
    {
      "epoch": 9.713375796178344,
      "grad_norm": 2.170787811279297,
      "learning_rate": 5e-05,
      "loss": 0.0327,
      "step": 1525
    },
    {
      "epoch": 9.872611464968152,
      "grad_norm": 2.1923575401306152,
      "learning_rate": 5e-05,
      "loss": 0.0472,
      "step": 1550
    },
    {
      "epoch": 10.031847133757962,
      "grad_norm": 4.220789909362793,
      "learning_rate": 5e-05,
      "loss": 0.0463,
      "step": 1575
    },
    {
      "epoch": 10.19108280254777,
      "grad_norm": 1.4491336345672607,
      "learning_rate": 5e-05,
      "loss": 0.0447,
      "step": 1600
    },
    {
      "epoch": 10.35031847133758,
      "grad_norm": 3.3499913215637207,
      "learning_rate": 5e-05,
      "loss": 0.043,
      "step": 1625
    },
    {
      "epoch": 10.509554140127388,
      "grad_norm": 2.196830987930298,
      "learning_rate": 5e-05,
      "loss": 0.035,
      "step": 1650
    },
    {
      "epoch": 10.668789808917197,
      "grad_norm": 2.2914416790008545,
      "learning_rate": 5e-05,
      "loss": 0.0281,
      "step": 1675
    },
    {
      "epoch": 10.828025477707007,
      "grad_norm": 2.437507152557373,
      "learning_rate": 5e-05,
      "loss": 0.0431,
      "step": 1700
    },
    {
      "epoch": 10.987261146496815,
      "grad_norm": 5.599733352661133,
      "learning_rate": 5e-05,
      "loss": 0.0434,
      "step": 1725
    },
    {
      "epoch": 11.146496815286625,
      "grad_norm": 3.811133861541748,
      "learning_rate": 5e-05,
      "loss": 0.0287,
      "step": 1750
    },
    {
      "epoch": 11.305732484076433,
      "grad_norm": 3.298198938369751,
      "learning_rate": 5e-05,
      "loss": 0.0477,
      "step": 1775
    },
    {
      "epoch": 11.464968152866241,
      "grad_norm": 1.9741543531417847,
      "learning_rate": 5e-05,
      "loss": 0.0423,
      "step": 1800
    },
    {
      "epoch": 11.624203821656051,
      "grad_norm": 1.3877679109573364,
      "learning_rate": 5e-05,
      "loss": 0.033,
      "step": 1825
    },
    {
      "epoch": 11.78343949044586,
      "grad_norm": 1.5006356239318848,
      "learning_rate": 5e-05,
      "loss": 0.0292,
      "step": 1850
    },
    {
      "epoch": 11.94267515923567,
      "grad_norm": 2.8492929935455322,
      "learning_rate": 5e-05,
      "loss": 0.0273,
      "step": 1875
    },
    {
      "epoch": 12.101910828025478,
      "grad_norm": 3.423665761947632,
      "learning_rate": 5e-05,
      "loss": 0.0234,
      "step": 1900
    },
    {
      "epoch": 12.261146496815286,
      "grad_norm": 2.3133459091186523,
      "learning_rate": 5e-05,
      "loss": 0.0358,
      "step": 1925
    },
    {
      "epoch": 12.420382165605096,
      "grad_norm": 3.495283603668213,
      "learning_rate": 5e-05,
      "loss": 0.0355,
      "step": 1950
    },
    {
      "epoch": 12.579617834394904,
      "grad_norm": 1.5722227096557617,
      "learning_rate": 5e-05,
      "loss": 0.039,
      "step": 1975
    },
    {
      "epoch": 12.738853503184714,
      "grad_norm": 3.156038522720337,
      "learning_rate": 5e-05,
      "loss": 0.0314,
      "step": 2000
    },
    {
      "epoch": 12.738853503184714,
      "eval_loss": 2.215003490447998,
      "eval_runtime": 260.0556,
      "eval_samples_per_second": 2.538,
      "eval_steps_per_second": 0.162,
      "eval_wer": 78.86568308105001,
      "eval_wer_ortho": 81.0546875,
      "step": 2000
    },
    {
      "epoch": 12.898089171974522,
      "grad_norm": 2.6695284843444824,
      "learning_rate": 5e-05,
      "loss": 0.0364,
      "step": 2025
    },
    {
      "epoch": 13.05732484076433,
      "grad_norm": 1.3920303583145142,
      "learning_rate": 5e-05,
      "loss": 0.0314,
      "step": 2050
    },
    {
      "epoch": 13.21656050955414,
      "grad_norm": 2.2318477630615234,
      "learning_rate": 5e-05,
      "loss": 0.0226,
      "step": 2075
    },
    {
      "epoch": 13.375796178343949,
      "grad_norm": 2.457688093185425,
      "learning_rate": 5e-05,
      "loss": 0.0374,
      "step": 2100
    },
    {
      "epoch": 13.535031847133759,
      "grad_norm": 3.3562824726104736,
      "learning_rate": 5e-05,
      "loss": 0.0263,
      "step": 2125
    },
    {
      "epoch": 13.694267515923567,
      "grad_norm": 2.763430118560791,
      "learning_rate": 5e-05,
      "loss": 0.0359,
      "step": 2150
    },
    {
      "epoch": 13.853503184713375,
      "grad_norm": 5.378473281860352,
      "learning_rate": 5e-05,
      "loss": 0.039,
      "step": 2175
    },
    {
      "epoch": 14.012738853503185,
      "grad_norm": 2.072021007537842,
      "learning_rate": 5e-05,
      "loss": 0.0373,
      "step": 2200
    },
    {
      "epoch": 14.171974522292993,
      "grad_norm": 1.6606969833374023,
      "learning_rate": 5e-05,
      "loss": 0.0263,
      "step": 2225
    },
    {
      "epoch": 14.331210191082803,
      "grad_norm": 3.090102195739746,
      "learning_rate": 5e-05,
      "loss": 0.0257,
      "step": 2250
    },
    {
      "epoch": 14.490445859872612,
      "grad_norm": 6.045629024505615,
      "learning_rate": 5e-05,
      "loss": 0.0232,
      "step": 2275
    },
    {
      "epoch": 14.64968152866242,
      "grad_norm": 0.8971702456474304,
      "learning_rate": 5e-05,
      "loss": 0.0263,
      "step": 2300
    },
    {
      "epoch": 14.80891719745223,
      "grad_norm": 0.5613566637039185,
      "learning_rate": 5e-05,
      "loss": 0.0295,
      "step": 2325
    },
    {
      "epoch": 14.968152866242038,
      "grad_norm": 3.578268051147461,
      "learning_rate": 5e-05,
      "loss": 0.0246,
      "step": 2350
    }
  ],
  "logging_steps": 25,
  "max_steps": 2355,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.08565827600384e+19,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}