{
  "best_metric": 98.0078125,
  "best_model_checkpoint": "./whisper-small-ha-v9/checkpoint-1000",
  "epoch": 12.738853503184714,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1592356687898089,
      "grad_norm": 14.99692440032959,
      "learning_rate": 0.00021,
      "loss": 2.5992,
      "step": 25
    },
    {
      "epoch": 0.3184713375796178,
      "grad_norm": 17.429601669311523,
      "learning_rate": 0.00045000000000000004,
      "loss": 2.8909,
      "step": 50
    },
    {
      "epoch": 0.47770700636942676,
      "grad_norm": 18.359071731567383,
      "learning_rate": 0.0005,
      "loss": 3.5945,
      "step": 75
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 11.03101634979248,
      "learning_rate": 0.0005,
      "loss": 3.7546,
      "step": 100
    },
    {
      "epoch": 0.7961783439490446,
      "grad_norm": 9.160442352294922,
      "learning_rate": 0.0005,
      "loss": 3.53,
      "step": 125
    },
    {
      "epoch": 0.9554140127388535,
      "grad_norm": 8.277521133422852,
      "learning_rate": 0.0005,
      "loss": 3.516,
      "step": 150
    },
    {
      "epoch": 1.1146496815286624,
      "grad_norm": 8.703987121582031,
      "learning_rate": 0.0005,
      "loss": 2.8911,
      "step": 175
    },
    {
      "epoch": 1.2738853503184713,
      "grad_norm": 8.430550575256348,
      "learning_rate": 0.0005,
      "loss": 2.7461,
      "step": 200
    },
    {
      "epoch": 1.4331210191082802,
      "grad_norm": 8.455479621887207,
      "learning_rate": 0.0005,
      "loss": 2.8827,
      "step": 225
    },
    {
      "epoch": 1.5923566878980893,
      "grad_norm": 10.029267311096191,
      "learning_rate": 0.0005,
      "loss": 2.9323,
      "step": 250
    },
    {
      "epoch": 1.7515923566878981,
      "grad_norm": 7.152008533477783,
      "learning_rate": 0.0005,
      "loss": 2.8411,
      "step": 275
    },
    {
      "epoch": 1.910828025477707,
      "grad_norm": 7.405454635620117,
      "learning_rate": 0.0005,
      "loss": 2.9588,
      "step": 300
    },
    {
      "epoch": 2.070063694267516,
      "grad_norm": 7.2899675369262695,
      "learning_rate": 0.0005,
      "loss": 2.4946,
      "step": 325
    },
    {
      "epoch": 2.229299363057325,
      "grad_norm": 8.068443298339844,
      "learning_rate": 0.0005,
      "loss": 2.0882,
      "step": 350
    },
    {
      "epoch": 2.388535031847134,
      "grad_norm": 8.143331527709961,
      "learning_rate": 0.0005,
      "loss": 2.1759,
      "step": 375
    },
    {
      "epoch": 2.5477707006369426,
      "grad_norm": 8.333662033081055,
      "learning_rate": 0.0005,
      "loss": 2.2816,
      "step": 400
    },
    {
      "epoch": 2.7070063694267517,
      "grad_norm": 7.931263446807861,
      "learning_rate": 0.0005,
      "loss": 2.3499,
      "step": 425
    },
    {
      "epoch": 2.8662420382165603,
      "grad_norm": 8.108153343200684,
      "learning_rate": 0.0005,
      "loss": 2.3946,
      "step": 450
    },
    {
      "epoch": 3.0254777070063694,
      "grad_norm": 7.962264060974121,
      "learning_rate": 0.0005,
      "loss": 2.1774,
      "step": 475
    },
    {
      "epoch": 3.1847133757961785,
      "grad_norm": 8.008493423461914,
      "learning_rate": 0.0005,
      "loss": 1.5785,
      "step": 500
    },
    {
      "epoch": 3.1847133757961785,
      "eval_loss": 3.8839268684387207,
      "eval_runtime": 239.0145,
      "eval_samples_per_second": 2.761,
      "eval_steps_per_second": 0.176,
      "eval_wer": 99.84375,
      "eval_wer_ortho": 99.55930254838091,
      "step": 500
    },
    {
      "epoch": 3.343949044585987,
      "grad_norm": 7.7201032638549805,
      "learning_rate": 0.0005,
      "loss": 1.7432,
      "step": 525
    },
    {
      "epoch": 3.5031847133757963,
      "grad_norm": 7.00828742980957,
      "learning_rate": 0.0005,
      "loss": 1.8165,
      "step": 550
    },
    {
      "epoch": 3.662420382165605,
      "grad_norm": 7.801667213439941,
      "learning_rate": 0.0005,
      "loss": 1.8769,
      "step": 575
    },
    {
      "epoch": 3.821656050955414,
      "grad_norm": 6.696052074432373,
      "learning_rate": 0.0005,
      "loss": 1.9189,
      "step": 600
    },
    {
      "epoch": 3.980891719745223,
      "grad_norm": 7.849315643310547,
      "learning_rate": 0.0005,
      "loss": 1.9248,
      "step": 625
    },
    {
      "epoch": 4.140127388535032,
      "grad_norm": 6.837538719177246,
      "learning_rate": 0.0005,
      "loss": 1.3935,
      "step": 650
    },
    {
      "epoch": 4.2993630573248405,
      "grad_norm": 6.533350944519043,
      "learning_rate": 0.0005,
      "loss": 1.3859,
      "step": 675
    },
    {
      "epoch": 4.45859872611465,
      "grad_norm": 7.38162899017334,
      "learning_rate": 0.0005,
      "loss": 1.4676,
      "step": 700
    },
    {
      "epoch": 4.617834394904459,
      "grad_norm": 7.072102069854736,
      "learning_rate": 0.0005,
      "loss": 1.5653,
      "step": 725
    },
    {
      "epoch": 4.777070063694268,
      "grad_norm": 7.383370399475098,
      "learning_rate": 0.0005,
      "loss": 1.6075,
      "step": 750
    },
    {
      "epoch": 4.936305732484076,
      "grad_norm": 6.835177898406982,
      "learning_rate": 0.0005,
      "loss": 1.6546,
      "step": 775
    },
    {
      "epoch": 5.095541401273885,
      "grad_norm": 6.938526153564453,
      "learning_rate": 0.0005,
      "loss": 1.3298,
      "step": 800
    },
    {
      "epoch": 5.254777070063694,
      "grad_norm": 7.478129863739014,
      "learning_rate": 0.0005,
      "loss": 1.2336,
      "step": 825
    },
    {
      "epoch": 5.414012738853503,
      "grad_norm": 6.950467109680176,
      "learning_rate": 0.0005,
      "loss": 1.2652,
      "step": 850
    },
    {
      "epoch": 5.573248407643312,
      "grad_norm": 8.092499732971191,
      "learning_rate": 0.0005,
      "loss": 1.4009,
      "step": 875
    },
    {
      "epoch": 5.732484076433121,
      "grad_norm": 6.488431930541992,
      "learning_rate": 0.0005,
      "loss": 1.3538,
      "step": 900
    },
    {
      "epoch": 5.89171974522293,
      "grad_norm": 6.798085689544678,
      "learning_rate": 0.0005,
      "loss": 1.4269,
      "step": 925
    },
    {
      "epoch": 6.050955414012739,
      "grad_norm": 6.3589043617248535,
      "learning_rate": 0.0005,
      "loss": 1.2951,
      "step": 950
    },
    {
      "epoch": 6.210191082802548,
      "grad_norm": 6.020321369171143,
      "learning_rate": 0.0005,
      "loss": 1.0916,
      "step": 975
    },
    {
      "epoch": 6.369426751592357,
      "grad_norm": 6.381227493286133,
      "learning_rate": 0.0005,
      "loss": 1.1623,
      "step": 1000
    },
    {
      "epoch": 6.369426751592357,
      "eval_loss": 4.484735488891602,
      "eval_runtime": 243.1604,
      "eval_samples_per_second": 2.714,
      "eval_steps_per_second": 0.173,
      "eval_wer": 98.0078125,
      "eval_wer_ortho": 97.75819122437248,
      "step": 1000
    },
    {
      "epoch": 6.528662420382165,
      "grad_norm": 6.898542404174805,
      "learning_rate": 0.0005,
      "loss": 1.2127,
      "step": 1025
    },
    {
      "epoch": 6.687898089171974,
      "grad_norm": 6.266862392425537,
      "learning_rate": 0.0005,
      "loss": 1.2291,
      "step": 1050
    },
    {
      "epoch": 6.8471337579617835,
      "grad_norm": 6.253779888153076,
      "learning_rate": 0.0005,
      "loss": 1.2292,
      "step": 1075
    },
    {
      "epoch": 7.006369426751593,
      "grad_norm": 5.176928520202637,
      "learning_rate": 0.0005,
      "loss": 1.2593,
      "step": 1100
    },
    {
      "epoch": 7.165605095541402,
      "grad_norm": 4.826560020446777,
      "learning_rate": 0.0005,
      "loss": 1.008,
      "step": 1125
    },
    {
      "epoch": 7.32484076433121,
      "grad_norm": 5.738828659057617,
      "learning_rate": 0.0005,
      "loss": 1.0366,
      "step": 1150
    },
    {
      "epoch": 7.484076433121019,
      "grad_norm": 5.866888523101807,
      "learning_rate": 0.0005,
      "loss": 1.1405,
      "step": 1175
    },
    {
      "epoch": 7.643312101910828,
      "grad_norm": 6.350291728973389,
      "learning_rate": 0.0005,
      "loss": 1.1344,
      "step": 1200
    },
    {
      "epoch": 7.802547770700637,
      "grad_norm": 6.462769508361816,
      "learning_rate": 0.0005,
      "loss": 1.1296,
      "step": 1225
    },
    {
      "epoch": 7.961783439490446,
      "grad_norm": 6.252405166625977,
      "learning_rate": 0.0005,
      "loss": 1.1576,
      "step": 1250
    },
    {
      "epoch": 8.121019108280255,
      "grad_norm": 6.970676898956299,
      "learning_rate": 0.0005,
      "loss": 0.9843,
      "step": 1275
    },
    {
      "epoch": 8.280254777070065,
      "grad_norm": 5.744741916656494,
      "learning_rate": 0.0005,
      "loss": 0.9896,
      "step": 1300
    },
    {
      "epoch": 8.439490445859873,
      "grad_norm": 6.012212753295898,
      "learning_rate": 0.0005,
      "loss": 1.0226,
      "step": 1325
    },
    {
      "epoch": 8.598726114649681,
      "grad_norm": 5.396963596343994,
      "learning_rate": 0.0005,
      "loss": 1.0337,
      "step": 1350
    },
    {
      "epoch": 8.757961783439491,
      "grad_norm": 6.041420936584473,
      "learning_rate": 0.0005,
      "loss": 1.1026,
      "step": 1375
    },
    {
      "epoch": 8.9171974522293,
      "grad_norm": 7.039705276489258,
      "learning_rate": 0.0005,
      "loss": 1.0957,
      "step": 1400
    },
    {
      "epoch": 9.07643312101911,
      "grad_norm": 5.3199028968811035,
      "learning_rate": 0.0005,
      "loss": 1.0184,
      "step": 1425
    },
    {
      "epoch": 9.235668789808917,
      "grad_norm": 5.058565616607666,
      "learning_rate": 0.0005,
      "loss": 0.9048,
      "step": 1450
    },
    {
      "epoch": 9.394904458598726,
      "grad_norm": 5.007670879364014,
      "learning_rate": 0.0005,
      "loss": 0.9577,
      "step": 1475
    },
    {
      "epoch": 9.554140127388536,
      "grad_norm": 5.517505168914795,
      "learning_rate": 0.0005,
      "loss": 0.9893,
      "step": 1500
    },
    {
      "epoch": 9.554140127388536,
      "eval_loss": 4.792229652404785,
      "eval_runtime": 245.7983,
      "eval_samples_per_second": 2.685,
      "eval_steps_per_second": 0.171,
      "eval_wer": 107.6171875,
      "eval_wer_ortho": 108.7373059973175,
      "step": 1500
    },
    {
      "epoch": 9.713375796178344,
      "grad_norm": 5.215830326080322,
      "learning_rate": 0.0005,
      "loss": 1.0183,
      "step": 1525
    },
    {
      "epoch": 9.872611464968152,
      "grad_norm": 4.925009250640869,
      "learning_rate": 0.0005,
      "loss": 1.0158,
      "step": 1550
    },
    {
      "epoch": 10.031847133757962,
      "grad_norm": 5.344029903411865,
      "learning_rate": 0.0005,
      "loss": 1.0167,
      "step": 1575
    },
    {
      "epoch": 10.19108280254777,
      "grad_norm": 5.701788902282715,
      "learning_rate": 0.0005,
      "loss": 0.8659,
      "step": 1600
    },
    {
      "epoch": 10.35031847133758,
      "grad_norm": 5.5644707679748535,
      "learning_rate": 0.0005,
      "loss": 0.9147,
      "step": 1625
    },
    {
      "epoch": 10.509554140127388,
      "grad_norm": 5.331553936004639,
      "learning_rate": 0.0005,
      "loss": 0.9256,
      "step": 1650
    },
    {
      "epoch": 10.668789808917197,
      "grad_norm": 5.594724655151367,
      "learning_rate": 0.0005,
      "loss": 0.9863,
      "step": 1675
    },
    {
      "epoch": 10.828025477707007,
      "grad_norm": 5.949446201324463,
      "learning_rate": 0.0005,
      "loss": 0.9587,
      "step": 1700
    },
    {
      "epoch": 10.987261146496815,
      "grad_norm": 4.683850288391113,
      "learning_rate": 0.0005,
      "loss": 0.9825,
      "step": 1725
    },
    {
      "epoch": 11.146496815286625,
      "grad_norm": 4.596772193908691,
      "learning_rate": 0.0005,
      "loss": 0.8346,
      "step": 1750
    },
    {
      "epoch": 11.305732484076433,
      "grad_norm": 4.516598701477051,
      "learning_rate": 0.0005,
      "loss": 0.8364,
      "step": 1775
    },
    {
      "epoch": 11.464968152866241,
      "grad_norm": 4.428603649139404,
      "learning_rate": 0.0005,
      "loss": 0.8787,
      "step": 1800
    },
    {
      "epoch": 11.624203821656051,
      "grad_norm": 5.087082862854004,
      "learning_rate": 0.0005,
      "loss": 0.9147,
      "step": 1825
    },
    {
      "epoch": 11.78343949044586,
      "grad_norm": 5.706089496612549,
      "learning_rate": 0.0005,
      "loss": 0.9533,
      "step": 1850
    },
    {
      "epoch": 11.94267515923567,
      "grad_norm": 5.720147132873535,
      "learning_rate": 0.0005,
      "loss": 0.9424,
      "step": 1875
    },
    {
      "epoch": 12.101910828025478,
      "grad_norm": 4.992929458618164,
      "learning_rate": 0.0005,
      "loss": 0.8299,
      "step": 1900
    },
    {
      "epoch": 12.261146496815286,
      "grad_norm": 4.936596393585205,
      "learning_rate": 0.0005,
      "loss": 0.8422,
      "step": 1925
    },
    {
      "epoch": 12.420382165605096,
      "grad_norm": 4.403645038604736,
      "learning_rate": 0.0005,
      "loss": 0.8743,
      "step": 1950
    },
    {
      "epoch": 12.579617834394904,
      "grad_norm": 4.9296793937683105,
      "learning_rate": 0.0005,
      "loss": 0.8704,
      "step": 1975
    },
    {
      "epoch": 12.738853503184714,
      "grad_norm": 5.615696430206299,
      "learning_rate": 0.0005,
      "loss": 0.8816,
      "step": 2000
    },
    {
      "epoch": 12.738853503184714,
      "eval_loss": 4.957520008087158,
      "eval_runtime": 246.0161,
      "eval_samples_per_second": 2.683,
      "eval_steps_per_second": 0.171,
      "eval_wer": 108.10546875,
      "eval_wer_ortho": 110.34680973366547,
      "step": 2000
    }
  ],
  "logging_steps": 25,
  "max_steps": 2041,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 13,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.22088071102464e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}