|
{
  "best_metric": 0.1726989597082138,
  "best_model_checkpoint": "/scratch/skscla001/speech/results/mms-1b-nyagen-combined-model/checkpoint-2000",
  "epoch": 3.1377899045020463,
  "eval_steps": 100,
  "global_step": 2300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1364256480218281,
      "grad_norm": 2.10461163520813,
      "learning_rate": 0.00029099999999999997,
      "loss": 6.9978,
      "step": 100
    },
    {
      "epoch": 0.1364256480218281,
      "eval_loss": 0.6384420394897461,
      "eval_runtime": 30.8578,
      "eval_samples_per_second": 11.148,
      "eval_steps_per_second": 2.787,
      "eval_wer": 0.5015432098765432,
      "step": 100
    },
    {
      "epoch": 0.2728512960436562,
      "grad_norm": 0.9924084544181824,
      "learning_rate": 0.0002986843307446322,
      "loss": 0.482,
      "step": 200
    },
    {
      "epoch": 0.2728512960436562,
      "eval_loss": 0.27768996357917786,
      "eval_runtime": 30.8437,
      "eval_samples_per_second": 11.153,
      "eval_steps_per_second": 2.788,
      "eval_wer": 0.3712522045855379,
      "step": 200
    },
    {
      "epoch": 0.4092769440654843,
      "grad_norm": 1.641776442527771,
      "learning_rate": 0.0002973275468250342,
      "loss": 0.3907,
      "step": 300
    },
    {
      "epoch": 0.4092769440654843,
      "eval_loss": 0.24835941195487976,
      "eval_runtime": 30.9832,
      "eval_samples_per_second": 11.103,
      "eval_steps_per_second": 2.776,
      "eval_wer": 0.3481040564373898,
      "step": 300
    },
    {
      "epoch": 0.5457025920873124,
      "grad_norm": 1.1754199266433716,
      "learning_rate": 0.00029597076290543625,
      "loss": 0.3782,
      "step": 400
    },
    {
      "epoch": 0.5457025920873124,
      "eval_loss": 0.22902728617191315,
      "eval_runtime": 30.7597,
      "eval_samples_per_second": 11.183,
      "eval_steps_per_second": 2.796,
      "eval_wer": 0.32319223985890655,
      "step": 400
    },
    {
      "epoch": 0.6821282401091405,
      "grad_norm": 1.491380214691162,
      "learning_rate": 0.00029460027409776153,
      "loss": 0.3316,
      "step": 500
    },
    {
      "epoch": 0.6821282401091405,
      "eval_loss": 0.222237691283226,
      "eval_runtime": 31.0639,
      "eval_samples_per_second": 11.074,
      "eval_steps_per_second": 2.768,
      "eval_wer": 0.3148148148148148,
      "step": 500
    },
    {
      "epoch": 0.8185538881309686,
      "grad_norm": 0.943580150604248,
      "learning_rate": 0.00029322978529008675,
      "loss": 0.3158,
      "step": 600
    },
    {
      "epoch": 0.8185538881309686,
      "eval_loss": 0.2126626819372177,
      "eval_runtime": 31.0228,
      "eval_samples_per_second": 11.089,
      "eval_steps_per_second": 2.772,
      "eval_wer": 0.30423280423280424,
      "step": 600
    },
    {
      "epoch": 0.9549795361527967,
      "grad_norm": 1.18917715549469,
      "learning_rate": 0.000291859296482412,
      "loss": 0.3199,
      "step": 700
    },
    {
      "epoch": 0.9549795361527967,
      "eval_loss": 0.2105857878923416,
      "eval_runtime": 30.6684,
      "eval_samples_per_second": 11.217,
      "eval_steps_per_second": 2.804,
      "eval_wer": 0.2932098765432099,
      "step": 700
    },
    {
      "epoch": 1.0914051841746248,
      "grad_norm": 1.4716788530349731,
      "learning_rate": 0.0002904888076747373,
      "loss": 0.3223,
      "step": 800
    },
    {
      "epoch": 1.0914051841746248,
      "eval_loss": 0.20132741332054138,
      "eval_runtime": 30.7244,
      "eval_samples_per_second": 11.196,
      "eval_steps_per_second": 2.799,
      "eval_wer": 0.2826278659611993,
      "step": 800
    },
    {
      "epoch": 1.2278308321964528,
      "grad_norm": 0.600020706653595,
      "learning_rate": 0.0002891183188670626,
      "loss": 0.3075,
      "step": 900
    },
    {
      "epoch": 1.2278308321964528,
      "eval_loss": 0.19748319685459137,
      "eval_runtime": 31.1335,
      "eval_samples_per_second": 11.049,
      "eval_steps_per_second": 2.762,
      "eval_wer": 0.2709435626102293,
      "step": 900
    },
    {
      "epoch": 1.364256480218281,
      "grad_norm": 0.36852729320526123,
      "learning_rate": 0.00028774783005938785,
      "loss": 0.3015,
      "step": 1000
    },
    {
      "epoch": 1.364256480218281,
      "eval_loss": 0.1942104995250702,
      "eval_runtime": 31.1423,
      "eval_samples_per_second": 11.046,
      "eval_steps_per_second": 2.762,
      "eval_wer": 0.2762345679012346,
      "step": 1000
    },
    {
      "epoch": 1.500682128240109,
      "grad_norm": 0.969207227230072,
      "learning_rate": 0.00028639104613978984,
      "loss": 0.3049,
      "step": 1100
    },
    {
      "epoch": 1.500682128240109,
      "eval_loss": 0.18946479260921478,
      "eval_runtime": 30.9295,
      "eval_samples_per_second": 11.122,
      "eval_steps_per_second": 2.781,
      "eval_wer": 0.27292768959435626,
      "step": 1100
    },
    {
      "epoch": 1.6371077762619373,
      "grad_norm": 0.7448552250862122,
      "learning_rate": 0.00028502055733211506,
      "loss": 0.3029,
      "step": 1200
    },
    {
      "epoch": 1.6371077762619373,
      "eval_loss": 0.1888139843940735,
      "eval_runtime": 31.0815,
      "eval_samples_per_second": 11.068,
      "eval_steps_per_second": 2.767,
      "eval_wer": 0.2718253968253968,
      "step": 1200
    },
    {
      "epoch": 1.7735334242837655,
      "grad_norm": 0.7603126168251038,
      "learning_rate": 0.0002836500685244404,
      "loss": 0.2626,
      "step": 1300
    },
    {
      "epoch": 1.7735334242837655,
      "eval_loss": 0.1865960955619812,
      "eval_runtime": 31.1543,
      "eval_samples_per_second": 11.042,
      "eval_steps_per_second": 2.76,
      "eval_wer": 0.2682980599647266,
      "step": 1300
    },
    {
      "epoch": 1.9099590723055935,
      "grad_norm": 0.49969810247421265,
      "learning_rate": 0.0002822795797167656,
      "loss": 0.2803,
      "step": 1400
    },
    {
      "epoch": 1.9099590723055935,
      "eval_loss": 0.18303100764751434,
      "eval_runtime": 31.2418,
      "eval_samples_per_second": 11.011,
      "eval_steps_per_second": 2.753,
      "eval_wer": 0.26146384479717816,
      "step": 1400
    },
    {
      "epoch": 2.0463847203274215,
      "grad_norm": 1.5848807096481323,
      "learning_rate": 0.0002809090909090909,
      "loss": 0.2725,
      "step": 1500
    },
    {
      "epoch": 2.0463847203274215,
      "eval_loss": 0.1813870072364807,
      "eval_runtime": 31.0194,
      "eval_samples_per_second": 11.09,
      "eval_steps_per_second": 2.772,
      "eval_wer": 0.26256613756613756,
      "step": 1500
    },
    {
      "epoch": 2.1828103683492497,
      "grad_norm": 0.9148170948028564,
      "learning_rate": 0.00027953860210141616,
      "loss": 0.2732,
      "step": 1600
    },
    {
      "epoch": 2.1828103683492497,
      "eval_loss": 0.1783067137002945,
      "eval_runtime": 31.1451,
      "eval_samples_per_second": 11.045,
      "eval_steps_per_second": 2.761,
      "eval_wer": 0.2641093474426808,
      "step": 1600
    },
    {
      "epoch": 2.319236016371078,
      "grad_norm": 0.7404142618179321,
      "learning_rate": 0.00027816811329374144,
      "loss": 0.249,
      "step": 1700
    },
    {
      "epoch": 2.319236016371078,
      "eval_loss": 0.1828220933675766,
      "eval_runtime": 31.1781,
      "eval_samples_per_second": 11.033,
      "eval_steps_per_second": 2.758,
      "eval_wer": 0.25595238095238093,
      "step": 1700
    },
    {
      "epoch": 2.4556616643929057,
      "grad_norm": 0.795720100402832,
      "learning_rate": 0.00027679762448606666,
      "loss": 0.2423,
      "step": 1800
    },
    {
      "epoch": 2.4556616643929057,
      "eval_loss": 0.17622150480747223,
      "eval_runtime": 31.3032,
      "eval_samples_per_second": 10.989,
      "eval_steps_per_second": 2.747,
      "eval_wer": 0.24801587301587302,
      "step": 1800
    },
    {
      "epoch": 2.592087312414734,
      "grad_norm": 2.1790249347686768,
      "learning_rate": 0.00027542713567839193,
      "loss": 0.2668,
      "step": 1900
    },
    {
      "epoch": 2.592087312414734,
      "eval_loss": 0.1731557846069336,
      "eval_runtime": 31.2391,
      "eval_samples_per_second": 11.012,
      "eval_steps_per_second": 2.753,
      "eval_wer": 0.24581128747795414,
      "step": 1900
    },
    {
      "epoch": 2.728512960436562,
      "grad_norm": 1.353415608406067,
      "learning_rate": 0.0002740566468707172,
      "loss": 0.2653,
      "step": 2000
    },
    {
      "epoch": 2.728512960436562,
      "eval_loss": 0.1726989597082138,
      "eval_runtime": 30.843,
      "eval_samples_per_second": 11.153,
      "eval_steps_per_second": 2.788,
      "eval_wer": 0.24603174603174602,
      "step": 2000
    },
    {
      "epoch": 2.8649386084583903,
      "grad_norm": 0.9100846648216248,
      "learning_rate": 0.00027268615806304243,
      "loss": 0.2614,
      "step": 2100
    },
    {
      "epoch": 2.8649386084583903,
      "eval_loss": 0.1748967319726944,
      "eval_runtime": 31.2508,
      "eval_samples_per_second": 11.008,
      "eval_steps_per_second": 2.752,
      "eval_wer": 0.2533068783068783,
      "step": 2100
    },
    {
      "epoch": 3.001364256480218,
      "grad_norm": 0.705278217792511,
      "learning_rate": 0.0002713156692553677,
      "loss": 0.2474,
      "step": 2200
    },
    {
      "epoch": 3.001364256480218,
      "eval_loss": 0.1732555478811264,
      "eval_runtime": 31.2961,
      "eval_samples_per_second": 10.992,
      "eval_steps_per_second": 2.748,
      "eval_wer": 0.24382716049382716,
      "step": 2200
    },
    {
      "epoch": 3.1377899045020463,
      "grad_norm": 1.5550167560577393,
      "learning_rate": 0.000269945180447693,
      "loss": 0.2317,
      "step": 2300
    },
    {
      "epoch": 3.1377899045020463,
      "eval_loss": 0.17674298584461212,
      "eval_runtime": 31.0854,
      "eval_samples_per_second": 11.066,
      "eval_steps_per_second": 2.767,
      "eval_wer": 0.2447089947089947,
      "step": 2300
    },
    {
      "epoch": 3.1377899045020463,
      "step": 2300,
      "total_flos": 8.649929823066914e+18,
      "train_loss": 0.5916279270337975,
      "train_runtime": 2613.7833,
      "train_samples_per_second": 33.618,
      "train_steps_per_second": 8.413
    }
  ],
  "logging_steps": 100,
  "max_steps": 21990,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 400,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.649929823066914e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|