{
  "best_metric": 35.37824487651077,
  "best_model_checkpoint": "/models/hfhub/whisper-tiny-ft-cy/checkpoint-4000",
  "epoch": 1.005593614480548,
  "eval_steps": 1000,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 5e-06,
      "loss": 1.8194,
      "step": 250
    },
    {
      "epoch": 0.13,
      "learning_rate": 1e-05,
      "loss": 0.9849,
      "step": 500
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.7915,
      "step": 750
    },
    {
      "epoch": 0.25,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.7059,
      "step": 1000
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.73893803358078,
      "eval_runtime": 4402.4141,
      "eval_samples_per_second": 5.958,
      "eval_steps_per_second": 0.745,
      "eval_wer": 44.47125591171834,
      "step": 1000
    },
    {
      "epoch": 0.31,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.6376,
      "step": 1250
    },
    {
      "epoch": 0.38,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.6106,
      "step": 1500
    },
    {
      "epoch": 0.44,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.5788,
      "step": 1750
    },
    {
      "epoch": 0.5,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.5534,
      "step": 2000
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.6115749478340149,
      "eval_runtime": 4567.6465,
      "eval_samples_per_second": 5.743,
      "eval_steps_per_second": 0.718,
      "eval_wer": 39.26852338413032,
      "step": 2000
    },
    {
      "epoch": 0.57,
      "learning_rate": 5e-06,
      "loss": 0.5487,
      "step": 2250
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.5209,
      "step": 2500
    },
    {
      "epoch": 0.69,
      "learning_rate": 3.5714285714285718e-06,
      "loss": 0.5085,
      "step": 2750
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.5087,
      "step": 3000
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.5615290403366089,
      "eval_runtime": 4330.9978,
      "eval_samples_per_second": 6.057,
      "eval_steps_per_second": 0.757,
      "eval_wer": 36.01555438780872,
      "step": 3000
    },
    {
      "epoch": 0.82,
      "learning_rate": 2.1428571428571427e-06,
      "loss": 0.493,
      "step": 3250
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.4827,
      "step": 3500
    },
    {
      "epoch": 0.94,
      "learning_rate": 7.142857142857143e-07,
      "loss": 0.4842,
      "step": 3750
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0,
      "loss": 0.4749,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.5461679100990295,
      "eval_runtime": 4286.2436,
      "eval_samples_per_second": 6.12,
      "eval_steps_per_second": 0.765,
      "eval_wer": 35.37824487651077,
      "step": 4000
    },
    {
      "epoch": 1.01,
      "step": 4000,
      "total_flos": 3.15119267647488e+18,
      "train_loss": 0.6689802742004395,
      "train_runtime": 33479.3884,
      "train_samples_per_second": 3.823,
      "train_steps_per_second": 0.119
    },
    {
      "epoch": 1.01,
      "eval_loss": 0.5461679100990295,
      "eval_runtime": 4251.2772,
      "eval_samples_per_second": 6.17,
      "eval_steps_per_second": 0.771,
      "eval_wer": 35.37824487651077,
      "step": 4000
    }
  ],
  "logging_steps": 250,
  "max_steps": 4000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 1000,
  "total_flos": 3.15119267647488e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}