whisper-base-google-fleurs-pt-br / trainer_state.json
{
"best_metric": 20.920285768001502,
"best_model_checkpoint": "./whisper-base-google-fleurs-pt-br/checkpoint-200",
"epoch": 2.5125628140703515,
"eval_steps": 100,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13,
"learning_rate": 5.893749999999999e-06,
"loss": 2.004,
"step": 25
},
{
"epoch": 0.25,
"learning_rate": 1.2299999999999999e-05,
"loss": 1.1337,
"step": 50
},
{
"epoch": 0.38,
"learning_rate": 1.870625e-05,
"loss": 0.8017,
"step": 75
},
{
"epoch": 0.5,
"learning_rate": 1.99875e-05,
"loss": 0.6738,
"step": 100
},
{
"epoch": 0.5,
"eval_loss": 0.3942798376083374,
"eval_runtime": 199.2208,
"eval_samples_per_second": 4.613,
"eval_steps_per_second": 0.291,
"eval_wer": 21.73340853543899,
"eval_wer_normalized": 17.94871794871795,
"step": 100
},
{
"epoch": 0.63,
"learning_rate": 1.9275694444444445e-05,
"loss": 0.5827,
"step": 125
},
{
"epoch": 0.75,
"learning_rate": 1.8563888888888888e-05,
"loss": 0.5512,
"step": 150
},
{
"epoch": 0.88,
"learning_rate": 1.7852083333333335e-05,
"loss": 0.5489,
"step": 175
},
{
"epoch": 1.01,
"learning_rate": 1.7140277777777778e-05,
"loss": 0.4816,
"step": 200
},
{
"epoch": 1.01,
"eval_loss": 0.3761998414993286,
"eval_runtime": 198.2156,
"eval_samples_per_second": 4.636,
"eval_steps_per_second": 0.293,
"eval_wer": 20.920285768001502,
"eval_wer_normalized": 17.13524570667428,
"step": 200
},
{
"epoch": 1.13,
"learning_rate": 1.6428472222222225e-05,
"loss": 0.3379,
"step": 225
},
{
"epoch": 1.26,
"learning_rate": 1.5716666666666668e-05,
"loss": 0.3326,
"step": 250
},
{
"epoch": 1.38,
"learning_rate": 1.500486111111111e-05,
"loss": 0.2885,
"step": 275
},
{
"epoch": 1.51,
"learning_rate": 1.4293055555555555e-05,
"loss": 0.2652,
"step": 300
},
{
"epoch": 1.51,
"eval_loss": 0.3871774673461914,
"eval_runtime": 196.5558,
"eval_samples_per_second": 4.676,
"eval_steps_per_second": 0.295,
"eval_wer": 21.188193269411542,
"eval_wer_normalized": 17.282717282717282,
"step": 300
},
{
"epoch": 1.63,
"learning_rate": 1.358125e-05,
"loss": 0.2971,
"step": 325
},
{
"epoch": 1.76,
"learning_rate": 1.2869444444444445e-05,
"loss": 0.2761,
"step": 350
},
{
"epoch": 1.88,
"learning_rate": 1.215763888888889e-05,
"loss": 0.2682,
"step": 375
},
{
"epoch": 2.01,
"learning_rate": 1.1445833333333333e-05,
"loss": 0.2901,
"step": 400
},
{
"epoch": 2.01,
"eval_loss": 0.3911706209182739,
"eval_runtime": 200.4943,
"eval_samples_per_second": 4.584,
"eval_steps_per_second": 0.289,
"eval_wer": 21.460800902425266,
"eval_wer_normalized": 17.706103420389134,
"step": 400
},
{
"epoch": 2.14,
"learning_rate": 1.0734027777777778e-05,
"loss": 0.1471,
"step": 425
},
{
"epoch": 2.26,
"learning_rate": 1.0022222222222222e-05,
"loss": 0.1668,
"step": 450
},
{
"epoch": 2.39,
"learning_rate": 9.310416666666667e-06,
"loss": 0.1635,
"step": 475
},
{
"epoch": 2.51,
"learning_rate": 8.598611111111112e-06,
"loss": 0.1408,
"step": 500
},
{
"epoch": 2.51,
"eval_loss": 0.4062696099281311,
"eval_runtime": 205.1449,
"eval_samples_per_second": 4.48,
"eval_steps_per_second": 0.283,
"eval_wer": 21.611205113743186,
"eval_wer_normalized": 18.001046572475143,
"step": 500
},
{
"epoch": 2.51,
"step": 500,
"total_flos": 1.03412777877504e+18,
"train_loss": 0.48756390714645387,
"train_runtime": 1663.348,
"train_samples_per_second": 15.391,
"train_steps_per_second": 0.481
}
],
"logging_steps": 25,
"max_steps": 800,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"total_flos": 1.03412777877504e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
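
A minimal sketch (not part of the file above) of how this trainer state could be inspected with Python's standard json module, assuming the file is saved locally as "trainer_state.json". It prints the best checkpoint recorded by the Trainer and the evaluation entries in log_history, i.e. the records that carry an "eval_wer" key and are written every eval_steps (100) steps.

import json

# Path is an assumption; point it at wherever the file was downloaded.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best metric (WER):", state["best_metric"])

# Evaluation logs are the log_history entries that contain eval metrics.
for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f'step {entry["step"]:>4}: '
              f'eval_loss={entry["eval_loss"]:.4f}, '
              f'eval_wer={entry["eval_wer"]:.2f}, '
              f'eval_wer_normalized={entry["eval_wer_normalized"]:.2f}')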