{
  "best_metric": 73.71718882303134,
  "best_model_checkpoint": "./whisper-tiny-hindi2_test/checkpoint-120",
  "epoch": 4.920634920634921,
  "eval_steps": 40,
  "global_step": 155,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6349206349206349,
      "grad_norm": Infinity,
      "learning_rate": 2.043269230769231e-06,
      "loss": 3.3856,
      "step": 20
    },
    {
      "epoch": 1.2698412698412698,
      "grad_norm": 12.90635871887207,
      "learning_rate": 4.447115384615384e-06,
      "loss": 2.2169,
      "step": 40
    },
    {
      "epoch": 1.2698412698412698,
      "eval_loss": 1.7869399785995483,
      "eval_runtime": 509.7564,
      "eval_samples_per_second": 1.962,
      "eval_steps_per_second": 0.49,
      "eval_wer": 241.5580016934801,
      "step": 40
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 8.530549049377441,
      "learning_rate": 6.850961538461537e-06,
      "loss": 1.4435,
      "step": 60
    },
    {
      "epoch": 2.5396825396825395,
      "grad_norm": 7.451807022094727,
      "learning_rate": 9.254807692307693e-06,
      "loss": 1.0037,
      "step": 80
    },
    {
      "epoch": 2.5396825396825395,
      "eval_loss": 0.8910112380981445,
      "eval_runtime": 318.2676,
      "eval_samples_per_second": 3.142,
      "eval_steps_per_second": 0.786,
      "eval_wer": 78.55207451312447,
      "step": 80
    },
    {
      "epoch": 3.1746031746031744,
      "grad_norm": 6.283768177032471,
      "learning_rate": 1.1658653846153846e-05,
      "loss": 0.7501,
      "step": 100
    },
    {
      "epoch": 3.8095238095238093,
      "grad_norm": 6.983267307281494,
      "learning_rate": 1.40625e-05,
      "loss": 0.5907,
      "step": 120
    },
    {
      "epoch": 3.8095238095238093,
      "eval_loss": 0.624940812587738,
      "eval_runtime": 318.0146,
      "eval_samples_per_second": 3.145,
      "eval_steps_per_second": 0.786,
      "eval_wer": 73.71718882303134,
      "step": 120
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 5.939335346221924,
      "learning_rate": 1.646634615384615e-05,
      "loss": 0.4806,
      "step": 140
    },
    {
      "epoch": 4.920634920634921,
      "step": 155,
      "total_flos": 1.2132186587136e+17,
      "train_loss": 1.3161909626376245,
      "train_runtime": 1802.5104,
      "train_samples_per_second": 2.774,
      "train_steps_per_second": 0.086
    },
    {
      "epoch": 4.920634920634921,
      "step": 155,
      "total_flos": 1.2132186587136e+17,
      "train_loss": 0.0,
      "train_runtime": 0.1541,
      "train_samples_per_second": 32447.028,
      "train_steps_per_second": 1005.858
    },
    {
      "epoch": 4.920634920634921,
      "step": 155,
      "total_flos": 1.2132186587136e+17,
      "train_loss": 0.0,
      "train_runtime": 0.2109,
      "train_samples_per_second": 23707.402,
      "train_steps_per_second": 734.929
    },
    {
      "epoch": 4.920634920634921,
      "step": 155,
      "total_flos": 1.2132186587136e+17,
      "train_loss": 0.0,
      "train_runtime": 0.17,
      "train_samples_per_second": 29405.194,
      "train_steps_per_second": 911.561
    }
  ],
  "logging_steps": 20,
  "max_steps": 155,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 40,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2132186587136e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|