12/10/2022 23:08:05 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1, distributed training: False, 16-bits training: True
12/10/2022 23:08:05 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
_n_gpu=1,
adafactor=False,
adam_beta1=0.9,
adam_beta2=0.999,
adam_epsilon=1e-08,
auto_find_batch_size=False,
bf16=False,
bf16_full_eval=False,
data_seed=None,
dataloader_drop_last=False,
dataloader_num_workers=0,
dataloader_pin_memory=True,
ddp_bucket_cap_mb=None,
ddp_find_unused_parameters=None,
ddp_timeout=1800,
debug=[],
deepspeed=None,
disable_tqdm=False,
do_eval=True,
do_predict=False,
do_train=True,
eval_accumulation_steps=None,
eval_delay=0,
eval_steps=1000,
evaluation_strategy=steps,
fp16=True,
fp16_backend=auto,
fp16_full_eval=False,
fp16_opt_level=O1,
fsdp=[],
fsdp_min_num_params=0,
fsdp_transformer_layer_cls_to_wrap=None,
full_determinism=False,
generation_max_length=225,
generation_num_beams=None,
gradient_accumulation_steps=1,
gradient_checkpointing=True,
greater_is_better=False,
group_by_length=False,
half_precision_backend=auto,
hub_model_id=None,
hub_private_repo=False,
hub_strategy=every_save,
hub_token=<HUB_TOKEN>,
ignore_data_skip=False,
include_inputs_for_metrics=False,
jit_mode_eval=False,
label_names=None,
label_smoothing_factor=0.0,
learning_rate=1e-05,
length_column_name=input_length,
load_best_model_at_end=True,
local_rank=-1,
log_level=passive,
log_level_replica=passive,
log_on_each_node=True,
logging_dir=./runs/Dec10_23-08-05_4b942bf2873e,
logging_first_step=False,
logging_nan_inf_filter=True,
logging_steps=25,
logging_strategy=steps,
lr_scheduler_type=linear,
max_grad_norm=1.0,
max_steps=5000,
metric_for_best_model=wer,
mp_parameters=,
no_cuda=False,
num_train_epochs=3.0,
optim=adamw_hf,
optim_args=None,
output_dir=./,
overwrite_output_dir=True,
past_index=-1,
per_device_eval_batch_size=32,
per_device_train_batch_size=64,
predict_with_generate=True,
prediction_loss_only=False,
push_to_hub=True,
push_to_hub_model_id=None,
push_to_hub_organization=None,
push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
ray_scope=last,
remove_unused_columns=True,
report_to=['tensorboard'],
resume_from_checkpoint=None,
run_name=./,
save_on_each_node=False,
save_steps=1000,
save_strategy=steps,
save_total_limit=None,
seed=42,
sharded_ddp=[],
skip_memory_metrics=True,
sortish_sampler=False,
tf32=None,
torchdynamo=None,
tpu_metrics_debug=False,
tpu_num_cores=None,
use_ipex=False,
use_legacy_prediction_loop=False,
use_mps_device=False,
warmup_ratio=0.0,
warmup_steps=500,
weight_decay=0.0,
xpu_backend=None,
)
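For reference, the non-default settings echoed in the dump above correspond roughly to constructing the training arguments as in the minimal Python sketch below (transformers Seq2SeqTrainingArguments API; model and dataset setup are omitted, and all values are taken directly from this log, not newly chosen):

from transformers import Seq2SeqTrainingArguments

# Sketch only: reproduces the key non-default values logged above.
training_args = Seq2SeqTrainingArguments(
    output_dir="./",
    overwrite_output_dir=True,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=32,
    learning_rate=1e-05,
    lr_scheduler_type="linear",
    warmup_steps=500,
    max_steps=5000,
    gradient_checkpointing=True,
    fp16=True,
    evaluation_strategy="steps",
    eval_steps=1000,
    save_steps=1000,
    logging_steps=25,
    predict_with_generate=True,
    generation_max_length=225,
    length_column_name="input_length",
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    push_to_hub=True,
    report_to=["tensorboard"],
)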
12/10/2022 23:08:07 - INFO - datasets.info - Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/facebook--voxpopuli/b5ff837284f0778eefe0f642734e142d8c3f574eba8c9c8a4b13602297f73604
12/10/2022 23:08:10 - INFO - datasets.info - Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/facebook--voxpopuli/b5ff837284f0778eefe0f642734e142d8c3f574eba8c9c8a4b13602297f73604
12/10/2022 23:08:13 - INFO - datasets.info - Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/facebook--voxpopuli/b5ff837284f0778eefe0f642734e142d8c3f574eba8c9c8a4b13602297f73604
12/10/2022 23:08:23 - WARNING - huggingface_hub.repository - /usr/src/app/models/whisper-small-hr-vox/./ is already a clone of https://huggingface.co/mikr/whisper-small-hr-vox. Make sure you pull the latest changes with `repo.git_pull()`.
{'loss': 1.4901, 'learning_rate': 4.800000000000001e-07, 'epoch': 0.01}
{'loss': 1.2499, 'learning_rate': 9.800000000000001e-07, 'epoch': 1.0}
{'loss': 1.0153, 'learning_rate': 1.48e-06, 'epoch': 1.01}
{'loss': 0.8027, 'learning_rate': 1.98e-06, 'epoch': 2.0}
{'loss': 0.6815, 'learning_rate': 2.4800000000000004e-06, 'epoch': 3.0}
{'loss': 0.5597, 'learning_rate': 2.9800000000000003e-06, 'epoch': 3.01}
{'loss': 0.4858, 'learning_rate': 3.48e-06, 'epoch': 4.0}
{'loss': 0.4394, 'learning_rate': 3.980000000000001e-06, 'epoch': 4.01}
{'loss': 0.37, 'learning_rate': 4.48e-06, 'epoch': 5.0}
{'loss': 0.3308, 'learning_rate': 4.980000000000001e-06, 'epoch': 6.0}
{'loss': 0.2815, 'learning_rate': 5.480000000000001e-06, 'epoch': 6.01}
{'loss': 0.2378, 'learning_rate': 5.98e-06, 'epoch': 7.0}
{'loss': 0.2064, 'learning_rate': 6.480000000000001e-06, 'epoch': 8.0}
{'loss': 0.1689, 'learning_rate': 6.98e-06, 'epoch': 8.01}
{'loss': 0.1392, 'learning_rate': 7.48e-06, 'epoch': 9.0}
{'loss': 0.1138, 'learning_rate': 7.980000000000002e-06, 'epoch': 9.01}
{'loss': 0.088, 'learning_rate': 8.48e-06, 'epoch': 10.01}
{'loss': 0.0765, 'learning_rate': 8.98e-06, 'epoch': 11.0}
{'loss': 0.0608, 'learning_rate': 9.48e-06, 'epoch': 11.01}
{'loss': 0.0437, 'learning_rate': 9.980000000000001e-06, 'epoch': 12.0}
{'loss': 0.043, 'learning_rate': 9.946666666666667e-06, 'epoch': 13.0}
{'loss': 0.0325, 'learning_rate': 9.891111111111113e-06, 'epoch': 13.01}
{'loss': 0.0241, 'learning_rate': 9.835555555555556e-06, 'epoch': 14.0}
{'loss': 0.0225, 'learning_rate': 9.780000000000001e-06, 'epoch': 14.01}
{'loss': 0.0149, 'learning_rate': 9.724444444444445e-06, 'epoch': 15.01}
{'loss': 0.0148, 'learning_rate': 9.66888888888889e-06, 'epoch': 16.0}
{'loss': 0.0127, 'learning_rate': 9.613333333333335e-06, 'epoch': 16.01}
{'loss': 0.0089, 'learning_rate': 9.557777777777777e-06, 'epoch': 17.0}
{'loss': 0.0071, 'learning_rate': 9.502222222222223e-06, 'epoch': 18.0}
{'loss': 0.0066, 'learning_rate': 9.446666666666667e-06, 'epoch': 18.01}
{'loss': 0.005, 'learning_rate': 9.391111111111111e-06, 'epoch': 19.0}
{'loss': 0.0047, 'learning_rate': 9.335555555555557e-06, 'epoch': 19.01}
{'loss': 0.0036, 'learning_rate': 9.280000000000001e-06, 'epoch': 20.0}
{'loss': 0.0032, 'learning_rate': 9.224444444444445e-06, 'epoch': 21.0}
{'loss': 0.0033, 'learning_rate': 9.168888888888889e-06, 'epoch': 21.01}
{'loss': 0.003, 'learning_rate': 9.113333333333335e-06, 'epoch': 22.0}
{'loss': 0.0029, 'learning_rate': 9.057777777777779e-06, 'epoch': 23.0}
{'loss': 0.0026, 'learning_rate': 9.002222222222223e-06, 'epoch': 23.01}
{'loss': 0.0026, 'learning_rate': 8.946666666666669e-06, 'epoch': 24.0}
{'loss': 0.0027, 'learning_rate': 8.891111111111111e-06, 'epoch': 24.01}
12/11/2022 03:20:30 - WARNING - datasets.download.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [1/20]
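The learning_rate values in the loss lines above are consistent with the standard linear warmup/decay schedule implied by the arguments logged earlier (lr_scheduler_type=linear, warmup_steps=500, max_steps=5000, learning_rate=1e-05), where the logged value appears to be the rate of the step just before each logging event. A minimal sketch of that schedule, under those assumptions:

def linear_warmup_decay_lr(step, base_lr=1e-05, warmup_steps=500, max_steps=5000):
    # Ramp linearly from 0 to base_lr over warmup_steps, then decay linearly to 0 at max_steps.
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    return base_lr * max(0, max_steps - step) / (max_steps - warmup_steps)

# e.g. step 24 -> 4.8e-07 (first logged value, at logging step 25)
#      step 524 -> ~9.9467e-06 (value logged at step 525, just after warmup)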