Upload trainer.py with huggingface_hub
trainer.py CHANGED (+13 -2)
@@ -61,10 +61,12 @@ import torch
 from dataclasses import dataclass
 from typing import Any, Dict, List, Union
 
+@dataclass
 @dataclass
 class DataCollatorSpeechSeq2SeqWithPadding:
     processor: Any
     decoder_start_token_id: int
+    max_target_length: int = 448 # Add a parameter to control max length
 
     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
         # split inputs and labels since they have to be of different lengths and need different padding methods
@@ -77,8 +79,13 @@ class DataCollatorSpeechSeq2SeqWithPadding:
         # pad the labels to max length
         labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
 
+        # Truncate labels to max_target_length
+        labels = labels_batch["input_ids"]
+        if labels.shape[1] > self.max_target_length:
+            labels = labels[:, :self.max_target_length]
+
         # replace padding with -100 to ignore loss correctly
-        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+        labels = labels.masked_fill(labels_batch.attention_mask[:, :labels.shape[1]].ne(1), -100)
 
         # if bos token is appended in previous tokenization step,
         # cut bos token here as it's append later anyways
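This hunk caps label length before masking: labels longer than max_target_length are truncated, and only then is padding replaced with -100 so the loss ignores it. Below is a minimal, self-contained sketch of that logic on made-up token ids; max_target_length is shrunk to 5 purely for the demo, while trainer.py itself defaults to 448 (the Whisper decoder's maximum target length).

import torch

# Toy stand-ins for what the collator sees after processor.tokenizer.pad():
# padded token ids plus the matching attention mask (values are made up).
input_ids = torch.tensor([[50258, 7, 8, 9, 10, 11, 12],
                          [50258, 7, 8, 50257, 50257, 50257, 50257]])
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1],
                               [1, 1, 1, 0, 0, 0, 0]])

max_target_length = 5  # tiny value for illustration; trainer.py uses 448

# Truncate labels that exceed the maximum target length
labels = input_ids
if labels.shape[1] > max_target_length:
    labels = labels[:, :max_target_length]

# Replace padding with -100 so those positions are ignored by the loss;
# the attention mask is sliced to the (possibly truncated) label width
labels = labels.masked_fill(attention_mask[:, :labels.shape[1]].ne(1), -100)

print(labels)
# tensor([[50258,     7,     8,     9,    10],
#         [50258,     7,     8,  -100,  -100]])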
@@ -89,9 +96,11 @@ class DataCollatorSpeechSeq2SeqWithPadding:
 
         return batch
 
+# When initializing the data collator, you can now specify the max length
 data_collator = DataCollatorSpeechSeq2SeqWithPadding(
     processor=processor,
     decoder_start_token_id=model.config.decoder_start_token_id,
+    max_target_length=448 # Explicitly set the max length
 )
 
 import evaluate
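Putting the hunks above together, the patched collator reads roughly as follows. Only the lines touched by this commit are taken from the diff; the input-feature padding, the bos-token trimming, and the final batch["labels"] assignment are not shown above and are filled in here from the common Whisper fine-tuning recipe, so treat those parts as assumptions rather than the exact contents of trainer.py.

from dataclasses import dataclass
from typing import Any, Dict, List, Union

import torch


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any
    decoder_start_token_id: int
    max_target_length: int = 448  # Whisper's maximum decoder length

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths
        # and need different padding methods
        # NOTE: these two pad() calls are assumed from the usual Whisper recipe;
        # they are not part of this diff
        input_features = [{"input_features": f["input_features"]} for f in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        label_features = [{"input_ids": f["labels"]} for f in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        # Truncate labels to max_target_length (added by this commit)
        labels = labels_batch["input_ids"]
        if labels.shape[1] > self.max_target_length:
            labels = labels[:, :self.max_target_length]

        # replace padding with -100 to ignore loss correctly
        labels = labels.masked_fill(labels_batch.attention_mask[:, :labels.shape[1]].ne(1), -100)

        # if bos token is appended in previous tokenization step,
        # cut bos token here as it's appended later anyways
        # (assumed from the standard recipe, not shown in this diff)
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels
        return batch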
@@ -124,12 +133,14 @@ training_args = Seq2SeqTrainingArguments(
     max_steps=4000,
     gradient_checkpointing=True,
     fp16=True,
+    fp16_full_eval=True,
+    half_precision_backend='auto',
     evaluation_strategy="steps",
     per_device_eval_batch_size=8,
     predict_with_generate=True,
     generation_max_length=225,
     save_steps=1000,
-    eval_steps=
+    eval_steps=20,
     logging_steps=25,
     report_to=["tensorboard"],
     load_best_model_at_end=True,
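The final hunk enables half-precision evaluation (fp16_full_eval with half_precision_backend='auto') and sets eval_steps=20 so evaluation runs every 20 steps. As a sketch, the resulting Seq2SeqTrainingArguments call looks like this, restricted to the options visible in the diff; output_dir is a placeholder, and every argument not shown above (learning rate, train batch size, warmup, hub settings, ...) is simply omitted here.

from transformers import Seq2SeqTrainingArguments

# Sketch of the patched arguments, limited to what the diff shows.
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-finetuned",  # placeholder, not taken from trainer.py
    max_steps=4000,
    gradient_checkpointing=True,
    fp16=True,
    fp16_full_eval=True,               # new: run evaluation in fp16 as well
    half_precision_backend="auto",     # new: let Transformers pick the AMP backend
    evaluation_strategy="steps",
    per_device_eval_batch_size=8,
    predict_with_generate=True,
    generation_max_length=225,
    save_steps=1000,
    eval_steps=20,                     # new: evaluate every 20 steps
    logging_steps=25,
    report_to=["tensorboard"],
    load_best_model_at_end=True,
)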
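Nothing in this commit touches the trainer construction itself. As a usage sketch only, the pieces above are typically handed to a Seq2SeqTrainer as shown below: model, processor, data_collator, and training_args come from trainer.py, while the dataset splits and the compute_metrics (WER) function are assumed names, not lines from this file.

from transformers import Seq2SeqTrainer

# Assumed wiring, not shown in this diff.
trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset["train"],     # assumed split names
    eval_dataset=dataset["test"],
    data_collator=data_collator,
    compute_metrics=compute_metrics,    # assumed WER metric built with `evaluate`
    tokenizer=processor.feature_extractor,
)

trainer.train()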