class DisplayOutputs(keras.callbacks.Callback):
    def __init__(
        self, batch, idx_to_token, target_start_token_idx=2, target_end_token_idx=3
    ):
        """Displays a batch of decoded outputs every few epochs

        Args:
            batch: A test batch containing the keys "source" and "target"
            idx_to_token: A List containing the vocabulary tokens corresponding to their indices
            target_start_token_idx: A start token index in the target vocabulary
            target_end_token_idx: An end token index in the target vocabulary
        """
        self.batch = batch
        self.target_start_token_idx = target_start_token_idx
        self.target_end_token_idx = target_end_token_idx
        self.idx_to_char = idx_to_token

    def on_epoch_end(self, epoch, logs=None):
        if epoch % 5 != 0:  # only display outputs every 5 epochs
            return
        source = self.batch["source"]
        target = self.batch["target"].numpy()
        bs = tf.shape(source)[0]
        preds = self.model.generate(source, self.target_start_token_idx)
        preds = preds.numpy()
        for i in range(bs):
            target_text = "".join([self.idx_to_char[_] for _ in target[i, :]])
            prediction = ""
            for idx in preds[i, :]:
                prediction += self.idx_to_char[idx]
                if idx == self.target_end_token_idx:  # stop at the end token
                    break
            print(f"target: {target_text.replace('-','')}")
            print(f"prediction: {prediction}\n")
Learning rate schedule
class CustomSchedule(keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        init_lr=0.00001,
        lr_after_warmup=0.001,
        final_lr=0.00001,
        warmup_epochs=15,
        decay_epochs=85,
        steps_per_epoch=203,
    ):
        super().__init__()
        self.init_lr = init_lr
        self.lr_after_warmup = lr_after_warmup
        self.final_lr = final_lr
        self.warmup_epochs = warmup_epochs
        self.decay_epochs = decay_epochs
        self.steps_per_epoch = steps_per_epoch

    def calculate_lr(self, epoch):
        """linear warmup - linear decay"""
        warmup_lr = (
            self.init_lr
            + ((self.lr_after_warmup - self.init_lr) / (self.warmup_epochs - 1)) * epoch
        )
        decay_lr = tf.math.maximum(
            self.final_lr,
            self.lr_after_warmup
            - (epoch - self.warmup_epochs)
            * (self.lr_after_warmup - self.final_lr)
            / self.decay_epochs,
        )
        return tf.math.minimum(warmup_lr, decay_lr)

    def __call__(self, step):
        epoch = step // self.steps_per_epoch
        return self.calculate_lr(epoch)
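The schedule ramps the learning rate linearly from init_lr to lr_after_warmup over the warmup epochs, then decays it linearly back toward final_lr; taking the minimum of the two lines selects the warmup ramp early on and the decay ramp afterwards. A quick sanity check, not part of the original training script, that evaluates the schedule at a few epochs:

# Illustrative: print the learning rate at a few epochs to see its shape.
schedule = CustomSchedule(steps_per_epoch=203)
for epoch in [0, 7, 14, 50, 100]:
    lr = float(schedule(epoch * 203))
    print(f"epoch {epoch:3d}: lr = {lr:.6f}")
# Expect ~1e-5 at epoch 0, rising to ~1e-3 by epoch 14,
# then decaying back toward 1e-5 over the decay epochs.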
Create & train the end-to-end model
batch = next(iter(val_ds))

# The vocabulary to convert predicted indices into characters
idx_to_char = vectorizer.get_vocabulary()
display_cb = DisplayOutputs(
    batch, idx_to_char, target_start_token_idx=2, target_end_token_idx=3
)  # set the arguments as per vocabulary index for '<' and '>'
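As a quick check (not in the original script), one can confirm that indices 2 and 3 really map to the start and end tokens in this vocabulary:

# Illustrative check: indices 2 and 3 should map to the '<' and '>' tokens.
print(idx_to_char[2], idx_to_char[3])  # expected: < >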
model = Transformer(
    num_hid=200,
    num_head=2,
    num_feed_forward=400,
    target_maxlen=max_target_len,
    num_layers_enc=4,
    num_layers_dec=1,
    num_classes=34,
)
loss_fn = tf.keras.losses.CategoricalCrossentropy(
    from_logits=True, label_smoothing=0.1,
)
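Note that CategoricalCrossentropy expects one-hot targets, while the dataset yields integer token ids; the model's custom train_step is assumed to handle the conversion and to mask padding tokens before calling the compiled loss. A minimal sketch of that computation (the helper name and padding index 0 are assumptions):

# Sketch of the masked loss computation assumed inside the model's train_step.
def masked_loss_sketch(dec_target, preds, num_classes=34):
    one_hot = tf.one_hot(dec_target, depth=num_classes)  # ids -> one-hot vectors
    mask = tf.math.logical_not(tf.math.equal(dec_target, 0))  # ignore padding (id 0)
    return loss_fn(one_hot, preds, sample_weight=mask)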
learning_rate = CustomSchedule(
    init_lr=0.00001,
    lr_after_warmup=0.001,
    final_lr=0.00001,
    warmup_epochs=15,
    decay_epochs=85,
    steps_per_epoch=len(ds),
)
optimizer = keras.optimizers.Adam(learning_rate)
model.compile(optimizer=optimizer, loss=loss_fn)
history = model.fit(ds, validation_data=val_ds, callbacks=[display_cb], epochs=1)
203/203 [==============================] - 349s 2s/step - loss: 1.7437 - val_loss: 1.4650
target: <he had neither a bed to lie upon nor a coat to his back.>