        self.encoder = keras.Sequential(
            [self.enc_input]
            + [
                TransformerEncoder(num_hid, num_head, num_feed_forward)
                for _ in range(num_layers_enc)
            ]
        )
        # Decoder layers are stored as named attributes so that each one
        # gets its own set of weights.
        for i in range(num_layers_dec):
            setattr(
                self,
                f"dec_layer_{i}",
                TransformerDecoder(num_hid, num_head, num_feed_forward),
            )
        self.classifier = layers.Dense(num_classes)
    def decode(self, enc_out, target):
        y = self.dec_input(target)
        # Pass the target embedding through each decoder layer, attending
        # over the encoder output at every step.
        for i in range(self.num_layers_dec):
            y = getattr(self, f"dec_layer_{i}")(enc_out, y)
        return y

    def call(self, inputs):
        source = inputs[0]
        target = inputs[1]
        x = self.encoder(source)
        y = self.decode(x, target)
        return self.classifier(y)
    @property
    def metrics(self):
        return [self.loss_metric]
    def train_step(self, batch):
        """Processes one batch inside model.fit()."""
        source = batch["source"]
        target = batch["target"]
        # Teacher forcing: the decoder input is the target shifted right by
        # one position, and the model predicts the next token at each step.
        dec_input = target[:, :-1]
        dec_target = target[:, 1:]
        with tf.GradientTape() as tape:
            preds = self([source, dec_input])
            one_hot = tf.one_hot(dec_target, depth=self.num_classes)
            # Mask out padding (token id 0) so it does not contribute to the loss.
            mask = tf.math.logical_not(tf.math.equal(dec_target, 0))
            loss = self.compiled_loss(one_hot, preds, sample_weight=mask)
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        self.loss_metric.update_state(loss)
        return {"loss": self.loss_metric.result()}
    def test_step(self, batch):
        # Mirrors train_step, minus the gradient update.
        source = batch["source"]
        target = batch["target"]
        dec_input = target[:, :-1]
        dec_target = target[:, 1:]
        preds = self([source, dec_input])
        one_hot = tf.one_hot(dec_target, depth=self.num_classes)
        mask = tf.math.logical_not(tf.math.equal(dec_target, 0))
        loss = self.compiled_loss(one_hot, preds, sample_weight=mask)
        self.loss_metric.update_state(loss)
        return {"loss": self.loss_metric.result()}
    def generate(self, source, target_start_token_idx):
        """Performs inference over one batch of inputs using greedy decoding."""
        bs = tf.shape(source)[0]
        enc = self.encoder(source)
        # Start every sequence in the batch with the start token.
        dec_input = tf.ones((bs, 1), dtype=tf.int32) * target_start_token_idx
        dec_logits = []
        for i in range(self.target_maxlen - 1):
            dec_out = self.decode(enc, dec_input)
            logits = self.classifier(dec_out)
            # Greedy decoding: take the highest-scoring token at each position.
            logits = tf.argmax(logits, axis=-1, output_type=tf.int32)
            last_logit = tf.expand_dims(logits[:, -1], axis=-1)
            dec_logits.append(last_logit)
            # Feed the predicted token back in as the next decoder input.
            dec_input = tf.concat([dec_input, last_logit], axis=-1)
        return dec_input
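
Before training, it can help to confirm that the model builds and that greedy decoding returns a sequence of the expected length. The snippet below is a minimal smoke-test sketch: the hyperparameter values, the dummy input shape, and the start-token id are illustrative assumptions, not values taken from this example.

import tensorflow as tf

# Placeholder hyperparameters (assumed, not tuned for this task).
model = Transformer(
    num_hid=64,
    num_head=2,
    num_feed_forward=128,
    target_maxlen=50,
    num_layers_enc=2,
    num_layers_dec=1,
    num_classes=34,
)
# A dummy batch of 2 "spectrograms": 100 frames x 129 bins (shape assumed).
dummy_audio = tf.random.uniform((2, 100, 129))
# Greedy decoding; the start-token id of 2 is a placeholder.
out = model.generate(dummy_audio, target_start_token_idx=2)
print(out.shape)  # (2, 50): start token plus target_maxlen - 1 predictions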
Download the dataset
Note: this requires ~3.6 GB of disk space, and extracting the files takes ~5 minutes.
# Download the LJSpeech archive and extract it in place.
keras.utils.get_file(
    os.path.join(os.getcwd(), "data.tar.gz"),
    "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
    extract=True,
    archive_format="tar",
    cache_dir=".",
)
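
Because cache_dir="." is passed, Keras stores the download in a datasets/ subdirectory of the working directory, which is why the paths below point at ./datasets/LJSpeech-1.1. A quick sanity check on the extracted layout (a sketch; it assumes the standard LJSpeech directory structure):

import os

# The extracted corpus should sit under ./datasets/LJSpeech-1.1.
assert os.path.isdir("./datasets/LJSpeech-1.1"), "extraction did not finish"
# The wavs/ folder holds one clip per metadata row (13,100 in LJSpeech 1.1).
print(len(os.listdir("./datasets/LJSpeech-1.1/wavs")))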
saveto = "./datasets/LJSpeech-1.1"
wavs = glob("{}/**/*.wav".format(saveto), recursive=True)

id_to_text = {}
with open(os.path.join(saveto, "metadata.csv"), encoding="utf-8") as f:
    for line in f:
        # Each row: clip id | raw transcription | normalized transcription.
        fields = line.strip().split("|")
        id_to_text[fields[0]] = fields[2]
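
Each row of LJSpeech's metadata.csv carries three pipe-separated fields (clip id, raw transcription, normalized transcription), and the loop above keeps the normalized one. A quick inspection of the result (the printed transcript is abridged here):

# metadata.csv rows look like:
#   LJ001-0001|Printing, in the only sense ...|Printing, in the only sense ...
print(len(id_to_text))           # 13100 entries in LJSpeech 1.1
print(id_to_text["LJ001-0001"])  # normalized transcript of the first clip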