text
stringlengths 0
4.99k
|
---|
x = self.conv2(x) |
return self.conv3(x) |
Transformer Encoder Layer
class TransformerEncoder(layers.Layer):
    """A single Transformer encoder block.

    Applies multi-head self-attention followed by a position-wise
    feed-forward network; each sub-layer is wrapped with dropout and a
    residual connection + layer normalization.

    Args:
        embed_dim: Width of the token/feature embeddings.
        num_heads: Number of attention heads.
        feed_forward_dim: Hidden width of the feed-forward sub-layer.
        rate: Dropout rate applied after each sub-layer.
    """

    def __init__(self, embed_dim, num_heads, feed_forward_dim, rate=0.1):
        super().__init__()
        self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        # Two-layer position-wise MLP: expand to feed_forward_dim, project back.
        self.ffn = keras.Sequential(
            [
                layers.Dense(feed_forward_dim, activation="relu"),
                layers.Dense(embed_dim),
            ]
        )
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = layers.Dropout(rate)
        self.dropout2 = layers.Dropout(rate)

    def call(self, inputs, training):
        # Self-attention sub-layer: attend the sequence to itself.
        attention_out = self.att(inputs, inputs)
        residual = self.layernorm1(
            inputs + self.dropout1(attention_out, training=training)
        )
        # Feed-forward sub-layer on the normalized residual.
        ff_out = self.dropout2(self.ffn(residual), training=training)
        return self.layernorm2(residual + ff_out)
Transformer Decoder Layer
class TransformerDecoder(layers.Layer):
    """A single Transformer decoder block.

    Causally-masked self-attention over the target sequence, then
    encoder-decoder cross-attention, then a position-wise feed-forward
    network; each sub-layer has dropout plus a residual connection and
    layer normalization.

    Args:
        embed_dim: Width of the token embeddings.
        num_heads: Number of attention heads.
        feed_forward_dim: Hidden width of the feed-forward sub-layer.
        dropout_rate: Dropout rate for the cross-attention and
            feed-forward sub-layers. (The self-attention sub-layer keeps
            its deliberately heavier fixed 0.5 dropout.)
    """

    def __init__(self, embed_dim, num_heads, feed_forward_dim, dropout_rate=0.1):
        super().__init__()
        self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)
        self.self_att = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim
        )
        self.enc_att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        # Heavier fixed dropout on decoder self-attention: regularizes the
        # decoder against over-relying on the (teacher-forced) target tokens.
        self.self_dropout = layers.Dropout(0.5)
        # FIX: dropout_rate was previously accepted but ignored (both of
        # these were hardcoded to 0.1). The default of 0.1 preserves the
        # original behavior exactly.
        self.enc_dropout = layers.Dropout(dropout_rate)
        self.ffn_dropout = layers.Dropout(dropout_rate)
        self.ffn = keras.Sequential(
            [
                layers.Dense(feed_forward_dim, activation="relu"),
                layers.Dense(embed_dim),
            ]
        )

    def causal_attention_mask(self, batch_size, n_dest, n_src, dtype):
        """Masks the upper half of the dot product matrix in self attention.

        This prevents flow of information from future tokens to the current
        token: 1's in the lower triangle, counting from the lower right
        corner. Returns a `[batch_size, n_dest, n_src]` mask of `dtype`.
        """
        i = tf.range(n_dest)[:, None]
        j = tf.range(n_src)
        # Lower-triangular condition, aligned to the lower-right corner so it
        # also works when n_dest != n_src.
        m = i >= j - n_src + n_dest
        mask = tf.cast(m, dtype)
        mask = tf.reshape(mask, [1, n_dest, n_src])
        # Tile the single mask across the (dynamic) batch dimension.
        mult = tf.concat(
            [tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)], 0
        )
        return tf.tile(mask, mult)

    def call(self, enc_out, target):
        input_shape = tf.shape(target)
        batch_size = input_shape[0]
        seq_len = input_shape[1]
        causal_mask = self.causal_attention_mask(batch_size, seq_len, seq_len, tf.bool)
        # Masked self-attention over the target sequence.
        target_att = self.self_att(target, target, attention_mask=causal_mask)
        target_norm = self.layernorm1(target + self.self_dropout(target_att))
        # Cross-attention: target queries attend over the encoder output.
        enc_out = self.enc_att(target_norm, enc_out)
        enc_out_norm = self.layernorm2(self.enc_dropout(enc_out) + target_norm)
        # Position-wise feed-forward sub-layer.
        ffn_out = self.ffn(enc_out_norm)
        ffn_out_norm = self.layernorm3(enc_out_norm + self.ffn_dropout(ffn_out))
        return ffn_out_norm
Complete the Transformer model

Our model takes audio spectrograms as inputs and predicts a sequence of characters. During training, we give the decoder the target character sequence shifted to the left as input. During inference, the decoder uses its own past predictions to predict the next token.
class Transformer(keras.Model): |
def __init__( |
self, |
num_hid=64, |
num_head=2, |
num_feed_forward=128, |
source_maxlen=100, |
target_maxlen=100, |
num_layers_enc=4, |
num_layers_dec=1, |
num_classes=10, |
): |
super().__init__() |
self.loss_metric = keras.metrics.Mean(name=\"loss\") |
self.num_layers_enc = num_layers_enc |
self.num_layers_dec = num_layers_dec |
self.target_maxlen = target_maxlen |
self.num_classes = num_classes |
self.enc_input = SpeechFeatureEmbedding(num_hid=num_hid, maxlen=source_maxlen) |
self.dec_input = TokenEmbedding( |
num_vocab=num_classes, maxlen=target_maxlen, num_hid=num_hid |
) |