id: int64 (1 to 564)
tensorflow: stringclasses (52 values)
pytorch: stringclasses (81 values)
mxnet: stringclasses (66 values)
paddle: stringclasses (73 values)
501
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120, activation='sigmoid'), tf.keras.layers.Dense(84, activation='sigmoid'), tf.keras.layers.Dense(10)]) X = tf.random.uniform((1, 28, 28, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape) class TrainCallback(tf.keras.callbacks.Callback): def __init__(self, net, train_iter, test_iter, num_epochs, device_name): self.timer = d2l.Timer() self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) self.net = net self.train_iter = train_iter self.test_iter = test_iter self.num_epochs = num_epochs self.device_name = device_name def on_epoch_begin(self, epoch, logs=None): self.timer.start() def on_epoch_end(self, epoch, logs): self.timer.stop() test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy'] metrics = (logs['loss'], logs['accuracy'], test_acc) self.animator.add(epoch + 1, metrics) if epoch == self.num_epochs - 1: batch_size = next(iter(self.train_iter))[0].shape[0] num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy() def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device): device_name = device._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): optimizer = tf.keras.optimizers.SGD(learning_rate=lr) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) net = net_fn() net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name) net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback]) return net
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(), nn.Linear(120, 84), nn.Sigmoid(), nn.Linear(84, 10)) X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ',X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): def init_weights(m): if type(m) == nn.Linear or type(m) == nn.Conv2d: nn.init.xavier_uniform_(m.weight) net.apply(init_weights) net.to(device) optimizer = torch.optim.SGD(net.parameters(), lr=lr) loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) net.train() for i, (X, y) in enumerate(train_iter): timer.start() optimizer.zero_grad() X, y = X.to(device), y.to(device) y_hat = net(X) l = loss(y_hat, y) l.backward() optimizer.step() with torch.no_grad(): metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
null
null
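The LeNet cells above only print the per-layer output shapes. As a minimal usage sketch (assuming the PyTorch net defined above is in scope; d2l.train_ch6 is the library copy of the training function shown in the row, and the hyperparameters are illustrative d2l-style defaults, not values taken from this row):

from d2l import torch as d2l
# Illustrative hyperparameters; `net` is the LeNet Sequential from the PyTorch cell above.
lr, num_epochs, batch_size = 0.9, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())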
502
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10)]) X = tf.random.uniform((1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape)
import torch from torch import nn from d2l import torch as d2l net = nn.Sequential( nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2), nn.Flatten(), nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(4096, 10)) X = torch.randn(1, 1, 224, 224) for layer in net: X=layer(X) print(layer.__class__.__name__,'output shape: ',X.shape)
null
null
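AlexNet expects 224x224 inputs, so the 28x28 Fashion-MNIST images have to be upsampled when the data is loaded. A hedged sketch of such a run (assumes the PyTorch net above; batch size and learning rate are illustrative):

from d2l import torch as d2l
batch_size = 128
# resize=224 upsamples the 28x28 Fashion-MNIST images to the resolution AlexNet expects.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
lr, num_epochs = 0.01, 10
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())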
503
import tensorflow as tf from d2l import tensorflow as d2l def vgg_block(num_convs, num_channels): blk = tf.keras.models.Sequential() for _ in range(num_convs): blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu')) blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) return blk def vgg(conv_arch): net = tf.keras.models.Sequential() for (num_convs, num_channels) in conv_arch: net.add(vgg_block(num_convs, num_channels)) net.add(tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10)])) return net conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512)) net = vgg(conv_arch) X = tf.random.uniform((1, 224, 224, 1)) for blk in net.layers: X = blk(X) print(blk.__class__.__name__,'output shape: ', X.shape) ratio = 4 small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch] net = lambda: vgg(small_conv_arch)
import torch from torch import nn from d2l import torch as d2l def vgg_block(num_convs, in_channels, out_channels): layers = [] for _ in range(num_convs): layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)) layers.append(nn.ReLU()) in_channels = out_channels layers.append(nn.MaxPool2d(kernel_size=2,stride=2)) return nn.Sequential(*layers) def vgg(conv_arch): conv_blks = [] in_channels = 1 for (num_convs, out_channels) in conv_arch: conv_blks.append(vgg_block(num_convs, in_channels, out_channels)) in_channels = out_channels return nn.Sequential( *conv_blks, nn.Flatten(), nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(0.5), nn.Linear(4096, 10)) conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512)) net = vgg(conv_arch) X = torch.randn(size=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.__class__.__name__,'output shape: ',X.shape) ratio = 4 small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch] net = vgg(small_conv_arch)
null
null
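The channel-reduced small_conv_arch above exists purely to cut compute on Fashion-MNIST. One quick way to see the effect is to compare parameter counts of the full and reduced PyTorch networks (a small check, assuming the vgg, conv_arch and small_conv_arch definitions from the cell above are in scope):

full_params = sum(p.numel() for p in vgg(conv_arch).parameters())
small_params = sum(p.numel() for p in vgg(small_conv_arch).parameters())
print(full_params, small_params)  # the ratio-4 network is considerably smaller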
504
import tensorflow as tf from d2l import tensorflow as d2l def nin_block(num_channels, kernel_size, strides, padding): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'), tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'), tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')]) def net(): return tf.keras.models.Sequential([ nin_block(96, kernel_size=11, strides=4, padding='valid'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), nin_block(256, kernel_size=5, strides=1, padding='same'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), nin_block(384, kernel_size=3, strides=1, padding='same'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Dropout(0.5), nin_block(10, kernel_size=3, strides=1, padding='same'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Reshape((1, 1, 10)), tf.keras.layers.Flatten(), ]) X = tf.random.uniform((1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import torch from torch import nn from d2l import torch as d2l def nin_block(in_channels, out_channels, kernel_size, strides, padding): return nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size, strides, padding), nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(out_channels, out_channels, kernel_size=1), nn.ReLU()) net = nn.Sequential( nin_block(1, 96, kernel_size=11, strides=4, padding=0), nn.MaxPool2d(3, stride=2), nin_block(96, 256, kernel_size=5, strides=1, padding=2), nn.MaxPool2d(3, stride=2), nin_block(256, 384, kernel_size=3, strides=1, padding=1), nn.MaxPool2d(3, stride=2), nn.Dropout(0.5), nin_block(384, 10, kernel_size=3, strides=1, padding=1), nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten()) X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
null
505
import tensorflow as tf from d2l import tensorflow as d2l class Inception(tf.keras.Model): def __init__(self, c1, c2, c3, c4): super().__init__() self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu') self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu') self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu') self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu') self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu') self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same') self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu') def call(self, x): p1 = self.p1_1(x) p2 = self.p2_2(self.p2_1(x)) p3 = self.p3_2(self.p3_1(x)) p4 = self.p4_2(self.p4_1(x)) return tf.keras.layers.Concatenate()([p1, p2, p3, p4]) def b1(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b2(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, 1, activation='relu'), tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b3(): return tf.keras.models.Sequential([ Inception(64, (96, 128), (16, 32), 32), Inception(128, (128, 192), (32, 96), 64), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b4(): return tf.keras.Sequential([ Inception(192, (96, 208), (16, 48), 64), Inception(160, (112, 224), (24, 64), 64), Inception(128, (128, 256), (24, 64), 64), Inception(112, (144, 288), (32, 64), 64), Inception(256, (160, 320), (32, 128), 128), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b5(): return tf.keras.Sequential([ Inception(256, (160, 320), (32, 128), 128), Inception(384, (192, 384), (48, 128), 128), tf.keras.layers.GlobalAvgPool2D(), tf.keras.layers.Flatten() ]) def net(): return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(), tf.keras.layers.Dense(10)]) X = tf.random.uniform(shape=(1, 96, 96, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l class Inception(nn.Module): def __init__(self, in_channels, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2d(in_channels, c1, kernel_size=1) self.p2_1 = nn.Conv2d(in_channels, c2[0], kernel_size=1) self.p2_2 = nn.Conv2d(c2[0], c2[1], kernel_size=3, padding=1) self.p3_1 = nn.Conv2d(in_channels, c3[0], kernel_size=1) self.p3_2 = nn.Conv2d(c3[0], c3[1], kernel_size=5, padding=2) self.p4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1) self.p4_2 = nn.Conv2d(in_channels, c4, kernel_size=1) def forward(self, x): p1 = F.relu(self.p1_1(x)) p2 = F.relu(self.p2_2(F.relu(self.p2_1(x)))) p3 = F.relu(self.p3_2(F.relu(self.p3_1(x)))) p4 = F.relu(self.p4_2(self.p4_1(x))) return torch.cat((p1, p2, p3, p4), dim=1) b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b2 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=1), nn.ReLU(), nn.Conv2d(64, 192, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32), Inception(256, 128, (128, 192), (32, 96), 64), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64), Inception(512, 160, (112, 224), (24, 64), 64), Inception(512, 128, (128, 256), (24, 64), 64), Inception(512, 112, (144, 288), (32, 64), 64), Inception(528, 256, (160, 320), (32, 128), 128), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128), Inception(832, 384, (192, 384), (48, 128), 128), nn.AdaptiveAvgPool2d((1,1)), nn.Flatten()) net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10)) X = torch.rand(size=(1, 1, 96, 96)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
null
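Each Inception block concatenates its four branches, so its output carries c1 + c2[1] + c3[1] + c4 channels while keeping the spatial size unchanged. A small check of this bookkeeping (assumes the PyTorch Inception class from the cell above):

import torch
# First Inception block of stage b3: branches produce 64, 128, 32 and 32 channels.
blk = Inception(192, 64, (96, 128), (16, 32), 32)
X = torch.rand(1, 192, 28, 28)
print(blk(X).shape)  # torch.Size([1, 256, 28, 28]), since 64 + 128 + 32 + 32 = 256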
506
import tensorflow as tf from d2l import tensorflow as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps): inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype) inv *= gamma Y = X * inv + (beta - moving_mean * inv) return Y class BatchNorm(tf.keras.layers.Layer): def __init__(self, **kwargs): super(BatchNorm, self).__init__(**kwargs) def build(self, input_shape): weight_shape = [input_shape[-1], ] self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True) self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True) self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False) self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False) super(BatchNorm, self).build(input_shape) def assign_moving_average(self, variable, value): momentum = 0.9 delta = variable * momentum + value * (1 - momentum) return variable.assign(delta) @tf.function def call(self, inputs, training): if training: axes = list(range(len(inputs.shape) - 1)) batch_mean = tf.reduce_mean(inputs, axes, keepdims=True) batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True) batch_mean = tf.squeeze(batch_mean, axes) batch_variance = tf.squeeze(batch_variance, axes) mean_update = self.assign_moving_average(self.moving_mean, batch_mean) variance_update = self.assign_moving_average(self.moving_variance, batch_variance) self.add_update(mean_update) self.add_update(variance_update) mean, variance = batch_mean, batch_variance else: mean, variance = self.moving_mean, self.moving_variance output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5) return output def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(84), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(10)] ) lr, num_epochs, batch_size = 1.0, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu()) tf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,)) def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(84), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(10), ])
import torch from torch import nn from d2l import torch as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): if not torch.is_grad_enabled(): X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = X.mean(dim=0) var = ((X - mean) ** 2).mean(dim=0) else: mean = X.mean(dim=(0, 2, 3), keepdim=True) var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True) X_hat = (X - mean) / torch.sqrt(var + eps) moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean.data, moving_var.data class BatchNorm(nn.Module): def __init__(self, num_features, num_dims): super().__init__() if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = nn.Parameter(torch.ones(shape)) self.beta = nn.Parameter(torch.zeros(shape)) self.moving_mean = torch.zeros(shape) self.moving_var = torch.ones(shape) def forward(self, X): if self.moving_mean.device != X.device: self.moving_mean = self.moving_mean.to(X.device) self.moving_var = self.moving_var.to(X.device) Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma, self.beta, self.moving_mean, self.moving_var, eps=1e-5, momentum=0.9) return Y net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(16*4*4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(), nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(), nn.Linear(84, 10)) lr, num_epochs, batch_size = 1.0, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu()) net[1].gamma.reshape((-1,)), net[1].beta.reshape((-1,)) net = nn.Sequential( nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(), nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(), nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(), nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(), nn.Linear(84, 10))
null
null
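A quick sanity check on the scratch PyTorch BatchNorm above: in training mode it standardizes each channel with batch statistics, so with gamma initialised to 1 and beta to 0 the per-channel mean of the output should be close to 0 and the standard deviation close to 1 (a sketch, assuming the BatchNorm class from the cell above is in scope):

import torch
bn = BatchNorm(6, num_dims=4)          # scratch layer; gamma = 1, beta = 0 at init
X = torch.randn(8, 6, 10, 10)
Y = bn(X)                              # gradients are enabled, so batch statistics are used
print(Y.mean(dim=(0, 2, 3)))           # approximately 0 for every channel
print(Y.std(dim=(0, 2, 3)))            # approximately 1 for every channel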
507
import tensorflow as tf from d2l import tensorflow as d2l class Residual(tf.keras.Model): def __init__(self, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = tf.keras.layers.Conv2D( num_channels, padding='same', kernel_size=3, strides=strides) self.conv2 = tf.keras.layers.Conv2D( num_channels, kernel_size=3, padding='same') self.conv3 = None if use_1x1conv: self.conv3 = tf.keras.layers.Conv2D( num_channels, kernel_size=1, strides=strides) self.bn1 = tf.keras.layers.BatchNormalization() self.bn2 = tf.keras.layers.BatchNormalization() def call(self, X): Y = tf.keras.activations.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3 is not None: X = self.conv3(X) Y += X return tf.keras.activations.relu(Y) blk = Residual(3) X = tf.random.uniform((4, 6, 6, 3)) Y = blk(X) Y.shape blk = Residual(6, use_1x1conv=True, strides=2) blk(X).shape b1 = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) class ResnetBlock(tf.keras.layers.Layer): def __init__(self, num_channels, num_residuals, first_block=False, **kwargs): super(ResnetBlock, self).__init__(**kwargs) self.residual_layers = [] for i in range(num_residuals): if i == 0 and not first_block: self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2)) else: self.residual_layers.append(Residual(num_channels)) def call(self, X): for layer in self.residual_layers.layers: X = layer(X) return X b2 = ResnetBlock(64, 2, first_block=True) b3 = ResnetBlock(128, 2) b4 = ResnetBlock(256, 2) b5 = ResnetBlock(512, 2) def net(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'), ResnetBlock(64, 2, first_block=True), ResnetBlock(128, 2), ResnetBlock(256, 2), ResnetBlock(512, 2), tf.keras.layers.GlobalAvgPool2D(), tf.keras.layers.Dense(units=10)]) X = tf.random.uniform(shape=(1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l class Residual(nn.Module): def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides) self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm2d(num_channels) self.bn2 = nn.BatchNorm2d(num_channels) def forward(self, X): Y = F.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) Y += X return F.relu(Y) blk = Residual(3,3) X = torch.rand(4, 3, 6, 6) Y = blk(X) Y.shape blk = Residual(3,6, use_1x1conv=True, strides=2) blk(X).shape b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) def resnet_block(input_channels, num_channels, num_residuals, first_block=False): blk = [] for i in range(num_residuals): if i == 0 and not first_block: blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2)) else: blk.append(Residual(num_channels, num_channels)) return blk b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True)) b3 = nn.Sequential(*resnet_block(64, 128, 2)) b4 = nn.Sequential(*resnet_block(128, 256, 2)) b5 = nn.Sequential(*resnet_block(256, 512, 2)) net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1,1)), nn.Flatten(), nn.Linear(512, 10)) X = torch.rand(size=(1, 1, 224, 224)) for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
null
508
import tensorflow as tf from d2l import tensorflow as d2l class ConvBlock(tf.keras.layers.Layer): def __init__(self, num_channels): super(ConvBlock, self).__init__() self.bn = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same') self.listLayers = [self.bn, self.relu, self.conv] def call(self, x): y = x for layer in self.listLayers.layers: y = layer(y) y = tf.keras.layers.concatenate([x,y], axis=-1) return y class DenseBlock(tf.keras.layers.Layer): def __init__(self, num_convs, num_channels): super(DenseBlock, self).__init__() self.listLayers = [] for _ in range(num_convs): self.listLayers.append(ConvBlock(num_channels)) def call(self, x): for layer in self.listLayers.layers: x = layer(x) return x blk = DenseBlock(2, 10) X = tf.random.uniform((4, 8, 8, 3)) Y = blk(X) Y.shape class TransitionBlock(tf.keras.layers.Layer): def __init__(self, num_channels, **kwargs): super(TransitionBlock, self).__init__(**kwargs) self.batch_norm = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1) self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2) def call(self, x): x = self.batch_norm(x) x = self.relu(x) x = self.conv(x) return self.avg_pool(x) blk = TransitionBlock(10) blk(Y).shape def block_1(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU(), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def block_2(): net = block_1() num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(TransitionBlock(num_channels)) return net def net(): net = block_2() net.add(tf.keras.layers.BatchNormalization()) net.add(tf.keras.layers.ReLU()) net.add(tf.keras.layers.GlobalAvgPool2D()) net.add(tf.keras.layers.Flatten()) net.add(tf.keras.layers.Dense(10)) return net
import torch from torch import nn from d2l import torch as d2l def conv_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2d(input_channels), nn.ReLU(), nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1)) class DenseBlock(nn.Module): def __init__(self, num_convs, input_channels, num_channels): super(DenseBlock, self).__init__() layer = [] for i in range(num_convs): layer.append(conv_block(num_channels * i + input_channels, num_channels)) self.net = nn.Sequential(*layer) def forward(self, X): for blk in self.net: Y = blk(X) X = torch.cat((X, Y), dim=1) return X blk = DenseBlock(2, 3, 10) X = torch.randn(4, 3, 8, 8) Y = blk(X) Y.shape def transition_block(input_channels, num_channels): return nn.Sequential( nn.BatchNorm2d(input_channels), nn.ReLU(), nn.Conv2d(input_channels, num_channels, kernel_size=1), nn.AvgPool2d(kernel_size=2, stride=2)) blk = transition_block(23, 10) blk(Y).shape b1 = nn.Sequential( nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] blks = [] for i, num_convs in enumerate(num_convs_in_dense_blocks): blks.append(DenseBlock(num_convs, num_channels, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: blks.append(transition_block(num_channels, num_channels // 2)) num_channels = num_channels // 2 net = nn.Sequential( b1, *blks, nn.BatchNorm2d(num_channels), nn.ReLU(), nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(num_channels, 10))
null
null
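The channel bookkeeping in the PyTorch DenseNet cell above can be verified by hand: each dense block adds num_convs * growth_rate channels and each transition layer halves the count, which is why the final linear layer ends up with 248 inputs.

num_channels, growth_rate = 64, 32
for i, num_convs in enumerate([4, 4, 4, 4]):
    num_channels += num_convs * growth_rate      # dense block concatenates the new channels
    if i != 3:
        num_channels //= 2                       # transition layer halves the channel count
print(num_channels)                              # 248, matching nn.Linear(num_channels, 10) above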
509
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l T = 1000 time = tf.range(1, T + 1, dtype=tf.float32) x = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = tf.Variable(tf.zeros((T - tau, tau))) for i in range(tau): features[:, i].assign(x[i: T - tau + i]) labels = tf.reshape(x[tau:], (-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(1)]) return net loss = tf.keras.losses.MeanSquaredError() def train(net, train_iter, loss, epochs, lr): trainer = tf.keras.optimizers.Adam() for epoch in range(epochs): for X, y in train_iter: with tf.GradientTape() as g: out = net(X) l = loss(y, out) params = net.trainable_variables grads = g.gradient(l, params) trainer.apply_gradients(zip(grads, params)) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.numpy(), onestep_preds.numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = tf.Variable(tf.zeros(T)) multistep_preds[:n_train + tau].assign(x[:n_train + tau]) for i in range(n_train + tau, T): multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ())) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.numpy(), onestep_preds.numpy(), multistep_preds[n_train + tau:].numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps))) for i in range(tau): features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy()) for i in range(tau, tau + max_steps): features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1)) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
%matplotlib inline import torch from torch import nn from d2l import torch as d2l T = 1000 time = torch.arange(1, T + 1, dtype=torch.float32) x = torch.sin(0.01 * time) + torch.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = torch.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def init_weights(m): if type(m) == nn.Linear: nn.init.xavier_uniform_(m.weight) def get_net(): net = nn.Sequential(nn.Linear(4, 10), nn.ReLU(), nn.Linear(10, 1)) net.apply(init_weights) return net loss = nn.MSELoss(reduction='none') def train(net, train_iter, loss, epochs, lr): trainer = torch.optim.Adam(net.parameters(), lr) for epoch in range(epochs): for X, y in train_iter: trainer.zero_grad() l = loss(net(X), y) l.sum().backward() trainer.step() net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.detach().numpy(), onestep_preds.detach().numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = torch.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.detach().numpy(), onestep_preds.detach().numpy(), multistep_preds[n_train + tau:].detach().numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = torch.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
null
null
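The feature construction above turns the series into (x_{t-tau}, ..., x_{t-1}) -> x_t pairs. A tiny worked example with an 8-step integer sequence makes the layout explicit (same loop as above, just on toy data):

import torch
T, tau = 8, 4
x = torch.arange(1., T + 1)              # [1, 2, ..., 8]
features = torch.zeros((T - tau, tau))
for i in range(tau):
    features[:, i] = x[i: T - tau + i]
labels = x[tau:].reshape((-1, 1))
print(features)  # rows [1,2,3,4], [2,3,4,5], [3,4,5,6], [4,5,6,7]
print(labels)    # [[5.], [6.], [7.], [8.]]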
510
import collections import re from d2l import tensorflow as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('Error: Unknown word element type:' + token) tokens = tokenize(lines) for i in range(11): print(tokens[i]) def load_corpus_time_machine(max_tokens=-1): lines = read_time_machine() tokens = tokenize(lines, 'char') vocab = Vocab(tokens) corpus = [vocab[token] for line in tokens for token in line] if max_tokens > 0: corpus = corpus[:max_tokens] return corpus, vocab corpus, vocab = load_corpus_time_machine() len(corpus), len(vocab)
import collections import re from d2l import torch as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('Error: Unknown word element type:' + token) tokens = tokenize(lines) for i in range(11): print(tokens[i]) def load_corpus_time_machine(max_tokens=-1): lines = read_time_machine() tokens = tokenize(lines, 'char') vocab = Vocab(tokens) corpus = [vocab[token] for line in tokens for token in line] if max_tokens > 0: corpus = corpus[:max_tokens] return corpus, vocab corpus, vocab = load_corpus_time_machine() len(corpus), len(vocab)
null
null
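A toy call shows the two token granularities produced by the tokenize function above (assumes the function from either cell is in scope; the input lines are made up for illustration):

lines = ['the time machine', 'by h g wells']
print(tokenize(lines, 'word'))           # [['the', 'time', 'machine'], ['by', 'h', 'g', 'wells']]
print(tokenize(lines, 'char')[0][:5])    # ['t', 'h', 'e', ' ', 't']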
511
import random import tensorflow as tf from d2l import tensorflow as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield tf.constant(X), tf.constant(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = tf.constant(corpus[offset: offset + num_tokens]) Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens]) Xs = tf.reshape(Xs, (batch_size, -1)) Ys = tf.reshape(Ys, (batch_size, -1)) num_batches = Xs.shape[1] // num_steps for i in range(0, num_batches * num_steps, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
import random import torch from d2l import torch as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield torch.tensor(X), torch.tensor(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = torch.tensor(corpus[offset: offset + num_tokens]) Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
null
null
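To see how the two minibatch strategies differ, they can be run on a small artificial corpus of consecutive integers (a sketch, assuming the PyTorch seq_data_iter_random and seq_data_iter_sequential from the cell above):

my_seq = list(range(35))
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('random:', X, 'Y:', Y)         # adjacent minibatches need not be adjacent in the corpus
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
    print('sequential:', X, 'Y:', Y)     # adjacent minibatches are adjacent in the corpus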
512
import tensorflow as tf from d2l import tensorflow as d2l X, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1) H, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1) tf.matmul(X, W_xh) + tf.matmul(H, W_hh) tf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))
import torch from d2l import torch as d2l X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4)) H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4)) torch.matmul(X, W_xh) + torch.matmul(H, W_hh) torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
null
null
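The two expressions above are mathematically identical: multiplying the concatenation of X and H by the vertical concatenation of W_xh and W_hh gives the same result as summing the two separate products. A quick numerical check confirms they agree up to floating-point error:

import torch
X, W_xh = torch.normal(0, 1, (3, 1)), torch.normal(0, 1, (1, 4))
H, W_hh = torch.normal(0, 1, (3, 4)), torch.normal(0, 1, (4, 4))
Y1 = torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
Y2 = torch.matmul(torch.cat((X, H), 1), torch.cat((W_xh, W_hh), 0))
print(torch.allclose(Y1, Y2, atol=1e-6))  # True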
513
%matplotlib inline import math import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) train_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True) tf.one_hot(tf.constant([0, 2]), len(vocab)) X = tf.reshape(tf.range(10), (2, 5)) tf.one_hot(tf.transpose(X), 28).shape def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32) W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32) b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32) W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xh, W_hh, b_h, W_hq, b_q] return params def init_rnn_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.init_state, self.forward_fn = init_state, forward_fn self.trainable_variables = get_params(vocab_size, num_hiddens) def __call__(self, X, state): X = tf.one_hot(tf.transpose(X), self.vocab_size) X = tf.cast(X, tf.float32) return self.forward_fn(X, state, self.trainable_variables) def begin_state(self, batch_size, *args, **kwargs): return self.init_state(batch_size, self.num_hiddens) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_hiddens = 512 with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab): state = net.begin_state(batch_size=1, dtype=tf.float32) outputs = [vocab[prefix[0]]] get_input = lambda: tf.reshape(tf.constant([outputs[-1]]), (1, 1)).numpy() for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.numpy().argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) predict_ch8('time traveller ', 10, net, vocab) def grad_clipping(grads, theta): theta = tf.constant(theta, dtype=tf.float32) new_grad = [] for grad in grads: if isinstance(grad, tf.IndexedSlices): new_grad.append(tf.convert_to_tensor(grad)) else: new_grad.append(grad) norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy() for grad in new_grad)) norm = tf.cast(norm, tf.float32) if tf.greater(norm, theta): for i, grad in enumerate(new_grad): new_grad[i] = grad * theta / norm else: new_grad = new_grad return new_grad def train_epoch_ch8(net, train_iter, loss, updater, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32) with tf.GradientTape(persistent=True) as g: y_hat, state = net(X, state) y = tf.reshape(tf.transpose(Y), (-1)) l = loss(y, y_hat) params = net.trainable_variables grads = g.gradient(l, params) grads = grad_clipping(grads, 1) updater.apply_gradients(zip(grads, params)) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False): with strategy.scope(): loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) updater = tf.keras.optimizers.SGD(lr) animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) device = d2l.try_gpu()._device_name num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, strategy) with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) train_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)
%matplotlib inline import math import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) F.one_hot(torch.tensor([0, 2]), len(vocab)) X = torch.arange(10).reshape((2, 5)) F.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device) * 0.01 W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = torch.zeros(num_hiddens, device=device) W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_rnn_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h) Y = torch.mm(H, W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = F.one_hot(X.T, self.vocab_size).type(torch.float32) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, device): return self.init_state(batch_size, self.num_hiddens, device) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.to(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, device=device) outputs = [vocab[prefix[0]]] get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(dim=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()) def grad_clipping(net, theta): if isinstance(net, nn.Module): params = [p for p in net.parameters() if p.requires_grad] else: params = net.params norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], device=device) else: if isinstance(net, nn.Module) and not isinstance(state, tuple): state.detach_() else: for s in state: s.detach_() y = Y.T.reshape(-1) X, y = X.to(device), y.to(device) y_hat, state = net(X, state) l = loss(y_hat, y.long()).mean() if isinstance(updater, torch.optim.Optimizer): updater.zero_grad() l.backward() grad_clipping(net, 1) updater.step() else: l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * y.numel(), y.numel()) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False): loss = nn.CrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, nn.Module): updater = torch.optim.SGD(net.parameters(), lr) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu()) net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
null
null
514
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform') rnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True) state = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32) state.shape X = tf.random.uniform((num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(tf.keras.layers.Layer): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = tf.keras.layers.Dense(vocab_size) def call(self, inputs, state): X = tf.one_hot(tf.transpose(inputs), self.vocab_size) Y, *state = self.rnn(X, state) output = self.dense(tf.reshape(Y, (-1, Y.shape[-1]))) return output, state def begin_state(self, *args, **kwargs): return self.rnn.cell.get_initial_state(*args, **kwargs) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): net = RNNModel(rnn_layer, vocab_size=len(vocab)) d2l.predict_ch8('time traveller', 10, net, vocab) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)
import torch from torch import nn from torch.nn import functional as F from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = nn.RNN(len(vocab), num_hiddens) state = torch.zeros((1, batch_size, num_hiddens)) state.shape X = torch.rand(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, state_new.shape class RNNModel(nn.Module): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.num_hiddens = self.rnn.hidden_size if not self.rnn.bidirectional: self.num_directions = 1 self.linear = nn.Linear(self.num_hiddens, self.vocab_size) else: self.num_directions = 2 self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size) def forward(self, inputs, state): X = F.one_hot(inputs.T.long(), self.vocab_size) X = X.to(torch.float32) Y, state = self.rnn(X, state) output = self.linear(Y.reshape((-1, Y.shape[-1]))) return output, state def begin_state(self, device, batch_size=1): if not isinstance(self.rnn, nn.LSTM): return torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device) else: return (torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device), torch.zeros((self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens), device=device)) device = d2l.try_gpu() net = RNNModel(rnn_layer, vocab_size=len(vocab)) net = net.to(device) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
null
null
515
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) def three(): return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] return params def init_gru_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z) R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r) H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_epochs, lr = 500, 1 with strategy.scope(): model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) gru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform') gru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModel(gru_layer, vocab_size=len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_gru_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = torch.sigmoid((X @ W_xz) + (H @ W_hz) + b_z) R = torch.sigmoid((X @ W_xr) + (H @ W_hr) + b_r) H_tilda = torch.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = H @ W_hq + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H,) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size gru_layer = nn.GRU(num_inputs, num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
null
516
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32)) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] return params def init_lstm_state(batch_size, num_hiddens): return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens))) def lstm(inputs, state, params): W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params (H, C) = state outputs = [] for X in inputs: X=tf.reshape(X,[-1,W_xi.shape[0]]) I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i) F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f) O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o) C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * tf.tanh(C) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,C) vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name num_epochs, lr = 500, 1 strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) lstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform') lstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
import torch from torch import nn from d2l import torch as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return torch.randn(size=shape, device=device)*0.01 def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), torch.zeros(num_hiddens, device=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = torch.zeros(num_outputs, device=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.requires_grad_(True) return params def init_lstm_state(batch_size, num_hiddens, device): return (torch.zeros((batch_size, num_hiddens), device=device), torch.zeros((batch_size, num_hiddens), device=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = torch.sigmoid((X @ W_xi) + (H @ W_hi) + b_i) F = torch.sigmoid((X @ W_xf) + (H @ W_hf) + b_f) O = torch.sigmoid((X @ W_xo) + (H @ W_ho) + b_o) C_tilda = torch.tanh((X @ W_xc) + (H @ W_hc) + b_c) C = F * C + I * C_tilda H = O * torch.tanh(C) Y = (H @ W_hq) + b_q outputs.append(Y) return torch.cat(outputs, dim=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) num_inputs = vocab_size lstm_layer = nn.LSTM(num_inputs, num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) model = model.to(device) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
null
517
import os import tensorflow as tf from d2l import tensorflow as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = tf.reduce_sum( tf.cast(array != vocab['<pad>'], tf.int32), 1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', tf.cast(X, tf.int32)) print('Valid length of X:', X_valid_len) print('Y:', tf.cast(Y, tf.int32)) print('Valid length of Y:', Y_valid_len) break
import os import torch from d2l import torch as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = torch.tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).type(torch.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.type(torch.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.type(torch.int32)) print('Valid length of Y:', Y_valid_len) break
null
null
518
x = tf.range(12) tf.size(x) X = tf.reshape(x, (3, 4)) tf.zeros((2, 3, 4)) tf.ones((2, 3, 4)) tf.random.normal(shape=[3, 4]) tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = tf.constant([1.0, 2, 4, 8]) y = tf.constant([2.0, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y tf.exp(x) X = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4)) Y = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) tf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1) tf.reduce_sum(X) a = tf.reshape(tf.range(3), (3, 1)) b = tf.reshape(tf.range(2), (1, 2)) X_var = tf.Variable(X) X_var[1, 2].assign(9) X_var = tf.Variable(X) X_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12) Z = tf.Variable(tf.zeros_like(Y)) Z.assign(X + Y) @tf.function def computation(X, Y): Z = tf.zeros_like(Y) A = X + Y B = A + Y C = B + Y return C + Y computation(X, Y) A = X.numpy() B = tf.constant(A) a = tf.constant([3.5]).numpy() print(a, a.item(), float(a), int(a))
null
x = np.arange(12) x.size X = x.reshape(3, 4) np.zeros((2, 3, 4)) np.ones((2, 3, 4)) np.random.normal(0, 1, size=(3, 4)) np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) x = np.array([1, 2, 4, 8]) y = np.array([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y np.exp(x) X = np.arange(12).reshape(3, 4) Y = np.array([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) np.concatenate([X, Y], axis=0), np.concatenate([X, Y], axis=1) X.sum() a = np.arange(3).reshape(3, 1) b = np.arange(2).reshape(1, 2) X[1, 2] = 9 X[0:2, :] = 12 Z = np.zeros_like(Y) Z[:] = X + Y before = id(X) X += Y id(X) == before A = X.asnumpy() B = np.array(A) a = np.array([3.5]) print(a, a.item(), float(a), int(a))
null
519
import tensorflow as tf X, y = tf.constant(inputs.values), tf.constant(outputs.values)
null
from mxnet import np X, y = np.array(inputs.values), np.array(outputs.values)
null
520
import tensorflow as tf x = tf.constant(3.0) y = tf.constant(2.0) print(x + y, x * y, x / y, x**y) x = tf.range(4) A = tf.reshape(tf.range(20), (5, 4)) tf.transpose(A) B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == tf.transpose(B) X = tf.reshape(tf.range(24), (2, 3, 4)) A = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4)) B = A print(A, A + B) a = 2 X = tf.reshape(tf.range(24), (2, 3, 4)) print(a + X, (a * X).shape) x = tf.range(4, dtype=tf.float32) print(x, tf.reduce_sum(x)) a = tf.reduce_sum(A) A_sum_axis0 = tf.reduce_sum(A, axis=0) A_sum_axis1 = tf.reduce_sum(A, axis=1) tf.reduce_sum(A, axis=[0, 1]) tf.reduce_mean(A) tf.reduce_sum(A) / tf.size(A).numpy() tf.reduce_mean(A, axis=0) tf.reduce_sum(A, axis=0) / A.shape[0] sum_A = tf.reduce_sum(A, axis=1, keepdims=True) tf.cumsum(A, axis=0) y = tf.ones(4, dtype=tf.float32) print(tf.tensordot(x, y, axes=1)) tf.reduce_sum(x * y) A.shape, x.shape, tf.linalg.matvec(A, x) B = tf.ones((4, 3), tf.float32) tf.matmul(A, B) u = tf.constant([3.0, -4.0]) tf.norm(u) tf.reduce_sum(tf.abs(u)) tf.norm(tf.ones((4, 9)))
null
from mxnet import np, npx npx.set_np() x = np.array(3.0) y = np.array(2.0) print(x + y, x * y, x / y, x**y) x = np.arange(4) A = np.arange(20).reshape(5, 4) A.T B = np.array([[1, 2, 3], [2, 0, 4], [3, 4, 5]]) B == B.T X = np.arange(24).reshape(2, 3, 4) A = np.arange(20).reshape(5, 4) B = A.copy() print(A, A + B) a = 2 X = np.arange(24).reshape(2, 3, 4) print(a + X, (a * X).shape) x = np.arange(4) print(x, x.sum()) a = A.sum() A_sum_axis0 = A.sum(axis=0) A_sum_axis1 = A.sum(axis=1) A.sum(axis=[0, 1]) A.mean() A.sum() / A.size A.mean(axis=0) A.sum(axis=0) / A.shape[0] sum_A = A.sum(axis=1, keepdims=True) A.cumsum(axis=0) y = np.ones(4) print(np.dot(x, y)) np.sum(x * y) A.shape, x.shape, np.dot(A, x) B = np.ones(shape=(4, 3)) np.dot(A, B) u = np.array([3, -4]) np.linalg.norm(u) np.abs(u).sum() np.linalg.norm(np.ones((4, 9)))
null
521
%matplotlib inline import numpy as np from matplotlib_inline import backend_inline from d2l import tensorflow as d2l def f(x): return 3 * x ** 2 - 4 * x
null
%matplotlib inline from matplotlib_inline import backend_inline from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def f(x): return 3 * x ** 2 - 4 * x
null
522
import tensorflow as tf x = tf.range(4, dtype=tf.float32) x = tf.Variable(x) with tf.GradientTape() as t: y = 2 * tf.tensordot(x, x, axes=1) x_grad = t.gradient(y, x) x_grad x_grad == 4 * x with tf.GradientTape() as t: y = tf.reduce_sum(x) t.gradient(y, x) with tf.GradientTape() as t: y = x * x t.gradient(y, x) with tf.GradientTape(persistent=True) as t: y = x * x u = tf.stop_gradient(y) z = u * x x_grad = t.gradient(z, x) x_grad == u t.gradient(y, x) == 2 * x def f(a): b = a * 2 while tf.norm(b) < 1000: b = b * 2 if tf.reduce_sum(b) > 0: c = b else: c = 100 * b return c a = tf.Variable(tf.random.normal(shape=())) with tf.GradientTape() as t: d = f(a) d_grad = t.gradient(d, a) d_grad d_grad == d / a
null
from mxnet import autograd, np, npx npx.set_np() x = np.arange(4.0) x.attach_grad() x.grad with autograd.record(): y = 2 * np.dot(x, x) y.backward() x.grad x.grad == 4 * x with autograd.record(): y = x.sum() y.backward() x.grad with autograd.record(): y = x * x y.backward() x.grad with autograd.record(): y = x * x u = y.detach() z = u * x z.backward() x.grad == u y.backward() x.grad == 2 * x def f(a): b = a * 2 while np.linalg.norm(b) < 1000: b = b * 2 if b.sum() > 0: c = b else: c = 100 * b return c a = np.random.normal() a.attach_grad() with autograd.record(): d = f(a) d.backward() a.grad == d / a
null
523
%matplotlib inline import numpy as np import tensorflow as tf import tensorflow_probability as tfp from d2l import tensorflow as d2l fair_probs = tf.ones(6) / 6 tfp.distributions.Multinomial(1, fair_probs).sample() tfp.distributions.Multinomial(10, fair_probs).sample() counts = tfp.distributions.Multinomial(1000, fair_probs).sample()
null
%matplotlib inline import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() fair_probs = [1.0 / 6] * 6 np.random.multinomial(1, fair_probs) np.random.multinomial(10, fair_probs) counts = np.random.multinomial(1000, fair_probs).astype(np.float32)
null
524
counts = tfp.distributions.Multinomial(10, fair_probs).sample(500) cum_counts = tf.cumsum(counts, axis=0) estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); import tensorflow as tf a = dir(tf.random) help(tf.ones) tf.ones(4)
null
counts = np.random.multinomial(10, fair_probs, size=500) cum_counts = counts.astype(np.float32).cumsum(axis=0) estimates = cum_counts / cum_counts.sum(axis=1, keepdims=True) d2l.set_figsize((6, 4.5)) for i in range(6): d2l.plt.plot(estimates[:, i].asnumpy(), label=("P(die=" + str(i + 1) + ")")) d2l.plt.axhline(y=0.167, color='black', linestyle='dashed') d2l.plt.gca().set_xlabel('Groups of experiments') d2l.plt.gca().set_ylabel('Estimated probability') d2l.plt.legend(); from mxnet import np a = dir(np.random) help(np.ones) np.ones(4)
null
525
%matplotlib inline import math import time import numpy as np import tensorflow as tf from d2l import tensorflow as d2l n = 10000 a = tf.ones(n) b = tf.ones(n) c = tf.Variable(tf.zeros(n)) timer = Timer() for i in range(n): c[i].assign(a[i] + b[i]) x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
null
%matplotlib inline import math import time from mxnet import np from d2l import mxnet as d2l n = 10000 a = np.ones(n) b = np.ones(n) c = np.zeros(n) timer = Timer() for i in range(n): c[i] = a[i] + b[i] x = np.arange(-7, 7, 0.01) params = [(0, 1), (0, 2), (3, 1)] d2l.plot(x.asnumpy(), [normal(x, mu, sigma).asnumpy() for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params])
null
526
%matplotlib inline import random import tensorflow as tf from d2l import tensorflow as d2l def synthetic_data(w, b, num_examples): X = tf.zeros((num_examples, w.shape[0])) X += tf.random.normal(shape=X.shape) y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b y += tf.random.normal(shape=y.shape, stddev=0.01) y = tf.reshape(y, (-1, 1)) return X, y true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): j = tf.constant(indices[i: min(i + batch_size, num_examples)]) yield tf.gather(features, j), tf.gather(labels, j) w = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True) b = tf.Variable(tf.zeros(1), trainable=True) def linreg(X, w, b): return tf.matmul(X, w) + b def squared_loss(y_hat, y): return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2 def sgd(params, grads, lr, batch_size): for param, grad in zip(params, grads): param.assign_sub(lr*grad/batch_size) lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): with tf.GradientTape() as g: l = loss(net(X, w, b), y) dw, db = g.gradient(l, [w, b]) sgd([w, b], [dw, db], lr, batch_size) train_l = loss(net(features, w, b), labels)
null
%matplotlib inline import random from mxnet import autograd, np, npx from d2l import mxnet as d2l npx.set_np() def synthetic_data(w, b, num_examples): X = np.random.normal(0, 1, (num_examples, len(w))) y = np.dot(X, w) + b y += np.random.normal(0, 0.01, y.shape) return X, y.reshape((-1, 1)) true_w = np.array([2, -3.4]) true_b = 4.2 features, labels = synthetic_data(true_w, true_b, 1000) d2l.set_figsize() d2l.plt.scatter(features[:, (1)].asnumpy(), labels.asnumpy(), 1); def data_iter(batch_size, features, labels): num_examples = len(features) indices = list(range(num_examples)) random.shuffle(indices) for i in range(0, num_examples, batch_size): batch_indices = np.array(indices[i: min(i + batch_size, num_examples)]) yield features[batch_indices], labels[batch_indices] w = np.random.normal(0, 0.01, (2, 1)) b = np.zeros(1) w.attach_grad() b.attach_grad() def linreg(X, w, b): return np.dot(X, w) + b def squared_loss(y_hat, y): return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2 def sgd(params, lr, batch_size): for param in params: param[:] = param - lr * param.grad / batch_size lr = 0.03 num_epochs = 3 net = linreg loss = squared_loss for epoch in range(num_epochs): for X, y in data_iter(batch_size, features, labels): with autograd.record(): l = loss(net(X, w, b), y) l.backward() sgd([w, b], lr, batch_size) train_l = loss(net(features, w, b), labels)
null
527
import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w = tf.constant([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = tf.data.Dataset.from_tensor_slices(data_arrays) if is_train: dataset = dataset.shuffle(buffer_size=1000) dataset = dataset.batch(batch_size) return dataset batch_size = 10 data_iter = load_array((features, labels), batch_size) net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1)) initializer = tf.initializers.RandomNormal(stddev=0.01) net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer)) loss = tf.keras.losses.MeanSquaredError() trainer = tf.keras.optimizers.SGD(learning_rate=0.03) w = net.get_weights()[0] b = net.get_weights()[1]
null
from mxnet import autograd, gluon, np, npx from d2l import mxnet as d2l npx.set_np() true_w = np.array([2, -3.4]) true_b = 4.2 features, labels = d2l.synthetic_data(true_w, true_b, 1000) def load_array(data_arrays, batch_size, is_train=True): dataset = gluon.data.ArrayDataset(*data_arrays) return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train) batch_size = 10 data_iter = load_array((features, labels), batch_size) from mxnet.gluon import nn net = nn.Sequential() net.add(nn.Dense(1)) from mxnet import init net.initialize(init.Normal(sigma=0.01)) loss = gluon.loss.L2Loss() from mxnet import gluon trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03}) w = net[0].weight.data() b = net[0].bias.data()
null
528
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l d2l.use_svg_display() mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() len(mnist_train[0]), len(mnist_test[0]) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): ax.imshow(img.numpy()) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X = tf.constant(mnist_train[0][:18]) y = tf.constant(mnist_train[1][:18]) show_images(X, 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 train_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0])) def load_data_fashion_mnist(batch_size, resize=None): mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data() process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32')) resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y) return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn), tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn))
null
%matplotlib inline import sys from mxnet import gluon from d2l import mxnet as d2l d2l.use_svg_display() mnist_train = gluon.data.vision.FashionMNIST(train=True) mnist_test = gluon.data.vision.FashionMNIST(train=False) len(mnist_train), len(mnist_test) def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5): figsize = (num_cols * scale, num_rows * scale) _, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize) axes = axes.flatten() for i, (ax, img) in enumerate(zip(axes, imgs)): ax.imshow(img.asnumpy()) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) if titles: ax.set_title(titles[i]) return axes X, y = mnist_train[:18] show_images(X.squeeze(axis=-1), 2, 9, titles=get_fashion_mnist_labels(y)); batch_size = 256 def get_dataloader_workers(): return 0 if sys.platform.startswith('win') else 4 transformer = gluon.data.vision.transforms.ToTensor() train_iter = gluon.data.DataLoader(mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=get_dataloader_workers()) def load_data_fashion_mnist(batch_size, resize=None): dataset = gluon.data.vision trans = [dataset.transforms.ToTensor()] if resize: trans.insert(0, dataset.transforms.Resize(resize)) trans = dataset.transforms.Compose(trans) mnist_train = dataset.FashionMNIST(train=True).transform_first(trans) mnist_test = dataset.FashionMNIST(train=False).transform_first(trans) return (gluon.data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=get_dataloader_workers()), gluon.data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=get_dataloader_workers()))
null
529
import tensorflow as tf from IPython import display from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01)) b = tf.Variable(tf.zeros(num_outputs)) X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) tf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True) def softmax(X): X_exp = tf.exp(X) partition = tf.reduce_sum(X_exp, 1, keepdims=True) return X_exp / partition X = tf.random.normal((2, 5), 0, 1) X_prob = softmax(X) X_prob, tf.reduce_sum(X_prob, 1) def net(X): return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b) y_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y = tf.constant([0, 2]) tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])) def cross_entropy(y_hat, y): return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = tf.argmax(y_hat, axis=1) cmp = tf.cast(y_hat, y.dtype) == y return float(tf.reduce_sum(tf.cast(cmp, y.dtype))) def evaluate_accuracy(net, data_iter): metric = Accumulator(2) for X, y in data_iter: metric.add(accuracy(net(X), y), d2l.size(y)) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): metric = Accumulator(3) for X, y in train_iter: with tf.GradientTape() as tape: y_hat = net(X) if isinstance(loss, tf.keras.losses.Loss): l = loss(y, y_hat) else: l = loss(y_hat, y) if isinstance(updater, tf.keras.optimizers.Optimizer): params = net.trainable_variables grads = tape.gradient(l, params) updater.apply_gradients(zip(grads, params)) else: updater(X.shape[0], tape.gradient(l, updater.params)) l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l) metric.add(l_sum, accuracy(y_hat, y), tf.size(y)) return metric[0] / metric[2], metric[1] / metric[2] class Updater(): def __init__(self, params, lr): self.params = params self.lr = lr def __call__(self, batch_size, grads): d2l.sgd(self.params, grads, self.lr, batch_size) updater = Updater([W, b], lr=0.1) def predict_ch3(net, test_iter, n=6): for X, y in test_iter: break trues = d2l.get_fashion_mnist_labels(y) preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1)) titles = [true +'\n' + pred for true, pred in zip(trues, preds)] d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n]) predict_ch3(net, test_iter)
null
from IPython import display from mxnet import autograd, gluon, np, npx from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs = 784 num_outputs = 10 W = np.random.normal(0, 0.01, (num_inputs, num_outputs)) b = np.zeros(num_outputs) W.attach_grad() b.attach_grad() X = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) X.sum(0, keepdims=True), X.sum(1, keepdims=True) def softmax(X): X_exp = np.exp(X) partition = X_exp.sum(1, keepdims=True) return X_exp / partition X = np.random.normal(0, 1, (2, 5)) X_prob = softmax(X) X_prob, X_prob.sum(1) def net(X): return softmax(np.dot(X.reshape((-1, W.shape[0])), W) + b) y = np.array([0, 2]) y_hat = np.array([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]]) y_hat[[0, 1], y] def cross_entropy(y_hat, y): return - np.log(y_hat[range(len(y_hat)), y]) cross_entropy(y_hat, y) def accuracy(y_hat, y): if len(y_hat.shape) > 1 and y_hat.shape[1] > 1: y_hat = y_hat.argmax(axis=1) cmp = y_hat.astype(y.dtype) == y return float(cmp.astype(y.dtype).sum()) def evaluate_accuracy(net, data_iter): metric = Accumulator(2) for X, y in data_iter: metric.add(accuracy(net(X), y), d2l.size(y)) return metric[0] / metric[1] def train_epoch_ch3(net, train_iter, loss, updater): metric = Accumulator(3) if isinstance(updater, gluon.Trainer): updater = updater.step for X, y in train_iter: with autograd.record(): y_hat = net(X) l = loss(y_hat, y) l.backward() updater(X.shape[0]) metric.add(float(l.sum()), accuracy(y_hat, y), y.size) return metric[0] / metric[2], metric[1] / metric[2] lr = 0.1 def updater(batch_size): return d2l.sgd([W, b], lr, batch_size) def predict_ch3(net, test_iter, n=6): for X, y in test_iter: break trues = d2l.get_fashion_mnist_labels(y) preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1)) titles = [true +'\n' + pred for true, pred in zip(trues, preds)] d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n]) predict_ch3(net, test_iter)
null
530
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = tf.keras.models.Sequential() net.add(tf.keras.layers.Flatten(input_shape=(28, 28))) weight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01) net.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer)) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf.keras.optimizers.SGD(learning_rate=.1)
null
from mxnet import gluon, init, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = nn.Sequential() net.add(nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
null
531
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32) y = tf.nn.relu(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.relu(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5)) y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5)) y = tf.nn.tanh(x) d2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5)) with tf.GradientTape() as t: y = tf.nn.tanh(x) d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5))
null
%matplotlib inline from mxnet import autograd, np, npx from d2l import mxnet as d2l npx.set_np() x = np.arange(-8.0, 8.0, 0.1) x.attach_grad() with autograd.record(): y = npx.relu(x) d2l.plot(x, y, 'x', 'relu(x)', figsize=(5, 2.5)) y.backward() d2l.plot(x, x.grad, 'x', 'grad of relu', figsize=(5, 2.5)) with autograd.record(): y = npx.sigmoid(x) d2l.plot(x, y, 'x', 'sigmoid(x)', figsize=(5, 2.5)) y.backward() d2l.plot(x, x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5)) with autograd.record(): y = np.tanh(x) d2l.plot(x, y, 'x', 'tanh(x)', figsize=(5, 2.5)) y.backward() d2l.plot(x, x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
null
532
import tensorflow as tf from d2l import tensorflow as d2l batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01)) b1 = tf.Variable(tf.zeros(num_hiddens)) W2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01)) b2 = tf.Variable(tf.zeros(num_outputs)) params = [W1, b1, W2, b2] def relu(X): return tf.math.maximum(X, 0) def net(X): X = tf.reshape(X, (-1, num_inputs)) H = relu(tf.matmul(X, W1) + b1) return tf.matmul(H, W2) + b2 def loss(y_hat, y): return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True) num_epochs, lr = 10, 0.1 updater = d2l.Updater([W1, W2, b1, b2], lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
null
from mxnet import gluon, np, npx from d2l import mxnet as d2l npx.set_np() batch_size = 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) num_inputs, num_outputs, num_hiddens = 784, 10, 256 W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens)) b1 = np.zeros(num_hiddens) W2 = np.random.normal(scale=0.01, size=(num_hiddens, num_outputs)) b2 = np.zeros(num_outputs) params = [W1, b1, W2, b2] for param in params: param.attach_grad() def relu(X): return np.maximum(X, 0) def net(X): X = X.reshape((-1, num_inputs)) H = relu(np.dot(X, W1) + b1) return np.dot(H, W2) + b2 loss = gluon.loss.SoftmaxCrossEntropyLoss() num_epochs, lr = 10, 0.1 d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size))
null
533
import tensorflow as tf from d2l import tensorflow as d2l net = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)]) batch_size, lr, num_epochs = 256, 0.1, 10 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) trainer = tf.keras.optimizers.SGD(learning_rate=lr) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
from mxnet import gluon, init, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add(nn.Dense(256, activation='relu'), nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) batch_size, lr, num_epochs = 256, 0.1, 10 loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
534
import math import numpy as np import tensorflow as tf from d2l import tensorflow as d2l true_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]] features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: l = loss(net(X), y) metric.add(tf.reduce_sum(l), d2l.size(l)) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = tf.losses.MeanSquaredError() input_shape = train_features.shape[-1] net = tf.keras.Sequential() net.add(tf.keras.layers.Dense(1, use_bias=False)) batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels), batch_size) test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False) trainer = tf.keras.optimizers.SGD(learning_rate=.01) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
null
import math from mxnet import gluon, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() features[:2], poly_features[:2, :], labels[:2] def evaluate_loss(net, data_iter, loss): metric = d2l.Accumulator(2) for X, y in data_iter: l = loss(net(X), y) metric.add(l.sum(), d2l.size(l)) return metric[0] / metric[1] def train(train_features, test_features, train_labels, test_labels, num_epochs=400): loss = gluon.loss.L2Loss() net = nn.Sequential() net.add(nn.Dense(1, use_bias=False)) net.initialize() batch_size = min(10, train_labels.shape[0]) train_iter = d2l.load_array((train_features, train_labels), batch_size) test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.01}) animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test']) for epoch in range(num_epochs): d2l.train_epoch_ch3(net, train_iter, loss, trainer) if epoch == 0 or (epoch + 1) % 20 == 0: animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
null
535
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1))) b = tf.Variable(tf.zeros(shape=(1, ))) return [w, b] def l2_penalty(w): return tf.reduce_sum(tf.pow(w, 2)) / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: l = loss(net(X), y) + lambd * l2_penalty(w) grads = tape.gradient(l, [w, b]) d2l.sgd([w, b], grads, lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = tf.keras.models.Sequential() net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd))) net.build(input_shape=(1, num_inputs)) w, b = net.trainable_variables loss = tf.keras.losses.MeanSquaredError() num_epochs, lr = 100, 0.003 trainer = tf.keras.optimizers.SGD(learning_rate=lr) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: l = loss(net(X), y) + net.losses grads = tape.gradient(l, net.trainable_variables) trainer.apply_gradients(zip(grads, net.trainable_variables)) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
null
%matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5 true_w, true_b = np.ones((num_inputs, 1)) * 0.01, 0.05 train_data = d2l.synthetic_data(true_w, true_b, n_train) train_iter = d2l.load_array(train_data, batch_size) test_data = d2l.synthetic_data(true_w, true_b, n_test) test_iter = d2l.load_array(test_data, batch_size, is_train=False) def init_params(): w = np.random.normal(scale=1, size=(num_inputs, 1)) b = np.zeros(1) w.attach_grad() b.attach_grad() return [w, b] def l2_penalty(w): return (w**2).sum() / 2 def train(lambd): w, b = init_params() net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss num_epochs, lr = 100, 0.003 animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) + lambd * l2_penalty(w) l.backward() d2l.sgd([w, b], lr, batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) def train_concise(wd): net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(init.Normal(sigma=1)) loss = gluon.loss.L2Loss() num_epochs, lr = 100, 0.003 trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr, 'wd': wd}) net.collect_params('.*bias').setattr('wd_mult', 0) animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test']) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) if (epoch + 1) % 5 == 0: animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
null
536
import tensorflow as tf from d2l import tensorflow as d2l def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return tf.zeros_like(X) if dropout == 0: return X mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout) X = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8)) num_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256 dropout1, dropout2 = 0.2, 0.5 class Net(tf.keras.Model): def __init__(self, num_outputs, num_hiddens1, num_hiddens2): super().__init__() self.input_layer = tf.keras.layers.Flatten() self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu') self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu') self.output_layer = tf.keras.layers.Dense(num_outputs) def call(self, inputs, training=None): x = self.input_layer(inputs) x = self.hidden1(x) if training: x = dropout_layer(x, dropout1) x = self.hidden2(x) if training: x = dropout_layer(x, dropout2) x = self.output_layer(x) return x net = Net(num_outputs, num_hiddens1, num_hiddens2) num_epochs, lr, batch_size = 10, 0.5, 256 loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(dropout1), tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dropout(dropout2), tf.keras.layers.Dense(10), ]) trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def dropout_layer(X, dropout): assert 0 <= dropout <= 1 if dropout == 1: return np.zeros_like(X) if dropout == 0: return X mask = np.random.uniform(0, 1, X.shape) > dropout return mask.astype(np.float32) * X / (1.0 - dropout) X = np.arange(16).reshape(2, 8) num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256 W1 = np.random.normal(scale=0.01, size=(num_inputs, num_hiddens1)) b1 = np.zeros(num_hiddens1) W2 = np.random.normal(scale=0.01, size=(num_hiddens1, num_hiddens2)) b2 = np.zeros(num_hiddens2) W3 = np.random.normal(scale=0.01, size=(num_hiddens2, num_outputs)) b3 = np.zeros(num_outputs) params = [W1, b1, W2, b2, W3, b3] for param in params: param.attach_grad() dropout1, dropout2 = 0.2, 0.5 def net(X): X = X.reshape(-1, num_inputs) H1 = npx.relu(np.dot(X, W1) + b1) if autograd.is_training(): H1 = dropout_layer(H1, dropout1) H2 = npx.relu(np.dot(H1, W2) + b2) if autograd.is_training(): H2 = dropout_layer(H2, dropout2) return np.dot(H2, W3) + b3 num_epochs, lr, batch_size = 10, 0.5, 256 loss = gluon.loss.SoftmaxCrossEntropyLoss() train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, lambda batch_size: d2l.sgd(params, lr, batch_size)) net = nn.Sequential() net.add(nn.Dense(256, activation="relu"), nn.Dropout(dropout1), nn.Dense(256, activation="relu"), nn.Dropout(dropout2), nn.Dense(10)) net.initialize(init.Normal(sigma=0.01)) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
null
537
trainer = tf.keras.optimizers.SGD(learning_rate=lr) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l x = tf.Variable(tf.range(-8.0, 8.0, 0.1)) with tf.GradientTape() as t: y = tf.nn.sigmoid(x) d2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = tf.random.normal((4, 4)) for i in range(100): M = tf.matmul(M, tf.random.normal((4, 4)))
null
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) %matplotlib inline from mxnet import autograd, np, npx from d2l import mxnet as d2l npx.set_np() x = np.arange(-8.0, 8.0, 0.1) x.attach_grad() with autograd.record(): y = npx.sigmoid(x) y.backward() d2l.plot(x, [y, x.grad], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5)) M = np.random.normal(size=(4, 4)) for i in range(100): M = np.dot(M, np.random.normal(size=(4, 4)))
null
538
%matplotlib inline import numpy as np import pandas as pd import tensorflow as tf from d2l import tensorflow as d2l n_train = train_data.shape[0] train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32) test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32) train_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32) loss = tf.keras.losses.MeanSquaredError() def get_net(): net = tf.keras.models.Sequential() net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay))) return net def log_rmse(y_true, y_pred): clipped_preds = tf.clip_by_value(y_pred, 1, float('inf')) return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds)))) def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) optimizer = tf.keras.optimizers.Adam(learning_rate) net.compile(loss=loss, optimizer=optimizer) for epoch in range(num_epochs): for X, y in train_iter: with tf.GradientTape() as tape: y_hat = net(X) l = loss(y, y_hat) params = net.trainable_variables grads = tape.gradient(l, params) optimizer.apply_gradients(zip(grads, params)) train_ls.append(log_rmse(train_labels, net(train_features))) if test_labels is not None: test_ls.append(log_rmse(test_labels, net(test_features))) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = tf.concat([X_train, X_part], 0) y_train = tf.concat([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).numpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
null
%matplotlib inline import pandas as pd from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() n_train = train_data.shape[0] train_features = np.array(all_features[:n_train].values, dtype=np.float32) test_features = np.array(all_features[n_train:].values, dtype=np.float32) train_labels = np.array(train_data.SalePrice.values.reshape(-1, 1), dtype=np.float32) loss = gluon.loss.L2Loss() def get_net(): net = nn.Sequential() net.add(nn.Dense(1)) net.initialize() return net def log_rmse(net, features, labels): clipped_preds = np.clip(net(features), 1, float('inf')) return np.sqrt(2 * loss(np.log(clipped_preds), np.log(labels)).mean()) def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size): train_ls, test_ls = [], [] train_iter = d2l.load_array((train_features, train_labels), batch_size) trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': learning_rate, 'wd': weight_decay}) for epoch in range(num_epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) train_ls.append(log_rmse(net, train_features, train_labels)) if test_labels is not None: test_ls.append(log_rmse(net, test_features, test_labels)) return train_ls, test_ls def get_k_fold_data(k, i, X, y): assert k > 1 fold_size = X.shape[0] // k X_train, y_train = None, None for j in range(k): idx = slice(j * fold_size, (j + 1) * fold_size) X_part, y_part = X[idx, :], y[idx] if j == i: X_valid, y_valid = X_part, y_part elif X_train is None: X_train, y_train = X_part, y_part else: X_train = np.concatenate([X_train, X_part], 0) y_train = np.concatenate([y_train, y_part], 0) return X_train, y_train, X_valid, y_valid def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size): net = get_net() train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size) d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log') preds = net(test_features).asnumpy() test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0]) submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1) submission.to_csv('submission.csv', index=False)
null
539
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Dense(256, activation=tf.nn.relu), tf.keras.layers.Dense(10)]) X = tf.random.uniform((2, 20)) net(X) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu) self.out = tf.keras.layers.Dense(units=10) def call(self, X): return self.out(self.hidden((X))) net = MLP() net(X) class MySequential(tf.keras.Model): def __init__(self, *args): super().__init__() self.modules = [] for block in args: self.modules.append(block) def call(self, X): for module in self.modules: X = module(X) return X net = MySequential( tf.keras.layers.Dense(units=256, activation=tf.nn.relu), tf.keras.layers.Dense(10)) net(X) class FixedHiddenMLP(tf.keras.Model): def __init__(self): super().__init__() self.flatten = tf.keras.layers.Flatten() self.rand_weight = tf.constant(tf.random.uniform((20, 20))) self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu) def call(self, inputs): X = self.flatten(inputs) X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1) X = self.dense(X) while tf.reduce_sum(tf.math.abs(X)) > 1: X /= 2 return tf.reduce_sum(X) net = FixedHiddenMLP() net(X) class NestMLP(tf.keras.Model): def __init__(self): super().__init__() self.net = tf.keras.Sequential() self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu)) self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu)) self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu) def call(self, inputs): return self.dense(self.net(inputs)) chimera = tf.keras.Sequential() chimera.add(NestMLP()) chimera.add(tf.keras.layers.Dense(20)) chimera.add(FixedHiddenMLP()) chimera(X)
null
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() net = nn.Sequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) net.initialize() X = np.random.uniform(size=(2, 20)) net(X) class MLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.hidden = nn.Dense(256, activation='relu') self.out = nn.Dense(10) def forward(self, X): return self.out(self.hidden(X)) net = MLP() net.initialize() net(X) class MySequential(nn.Block): def add(self, block): self._children[block.name] = block def forward(self, X): for block in self._children.values(): X = block(X) return X net = MySequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) net.initialize() net(X) class FixedHiddenMLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.rand_weight = self.params.get_constant('rand_weight', np.random.uniform(size=(20, 20))) self.dense = nn.Dense(20, activation='relu') def forward(self, X): X = self.dense(X) X = npx.relu(np.dot(X, self.rand_weight.data()) + 1) X = self.dense(X) while np.abs(X).sum() > 1: X /= 2 return X.sum() net = FixedHiddenMLP() net.initialize() net(X) class NestMLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu')) self.dense = nn.Dense(16, activation='relu') def forward(self, X): return self.dense(self.net(X)) chimera = nn.Sequential() chimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP()) chimera.initialize() chimera(X)
null
540
import tensorflow as tf net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu), tf.keras.layers.Dense(1), ]) X = tf.random.uniform((2, 4)) net(X) net.get_weights()[1] def block1(name): return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name) def block2(): net = tf.keras.Sequential() for i in range(4): net.add(block1(name=f'block-{i}')) return net rgnet = tf.keras.Sequential() rgnet.add(block2()) rgnet.add(tf.keras.layers.Dense(1)) rgnet(X) net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1)]) net(X) net.weights[0], net.weights[1] net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()), tf.keras.layers.Dense(1), ]) net(X) net.weights[0], net.weights[1] net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()), tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)), ]) net(X) class MyInit(tf.keras.initializers.Initializer): def __call__(self, shape, dtype=None): data=tf.random.uniform(shape, -10, 10, dtype=dtype) factor=(tf.abs(data) >= 5) factor=tf.cast(factor, tf.float32) return data * factor net = tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()), tf.keras.layers.Dense(1)]) net(X) net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1) net.layers[1].weights[0][0, 0].assign(42) net.layers[1].weights[0] layer = CenteredLayer() layer(tf.constant([1, 2, 3, 4, 5])) net = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()])
null
from mxnet import init, np, npx from mxnet.gluon import nn npx.set_np() net = nn.Sequential() net.add(nn.Dense(8, activation='relu')) net.add(nn.Dense(1)) net.initialize() X = np.random.uniform(size=(2, 4)) net(X) net.collect_params()['dense1_bias'].data() def block1(): net = nn.Sequential() net.add(nn.Dense(32, activation='relu')) net.add(nn.Dense(16, activation='relu')) return net def block2(): net = nn.Sequential() for _ in range(4): net.add(block1()) return net rgnet = nn.Sequential() rgnet.add(block2()) rgnet.add(nn.Dense(10)) rgnet.initialize() rgnet(X) net.initialize(init=init.Normal(sigma=0.01), force_reinit=True) net[0].weight.data()[0] net.initialize(init=init.Constant(1), force_reinit=True) net[0].weight.data()[0] net[0].weight.initialize(init=init.Xavier(), force_reinit=True) net[1].initialize(init=init.Constant(42), force_reinit=True) class MyInit(init.Initializer): def _init_weight(self, name, data): data[:] = np.random.uniform(-10, 10, data.shape) data *= np.abs(data) >= 5 net.initialize(MyInit(), force_reinit=True) net[0].weight.data()[:2] net[0].weight.data()[:] += 1 net[0].weight.data()[0, 0] = 42 net[0].weight.data()[0] layer = CenteredLayer() layer(np.array([1, 2, 3, 4, 5])) net = nn.Sequential() net.add(nn.Dense(128), CenteredLayer()) net.initialize()
null
541
import tensorflow as tf class CenteredLayer(tf.keras.Model): def __init__(self): super().__init__() def call(self, inputs): return inputs - tf.reduce_mean(inputs) Y = net(tf.random.uniform((4, 8))) tf.reduce_mean(Y) class MyDense(tf.keras.Model): def __init__(self, units): super().__init__() self.units = units def build(self, X_shape): self.weight = self.add_weight(name='weight', shape=[X_shape[-1], self.units], initializer=tf.random_normal_initializer()) self.bias = self.add_weight( name='bias', shape=[self.units], initializer=tf.zeros_initializer()) def call(self, X): linear = tf.matmul(X, self.weight) + self.bias return tf.nn.relu(linear) dense = MyDense(3) dense(tf.random.uniform((2, 5))) dense.get_weights() dense(tf.random.uniform((2, 5))) net = tf.keras.models.Sequential([MyDense(8), MyDense(1)]) net(tf.random.uniform((2, 64)))
null
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() class CenteredLayer(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) def forward(self, X): return X - X.mean() Y = net(np.random.uniform(size=(4, 8))) Y.mean() class MyDense(nn.Block): def __init__(self, units, in_units, **kwargs): super().__init__(**kwargs) self.weight = self.params.get('weight', shape=(in_units, units)) self.bias = self.params.get('bias', shape=(units,)) def forward(self, x): linear = np.dot(x, self.weight.data(ctx=x.ctx)) + self.bias.data( ctx=x.ctx) return npx.relu(linear) dense = MyDense(units=3, in_units=5) dense.params dense.initialize() dense(np.random.uniform(size=(2, 5))) net = nn.Sequential() net.add(MyDense(8, in_units=64), MyDense(1, in_units=8)) net.initialize() net(np.random.uniform(size=(2, 64)))
null
542
import numpy as np import tensorflow as tf x = tf.range(4) np.save('x-file.npy', x) x2 = np.load('x-file.npy', allow_pickle=True) y = tf.zeros(4) np.save('xy-files.npy', [x, y]) x2, y2 = np.load('xy-files.npy', allow_pickle=True) mydict = {'x': x, 'y': y} np.save('mydict.npy', mydict) mydict2 = np.load('mydict.npy', allow_pickle=True) class MLP(tf.keras.Model): def __init__(self): super().__init__() self.flatten = tf.keras.layers.Flatten() self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu) self.out = tf.keras.layers.Dense(units=10) def call(self, inputs): x = self.flatten(inputs) x = self.hidden(x) return self.out(x) net = MLP() X = tf.random.uniform((2, 20)) Y = net(X) net.save_weights('mlp.params') clone = MLP() clone.load_weights('mlp.params')
null
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() x = np.arange(4) npx.save('x-file', x) x2 = npx.load('x-file') y = np.zeros(4) npx.save('x-files', [x, y]) x2, y2 = npx.load('x-files') mydict = {'x': x, 'y': y} npx.save('mydict', mydict) mydict2 = npx.load('mydict') class MLP(nn.Block): def __init__(self, **kwargs): super(MLP, self).__init__(**kwargs) self.hidden = nn.Dense(256, activation='relu') self.output = nn.Dense(10) def forward(self, x): return self.output(self.hidden(x)) net = MLP() net.initialize() X = np.random.uniform(size=(2, 20)) Y = net(X) net.save_parameters('mlp.params') clone = MLP() clone.load_parameters('mlp.params')
null
543
import tensorflow as tf tf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1') len(tf.config.experimental.list_physical_devices('GPU')) def try_gpu(i=0): if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1: return tf.device(f'/GPU:{i}') return tf.device('/CPU:0') def try_all_gpus(): num_gpus = len(tf.config.experimental.list_physical_devices('GPU')) devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)] return devices if devices else [tf.device('/CPU:0')] try_gpu(), try_gpu(10), try_all_gpus() x = tf.constant([1, 2, 3]) x.device with try_gpu(): X = tf.ones((2, 3)) with try_gpu(1): Y = tf.random.uniform((2, 3)) with try_gpu(1): Z = X with try_gpu(1): Z2 = Z Z2 is Z strategy = tf.distribute.MirroredStrategy() with strategy.scope(): net = tf.keras.models.Sequential([ tf.keras.layers.Dense(1)]) net.layers[0].weights[0].device, net.layers[0].weights[1].device
null
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() npx.cpu(), npx.gpu(), npx.gpu(1) npx.num_gpus() def try_gpu(i=0): return npx.gpu(i) if npx.num_gpus() >= i + 1 else npx.cpu() def try_all_gpus(): devices = [npx.gpu(i) for i in range(npx.num_gpus())] return devices if devices else [npx.cpu()] try_gpu(), try_gpu(10), try_all_gpus() x = np.array([1, 2, 3]) x.ctx X = np.ones((2, 3), ctx=try_gpu()) Y = np.random.uniform(size=(2, 3), ctx=try_gpu(1)) Z = X.copyto(try_gpu(1)) Z.as_in_ctx(try_gpu(1)) is Z net = nn.Sequential() net.add(nn.Dense(1)) net.initialize(ctx=try_gpu()) net[0].weight.data().ctx
null
544
import tensorflow as tf from d2l import tensorflow as d2l def corr2d(X, K): h, w = K.shape Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j].assign(tf.reduce_sum( X[i: i + h, j: j + w] * K)) return Y X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = tf.constant([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(tf.keras.layers.Layer): def __init__(self): super().__init__() def build(self, kernel_size): initializer = tf.random_normal_initializer() self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer) self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer) def call(self, inputs): return corr2d(inputs, self.weight) + self.bias X = tf.Variable(tf.ones((6, 8))) X[:, 2:6].assign(tf.zeros(X[:, 2:6].shape)) K = tf.constant([[1.0, -1.0]]) corr2d(tf.transpose(X), K) conv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False) X = tf.reshape(X, (1, 6, 8, 1)) Y = tf.reshape(Y, (1, 6, 7, 1)) lr = 3e-2 Y_hat = conv2d(X) for i in range(10): with tf.GradientTape(watch_accessed_variables=False) as g: g.watch(conv2d.weights[0]) Y_hat = conv2d(X) l = (abs(Y_hat - Y)) ** 2 update = tf.multiply(lr, g.gradient(l, conv2d.weights[0])) weights = conv2d.get_weights() weights[0] = conv2d.weights[0] - update conv2d.set_weights(weights) tf.reshape(conv2d.get_weights()[0], (1, 2))
null
from mxnet import autograd, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def corr2d(X, K): h, w = K.shape Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): Y[i, j] = (X[i:i + h, j:j + w] * K).sum() return Y X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) K = np.array([[0.0, 1.0], [2.0, 3.0]]) corr2d(X, K) class Conv2D(nn.Block): def __init__(self, kernel_size, **kwargs): super().__init__(**kwargs) self.weight = self.params.get('weight', shape=kernel_size) self.bias = self.params.get('bias', shape=(1,)) def forward(self, x): return corr2d(x, self.weight.data()) + self.bias.data() X = np.ones((6, 8)) X[:, 2:6] = 0 K = np.array([[1.0, -1.0]]) corr2d(d2l.transpose(X), K) conv2d = nn.Conv2D(1, kernel_size=(1, 2), use_bias=False) conv2d.initialize() X = X.reshape(1, 1, 6, 8) Y = Y.reshape(1, 1, 6, 7) lr = 3e-2 for i in range(10): with autograd.record(): Y_hat = conv2d(X) l = (Y_hat - Y) ** 2 l.backward() conv2d.weight.data()[:] -= lr * conv2d.weight.grad() conv2d.weight.data().reshape((1, 2))
null
545
import tensorflow as tf def comp_conv2d(conv2d, X): X = tf.reshape(X, (1, ) + X.shape + (1, )) Y = conv2d(X) return tf.reshape(Y, Y.shape[1:3]) conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same') X = tf.random.uniform(shape=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same') comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2) comp_conv2d(conv2d, X).shape conv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4)) comp_conv2d(conv2d, X).shape
null
from mxnet import np, npx from mxnet.gluon import nn npx.set_np() def comp_conv2d(conv2d, X): conv2d.initialize() X = X.reshape((1, 1) + X.shape) Y = conv2d(X) return Y.reshape(Y.shape[2:]) conv2d = nn.Conv2D(1, kernel_size=3, padding=1) X = np.random.uniform(size=(8, 8)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=(5, 3), padding=(2, 1)) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=3, padding=1, strides=2) comp_conv2d(conv2d, X).shape conv2d = nn.Conv2D(1, kernel_size=(3, 5), padding=(0, 1), strides=(3, 4)) comp_conv2d(conv2d, X).shape
null
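The shapes printed by the cells above follow the usual rule output = floor((n - k + p + s) / s) per dimension, where p is the total padding on both sides. A small sketch (conv_out_shape is a hypothetical helper, not from the cells) reproduces them for the 8x8 input:

from math import floor

def conv_out_shape(n, k, p, s):
    # n: input size, k: kernel size, p: total padding (both sides), s: stride
    return floor((n - k + p + s) / s)

print(conv_out_shape(8, 3, 2, 1))   # 8 -> kernel 3, padding 1 per side, stride 1
print(conv_out_shape(8, 3, 2, 2))   # 4 -> kernel 3, padding 1 per side, stride 2
print(conv_out_shape(8, 3, 0, 3),
      conv_out_shape(8, 5, 2, 4))   # 2 2 -> kernel (3, 5), padding (0, 1), strides (3, 4)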
546
import tensorflow as tf from d2l import tensorflow as d2l def corr2d_multi_in(X, K): return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0) X = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return tf.stack([corr2d_multi_in(X, k) for k in K], 0) K = tf.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = tf.reshape(X, (c_i, h * w)) K = tf.reshape(K, (c_o, c_i)) Y = tf.matmul(K, X) return tf.reshape(Y, (c_o, h, w)) X = tf.random.normal((3, 3, 3), 0, 1) K = tf.random.normal((2, 3, 1, 1), 0, 1) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6
null
from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def corr2d_multi_in(X, K): return sum(d2l.corr2d(x, k) for x, k in zip(X, K)) X = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]) K = np.array([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]]) corr2d_multi_in(X, K) def corr2d_multi_in_out(X, K): return np.stack([corr2d_multi_in(X, k) for k in K], 0) K = np.stack((K, K + 1, K + 2), 0) K.shape def corr2d_multi_in_out_1x1(X, K): c_i, h, w = X.shape c_o = K.shape[0] X = X.reshape((c_i, h * w)) K = K.reshape((c_o, c_i)) Y = np.dot(K, X) return Y.reshape((c_o, h, w)) X = np.random.normal(0, 1, (3, 3, 3)) K = np.random.normal(0, 1, (2, 3, 1, 1)) Y1 = corr2d_multi_in_out_1x1(X, K) Y2 = corr2d_multi_in_out(X, K) assert float(np.abs(Y1 - Y2).sum()) < 1e-6
null
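corr2d_multi_in cross-correlates each input channel with its own kernel and sums over channels; with the two-channel example above the result is [[56., 72.], [104., 120.]]. A plain-NumPy sketch of that sum (corr2d_np is a hypothetical helper, not from the cells):

import numpy as np

def corr2d_np(X, K):
    h, w = K.shape
    Y = np.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
    return Y

# One 2D cross-correlation per channel, then a sum over channels.
X = np.stack([np.arange(9.).reshape(3, 3), np.arange(1., 10.).reshape(3, 3)])
K = np.stack([np.array([[0., 1.], [2., 3.]]), np.array([[1., 2.], [3., 4.]])])
print(sum(corr2d_np(x, k) for x, k in zip(X, K)))  # [[ 56.  72.] [104. 120.]]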
547
import tensorflow as tf def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1))) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w])) elif mode =='avg': Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w])) return Y X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1)) pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3]) pool2d(X) paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid', strides=2) pool2d(X_padded) paddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid', strides=(2, 3)) pool2d(X_padded) X = tf.concat([X, X + 1], 3) paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]]) X_padded = tf.pad(X, paddings, "CONSTANT") pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid', strides=2) pool2d(X_padded)
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def pool2d(X, pool_size, mode='max'): p_h, p_w = pool_size Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1)) for i in range(Y.shape[0]): for j in range(Y.shape[1]): if mode == 'max': Y[i, j] = X[i: i + p_h, j: j + p_w].max() elif mode == 'avg': Y[i, j] = X[i: i + p_h, j: j + p_w].mean() return Y X = np.array([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]) pool2d(X, (2, 2)) X = np.arange(16, dtype=np.float32).reshape((1, 1, 4, 4)) pool2d = nn.MaxPool2D(3) pool2d(X) pool2d = nn.MaxPool2D(3, padding=1, strides=2) pool2d(X) pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), strides=(2, 3)) pool2d(X) X = np.concatenate((X, X + 1), 1) pool2d = nn.MaxPool2D(3, padding=1, strides=2) pool2d(X)
null
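Both pool2d implementations slide a window and take its maximum or mean; on the 3x3 example a 2x2 max pool gives [[4., 5.], [7., 8.]] and an average pool gives [[2., 3.], [5., 6.]]. A NumPy spot check (pool2d_np is a hypothetical helper name):

import numpy as np

def pool2d_np(X, pool_size, mode='max'):
    p_h, p_w = pool_size
    Y = np.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
    for i in range(Y.shape[0]):
        for j in range(Y.shape[1]):
            window = X[i:i + p_h, j:j + p_w]
            Y[i, j] = window.max() if mode == 'max' else window.mean()
    return Y

X = np.arange(9, dtype=np.float32).reshape(3, 3)
print(pool2d_np(X, (2, 2)))         # [[4. 5.] [7. 8.]]
print(pool2d_np(X, (2, 2), 'avg'))  # [[2. 3.] [5. 6.]]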
548
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120, activation='sigmoid'), tf.keras.layers.Dense(84, activation='sigmoid'), tf.keras.layers.Dense(10)]) X = tf.random.uniform((1, 28, 28, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape) class TrainCallback(tf.keras.callbacks.Callback): def __init__(self, net, train_iter, test_iter, num_epochs, device_name): self.timer = d2l.Timer() self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) self.net = net self.train_iter = train_iter self.test_iter = test_iter self.num_epochs = num_epochs self.device_name = device_name def on_epoch_begin(self, epoch, logs=None): self.timer.start() def on_epoch_end(self, epoch, logs): self.timer.stop() test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy'] metrics = (logs['loss'], logs['accuracy'], test_acc) self.animator.add(epoch + 1, metrics) if epoch == self.num_epochs - 1: batch_size = next(iter(self.train_iter))[0].shape[0] num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy() def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device): device_name = device._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): optimizer = tf.keras.optimizers.SGD(learning_rate=lr) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) net = net_fn() net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy']) callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name) net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback]) return net
null
from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120, activation='sigmoid'), nn.Dense(84, activation='sigmoid'), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 28, 28)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape) def train_ch6(net, train_iter, test_iter, num_epochs, lr, device): net.initialize(force_reinit=True, ctx=device, init=init.Xavier()) loss = gluon.loss.SoftmaxCrossEntropyLoss() trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc']) timer, num_batches = d2l.Timer(), len(train_iter) for epoch in range(num_epochs): metric = d2l.Accumulator(3) for i, (X, y) in enumerate(train_iter): timer.start() X, y = X.as_in_ctx(device), y.as_in_ctx(device) with autograd.record(): y_hat = net(X) l = loss(y_hat, y) l.backward() trainer.step(X.shape[0]) metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0]) timer.stop() train_l = metric[0] / metric[2] train_acc = metric[1] / metric[2] if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1: animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None)) test_acc = evaluate_accuracy_gpu(net, test_iter) animator.add(epoch + 1, (None, None, test_acc))
null
549
import tensorflow as tf from d2l import tensorflow as d2l def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10)]) X = tf.random.uniform((1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__, 'output shape: ', X.shape)
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() net = nn.Sequential() net.add( nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape)
null
550
import tensorflow as tf from d2l import tensorflow as d2l def vgg_block(num_convs, num_channels): blk = tf.keras.models.Sequential() for _ in range(num_convs): blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu')) blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2)) return blk def vgg(conv_arch): net = tf.keras.models.Sequential() for (num_convs, num_channels) in conv_arch: net.add(vgg_block(num_convs, num_channels)) net.add(tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(4096, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(10)])) return net conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512)) net = vgg(conv_arch) X = tf.random.uniform((1, 224, 224, 1)) for blk in net.layers: X = blk(X) print(blk.__class__.__name__,'output shape: ', X.shape) ratio = 4 small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch] net = lambda: vgg(small_conv_arch)
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def vgg_block(num_convs, num_channels): blk = nn.Sequential() for _ in range(num_convs): blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu')) blk.add(nn.MaxPool2D(pool_size=2, strides=2)) return blk def vgg(conv_arch): net = nn.Sequential() for (num_convs, num_channels) in conv_arch: net.add(vgg_block(num_convs, num_channels)) net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(4096, activation='relu'), nn.Dropout(0.5), nn.Dense(10)) return net conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512)) net = vgg(conv_arch) net.initialize() X = np.random.uniform(size=(1, 1, 224, 224)) for blk in net: X = blk(X) print(blk.name, 'output shape: ', X.shape) ratio = 4 small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch] net = vgg(small_conv_arch)
null
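Both VGG cells shrink the network by dividing every channel count in conv_arch by ratio = 4; the resulting small_conv_arch can be checked directly:

conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]
print(small_conv_arch)  # [(1, 16), (1, 32), (2, 64), (2, 128), (2, 128)]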
551
import tensorflow as tf from d2l import tensorflow as d2l def nin_block(num_channels, kernel_size, strides, padding): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'), tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'), tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')]) def net(): return tf.keras.models.Sequential([ nin_block(96, kernel_size=11, strides=4, padding='valid'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), nin_block(256, kernel_size=5, strides=1, padding='same'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), nin_block(384, kernel_size=3, strides=1, padding='same'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2), tf.keras.layers.Dropout(0.5), nin_block(10, kernel_size=3, strides=1, padding='same'), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Reshape((1, 1, 10)), tf.keras.layers.Flatten(), ]) X = tf.random.uniform((1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def nin_block(num_channels, kernel_size, strides, padding): blk = nn.Sequential() blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'), nn.Conv2D(num_channels, kernel_size=1, activation='relu'), nn.Conv2D(num_channels, kernel_size=1, activation='relu')) return blk net = nn.Sequential() net.add(nin_block(96, kernel_size=11, strides=4, padding=0), nn.MaxPool2D(pool_size=3, strides=2), nin_block(256, kernel_size=5, strides=1, padding=2), nn.MaxPool2D(pool_size=3, strides=2), nin_block(384, kernel_size=3, strides=1, padding=1), nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5), nin_block(10, kernel_size=3, strides=1, padding=1), nn.GlobalAvgPool2D(), nn.Flatten()) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.name, 'output shape: ', X.shape)
null
552
import tensorflow as tf from d2l import tensorflow as d2l class Inception(tf.keras.Model): def __init__(self, c1, c2, c3, c4): super().__init__() self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu') self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu') self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu') self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu') self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu') self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same') self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu') def call(self, x): p1 = self.p1_1(x) p2 = self.p2_2(self.p2_1(x)) p3 = self.p3_2(self.p3_1(x)) p4 = self.p4_2(self.p4_1(x)) return tf.keras.layers.Concatenate()([p1, p2, p3, p4]) def b1(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b2(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, 1, activation='relu'), tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b3(): return tf.keras.models.Sequential([ Inception(64, (96, 128), (16, 32), 32), Inception(128, (128, 192), (32, 96), 64), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b4(): return tf.keras.Sequential([ Inception(192, (96, 208), (16, 48), 64), Inception(160, (112, 224), (24, 64), 64), Inception(128, (128, 256), (24, 64), 64), Inception(112, (144, 288), (32, 64), 64), Inception(256, (160, 320), (32, 128), 128), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def b5(): return tf.keras.Sequential([ Inception(256, (160, 320), (32, 128), 128), Inception(384, (192, 384), (48, 128), 128), tf.keras.layers.GlobalAvgPool2D(), tf.keras.layers.Flatten() ]) def net(): return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(), tf.keras.layers.Dense(10)]) X = tf.random.uniform(shape=(1, 96, 96, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() class Inception(nn.Block): def __init__(self, c1, c2, c3, c4, **kwargs): super(Inception, self).__init__(**kwargs) self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu') self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu') self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu') self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu') self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu') self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1) self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu') def forward(self, x): p1 = self.p1_1(x) p2 = self.p2_2(self.p2_1(x)) p3 = self.p3_2(self.p3_1(x)) p4 = self.p4_2(self.p4_1(x)) return np.concatenate((p1, p2, p3, p4), axis=1) b1 = nn.Sequential() b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b2 = nn.Sequential() b2.add(nn.Conv2D(64, kernel_size=1, activation='relu'), nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b3 = nn.Sequential() b3.add(Inception(64, (96, 128), (16, 32), 32), Inception(128, (128, 192), (32, 96), 64), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b4 = nn.Sequential() b4.add(Inception(192, (96, 208), (16, 48), 64), Inception(160, (112, 224), (24, 64), 64), Inception(128, (128, 256), (24, 64), 64), Inception(112, (144, 288), (32, 64), 64), Inception(256, (160, 320), (32, 128), 128), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) b5 = nn.Sequential() b5.add(Inception(256, (160, 320), (32, 128), 128), Inception(384, (192, 384), (48, 128), 128), nn.GlobalAvgPool2D()) net = nn.Sequential() net.add(b1, b2, b3, b4, b5, nn.Dense(10)) X = np.random.uniform(size=(1, 1, 96, 96)) net.initialize() for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
553
import tensorflow as tf from d2l import tensorflow as d2l def batch_norm(X, gamma, beta, moving_mean, moving_var, eps): inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype) inv *= gamma Y = X * inv + (beta - moving_mean * inv) return Y class BatchNorm(tf.keras.layers.Layer): def __init__(self, **kwargs): super(BatchNorm, self).__init__(**kwargs) def build(self, input_shape): weight_shape = [input_shape[-1], ] self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True) self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True) self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False) self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False) super(BatchNorm, self).build(input_shape) def assign_moving_average(self, variable, value): momentum = 0.9 delta = variable * momentum + value * (1 - momentum) return variable.assign(delta) @tf.function def call(self, inputs, training): if training: axes = list(range(len(inputs.shape) - 1)) batch_mean = tf.reduce_mean(inputs, axes, keepdims=True) batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True) batch_mean = tf.squeeze(batch_mean, axes) batch_variance = tf.squeeze(batch_variance, axes) mean_update = self.assign_moving_average(self.moving_mean, batch_mean) variance_update = self.assign_moving_average(self.moving_variance, batch_variance) self.add_update(mean_update) self.add_update(variance_update) mean, variance = batch_mean, batch_variance else: mean, variance = self.moving_mean, self.moving_variance output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5) return output def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(84), BatchNorm(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(10)] ) lr, num_epochs, batch_size = 1.0, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) net = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu()) tf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,)) def net(): return tf.keras.models.Sequential([ tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Conv2D(filters=16, kernel_size=5), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.AvgPool2D(pool_size=2, strides=2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(120), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(84), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('sigmoid'), tf.keras.layers.Dense(10), ])
null
from mxnet import autograd, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum): if not autograd.is_training(): X_hat = (X - moving_mean) / np.sqrt(moving_var + eps) else: assert len(X.shape) in (2, 4) if len(X.shape) == 2: mean = X.mean(axis=0) var = ((X - mean) ** 2).mean(axis=0) else: mean = X.mean(axis=(0, 2, 3), keepdims=True) var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True) X_hat = (X - mean) / np.sqrt(var + eps) moving_mean = momentum * moving_mean + (1.0 - momentum) * mean moving_var = momentum * moving_var + (1.0 - momentum) * var Y = gamma * X_hat + beta return Y, moving_mean, moving_var class BatchNorm(nn.Block): def __init__(self, num_features, num_dims, **kwargs): super().__init__(**kwargs) if num_dims == 2: shape = (1, num_features) else: shape = (1, num_features, 1, 1) self.gamma = self.params.get('gamma', shape=shape, init=init.One()) self.beta = self.params.get('beta', shape=shape, init=init.Zero()) self.moving_mean = np.zeros(shape) self.moving_var = np.ones(shape) def forward(self, X): if self.moving_mean.ctx != X.ctx: self.moving_mean = self.moving_mean.copyto(X.ctx) self.moving_var = self.moving_var.copyto(X.ctx) Y, self.moving_mean, self.moving_var = batch_norm( X, self.gamma.data(), self.beta.data(), self.moving_mean, self.moving_var, eps=1e-12, momentum=0.9) return Y net = nn.Sequential() net.add(nn.Conv2D(6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120), BatchNorm(120, num_dims=2), nn.Activation('sigmoid'), nn.Dense(84), BatchNorm(84, num_dims=2), nn.Activation('sigmoid'), nn.Dense(10)) lr, num_epochs, batch_size = 1.0, 10, 256 train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size) d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu()) net[1].gamma.data().reshape(-1,), net[1].beta.data().reshape(-1,) net = nn.Sequential() net.add(nn.Conv2D(6, kernel_size=5), nn.BatchNorm(), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Conv2D(16, kernel_size=5), nn.BatchNorm(), nn.Activation('sigmoid'), nn.AvgPool2D(pool_size=2, strides=2), nn.Dense(120), nn.BatchNorm(), nn.Activation('sigmoid'), nn.Dense(84), nn.BatchNorm(), nn.Activation('sigmoid'), nn.Dense(10))
null
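Every batch_norm variant above applies the same transformation, y = gamma * (x - mean) / sqrt(var + eps) + beta, with statistics taken over the batch (and, for convolutional inputs, the spatial dimensions). A minimal NumPy sketch for a (batch, features) input:

import numpy as np

X = np.array([[1., 2.], [3., 4.], [5., 6.]])
gamma, beta, eps = np.ones(2), np.zeros(2), 1e-5
mean, var = X.mean(axis=0), X.var(axis=0)
print(gamma * (X - mean) / np.sqrt(var + eps) + beta)  # each column ~ [-1.22, 0., 1.22]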
554
import tensorflow as tf from d2l import tensorflow as d2l class Residual(tf.keras.Model): def __init__(self, num_channels, use_1x1conv=False, strides=1): super().__init__() self.conv1 = tf.keras.layers.Conv2D( num_channels, padding='same', kernel_size=3, strides=strides) self.conv2 = tf.keras.layers.Conv2D( num_channels, kernel_size=3, padding='same') self.conv3 = None if use_1x1conv: self.conv3 = tf.keras.layers.Conv2D( num_channels, kernel_size=1, strides=strides) self.bn1 = tf.keras.layers.BatchNormalization() self.bn2 = tf.keras.layers.BatchNormalization() def call(self, X): Y = tf.keras.activations.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3 is not None: X = self.conv3(X) Y += X return tf.keras.activations.relu(Y) blk = Residual(3) X = tf.random.uniform((4, 6, 6, 3)) Y = blk(X) Y.shape blk = Residual(6, use_1x1conv=True, strides=2) blk(X).shape b1 = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) class ResnetBlock(tf.keras.layers.Layer): def __init__(self, num_channels, num_residuals, first_block=False, **kwargs): super(ResnetBlock, self).__init__(**kwargs) self.residual_layers = [] for i in range(num_residuals): if i == 0 and not first_block: self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2)) else: self.residual_layers.append(Residual(num_channels)) def call(self, X): for layer in self.residual_layers.layers: X = layer(X) return X b2 = ResnetBlock(64, 2, first_block=True) b3 = ResnetBlock(128, 2) b4 = ResnetBlock(256, 2) b5 = ResnetBlock(512, 2) def net(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Activation('relu'), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'), ResnetBlock(64, 2, first_block=True), ResnetBlock(128, 2), ResnetBlock(256, 2), ResnetBlock(512, 2), tf.keras.layers.GlobalAvgPool2D(), tf.keras.layers.Dense(units=10)]) X = tf.random.uniform(shape=(1, 224, 224, 1)) for layer in net().layers: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() class Residual(nn.Block): def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs): super().__init__(**kwargs) self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides) self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1) if use_1x1conv: self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides) else: self.conv3 = None self.bn1 = nn.BatchNorm() self.bn2 = nn.BatchNorm() def forward(self, X): Y = npx.relu(self.bn1(self.conv1(X))) Y = self.bn2(self.conv2(Y)) if self.conv3: X = self.conv3(X) return npx.relu(Y + X) blk = Residual(3) blk.initialize() X = np.random.uniform(size=(4, 3, 6, 6)) blk(X).shape blk = Residual(6, use_1x1conv=True, strides=2) blk.initialize() blk(X).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) def resnet_block(num_channels, num_residuals, first_block=False): blk = nn.Sequential() for i in range(num_residuals): if i == 0 and not first_block: blk.add(Residual(num_channels, use_1x1conv=True, strides=2)) else: blk.add(Residual(num_channels)) return blk net.add(resnet_block(64, 2, first_block=True), resnet_block(128, 2), resnet_block(256, 2), resnet_block(512, 2)) net.add(nn.GlobalAvgPool2D(), nn.Dense(10)) X = np.random.uniform(size=(1, 1, 224, 224)) net.initialize() for layer in net: X = layer(X) print(layer.__class__.__name__,'output shape: ', X.shape)
null
555
import tensorflow as tf from d2l import tensorflow as d2l class ConvBlock(tf.keras.layers.Layer): def __init__(self, num_channels): super(ConvBlock, self).__init__() self.bn = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same') self.listLayers = [self.bn, self.relu, self.conv] def call(self, x): y = x for layer in self.listLayers.layers: y = layer(y) y = tf.keras.layers.concatenate([x,y], axis=-1) return y class DenseBlock(tf.keras.layers.Layer): def __init__(self, num_convs, num_channels): super(DenseBlock, self).__init__() self.listLayers = [] for _ in range(num_convs): self.listLayers.append(ConvBlock(num_channels)) def call(self, x): for layer in self.listLayers.layers: x = layer(x) return x blk = DenseBlock(2, 10) X = tf.random.uniform((4, 8, 8, 3)) Y = blk(X) Y.shape class TransitionBlock(tf.keras.layers.Layer): def __init__(self, num_channels, **kwargs): super(TransitionBlock, self).__init__(**kwargs) self.batch_norm = tf.keras.layers.BatchNormalization() self.relu = tf.keras.layers.ReLU() self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1) self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2) def call(self, x): x = self.batch_norm(x) x = self.relu(x) x = self.conv(x) return self.avg_pool(x) blk = TransitionBlock(10) blk(Y).shape def block_1(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.ReLU(), tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')]) def block_2(): net = block_1() num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(TransitionBlock(num_channels)) return net def net(): net = block_2() net.add(tf.keras.layers.BatchNormalization()) net.add(tf.keras.layers.ReLU()) net.add(tf.keras.layers.GlobalAvgPool2D()) net.add(tf.keras.layers.Flatten()) net.add(tf.keras.layers.Dense(10)) return net
null
from mxnet import np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() def conv_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=3, padding=1)) return blk class DenseBlock(nn.Block): def __init__(self, num_convs, num_channels, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() for _ in range(num_convs): self.net.add(conv_block(num_channels)) def forward(self, X): for blk in self.net: Y = blk(X) X = np.concatenate((X, Y), axis=1) return X blk = DenseBlock(2, 10) blk.initialize() X = np.random.uniform(size=(4, 3, 8, 8)) Y = blk(X) Y.shape def transition_block(num_channels): blk = nn.Sequential() blk.add(nn.BatchNorm(), nn.Activation('relu'), nn.Conv2D(num_channels, kernel_size=1), nn.AvgPool2D(pool_size=2, strides=2)) return blk blk = transition_block(10) blk.initialize() blk(Y).shape net = nn.Sequential() net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), nn.BatchNorm(), nn.Activation('relu'), nn.MaxPool2D(pool_size=3, strides=2, padding=1)) num_channels, growth_rate = 64, 32 num_convs_in_dense_blocks = [4, 4, 4, 4] for i, num_convs in enumerate(num_convs_in_dense_blocks): net.add(DenseBlock(num_convs, growth_rate)) num_channels += num_convs * growth_rate if i != len(num_convs_in_dense_blocks) - 1: num_channels //= 2 net.add(transition_block(num_channels)) net.add(nn.BatchNorm(), nn.Activation('relu'), nn.GlobalAvgPool2D(), nn.Dense(10))
null
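The channel bookkeeping in block_2 and in the Gluon loop follows a simple recurrence: each dense block adds num_convs * growth_rate channels and each transition block halves the count, giving 192, 96, 224, 112, 240, 120, 248. The arithmetic alone:

num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    num_channels += num_convs * growth_rate           # after the dense block
    print(f'after dense block {i}: {num_channels}')
    if i != len(num_convs_in_dense_blocks) - 1:
        num_channels //= 2                            # transition block halves channels
        print(f'after transition {i}: {num_channels}')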
556
%matplotlib inline import tensorflow as tf from d2l import tensorflow as d2l T = 1000 time = tf.range(1, T + 1, dtype=tf.float32) x = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = tf.Variable(tf.zeros((T - tau, tau))) for i in range(tau): features[:, i].assign(x[i: T - tau + i]) labels = tf.reshape(x[tau:], (-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'), tf.keras.layers.Dense(1)]) return net loss = tf.keras.losses.MeanSquaredError() def train(net, train_iter, loss, epochs, lr): trainer = tf.keras.optimizers.Adam() for epoch in range(epochs): for X, y in train_iter: with tf.GradientTape() as g: out = net(X) l = loss(y, out) params = net.trainable_variables grads = g.gradient(l, params) trainer.apply_gradients(zip(grads, params)) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.numpy(), onestep_preds.numpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = tf.Variable(tf.zeros(T)) multistep_preds[:n_train + tau].assign(x[:n_train + tau]) for i in range(n_train + tau, T): multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ())) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.numpy(), onestep_preds.numpy(), multistep_preds[n_train + tau:].numpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps))) for i in range(tau): features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy()) for i in range(tau, tau + max_steps): features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1)) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].numpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
null
%matplotlib inline from mxnet import autograd, gluon, init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np() T = 1000 time = np.arange(1, T + 1, dtype=np.float32) x = np.sin(0.01 * time) + np.random.normal(0, 0.2, (T,)) d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3)) tau = 4 features = np.zeros((T - tau, tau)) for i in range(tau): features[:, i] = x[i: T - tau + i] labels = x[tau:].reshape((-1, 1)) batch_size, n_train = 16, 600 train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True) def get_net(): net = nn.Sequential() net.add(nn.Dense(10, activation='relu'), nn.Dense(1)) net.initialize(init.Xavier()) return net loss = gluon.loss.L2Loss() def train(net, train_iter, loss, epochs, lr): trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': lr}) for epoch in range(epochs): for X, y in train_iter: with autograd.record(): l = loss(net(X), y) l.backward() trainer.step(batch_size) net = get_net() train(net, train_iter, loss, 5, 0.01) onestep_preds = net(features) d2l.plot([time, time[tau:]], [x.asnumpy(), onestep_preds.asnumpy()], 'time', 'x', legend=['data', '1-step preds'], xlim=[1, 1000], figsize=(6, 3)) multistep_preds = np.zeros(T) multistep_preds[: n_train + tau] = x[: n_train + tau] for i in range(n_train + tau, T): multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1))) d2l.plot([time, time[tau:], time[n_train + tau:]], [x.asnumpy(), onestep_preds.asnumpy(), multistep_preds[n_train + tau:].asnumpy()], 'time', 'x', legend=['data', '1-step preds', 'multistep preds'], xlim=[1, 1000], figsize=(6, 3)) max_steps = 64 features = np.zeros((T - tau - max_steps + 1, tau + max_steps)) for i in range(tau): features[:, i] = x[i: i + T - tau - max_steps + 1] for i in range(tau, tau + max_steps): features[:, i] = net(features[:, i - tau:i]).reshape(-1) steps = (1, 4, 16, 64) d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps], [features[:, tau + i - 1].asnumpy() for i in steps], 'time', 'x', legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000], figsize=(6, 3))
null
557
import collections import re from d2l import tensorflow as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('ERROR: unknown token type: ' + token) tokens = tokenize(lines) for i in range(11): print(tokens[i]) def load_corpus_time_machine(max_tokens=-1): lines = read_time_machine() tokens = tokenize(lines, 'char') vocab = Vocab(tokens) corpus = [vocab[token] for line in tokens for token in line] if max_tokens > 0: corpus = corpus[:max_tokens] return corpus, vocab corpus, vocab = load_corpus_time_machine() len(corpus), len(vocab)
null
import collections import re from d2l import mxnet as d2l def tokenize(lines, token='word'): if token == 'word': return [line.split() for line in lines] elif token == 'char': return [list(line) for line in lines] else: print('ERROR: unknown token type: ' + token) tokens = tokenize(lines) for i in range(11): print(tokens[i]) def load_corpus_time_machine(max_tokens=-1): lines = read_time_machine() tokens = tokenize(lines, 'char') vocab = Vocab(tokens) corpus = [vocab[token] for line in tokens for token in line] if max_tokens > 0: corpus = corpus[:max_tokens] return corpus, vocab corpus, vocab = load_corpus_time_machine() len(corpus), len(vocab)
null
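Assuming the tokenize defined in either cell above is in scope, its two modes can be compared on a sample line from the corpus: word tokenization splits on whitespace, character tokenization returns one token per character.

line = 'the time machine by h g wells'
print(tokenize([line], 'word'))         # [['the', 'time', 'machine', 'by', 'h', 'g', 'wells']]
print(tokenize([line], 'char')[0][:9])  # ['t', 'h', 'e', ' ', 't', 'i', 'm', 'e', ' ']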
558
import random import tensorflow as tf from d2l import tensorflow as d2l tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield tf.constant(X), tf.constant(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = tf.constant(corpus[offset: offset + num_tokens]) Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens]) Xs = tf.reshape(Xs, (batch_size, -1)) Ys = tf.reshape(Ys, (batch_size, -1)) num_batches = Xs.shape[1] // num_steps for i in range(0, num_batches * num_steps, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
null
import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() tokens = d2l.tokenize(d2l.read_time_machine()) corpus = [token for line in tokens for token in line] vocab = d2l.Vocab(corpus) vocab.token_freqs[:10] def seq_data_iter_random(corpus, batch_size, num_steps): corpus = corpus[random.randint(0, num_steps - 1):] num_subseqs = (len(corpus) - 1) // num_steps initial_indices = list(range(0, num_subseqs * num_steps, num_steps)) random.shuffle(initial_indices) def data(pos): return corpus[pos: pos + num_steps] num_batches = num_subseqs // batch_size for i in range(0, batch_size * num_batches, batch_size): initial_indices_per_batch = initial_indices[i: i + batch_size] X = [data(j) for j in initial_indices_per_batch] Y = [data(j + 1) for j in initial_indices_per_batch] yield np.array(X), np.array(Y) def seq_data_iter_sequential(corpus, batch_size, num_steps): offset = random.randint(0, num_steps) num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size Xs = np.array(corpus[offset: offset + num_tokens]) Ys = np.array(corpus[offset + 1: offset + 1 + num_tokens]) Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1) num_batches = Xs.shape[1] // num_steps for i in range(0, num_steps * num_batches, num_steps): X = Xs[:, i: i + num_steps] Y = Ys[:, i: i + num_steps] yield X, Y
null
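Assuming the seq_data_iter_random defined in either cell above is in scope, the iterators can be sanity-checked on a toy corpus: each label minibatch Y is the input minibatch X shifted one token to the right.

my_seq = list(range(35))
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
    print('X:', X, '\nY:', Y)
    break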
559
import tensorflow as tf from d2l import tensorflow as d2l X, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1) H, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1) tf.matmul(X, W_xh) + tf.matmul(H, W_hh) tf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0))
null
from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() X, W_xh = np.random.normal(0, 1, (3, 1)), np.random.normal(0, 1, (1, 4)) H, W_hh = np.random.normal(0, 1, (3, 4)), np.random.normal(0, 1, (4, 4)) np.dot(X, W_xh) + np.dot(H, W_hh) np.dot(np.concatenate((X, H), 1), np.concatenate((W_xh, W_hh), 0))
null
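The two expressions in this row agree because concatenating X and H along columns and W_xh and W_hh along rows yields the same block matrix product; a plain-NumPy check:

import numpy as np

rng = np.random.default_rng(0)
X, W_xh = rng.normal(size=(3, 1)), rng.normal(size=(1, 4))
H, W_hh = rng.normal(size=(3, 4)), rng.normal(size=(4, 4))
lhs = X @ W_xh + H @ W_hh
rhs = np.concatenate((X, H), axis=1) @ np.concatenate((W_xh, W_hh), axis=0)
print(np.allclose(lhs, rhs))  # True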
560
%matplotlib inline import math import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) train_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True) tf.one_hot(tf.constant([0, 2]), len(vocab)) X = tf.reshape(tf.range(10), (2, 5)) tf.one_hot(tf.transpose(X), 28).shape def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32) W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32) b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32) W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xh, W_hh, b_h, W_hq, b_q] return params def init_rnn_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.init_state, self.forward_fn = init_state, forward_fn self.trainable_variables = get_params(vocab_size, num_hiddens) def __call__(self, X, state): X = tf.one_hot(tf.transpose(X), self.vocab_size) X = tf.cast(X, tf.float32) return self.forward_fn(X, state, self.trainable_variables) def begin_state(self, batch_size, *args, **kwargs): return self.init_state(batch_size, self.num_hiddens) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_hiddens = 512 with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) state = net.begin_state(X.shape[0]) Y, new_state = net(X, state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab): state = net.begin_state(batch_size=1, dtype=tf.float32) outputs = [vocab[prefix[0]]] get_input = lambda: tf.reshape(tf.constant([outputs[-1]]), (1, 1)).numpy() for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.numpy().argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) predict_ch8('time traveller ', 10, net, vocab) def grad_clipping(grads, theta): theta = tf.constant(theta, dtype=tf.float32) new_grad = [] for grad in grads: if isinstance(grad, tf.IndexedSlices): new_grad.append(tf.convert_to_tensor(grad)) else: new_grad.append(grad) norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy() for grad in new_grad)) norm = tf.cast(norm, tf.float32) if tf.greater(norm, theta): for i, grad in enumerate(new_grad): new_grad[i] = grad * theta / norm else: new_grad = new_grad return new_grad def train_epoch_ch8(net, train_iter, loss, updater, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32) with tf.GradientTape(persistent=True) as g: y_hat, state = net(X, state) y = tf.reshape(tf.transpose(Y), (-1)) l = loss(y, y_hat) params = net.trainable_variables grads = g.gradient(l, params) grads = grad_clipping(grads, 1) updater.apply_gradients(zip(grads, params)) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False): with strategy.scope(): loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) updater = tf.keras.optimizers.SGD(lr) animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) device = d2l.try_gpu()._device_name num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, strategy) with strategy.scope(): net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params) train_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True)
null
%matplotlib inline import math from mxnet import autograd, gluon, init, np, npx from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) npx.one_hot(np.array([0, 2]), len(vocab)) X = np.arange(10).reshape((2, 5)) npx.one_hot(X.T, 28).shape def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) W_xh = normal((num_inputs, num_hiddens)) W_hh = normal((num_hiddens, num_hiddens)) b_h = np.zeros(num_hiddens, ctx=device) W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_rnn_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), ) def rnn(inputs, state, params): W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: H = np.tanh(np.dot(X, W_xh) + np.dot(H, W_hh) + b_h) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) class RNNModelScratch: def __init__(self, vocab_size, num_hiddens, device, get_params, init_state, forward_fn): self.vocab_size, self.num_hiddens = vocab_size, num_hiddens self.params = get_params(vocab_size, num_hiddens, device) self.init_state, self.forward_fn = init_state, forward_fn def __call__(self, X, state): X = npx.one_hot(X.T, self.vocab_size) return self.forward_fn(X, state, self.params) def begin_state(self, batch_size, ctx): return self.init_state(batch_size, self.num_hiddens, ctx) num_hiddens = 512 net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) state = net.begin_state(X.shape[0], d2l.try_gpu()) Y, new_state = net(X.as_in_context(d2l.try_gpu()), state) Y.shape, len(new_state), new_state[0].shape def predict_ch8(prefix, num_preds, net, vocab, device): state = net.begin_state(batch_size=1, ctx=device) outputs = [vocab[prefix[0]]] get_input = lambda: np.array([outputs[-1]], ctx=device).reshape((1, 1)) for y in prefix[1:]: _, state = net(get_input(), state) outputs.append(vocab[y]) for _ in range(num_preds): y, state = net(get_input(), state) outputs.append(int(y.argmax(axis=1).reshape(1))) return ''.join([vocab.idx_to_token[i] for i in outputs]) predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu()) def grad_clipping(net, theta): if isinstance(net, gluon.Block): params = [p.data() for p in net.collect_params().values()] else: params = net.params norm = math.sqrt(sum((p.grad ** 2).sum() for p in params)) if norm > theta: for param in params: param.grad[:] *= theta / norm def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter): state, timer = None, d2l.Timer() metric = d2l.Accumulator(2) for X, Y in train_iter: if state is None or use_random_iter: state = net.begin_state(batch_size=X.shape[0], ctx=device) else: for s in state: s.detach() y = Y.T.reshape(-1) X, y = X.as_in_ctx(device), y.as_in_ctx(device) with autograd.record(): y_hat, state = net(X, state) l = loss(y_hat, y).mean() l.backward() grad_clipping(net, 1) updater(batch_size=1) metric.add(l * d2l.size(y), d2l.size(y)) return math.exp(metric[0] / metric[1]), metric[1] / timer.stop() def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False): loss = gluon.loss.SoftmaxCrossEntropyLoss() animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs]) if isinstance(net, gluon.Block): net.initialize(ctx=device, force_reinit=True, init=init.Normal(0.01)) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr}) updater = lambda batch_size: trainer.step(batch_size) else: updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size) predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device) for epoch in range(num_epochs): ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter) if (epoch + 1) % 10 == 0: animator.add(epoch + 1, [ppl]) num_epochs, lr = 500, 1 train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu()) net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params, init_rnn_state, rnn) train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True)
null
561
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform') rnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True) state = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32) state.shape X = tf.random.uniform((num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(tf.keras.layers.Layer): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = tf.keras.layers.Dense(vocab_size) def call(self, inputs, state): X = tf.one_hot(tf.transpose(inputs), self.vocab_size) Y, *state = self.rnn(X, state) output = self.dense(tf.reshape(Y, (-1, Y.shape[-1]))) return output, state def begin_state(self, *args, **kwargs): return self.rnn.cell.get_initial_state(*args, **kwargs) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): net = RNNModel(rnn_layer, vocab_size=len(vocab)) d2l.predict_ch8('time traveller', 10, net, vocab) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)
null
from mxnet import np, npx from mxnet.gluon import nn, rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) num_hiddens = 256 rnn_layer = rnn.RNN(num_hiddens) rnn_layer.initialize() state = rnn_layer.begin_state(batch_size=batch_size) len(state), state[0].shape X = np.random.uniform(size=(num_steps, batch_size, len(vocab))) Y, state_new = rnn_layer(X, state) Y.shape, len(state_new), state_new[0].shape class RNNModel(nn.Block): def __init__(self, rnn_layer, vocab_size, **kwargs): super(RNNModel, self).__init__(**kwargs) self.rnn = rnn_layer self.vocab_size = vocab_size self.dense = nn.Dense(vocab_size) def forward(self, inputs, state): X = npx.one_hot(inputs.T, self.vocab_size) Y, state = self.rnn(X, state) output = self.dense(Y.reshape(-1, Y.shape[-1])) return output, state def begin_state(self, *args, **kwargs): return self.rnn.begin_state(*args, **kwargs) device = d2l.try_gpu() net = RNNModel(rnn_layer, len(vocab)) net.initialize(force_reinit=True, ctx=device) d2l.predict_ch8('time traveller', 10, net, vocab, device) num_epochs, lr = 500, 1 d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
null
562
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32) def three(): return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] return params def init_gru_state(batch_size, num_hiddens): return (tf.zeros((batch_size, num_hiddens)), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: X = tf.reshape(X,[-1,W_xh.shape[0]]) Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z) R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r) H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,) vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) num_epochs, lr = 500, 1 with strategy.scope(): model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) gru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform') gru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModel(gru_layer, vocab_size=len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
null
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xz, W_hz, b_z = three() W_xr, W_hr, b_r = three() W_xh, W_hh, b_h = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q] for param in params: param.attach_grad() return params def init_gru_state(batch_size, num_hiddens, device): return (np.zeros(shape=(batch_size, num_hiddens), ctx=device), ) def gru(inputs, state, params): W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params H, = state outputs = [] for X in inputs: Z = npx.sigmoid(np.dot(X, W_xz) + np.dot(H, W_hz) + b_z) R = npx.sigmoid(np.dot(X, W_xr) + np.dot(H, W_hr) + b_r) H_tilda = np.tanh(np.dot(X, W_xh) + np.dot(R * H, W_hh) + b_h) H = Z * H + (1 - Z) * H_tilda Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H,) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_params, init_gru_state, gru) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) gru_layer = rnn.GRU(num_hiddens) model = d2l.RNNModel(gru_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
563
import tensorflow as tf from d2l import tensorflow as d2l batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens): num_inputs = num_outputs = vocab_size def normal(shape): return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32)) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] return params def init_lstm_state(batch_size, num_hiddens): return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens))) def lstm(inputs, state, params): W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params (H, C) = state outputs = [] for X in inputs: X=tf.reshape(X,[-1,W_xi.shape[0]]) I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i) F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f) O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o) C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * tf.tanh(C) Y = tf.matmul(H, W_hq) + b_q outputs.append(Y) return tf.concat(outputs, axis=0), (H,C) vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name num_epochs, lr = 500, 1 strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) lstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform') lstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True) device_name = d2l.try_gpu()._device_name strategy = tf.distribute.OneDeviceStrategy(device_name) with strategy.scope(): model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
null
from mxnet import np, npx from mxnet.gluon import rnn from d2l import mxnet as d2l npx.set_np() batch_size, num_steps = 32, 35 train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps) def get_lstm_params(vocab_size, num_hiddens, device): num_inputs = num_outputs = vocab_size def normal(shape): return np.random.normal(scale=0.01, size=shape, ctx=device) def three(): return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), np.zeros(num_hiddens, ctx=device)) W_xi, W_hi, b_i = three() W_xf, W_hf, b_f = three() W_xo, W_ho, b_o = three() W_xc, W_hc, b_c = three() W_hq = normal((num_hiddens, num_outputs)) b_q = np.zeros(num_outputs, ctx=device) params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] for param in params: param.attach_grad() return params def init_lstm_state(batch_size, num_hiddens, device): return (np.zeros((batch_size, num_hiddens), ctx=device), np.zeros((batch_size, num_hiddens), ctx=device)) def lstm(inputs, state, params): [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params (H, C) = state outputs = [] for X in inputs: I = npx.sigmoid(np.dot(X, W_xi) + np.dot(H, W_hi) + b_i) F = npx.sigmoid(np.dot(X, W_xf) + np.dot(H, W_hf) + b_f) O = npx.sigmoid(np.dot(X, W_xo) + np.dot(H, W_ho) + b_o) C_tilda = np.tanh(np.dot(X, W_xc) + np.dot(H, W_hc) + b_c) C = F * C + I * C_tilda H = O * np.tanh(C) Y = np.dot(H, W_hq) + b_q outputs.append(Y) return np.concatenate(outputs, axis=0), (H, C) vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu() num_epochs, lr = 500, 1 model = d2l.RNNModelScratch(len(vocab), num_hiddens, device, get_lstm_params, init_lstm_state, lstm) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) lstm_layer = rnn.LSTM(num_hiddens) model = d2l.RNNModel(lstm_layer, len(vocab)) d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
null
564
import os import tensorflow as tf from d2l import tensorflow as d2l def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = tf.reduce_sum( tf.cast(array != vocab['<pad>'], tf.int32), 1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', tf.cast(X, tf.int32)) print('Valid length of X:', X_valid_len) print('Y:', tf.cast(Y, tf.int32)) print('Valid length of Y:', Y_valid_len) break
null
import os from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() def build_array_nmt(lines, vocab, num_steps): lines = [vocab[l] for l in lines] lines = [l + [vocab['<eos>']] for l in lines] array = np.array([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines]) valid_len = (array != vocab['<pad>']).astype(np.int32).sum(1) return array, valid_len train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8) for X, X_valid_len, Y, Y_valid_len in train_iter: print('X:', X.astype(np.int32)) print('Valid length of X:', X_valid_len) print('Y:', Y.astype(np.int32)) print('Valid length of Y:', Y_valid_len) break
null
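Both cells rely on a truncate_pad helper defined elsewhere in the same section; it behaves like the sketch below, which clips a token list to num_steps or right-pads it with the padding token:

def truncate_pad(line, num_steps, padding_token):
    # Clip to num_steps, or pad on the right up to num_steps.
    if len(line) > num_steps:
        return line[:num_steps]
    return line + [padding_token] * (num_steps - len(line))

print(truncate_pad([47, 4], 10, 1))  # [47, 4, 1, 1, 1, 1, 1, 1, 1, 1]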