id | tensorflow | pytorch | mxnet | paddle |
---|---|---|---|---|
1 | x = tf.range(12)
tf.size(x)
X = tf.reshape(x, (3, 4))
tf.zeros((2, 3, 4))
tf.ones((2, 3, 4))
tf.random.normal(shape=[3, 4])
tf.constant([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
x = tf.constant([1.0, 2, 4, 8])
y = tf.constant([2.0, 2, 2, 2])
x + y, x - y, x * y, x / y, x ** y
tf.exp(x)
X = tf.reshape(tf.range(12, dtype=tf.float32), (3, 4))
Y = tf.constant([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
tf.concat([X, Y], axis=0), tf.concat([X, Y], axis=1)
tf.reduce_sum(X)
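# broadcasting setup: a has shape (3, 1) and b has shape (1, 2), so a + b would broadcast both to (3, 2)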
a = tf.reshape(tf.range(3), (3, 1))
b = tf.reshape(tf.range(2), (1, 2))
X_var = tf.Variable(X)
X_var[1, 2].assign(9)
X_var = tf.Variable(X)
X_var[0:2, :].assign(tf.ones(X_var[0:2,:].shape, dtype = tf.float32) * 12)
Z = tf.Variable(tf.zeros_like(Y))
Z.assign(X + Y)
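# wrapping the arithmetic in tf.function compiles it as a graph, which can let TensorFlow reuse memory for intermediates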
@tf.function
def computation(X, Y):
Z = tf.zeros_like(Y)
A = X + Y
B = A + Y
C = B + Y
return C + Y
computation(X, Y)
A = X.numpy()
B = tf.constant(A)
a = tf.constant([3.5]).numpy()
print(a, a.item(), float(a), int(a)) | null | null | x = paddle.arange(12)
x.numel()
X = paddle.reshape(x, (3, 4))
paddle.zeros((2, 3, 4))
paddle.ones((2, 3, 4))
paddle.randn((3, 4),'float32')
paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
x = paddle.to_tensor([1.0, 2, 4, 8])
y = paddle.to_tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x**y
paddle.exp(x)
X = paddle.arange(12, dtype='float32').reshape((3, 4))
Y = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
paddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)
X.sum()
a = paddle.reshape(paddle.arange(3), (3, 1))
b = paddle.reshape(paddle.arange(2), (1, 2))
X[1, 2] = 9
X[0:2, :] = 12
Z = paddle.zeros_like(Y)
Z = X + Y
before = id(X)
X += Y
id(X) == before
A = X.numpy()
B = paddle.to_tensor(A)
type(A), type(B)
a = paddle.to_tensor([3.5])
a, a.item(), float(a), int(a) |
2 | import tensorflow as tf
X, y = tf.constant(inputs.values), tf.constant(outputs.values) | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values) |
3 | import tensorflow as tf
x = tf.constant(3.0)
y = tf.constant(2.0)
print(x + y, x * y, x / y, x**y)
x = tf.range(4)
A = tf.reshape(tf.range(20), (5, 4))
tf.transpose(A)
B = tf.constant([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == tf.transpose(B)
X = tf.reshape(tf.range(24), (2, 3, 4))
A = tf.reshape(tf.range(20, dtype=tf.float32), (5, 4))
B = A
print(A, A + B)
a = 2
X = tf.reshape(tf.range(24), (2, 3, 4))
print(a + X, (a * X).shape)
x = tf.range(4, dtype=tf.float32)
print(x, tf.reduce_sum(x))
a = tf.reduce_sum(A)
A_sum_axis0 = tf.reduce_sum(A, axis=0)
A_sum_axis1 = tf.reduce_sum(A, axis=1)
tf.reduce_sum(A, axis=[0, 1])
tf.reduce_mean(A)
tf.reduce_sum(A) / tf.size(A).numpy()
tf.reduce_mean(A, axis=0)
tf.reduce_sum(A, axis=0) / A.shape[0]
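# keepdims=True keeps the reduced axis as size 1, so sum_A can still broadcast against A (e.g. A / sum_A)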
sum_A = tf.reduce_sum(A, axis=1, keepdims=True)
tf.cumsum(A, axis=0)
y = tf.ones(4, dtype=tf.float32)
print(tf.tensordot(x, y, axes=1))
tf.reduce_sum(x * y)
A.shape, x.shape, tf.linalg.matvec(A, x)
B = tf.ones((4, 3), tf.float32)
tf.matmul(A, B)
u = tf.constant([3.0, -4.0])
tf.norm(u)
tf.reduce_sum(tf.abs(u))
tf.norm(tf.ones((4, 9))) | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
x = paddle.to_tensor([3.0])
y = paddle.to_tensor([2.0])
x + y, x * y, x / y, x**y
x = paddle.arange(4)
A = paddle.reshape(paddle.arange(20), (5, 4))
paddle.transpose(A, perm=[1, 0])
B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == paddle.transpose(B, perm=[1, 0])
X = paddle.reshape(paddle.arange(24), (2, 3, 4))
A = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))
B = A.clone()
A, A + B
a = 2
X = paddle.reshape(paddle.arange(24), (2, 3, 4))
a + X, (a * X).shape
x = paddle.arange(4, dtype=paddle.float32)
print(x, x.sum())
A.shape, A.sum()
A_sum_axis0 = A.sum(axis=0)
A_sum_axis1 = A.sum(axis=1)
A.sum(axis=[0, 1])
A.mean(), A.sum() / A.numel()
A.mean(axis=0), A.sum(axis=0) / A.shape[0]
sum_A = paddle.sum(A, axis=1, keepdim=True)
A.cumsum(axis=0)
y = paddle.ones(shape=[4], dtype='float32')
x, y, paddle.dot(x, y)
paddle.sum(x * y)
A.shape, x.shape, paddle.mv(A, x)
B = paddle.ones(shape=[4, 3], dtype='float32')
paddle.mm(A, B)
u = paddle.to_tensor([3.0, -4.0])
paddle.norm(u)
paddle.abs(u).sum()
paddle.norm(paddle.ones(shape=[4, 9], dtype='float32')) |
4 | %matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import tensorflow as d2l
def f(x):
return 3 * x ** 2 - 4 * x
def numerical_lim(f, x, h):
return (f(x + h) - f(x)) / h
h = 0.1
for i in range(5):
print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
h *= 0.1 | null | null | %matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import paddle as d2l
def f(x):
return 3 * x ** 2 - 4 * x
def numerical_lim(f, x, h):
return (f(x + h) - f(x)) / h
h = 0.1
for i in range(5):
print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
h *= 0.1 |
5 | import tensorflow as tf
x = tf.range(4, dtype=tf.float32)
x = tf.Variable(x)
with tf.GradientTape() as t:
y = 2 * tf.tensordot(x, x, axes=1)
x_grad = t.gradient(y, x)
x_grad
x_grad == 4 * x
with tf.GradientTape() as t:
y = tf.reduce_sum(x)
t.gradient(y, x)
with tf.GradientTape() as t:
y = x * x
t.gradient(y, x)
with tf.GradientTape(persistent=True) as t:
y = x * x
u = tf.stop_gradient(y)
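# u is treated as a constant, so the gradient of z = u * x with respect to x is u rather than 3 * x**2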
z = u * x
x_grad = t.gradient(z, x)
x_grad == u
t.gradient(y, x) == 2 * x
def f(a):
b = a * 2
while tf.norm(b) < 1000:
b = b * 2
if tf.reduce_sum(b) > 0:
c = b
else:
c = 100 * b
return c
a = tf.Variable(tf.random.normal(shape=()))
with tf.GradientTape() as t:
d = f(a)
d_grad = t.gradient(d, a)
d_grad
d_grad == d / a | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
x = paddle.arange(4, dtype='float32')
x = paddle.to_tensor(x, stop_gradient=False)
y = 2 * paddle.dot(x, x)
y.backward()
x.grad
x.grad == 4 * x
x.clear_gradient()
y = paddle.sum(x)
y.backward()
x.grad
x.clear_gradient()
y = x * x
paddle.sum(y).backward()
x.grad
x.clear_gradient()
y = x * x
u = y.detach()
z = u * x
paddle.sum(z).backward()
x.grad == u
x.clear_gradient()
paddle.sum(y).backward()
x.grad == 2 * x
def f(a):
b = a * 2
while paddle.norm(b) < 1000:
b = b * 2
if paddle.sum(b) > 0:
c = b
else:
c = 100 * b
return c
a = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)
d = f(a)
d.backward()
a.grad == d / a |
6 | %matplotlib inline
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from d2l import tensorflow as d2l
fair_probs = tf.ones(6) / 6
tfp.distributions.Multinomial(1, fair_probs).sample()
tfp.distributions.Multinomial(10, fair_probs).sample()
counts = tfp.distributions.Multinomial(1000, fair_probs).sample() | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import numpy as np
import paddle
fair_probs = [1.0 / 6] * 6
paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()
counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()
counts / 1000
counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()
counts / 1000 |
7 | counts = tfp.distributions.Multinomial(10, fair_probs).sample(500)
cum_counts = tf.cumsum(counts, axis=0)
estimates = cum_counts / tf.reduce_sum(cum_counts, axis=1, keepdims=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
d2l.plt.plot(estimates[:, i].numpy(), label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend();
import tensorflow as tf
a = dir(tf.random)
help(tf.ones)
tf.ones(4) | null | null | counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))
cum_counts = counts.cumsum(axis=0)
cum_counts = cum_counts.squeeze(axis=1)
estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
d2l.plt.plot(estimates[:, i],
label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend()
import warnings
warnings.filterwarnings(action='ignore')
import paddle
help(paddle.ones)
paddle.ones([4], dtype='float32') |
8 | %matplotlib inline
import math
import time
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l
n = 10000
a = tf.ones(n)
b = tf.ones(n)
c = tf.Variable(tf.zeros(n))
timer = Timer()
for i in range(n):
c[i].assign(a[i] + b[i])
x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x', ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params]) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import time
import numpy as np
import paddle
n = 10000
a = paddle.ones([n])
b = paddle.ones([n])
c = paddle.zeros([n])
timer = Timer()
for i in range(n):
c[i] = a[i] + b[i]
x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',
ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params]) |
9 | %matplotlib inline
import random
import tensorflow as tf
from d2l import tensorflow as d2l
def synthetic_data(w, b, num_examples):
X = tf.zeros((num_examples, w.shape[0]))
X += tf.random.normal(shape=X.shape)
y = tf.matmul(X, tf.reshape(w, (-1, 1))) + b
y += tf.random.normal(shape=y.shape, stddev=0.01)
y = tf.reshape(y, (-1, 1))
return X, y
true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, (1)].numpy(), labels.numpy(), 1);
def data_iter(batch_size, features, labels):
num_examples = len(features)
indices = list(range(num_examples))
random.shuffle(indices)
for i in range(0, num_examples, batch_size):
j = tf.constant(indices[i: min(i + batch_size, num_examples)])
yield tf.gather(features, j), tf.gather(labels, j)
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
print(X, '\n', y)
break
w = tf.Variable(tf.random.normal(shape=(2, 1), mean=0, stddev=0.01), trainable=True)
b = tf.Variable(tf.zeros(1), trainable=True)
def linreg(X, w, b):
return tf.matmul(X, w) + b
def squared_loss(y_hat, y):
return (y_hat - tf.reshape(y, y_hat.shape)) ** 2 / 2
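# minibatch SGD update: param <- param - lr * grad / batch_size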
def sgd(params, grads, lr, batch_size):
for param, grad in zip(params, grads):
param.assign_sub(lr*grad/batch_size)
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
for X, y in data_iter(batch_size, features, labels):
with tf.GradientTape() as g:
l = loss(net(X, w, b), y)
dw, db = g.gradient(l, [w, b])
sgd([w, b], [dw, db], lr, batch_size)
train_l = loss(net(features, w, b), labels) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import paddle
def synthetic_data(w, b, num_examples):
X = paddle.normal(0, 1, (num_examples, len(w)))
y = paddle.matmul(X, w) + b
y += paddle.normal(0, 0.01, y.shape)
return X, y.reshape((-1, 1))
true_w = paddle.to_tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);
def data_iter(batch_size, features, labels):
num_examples = len(features)
indices = list(range(num_examples))
random.shuffle(indices)
for i in range(0, num_examples, batch_size):
batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])
yield features[batch_indices], labels[batch_indices]
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
break
w = paddle.normal(0, 0.01, shape=(2,1))
b = paddle.zeros(shape=[1])
w.stop_gradient = False
b.stop_gradient = False
def linreg(X, w, b):
return paddle.matmul(X, w) + b
def squared_loss(y_hat, y):
return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2
def sgd(params, lr, batch_size):
with paddle.no_grad():
for i, param in enumerate(params):
param -= lr * params[i].grad / batch_size
params[i].set_value(param)
params[i].clear_gradient()
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
for X, y in data_iter(batch_size, features, labels):
l = loss(net(X, w, b), y)
l.sum().backward()
sgd([w, b], lr, batch_size)
with paddle.no_grad():
train_l = loss(net(features, w, b), labels) |
10 | import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l
true_w = tf.constant([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
dataset = tf.data.Dataset.from_tensor_slices(data_arrays)
if is_train:
dataset = dataset.shuffle(buffer_size=1000)
dataset = dataset.batch(batch_size)
return dataset
batch_size = 10
data_iter = load_array((features, labels), batch_size)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1))
initializer = tf.initializers.RandomNormal(stddev=0.01)
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_initializer=initializer))
loss = tf.keras.losses.MeanSquaredError()
trainer = tf.keras.optimizers.SGD(learning_rate=0.03)
w = net.get_weights()[0]
b = net.get_weights()[1] | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import numpy as np
import paddle
true_w = paddle.to_tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
dataset = paddle.io.TensorDataset(data_arrays)
return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
from paddle import nn
net = nn.Sequential(nn.Linear(2, 1))
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))
bias_attr = paddle.ParamAttr(initializer=None)
net = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))
loss = nn.MSELoss()
trainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())
w = net[0].weight
b = net[0].bias |
11 | %matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
d2l.use_svg_display()
mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
len(mnist_train[0]), len(mnist_test[0])
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
figsize = (num_cols * scale, num_rows * scale)
_, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
ax.imshow(img.numpy())
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
X = tf.constant(mnist_train[0][:18])
y = tf.constant(mnist_train[1][:18])
show_images(X, 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
train_iter = tf.data.Dataset.from_tensor_slices(mnist_train).batch(batch_size).shuffle(len(mnist_train[0]))
def load_data_fashion_mnist(batch_size, resize=None):
mnist_train, mnist_test = tf.keras.datasets.fashion_mnist.load_data()
process = lambda X, y: (tf.expand_dims(X, axis=3) / 255, tf.cast(y, dtype='int32'))
resize_fn = lambda X, y: (tf.image.resize_with_pad(X, resize, resize) if resize else X, y)
return (tf.data.Dataset.from_tensor_slices(process(*mnist_train)).batch(batch_size).shuffle(len(mnist_train[0])).map(resize_fn),
tf.data.Dataset.from_tensor_slices(process(*mnist_test)).batch(batch_size).map(resize_fn)) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import sys
import paddle
from paddle.vision import transforms
d2l.use_svg_display()
trans = transforms.ToTensor()
mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)
len(mnist_train), len(mnist_test)
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
figsize = (num_cols * scale, num_rows * scale)
_, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
if paddle.is_tensor(img):
ax.imshow(img.numpy())
else:
ax.imshow(img)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
X, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
def get_dataloader_workers():
return 4
train_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())
def load_data_fashion_mnist(batch_size, resize=None):
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)
return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),
paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers())) |
12 | import tensorflow as tf
from IPython import display
from d2l import tensorflow as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = tf.Variable(tf.random.normal(shape=(num_inputs, num_outputs), mean=0, stddev=0.01))
b = tf.Variable(tf.zeros(num_outputs))
X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
tf.reduce_sum(X, 0, keepdims=True), tf.reduce_sum(X, 1, keepdims=True)
def softmax(X):
X_exp = tf.exp(X)
partition = tf.reduce_sum(X_exp, 1, keepdims=True)
return X_exp / partition
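# note: exponentiating raw logits like this can overflow for large values; robust implementations subtract the per-row max first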
X = tf.random.normal((2, 5), 0, 1)
X_prob = softmax(X)
X_prob, tf.reduce_sum(X_prob, 1)
def net(X):
return softmax(tf.matmul(tf.reshape(X, (-1, W.shape[0])), W) + b)
y_hat = tf.constant([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y = tf.constant([0, 2])
tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1]))
def cross_entropy(y_hat, y):
return -tf.math.log(tf.boolean_mask(y_hat, tf.one_hot(y, depth=y_hat.shape[-1])))
cross_entropy(y_hat, y)
def accuracy(y_hat, y):
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = tf.argmax(y_hat, axis=1)
cmp = tf.cast(y_hat, y.dtype) == y
return float(tf.reduce_sum(tf.cast(cmp, y.dtype)))
def evaluate_accuracy(net, data_iter):
metric = Accumulator(2)
for X, y in data_iter:
metric.add(accuracy(net(X), y), d2l.size(y))
return metric[0] / metric[1]
def train_epoch_ch3(net, train_iter, loss, updater):
metric = Accumulator(3)
for X, y in train_iter:
with tf.GradientTape() as tape:
y_hat = net(X)
if isinstance(loss, tf.keras.losses.Loss):
l = loss(y, y_hat)
else:
l = loss(y_hat, y)
if isinstance(updater, tf.keras.optimizers.Optimizer):
params = net.trainable_variables
grads = tape.gradient(l, params)
updater.apply_gradients(zip(grads, params))
else:
updater(X.shape[0], tape.gradient(l, updater.params))
l_sum = l * float(tf.size(y)) if isinstance(loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
return metric[0] / metric[2], metric[1] / metric[2]
class Updater():
def __init__(self, params, lr):
self.params = params
self.lr = lr
def __call__(self, batch_size, grads):
d2l.sgd(self.params, grads, self.lr, batch_size)
updater = Updater([W, b], lr=0.1)
def predict_ch3(net, test_iter, n=6):
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(tf.argmax(net(X), axis=1))
titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(tf.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])
predict_ch3(net, test_iter) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from IPython import display
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))
b = paddle.zeros(shape=(num_outputs,))
W.stop_gradient=False
b.stop_gradient=False
X = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdim=True), X.sum(1, keepdim=True)
def softmax(X):
X_exp = paddle.exp(X)
partition = X_exp.sum(1, keepdim=True)
return X_exp / partition
X = paddle.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)
def net(X):
return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)
y = paddle.to_tensor([0, 2])
y_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]
def cross_entropy(y_hat, y):
return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])
cross_entropy(y_hat, y)
def accuracy(y_hat, y):
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
if len(y_hat.shape) < len(y.shape):
cmp = y_hat.astype(y.dtype) == y.squeeze()
else:
cmp = y_hat.astype(y.dtype) == y
return float(cmp.astype(y.dtype).sum())
def evaluate_accuracy(net, data_iter):
if isinstance(net, paddle.nn.Layer):
net.eval()
metric = Accumulator(2)
with paddle.no_grad():
for X, y in data_iter:
metric.add(accuracy(net(X), y), y.numel())
return metric[0] / metric[1]
def train_epoch_ch3(net, train_iter, loss, updater):
if isinstance(net, paddle.nn.Layer):
net.train()
metric = Accumulator(3)
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, paddle.optimizer.Optimizer):
updater.clear_grad()
l.mean().backward()
updater.step()
else:
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
return metric[0] / metric[2], metric[1] / metric[2]
lr = 0.1
def updater(batch_size):
return d2l.sgd([W, b], lr, batch_size)
def predict_ch3(net, test_iter, n=6):
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
predict_ch3(net, test_iter) |
13 | import tensorflow as tf
from d2l import tensorflow as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = tf.keras.models.Sequential()
net.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
weight_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.01)
net.add(tf.keras.layers.Dense(10, kernel_initializer=weight_initializer))
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
trainer = tf.keras.optimizers.SGD(learning_rate=.1) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
def init_weights(m):
if type(m) == nn.Linear:
nn.initializer.Normal(m.weight, std=0.01)
net.apply(init_weights);
loss = nn.CrossEntropyLoss(reduction='none')
trainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters()) |
14 | %matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
x = tf.Variable(tf.range(-8.0, 8.0, 0.1), dtype=tf.float32)
y = tf.nn.relu(x)
d2l.plot(x.numpy(), y.numpy(), 'x', 'relu(x)', figsize=(5, 2.5))
with tf.GradientTape() as t:
y = tf.nn.relu(x)
d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of relu', figsize=(5, 2.5))
y = tf.nn.sigmoid(x)
d2l.plot(x.numpy(), y.numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))
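# the sigmoid's gradient peaks at 0.25 at x = 0 and vanishes for large |x|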
with tf.GradientTape() as t:
y = tf.nn.sigmoid(x)
d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of sigmoid',
figsize=(5, 2.5))
y = tf.nn.tanh(x)
d2l.plot(x.numpy(), y.numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))
with tf.GradientTape() as t:
y = tf.nn.tanh(x)
d2l.plot(x.numpy(), t.gradient(y, x).numpy(), 'x', 'grad of tanh', figsize=(5, 2.5)) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')
x.stop_gradient = False
y = paddle.nn.functional.relu(x)
d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))
y.backward(paddle.ones_like(x), retain_graph=True)
d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))
y = paddle.nn.functional.sigmoid(x)
d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))
x.clear_gradient()
y.backward(paddle.ones_like(x), retain_graph=True)
d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))
y = paddle.tanh(x)
d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))
x.clear_gradient()
y.backward(paddle.ones_like(x), retain_graph=True)
d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5)) |
15 | import tensorflow as tf
from d2l import tensorflow as d2l
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs, num_outputs, num_hiddens = 784, 10, 256
W1 = tf.Variable(tf.random.normal(shape=(num_inputs, num_hiddens), mean=0, stddev=0.01))
b1 = tf.Variable(tf.zeros(num_hiddens))
W2 = tf.Variable(tf.random.normal(shape=(num_hiddens, num_outputs), mean=0, stddev=0.01))
b2 = tf.Variable(tf.zeros(num_outputs))
params = [W1, b1, W2, b2]
def relu(X):
return tf.math.maximum(X, 0)
def net(X):
X = tf.reshape(X, (-1, num_inputs))
H = relu(tf.matmul(X, W1) + b1)
return tf.matmul(H, W2) + b2
def loss(y_hat, y):
return tf.losses.sparse_categorical_crossentropy(y, y_hat, from_logits=True)
num_epochs, lr = 10, 0.1
updater = d2l.Updater([W1, W2, b1, b2], lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs, num_outputs, num_hiddens = 784, 10, 256
W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01
W1.stop_gradient = False
b1 = paddle.zeros([num_hiddens])
b1.stop_gradient = False
W2 = paddle.randn([num_hiddens, num_outputs]) * 0.01
W2.stop_gradient = False
b2 = paddle.zeros([num_outputs])
b2.stop_gradient = False
params = [W1, b1, W2, b2]
def relu(X):
a = paddle.zeros_like(X)
return paddle.maximum(X, a)
def net(X):
X = X.reshape((-1, num_inputs))
H = relu(X@W1 + b1)
return (H@W2 + b2)
loss = nn.CrossEntropyLoss(reduction='none')
num_epochs, lr = 10, 0.1
updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater) |
16 | import tensorflow as tf
from d2l import tensorflow as d2l
net = tf.keras.models.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dense(10)])
batch_size, lr, num_epochs = 256, 0.1, 10
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
trainer = tf.keras.optimizers.SGD(learning_rate=lr)
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
net = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256),
nn.ReLU(),
nn.Linear(256, 10))
for layer in net:
if type(layer) == nn.Linear:
weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.01))
layer.weight_attr = weight_attr
batch_size, lr, num_epochs = 256, 0.1, 10
loss = nn.CrossEntropyLoss(reduction='none')
trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) |
17 | import math
import numpy as np
import tensorflow as tf
from d2l import tensorflow as d2l
true_w, features, poly_features, labels = [tf.constant(x, dtype=tf.float32) for x in [true_w, features, poly_features, labels]]
features[:2], poly_features[:2, :], labels[:2]
def evaluate_loss(net, data_iter, loss):
metric = d2l.Accumulator(2)
for X, y in data_iter:
l = loss(net(X), y)
metric.add(tf.reduce_sum(l), d2l.size(l))
return metric[0] / metric[1]
def train(train_features, test_features, train_labels, test_labels, num_epochs=400):
loss = tf.losses.MeanSquaredError()
input_shape = train_features.shape[-1]
net = tf.keras.Sequential()
net.add(tf.keras.layers.Dense(1, use_bias=False))
batch_size = min(10, train_labels.shape[0])
train_iter = d2l.load_array((train_features, train_labels), batch_size)
test_iter = d2l.load_array((test_features, test_labels), batch_size, is_train=False)
trainer = tf.keras.optimizers.SGD(learning_rate=.01)
animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])
for epoch in range(num_epochs):
d2l.train_epoch_ch3(net, train_iter, loss, trainer)
if epoch == 0 or (epoch + 1) % 20 == 0:
animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
train(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])
train(poly_features[:n_train, :], poly_features[n_train:, :], labels[:n_train], labels[n_train:], num_epochs=1500) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import numpy as np
import paddle
from paddle import nn
true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=
paddle.float32) for x in [true_w, features, poly_features, labels]]
features[:2], poly_features[:2, :], labels[:2]
def evaluate_loss(net, data_iter, loss):
metric = d2l.Accumulator(2)
for X, y in data_iter:
out = net(X)
y = y.reshape(out.shape)
l = loss(out, y)
metric.add(l.sum(), l.numel())
return metric[0] / metric[1]
def train(train_features, test_features, train_labels, test_labels,
num_epochs=400):
loss = nn.MSELoss()
input_shape = train_features.shape[-1]
net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))
batch_size = min(10, train_labels.shape[0])
train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)
test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)
trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)
animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])
for epoch in range(num_epochs):
d2l.train_epoch_ch3(net, train_iter, loss, trainer)
if epoch == 0 or (epoch + 1) % 20 == 0:
animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
train(poly_features[:n_train, :2], poly_features[n_train:, :2],
labels[:n_train], labels[n_train:])
train(poly_features[:n_train, :], poly_features[n_train:, :],
labels[:n_train], labels[n_train:], num_epochs=1500) |
18 | %matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = tf.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)
def init_params():
w = tf.Variable(tf.random.normal(mean=1, shape=(num_inputs, 1)))
b = tf.Variable(tf.zeros(shape=(1, )))
return [w, b]
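# L2 penalty: lambd * sum(w**2) / 2 is added to the loss below, shrinking large weights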
def l2_penalty(w):
return tf.reduce_sum(tf.pow(w, 2)) / 2
def train(lambd):
w, b = init_params()
net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
num_epochs, lr = 100, 0.003
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])
for epoch in range(num_epochs):
for X, y in train_iter:
with tf.GradientTape() as tape:
l = loss(net(X), y) + lambd * l2_penalty(w)
grads = tape.gradient(l, [w, b])
d2l.sgd([w, b], grads, lr, batch_size)
if (epoch + 1) % 5 == 0:
animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
def train_concise(wd):
net = tf.keras.models.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(wd)))
net.build(input_shape=(1, num_inputs))
w, b = net.trainable_variables
loss = tf.keras.losses.MeanSquaredError()
num_epochs, lr = 100, 0.003
trainer = tf.keras.optimizers.SGD(learning_rate=lr)
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])
for epoch in range(num_epochs):
for X, y in train_iter:
with tf.GradientTape() as tape:
l = loss(net(X), y) + net.losses
grads = tape.gradient(l, net.trainable_variables)
trainer.apply_gradients(zip(grads, net.trainable_variables))
if (epoch + 1) % 5 == 0:
animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)
def init_params():
w = paddle.normal(0, 1, shape=(num_inputs, 1))
w.stop_gradient = False
b = paddle.zeros(shape=[1])
b.stop_gradient = False
return [w, b]
def l2_penalty(w):
return paddle.sum(w.pow(2)) / 2
def train(lambd):
w, b = init_params()
net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
num_epochs, lr = 100, 0.003
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])
for epoch in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y) + lambd * l2_penalty(w)
l.sum().backward()
d2l.sgd([w, b], lr, batch_size)
if (epoch + 1) % 5 == 0:
animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
def train_concise(wd):
weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))
bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))
net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))
loss = nn.MSELoss()
num_epochs, lr = 100, 0.003
trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])
for epoch in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y)
l.backward()
trainer.step()
trainer.clear_grad()
if (epoch + 1) % 5 == 0:
animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) |
19 | import tensorflow as tf
from d2l import tensorflow as d2l
def dropout_layer(X, dropout):
assert 0 <= dropout <= 1
if dropout == 1:
return tf.zeros_like(X)
if dropout == 0:
return X
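# keep each activation with probability 1 - dropout and rescale by 1 / (1 - dropout) so the expected value is unchanged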
mask = tf.random.uniform(shape=tf.shape(X), minval=0, maxval=1) < 1 - dropout
return tf.cast(mask, dtype=tf.float32) * X / (1.0 - dropout)
X = tf.reshape(tf.range(16, dtype=tf.float32), (2, 8))
num_outputs, num_hiddens1, num_hiddens2 = 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
class Net(tf.keras.Model):
def __init__(self, num_outputs, num_hiddens1, num_hiddens2):
super().__init__()
self.input_layer = tf.keras.layers.Flatten()
self.hidden1 = tf.keras.layers.Dense(num_hiddens1, activation='relu')
self.hidden2 = tf.keras.layers.Dense(num_hiddens2, activation='relu')
self.output_layer = tf.keras.layers.Dense(num_outputs)
def call(self, inputs, training=None):
x = self.input_layer(inputs)
x = self.hidden1(x)
if training:
x = dropout_layer(x, dropout1)
x = self.hidden2(x)
if training:
x = dropout_layer(x, dropout2)
x = self.output_layer(x)
return x
net = Net(num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = tf.keras.optimizers.SGD(learning_rate=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(256, activation=tf.nn.relu),
tf.keras.layers.Dropout(dropout1),
tf.keras.layers.Dense(256, activation=tf.nn.relu),
tf.keras.layers.Dropout(dropout2),
tf.keras.layers.Dense(10),
])
trainer = tf.keras.optimizers.SGD(learning_rate=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) | null | null | import warnings
warnings.filterwarnings(action='ignore')
import random
import paddle
from paddle import nn
warnings.filterwarnings("ignore", category=DeprecationWarning)
from d2l import paddle as d2l
def dropout_layer(X, dropout):
assert 0 <= dropout <= 1
if dropout == 1:
return paddle.zeros_like(X)
if dropout == 0:
return X
mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')
return mask * X / (1.0 - dropout)
X = paddle.arange(16, dtype=paddle.float32).reshape((2, 8))
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
class Net(nn.Layer):
def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
is_training = True):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.training = is_training
self.lin1 = nn.Linear(num_inputs, num_hiddens1)
self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
self.lin3 = nn.Linear(num_hiddens2, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
if self.training == True:
H1 = dropout_layer(H1, dropout1)
H2 = self.relu(self.lin2(H1))
if self.training == True:
H2 = dropout_layer(H2, dropout2)
out = self.lin3(H2)
return out
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))
net = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256, weight_attr=weight_attr),
nn.ReLU(),
nn.Dropout(dropout1),
nn.Linear(256, 256, weight_attr=weight_attr),
nn.ReLU(),
nn.Dropout(dropout2),
nn.Linear(256, 10, weight_attr=weight_attr))
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) |
20 | trainer = tf.keras.optimizers.SGD(learning_rate=lr)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
%matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
x = tf.Variable(tf.range(-8.0, 8.0, 0.1))
with tf.GradientTape() as t:
y = tf.nn.sigmoid(x)
d2l.plot(x.numpy(), [y.numpy(), t.gradient(y, x).numpy()], legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))
M = tf.random.normal((4, 4))
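# repeatedly multiplying random matrices makes the entries explode (or collapse toward zero), illustrating unstable gradients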
for i in range(100):
M = tf.matmul(M, tf.random.normal((4, 4))) | null | null | trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')
x.stop_gradient = False
y = paddle.nn.functional.sigmoid(x)
y.backward(paddle.ones_like(x))
d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],
legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))
M = paddle.normal(0, 1, shape=(4,4))
for i in range(100):
M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4))) |
21 | %matplotlib inline
import numpy as np
import pandas as pd
import tensorflow as tf
from d2l import tensorflow as d2l
n_train = train_data.shape[0]
train_features = tf.constant(all_features[:n_train].values, dtype=tf.float32)
test_features = tf.constant(all_features[n_train:].values, dtype=tf.float32)
train_labels = tf.constant(train_data.SalePrice.values.reshape(-1, 1), dtype=tf.float32)
loss = tf.keras.losses.MeanSquaredError()
def get_net():
net = tf.keras.models.Sequential()
net.add(tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(weight_decay)))
return net
def log_rmse(y_true, y_pred):
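# clip predictions to at least 1 so the logarithm is well-defined, then measure RMSE on log prices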
clipped_preds = tf.clip_by_value(y_pred, 1, float('inf'))
return tf.sqrt(tf.reduce_mean(loss(tf.math.log(y_true), tf.math.log(clipped_preds))))
def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):
train_ls, test_ls = [], []
train_iter = d2l.load_array((train_features, train_labels), batch_size)
optimizer = tf.keras.optimizers.Adam(learning_rate)
net.compile(loss=loss, optimizer=optimizer)
for epoch in range(num_epochs):
for X, y in train_iter:
with tf.GradientTape() as tape:
y_hat = net(X)
l = loss(y, y_hat)
params = net.trainable_variables
grads = tape.gradient(l, params)
optimizer.apply_gradients(zip(grads, params))
train_ls.append(log_rmse(train_labels, net(train_features)))
if test_labels is not None:
test_ls.append(log_rmse(test_labels, net(test_features)))
return train_ls, test_ls
def get_k_fold_data(k, i, X, y):
assert k > 1
fold_size = X.shape[0] // k
X_train, y_train = None, None
for j in range(k):
idx = slice(j * fold_size, (j + 1) * fold_size)
X_part, y_part = X[idx, :], y[idx]
if j == i:
X_valid, y_valid = X_part, y_part
elif X_train is None:
X_train, y_train = X_part, y_part
else:
X_train = tf.concat([X_train, X_part], 0)
y_train = tf.concat([y_train, y_part], 0)
return X_train, y_train, X_valid, y_valid
def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):
net = get_net()
train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)
d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
preds = net(test_features).numpy()
test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False) | null | null | %matplotlib inline
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
warnings.filterwarnings("ignore", category=DeprecationWarning)
from d2l import paddle as d2l
n_train = train_data.shape[0]
train_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)
test_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)
train_labels = paddle.to_tensor(
train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)
loss = nn.MSELoss()
in_features = train_features.shape[1]
def get_net():
net = nn.Sequential(nn.Linear(in_features,1))
return net
def log_rmse(net, features, labels):
clipped_preds = paddle.clip(net(features), 1, float('inf'))
rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))
return rmse.item()
def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):
train_ls, test_ls = [], []
train_iter = d2l.load_array((train_features, train_labels), batch_size)
optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)
for epoch in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y)
l.backward()
optimizer.step()
optimizer.clear_grad()
train_ls.append(log_rmse(net, train_features, train_labels))
if test_labels is not None:
test_ls.append(log_rmse(net, test_features, test_labels))
return train_ls, test_ls
def get_k_fold_data(k, i, X, y):
assert k > 1
fold_size = X.shape[0] // k
X_train, y_train = None, None
for j in range(k):
idx = slice(j * fold_size, (j + 1) * fold_size)
X_part, y_part = X[idx, :], y[idx]
if j == i:
X_valid, y_valid = X_part, y_part
elif X_train is None:
X_train, y_train = X_part, y_part
else:
X_train = paddle.concat([X_train, X_part], 0)
y_train = paddle.concat([y_train, y_part], 0)
return X_train, y_train, X_valid, y_valid
def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):
net = get_net()
train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)
d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
preds = net(test_features).detach().numpy()
test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False) |
22 | import tensorflow as tf
net = tf.keras.models.Sequential([
tf.keras.layers.Dense(256, activation=tf.nn.relu),
tf.keras.layers.Dense(10)])
X = tf.random.uniform((2, 20))
net(X)
class MLP(tf.keras.Model):
def __init__(self):
super().__init__()
self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)
self.out = tf.keras.layers.Dense(units=10)
def call(self, X):
return self.out(self.hidden((X)))
class MySequential(tf.keras.Model):
def __init__(self, *args):
super().__init__()
self.modules = []
for block in args:
self.modules.append(block)
def call(self, X):
for module in self.modules:
X = module(X)
return X
net = MySequential(
tf.keras.layers.Dense(units=256, activation=tf.nn.relu),
tf.keras.layers.Dense(10))
net(X)
class FixedHiddenMLP(tf.keras.Model):
def __init__(self):
super().__init__()
self.flatten = tf.keras.layers.Flatten()
self.rand_weight = tf.constant(tf.random.uniform((20, 20)))
self.dense = tf.keras.layers.Dense(20, activation=tf.nn.relu)
def call(self, inputs):
X = self.flatten(inputs)
X = tf.nn.relu(tf.matmul(X, self.rand_weight) + 1)
X = self.dense(X)
while tf.reduce_sum(tf.math.abs(X)) > 1:
X /= 2
return tf.reduce_sum(X)
class NestMLP(tf.keras.Model):
def __init__(self):
super().__init__()
self.net = tf.keras.Sequential()
self.net.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
self.net.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))
self.dense = tf.keras.layers.Dense(16, activation=tf.nn.relu)
def call(self, inputs):
return self.dense(self.net(inputs))
chimera = tf.keras.Sequential()
chimera.add(NestMLP())
chimera.add(tf.keras.layers.Dense(20))
chimera.add(FixedHiddenMLP())
chimera(X) | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
from paddle.nn import functional as F
net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = paddle.rand([2, 20])
net(X)
class MLP(nn.Layer):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(20, 256)
self.out = nn.Linear(256, 10)
def forward(self, X):
return self.out(F.relu(self.hidden(X)))
class MySequential(nn.Layer):
def __init__(self, *layers):
super(MySequential, self).__init__()
if len(layers) > 0 and isinstance(layers[0], tuple):
for name, layer in layers:
self.add_sublayer(name, layer)
else:
for idx, layer in enumerate(layers):
self.add_sublayer(str(idx), layer)
def forward(self, X):
for layer in self._sub_layers.values():
X = layer(X)
return X
net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
class FixedHiddenMLP(nn.Layer):
def __init__(self):
super().__init__()
self.rand_weight = paddle.rand([20, 20])
self.linear = nn.Linear(20, 20)
def forward(self, X):
X = self.linear(X)
X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)
X = self.linear(X)
while X.abs().sum() > 1:
X /= 2
return X.sum()
class NestMLP(nn.Layer):
def __init__(self):
super().__init__()
self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
nn.Linear(64, 32), nn.ReLU())
self.linear = nn.Linear(32, 16)
def forward(self, X):
return self.linear(self.net(X))
chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X) |
23 | import tensorflow as tf
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu),
tf.keras.layers.Dense(1),
])
X = tf.random.uniform((2, 4))
net(X)
net.get_weights()[1]
def block1(name):
return tf.keras.Sequential([tf.keras.layers.Flatten(), tf.keras.layers.Dense(4, activation=tf.nn.relu)], name=name)
def block2():
net = tf.keras.Sequential()
for i in range(4):
net.add(block1(name=f'block-{i}'))
return net
rgnet = tf.keras.Sequential()
rgnet.add(block2())
rgnet.add(tf.keras.layers.Dense(1))
rgnet(X)
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.01), bias_initializer=tf.zeros_initializer()),
tf.keras.layers.Dense(1)])
net(X)
net.weights[0], net.weights[1]
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.Constant(1), bias_initializer=tf.zeros_initializer()),
tf.keras.layers.Dense(1),
])
net(X)
net.weights[0], net.weights[1]
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.GlorotUniform()),
tf.keras.layers.Dense(1, kernel_initializer=tf.keras.initializers.Constant(1)),
])
net(X)
class MyInit(tf.keras.initializers.Initializer):
def __call__(self, shape, dtype=None):
data=tf.random.uniform(shape, -10, 10, dtype=dtype)
factor=(tf.abs(data) >= 5)
factor=tf.cast(factor, tf.float32)
return data * factor
net = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4, activation=tf.nn.relu, kernel_initializer=MyInit()),
tf.keras.layers.Dense(1)])
net(X)
net.layers[1].weights[0][:].assign(net.layers[1].weights[0] + 1)
net.layers[1].weights[0][0, 0].assign(42)
net.layers[1].weights[0]
layer = CenteredLayer()
layer(tf.constant([1, 2, 3, 4, 5]))
net = tf.keras.Sequential([tf.keras.layers.Dense(128), CenteredLayer()]) | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = paddle.rand([2, 4])
net(X)
net.state_dict()['2.bias']
def block1():
return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())
def block2():
net = nn.Sequential()
for i in range(4):
net.add_sublayer(f'block {i}', block1())
return net
rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)
def init_normal(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Normal(mean=0.0, std=0.01)
paddle.zeros(m.bias)
net.apply(init_normal)
net[0].weight[0],net[0].state_dict()['bias']
def init_constant(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Constant(value = 1)
paddle.zeros(m.bias)
net.apply(init_constant)
net[0].weight[0],net[0].state_dict()['bias']
def xavier(m):
if type(m) == nn.Linear:
paddle.nn.initializer.XavierUniform(m.weight)
def init_42(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Constant(42)
net[0].apply(xavier)
net[2].apply(init_42)
def my_init(m):
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
paddle.nn.initializer.XavierUniform(m.weight, -10, 10)
h = paddle.abs(m.weight) >= 5
h = paddle.to_tensor(h)
m = paddle.to_tensor(m.weight)
m *= h
net.apply(my_init)
net[0].weight[:2]
net[0].weight.set_value(net[0].weight.numpy() + 1)
val = net[0].weight.numpy()
val[0, 0] = 42
net[0].weight.set_value(val)
net[0].weight[0]
layer = CenteredLayer()
layer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer()) |
24 | import tensorflow as tf
class CenteredLayer(tf.keras.Model):
def __init__(self):
super().__init__()
def call(self, inputs):
return inputs - tf.reduce_mean(inputs)
Y = net(tf.random.uniform((4, 8)))
tf.reduce_mean(Y)
class MyDense(tf.keras.Model):
def __init__(self, units):
super().__init__()
self.units = units
def build(self, X_shape):
self.weight = self.add_weight(name='weight',
shape=[X_shape[-1], self.units],
initializer=tf.random_normal_initializer())
self.bias = self.add_weight(
name='bias', shape=[self.units],
initializer=tf.zeros_initializer())
def call(self, X):
linear = tf.matmul(X, self.weight) + self.bias
return tf.nn.relu(linear)
dense = MyDense(3)
dense(tf.random.uniform((2, 5)))
dense.get_weights()
dense(tf.random.uniform((2, 5)))
net = tf.keras.models.Sequential([MyDense(8), MyDense(1)])
net(tf.random.uniform((2, 64))) | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
import paddle.nn.functional as F
from paddle import nn
class CenteredLayer(nn.Layer):
def __init__(self):
super().__init__()
def forward(self, X):
return X - X.mean()
Y = net(paddle.rand([4, 8]))
Y.mean()
class MyLinear(nn.Layer):
def __init__(self, in_units, units):
super().__init__()
self.weight = paddle.create_parameter(shape=(in_units, units), dtype='float32')
self.bias = paddle.create_parameter(shape=(units,), dtype='float32')
def forward(self, X):
linear = paddle.matmul(X, self.weight) + self.bias
return F.relu(linear)
linear = MyLinear(5, 3)
linear.weight
linear(paddle.randn([2, 5]))
net = nn.Sequential(MyLinear(64, 8), MyLinear(8, 1))
net(paddle.rand([2, 64])) |
25 | import numpy as np
import tensorflow as tf
x = tf.range(4)
np.save('x-file.npy', x)
x2 = np.load('x-file.npy', allow_pickle=True)
y = tf.zeros(4)
np.save('xy-files.npy', [x, y])
x2, y2 = np.load('xy-files.npy', allow_pickle=True)
mydict = {'x': x, 'y': y}
np.save('mydict.npy', mydict)
mydict2 = np.load('mydict.npy', allow_pickle=True)
class MLP(tf.keras.Model):
def __init__(self):
super().__init__()
self.flatten = tf.keras.layers.Flatten()
self.hidden = tf.keras.layers.Dense(units=256, activation=tf.nn.relu)
self.out = tf.keras.layers.Dense(units=10)
def call(self, inputs):
x = self.flatten(inputs)
x = self.hidden(x)
return self.out(x)
net = MLP()
X = tf.random.uniform((2, 20))
Y = net(X)
net.save_weights('mlp.params')
clone = MLP()
clone.load_weights('mlp.params') | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
from paddle.nn import functional as F
x = paddle.arange(4)
paddle.save(x, 'x-file')
x2 = paddle.load('x-file')
y = paddle.zeros([4])
paddle.save([x,y], 'x-file')
x2, y2 = paddle.load('x-file')
mydict = {'x': x, 'y': y}
paddle.save(mydict, 'mydict')
mydict2 = paddle.load('mydict')
class MLP(nn.Layer):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(20, 256)
self.output = nn.Linear(256, 10)
def forward(self, x):
return self.output(F.relu(self.hidden(x)))
net = MLP()
X = paddle.randn(shape=[2, 20])
Y = net(X)
paddle.save(net.state_dict(), 'mlp.pdparams')
clone = MLP()
clone.set_state_dict(paddle.load('mlp.pdparams'))
clone.eval() |
26 | import tensorflow as tf
tf.device('/CPU:0'), tf.device('/GPU:0'), tf.device('/GPU:1')
len(tf.config.experimental.list_physical_devices('GPU'))
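# try_gpu / try_all_gpus fall back to the CPU when the requested GPU index is not available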
def try_gpu(i=0):
if len(tf.config.experimental.list_physical_devices('GPU')) >= i + 1:
return tf.device(f'/GPU:{i}')
return tf.device('/CPU:0')
def try_all_gpus():
num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))
devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]
return devices if devices else [tf.device('/CPU:0')]
try_gpu(), try_gpu(10), try_all_gpus()
x = tf.constant([1, 2, 3])
x.device
with try_gpu():
X = tf.ones((2, 3))
with try_gpu(1):
Y = tf.random.uniform((2, 3))
with try_gpu(1):
Z = X
with try_gpu(1):
Z2 = Z
Z2 is Z
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
net = tf.keras.models.Sequential([
tf.keras.layers.Dense(1)])
net.layers[0].weights[0].device, net.layers[0].weights[1].device | null | null | import paddle
from paddle import nn
paddle.device.set_device("cpu"), paddle.CUDAPlace(0), paddle.CUDAPlace(1)
paddle.device.cuda.device_count()
def try_gpu(i=0):
if paddle.device.cuda.device_count() >= i + 1:
return paddle.CUDAPlace(i)
return paddle.CPUPlace()
def try_all_gpus():
devices = [paddle.CUDAPlace(i) for i in range(paddle.device.cuda.device_count())]
    return devices if devices else [paddle.CPUPlace()]
try_gpu(),try_gpu(10),try_all_gpus()
x = paddle.to_tensor([1, 2, 3])
x.place
X = paddle.to_tensor(paddle.ones(shape=[2, 3]), place=try_gpu())
Y = paddle.to_tensor(paddle.rand([2, 3]), place=try_gpu(1))
Z = X.cuda(1)
Z.cuda(1) is Z
net = nn.Sequential(nn.Linear(3, 1))
net=net.to(try_gpu())
net[0].weight.place |
27 | import tensorflow as tf
from d2l import tensorflow as d2l
def corr2d(X, K):
h, w = K.shape
Y = tf.Variable(tf.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1)))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j].assign(tf.reduce_sum(
X[i: i + h, j: j + w] * K))
return Y
X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = tf.constant([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)
class Conv2D(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def build(self, kernel_size):
initializer = tf.random_normal_initializer()
self.weight = self.add_weight(name='w', shape=kernel_size, initializer=initializer)
self.bias = self.add_weight(name='b', shape=(1, ), initializer=initializer)
def call(self, inputs):
return corr2d(inputs, self.weight) + self.bias
X = tf.Variable(tf.ones((6, 8)))
X[:, 2:6].assign(tf.zeros(X[:, 2:6].shape))
K = tf.constant([[1.0, -1.0]])
Y = corr2d(X, K)
corr2d(tf.transpose(X), K)
conv2d = tf.keras.layers.Conv2D(1, (1, 2), use_bias=False)
X = tf.reshape(X, (1, 6, 8, 1))
Y = tf.reshape(Y, (1, 6, 7, 1))
lr = 3e-2
Y_hat = conv2d(X)
for i in range(10):
with tf.GradientTape(watch_accessed_variables=False) as g:
g.watch(conv2d.weights[0])
Y_hat = conv2d(X)
l = (abs(Y_hat - Y)) ** 2
update = tf.multiply(lr, g.gradient(l, conv2d.weights[0]))
weights = conv2d.get_weights()
weights[0] = conv2d.weights[0] - update
conv2d.set_weights(weights)
tf.reshape(conv2d.get_weights()[0], (1, 2)) | null | null | import warningsfrom d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
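# Compute the 2D cross-correlation: slide kernel K over X and sum the elementwise products at each position.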
def corr2d(X, K):
h, w = K.shape
Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
return Y
X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
K = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
corr2d(X, K)
class Conv2D(nn.Layer):
def __init__(self, kernel_size):
super().__init__()
        self.weight = paddle.create_parameter(shape=kernel_size, dtype='float32')
        self.bias = paddle.create_parameter(shape=[1], dtype='float32', is_bias=True)
def forward(self, x):
return corr2d(x, self.weight) + self.bias
X = paddle.ones((6, 8))
X[:, 2:6] = 0
K = paddle.to_tensor([[1.0, -1.0]])
Y = corr2d(X, K)
corr2d(X.t(), K)
conv2d = nn.Conv2D(1, 1, kernel_size=(1, 2))
X = X.reshape((1, 1, 6, 8))
Y = Y.reshape((1, 1, 6, 7))
lr = 3e-2
for i in range(10):
Y_hat = conv2d(X)
l = (Y_hat - Y) ** 2
conv2d.clear_gradients()
l.sum().backward()
with paddle.no_grad():
conv2d.weight[:] -= lr * conv2d.weight.grad
conv2d.weight.reshape((1, 2)) |
28 | import tensorflow as tf
def comp_conv2d(conv2d, X):
X = tf.reshape(X, (1, ) + X.shape + (1, ))
Y = conv2d(X)
return tf.reshape(Y, Y.shape[1:3])
conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same')
X = tf.random.uniform(shape=(8, 8))
comp_conv2d(conv2d, X).shape
conv2d = tf.keras.layers.Conv2D(1, kernel_size=(5, 3), padding='same')
comp_conv2d(conv2d, X).shape
conv2d = tf.keras.layers.Conv2D(1, kernel_size=3, padding='same', strides=2)
comp_conv2d(conv2d, X).shape
conv2d = tf.keras.layers.Conv2D(1, kernel_size=(3,5), padding='valid', strides=(3, 4))
comp_conv2d(conv2d, X).shape | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
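# Helper: add a batch and a channel dimension before the convolution and strip them from the output.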
def comp_conv2d(conv2d, X):
X = paddle.reshape(X, [1, 1] + X.shape)
Y = conv2d(X)
return Y.reshape(Y.shape[2:])
conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=3, padding=1)
X = paddle.rand((8, 8))
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2D(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1))
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2D(1, 1, kernel_size=3, padding=1, stride=2)
comp_conv2d(conv2d, X).shape
conv2d = nn.Conv2D(1, 1, kernel_size=(3, 5), padding=(0, 1), stride=(3, 4))
comp_conv2d(conv2d, X).shape |
29 | import tensorflow as tf
from d2l import tensorflow as d2l
def corr2d_multi_in(X, K):
return tf.reduce_sum([d2l.corr2d(x, k) for x, k in zip(X, K)], axis=0)
X = tf.constant([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = tf.constant([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
corr2d_multi_in(X, K)
def corr2d_multi_in_out(X, K):
return tf.stack([corr2d_multi_in(X, k) for k in K], 0)
K = tf.stack((K, K + 1, K + 2), 0)
K.shape
def corr2d_multi_in_out_1x1(X, K):
c_i, h, w = X.shape
c_o = K.shape[0]
X = tf.reshape(X, (c_i, h * w))
K = tf.reshape(K, (c_o, c_i))
Y = tf.matmul(K, X)
return tf.reshape(Y, (c_o, h, w))
X = tf.random.normal((3, 3, 3), 0, 1)
K = tf.random.normal((2, 3, 1, 1), 0, 1)
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(tf.reduce_sum(tf.abs(Y1 - Y2))) < 1e-6 | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
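# Multi-input-channel cross-correlation: correlate each input channel with its own kernel and sum the results.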
def corr2d_multi_in(X, K):
return sum(d2l.corr2d(x, k) for x, k in zip(X, K))
X = paddle.to_tensor([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]], [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
K = paddle.to_tensor([[[0.0, 1.0], [2.0, 3.0]], [[1.0, 2.0], [3.0, 4.0]]])
corr2d_multi_in(X, K)
def corr2d_multi_in_out(X, K):
return paddle.stack([corr2d_multi_in(X, k) for k in K], 0)
K = paddle.stack((K, K + 1, K + 2), 0)
K.shape
def corr2d_multi_in_out_1x1(X, K):
c_i, h, w = X.shape
c_o = K.shape[0]
X = X.reshape((c_i, h * w))
K = K.reshape((c_o, c_i))
Y = paddle.matmul(K, X)
return Y.reshape((c_o, h, w))
X = paddle.normal(0, 1, (3, 3, 3))
K = paddle.normal(0, 1, (2, 3, 1, 1))
Y1 = corr2d_multi_in_out_1x1(X, K)
Y2 = corr2d_multi_in_out(X, K)
assert float(paddle.abs(Y1 - Y2).sum()) < 1e-6 |
30 | import tensorflow as tf
def pool2d(X, pool_size, mode='max'):
p_h, p_w = pool_size
Y = tf.Variable(tf.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w +1)))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
if mode == 'max':
Y[i, j].assign(tf.reduce_max(X[i: i + p_h, j: j + p_w]))
elif mode =='avg':
Y[i, j].assign(tf.reduce_mean(X[i: i + p_h, j: j + p_w]))
return Y
X = tf.constant([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))
X = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))
pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3])
pool2d(X)
paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])
X_padded = tf.pad(X, paddings, "CONSTANT")
pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',
strides=2)
pool2d(X_padded)
paddings = tf.constant([[0, 0], [0, 0], [1, 1], [0, 0]])
X_padded = tf.pad(X, paddings, "CONSTANT")
pool2d = tf.keras.layers.MaxPool2D(pool_size=[2, 3], padding='valid',
strides=(2, 3))
pool2d(X_padded)
X = tf.concat([X, X + 1], 3)
paddings = tf.constant([[0, 0], [1,0], [1,0], [0,0]])
X_padded = tf.pad(X, paddings, "CONSTANT")
pool2d = tf.keras.layers.MaxPool2D(pool_size=[3, 3], padding='valid',
strides=2)
pool2d(X_padded) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
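# 2D pooling from scratch: take the max or the average over every pooling window.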
def pool2d(X, pool_size, mode='max'):
p_h, p_w = pool_size
Y = paddle.zeros((X.shape[0] - p_h + 1, X.shape[1] - p_w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
if mode == 'max':
Y[i, j] = X[i: i + p_h, j: j + p_w].max()
elif mode == 'avg':
Y[i, j] = X[i: i + p_h, j: j + p_w].mean()
return Y
X = paddle.to_tensor([[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]])
pool2d(X, (2, 2))
X = paddle.arange(16, dtype="float32").reshape((1, 1, 4, 4))
pool2d = nn.MaxPool2D(3, stride=3)
pool2d(X)
pool2d = nn.MaxPool2D(3, padding=1, stride=2)
pool2d(X)
pool2d = nn.MaxPool2D((2, 3), padding=(0, 1), stride=(2, 3))
pool2d(X)
X = paddle.concat((X, X + 1), 1)
pool2d = paddle.nn.MaxPool2D(3, padding=1, stride=2)
pool2d(X) |
31 | import tensorflow as tf
from d2l import tensorflow as d2l
def net():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid', padding='same'),
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),
tf.keras.layers.Conv2D(filters=16, kernel_size=5, activation='sigmoid'),
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(120, activation='sigmoid'),
tf.keras.layers.Dense(84, activation='sigmoid'),
tf.keras.layers.Dense(10)])
X = tf.random.uniform((1, 28, 28, 1))
for layer in net().layers:
X = layer(X)
print(layer.__class__.__name__, 'output shape: ', X.shape)
class TrainCallback(tf.keras.callbacks.Callback):
def __init__(self, net, train_iter, test_iter, num_epochs, device_name):
self.timer = d2l.Timer()
self.animator = d2l.Animator( xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])
self.net = net
self.train_iter = train_iter
self.test_iter = test_iter
self.num_epochs = num_epochs
self.device_name = device_name
def on_epoch_begin(self, epoch, logs=None):
self.timer.start()
def on_epoch_end(self, epoch, logs):
self.timer.stop()
test_acc = self.net.evaluate(self.test_iter, verbose=0, return_dict=True)['accuracy']
metrics = (logs['loss'], logs['accuracy'], test_acc)
self.animator.add(epoch + 1, metrics)
if epoch == self.num_epochs - 1:
batch_size = next(iter(self.train_iter))[0].shape[0]
num_examples = batch_size * tf.data.experimental.cardinality(self.train_iter).numpy()
def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device):
device_name = device._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
with strategy.scope():
optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
net = net_fn()
net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
callback = TrainCallback(net, train_iter, test_iter, num_epochs, device_name)
net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])
return net | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn, optimizer
net = nn.Sequential(
nn.Conv2D(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
nn.AvgPool2D(kernel_size=2, stride=2),
nn.Conv2D(6, 16, kernel_size=5), nn.Sigmoid(),
nn.AvgPool2D(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
nn.Linear(120, 84), nn.Sigmoid(),
nn.Linear(84, 10))
X = paddle.rand((1, 1, 28, 28), 'float32')
for layer in net:
X = layer(X)
print(layer.__class__.__name__, 'output shape: ', X.shape)
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2D:
nn.initializer.XavierUniform(m.weight)
net.apply(init_weights)
net.to(device)
optimizer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], legend=['train loss', 'train acc', 'test acc'])
timer, num_batches = d2l.Timer(), len(train_iter)
for epoch in range(num_epochs):
metric = d2l.Accumulator(3)
net.train()
for i, (X, y) in enumerate(train_iter):
timer.start()
optimizer.clear_grad()
X, y = paddle.to_tensor(X, place=device), paddle.to_tensor(y, place=device)
y_hat = net(X)
l = loss(y_hat, y)
l.backward()
optimizer.step()
with paddle.no_grad():
metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
timer.stop()
train_l = metric[0] / metric[2]
train_acc = metric[1] / metric[2]
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches, (train_l, train_acc, None))
        test_acc = d2l.evaluate_accuracy_gpu(net, test_iter)
animator.add(epoch + 1, (None, None, test_acc)) |
32 | import tensorflow as tf
from d2l import tensorflow as d2l
def net():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters=96, kernel_size=11, strides=4, activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
tf.keras.layers.Conv2D(filters=256, kernel_size=5, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),
tf.keras.layers.Conv2D(filters=384, kernel_size=3, padding='same', activation='relu'),
tf.keras.layers.Conv2D(filters=256, kernel_size=3, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4096, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(4096, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10)])
X = tf.random.uniform((1, 224, 224, 1))
for layer in net().layers:
X = layer(X)
print(layer.__class__.__name__, 'output shape: ', X.shape) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
net = nn.Sequential(
nn.Conv2D(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2),
nn.Conv2D(96, 256, kernel_size=5, padding=2), nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2),
nn.Conv2D(256, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2D(384, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2D(384, 256, kernel_size=3, padding=1), nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2), nn.Flatten(),
nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),
nn.Linear(4096, 10))
X = paddle.randn(shape=(1, 1, 224, 224))
for layer in net:
X=layer(X)
print(layer.__class__.__name__,'output shape: ',X.shape) |
33 | import tensorflow as tf
from d2l import tensorflow as d2l
def vgg_block(num_convs, num_channels):
blk = tf.keras.models.Sequential()
for _ in range(num_convs):
blk.add(tf.keras.layers.Conv2D(num_channels,kernel_size=3, padding='same',activation='relu'))
blk.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
return blk
def vgg(conv_arch):
net = tf.keras.models.Sequential()
for (num_convs, num_channels) in conv_arch:
net.add(vgg_block(num_convs, num_channels))
net.add(tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(4096, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(4096, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10)]))
return net
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
net = vgg(conv_arch)
X = tf.random.uniform((1, 224, 224, 1))
for blk in net.layers:
X = blk(X)
print(blk.__class__.__name__,'output shape: ', X.shape)
ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]
net = lambda: vgg(small_conv_arch) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
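# A VGG block: num_convs 3x3 convolutions, each followed by ReLU, then 2x2 max pooling.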
def vgg_block(num_convs, in_channels, out_channels):
layers = []
for _ in range(num_convs):
layers.append(nn.Conv2D(in_channels, out_channels, kernel_size=3, padding=1))
layers.append(nn.ReLU())
in_channels = out_channels
layers.append(nn.MaxPool2D(kernel_size=2, stride=2))
return nn.Sequential(*layers)
def vgg(conv_arch):
conv_blks = []
in_channels = 1
for (num_convs, out_channels) in conv_arch:
conv_blks.append(vgg_block(num_convs, in_channels, out_channels))
in_channels = out_channels
return nn.Sequential(*conv_blks, nn.Flatten(),
nn.Linear(out_channels * 7 * 7, 4096), nn.ReLU(),
nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(),
nn.Dropout(0.5), nn.Linear(4096, 10))
conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))
net = vgg(conv_arch)
X = paddle.randn(shape=(1, 1, 224, 224))
for blk in net:
X = blk(X)
print(blk.__class__.__name__,'output shape: ',X.shape)
ratio = 4
small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]
net = vgg(small_conv_arch) |
34 | import tensorflow as tf
from d2l import tensorflow as d2l
def nin_block(num_channels, kernel_size, strides, padding):
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(num_channels, kernel_size, strides=strides, padding=padding, activation='relu'),
tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu'),
tf.keras.layers.Conv2D(num_channels, kernel_size=1, activation='relu')])
def net():
return tf.keras.models.Sequential([
nin_block(96, kernel_size=11, strides=4, padding='valid'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
nin_block(256, kernel_size=5, strides=1, padding='same'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
nin_block(384, kernel_size=3, strides=1, padding='same'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2),
tf.keras.layers.Dropout(0.5),
nin_block(10, kernel_size=3, strides=1, padding='same'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Reshape((1, 1, 10)),
tf.keras.layers.Flatten(),
])
X = tf.random.uniform((1, 224, 224, 1))
for layer in net().layers:
X = layer(X)
print(layer.__class__.__name__,'output shape: ', X.shape) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
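# NiN block: one standard convolution followed by two 1x1 convolutions acting as per-pixel fully connected layers.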
def nin_block(in_channels, out_channels, kernel_size, strides, padding):
return nn.Sequential(
nn.Conv2D(in_channels, out_channels, kernel_size, strides, padding),
nn.ReLU(),
nn.Conv2D(out_channels, out_channels, kernel_size=1),
nn.ReLU(),
nn.Conv2D(out_channels, out_channels, kernel_size=1),
nn.ReLU())
net = nn.Sequential(
nin_block(1, 96, kernel_size=11, strides=4, padding=0),
nn.MaxPool2D(3, stride=2),
nin_block(96, 256, kernel_size=5, strides=1, padding=2),
nn.MaxPool2D(3, stride=2),
nin_block(256, 384, kernel_size=3, strides=1, padding=1),
nn.MaxPool2D(3, stride=2), nn.Dropout(0.5),
nin_block(384, 10, kernel_size=3, strides=1, padding=1),
nn.AdaptiveAvgPool2D((1, 1)),
nn.Flatten())
X = paddle.rand(shape=(1, 1, 224, 224))
for layer in net:
X = layer(X)
print(layer.__class__.__name__,'output shape: ', X.shape) |
35 | import tensorflow as tf
from d2l import tensorflow as d2l
class Inception(tf.keras.Model):
def __init__(self, c1, c2, c3, c4):
super().__init__()
self.p1_1 = tf.keras.layers.Conv2D(c1, 1, activation='relu')
self.p2_1 = tf.keras.layers.Conv2D(c2[0], 1, activation='relu')
self.p2_2 = tf.keras.layers.Conv2D(c2[1], 3, padding='same', activation='relu')
self.p3_1 = tf.keras.layers.Conv2D(c3[0], 1, activation='relu')
self.p3_2 = tf.keras.layers.Conv2D(c3[1], 5, padding='same', activation='relu')
self.p4_1 = tf.keras.layers.MaxPool2D(3, 1, padding='same')
self.p4_2 = tf.keras.layers.Conv2D(c4, 1, activation='relu')
def call(self, x):
p1 = self.p1_1(x)
p2 = self.p2_2(self.p2_1(x))
p3 = self.p3_2(self.p3_1(x))
p4 = self.p4_2(self.p4_1(x))
return tf.keras.layers.Concatenate()([p1, p2, p3, p4])
def b1():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, 7, strides=2, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
def b2():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(64, 1, activation='relu'),
tf.keras.layers.Conv2D(192, 3, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
def b3():
return tf.keras.models.Sequential([
Inception(64, (96, 128), (16, 32), 32),
Inception(128, (128, 192), (32, 96), 64),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
def b4():
return tf.keras.Sequential([
Inception(192, (96, 208), (16, 48), 64),
Inception(160, (112, 224), (24, 64), 64),
Inception(128, (128, 256), (24, 64), 64),
Inception(112, (144, 288), (32, 64), 64),
Inception(256, (160, 320), (32, 128), 128),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
def b5():
return tf.keras.Sequential([
Inception(256, (160, 320), (32, 128), 128),
Inception(384, (192, 384), (48, 128), 128),
tf.keras.layers.GlobalAvgPool2D(),
tf.keras.layers.Flatten()
])
def net():
return tf.keras.Sequential([b1(), b2(), b3(), b4(), b5(),
tf.keras.layers.Dense(10)])
X = tf.random.uniform(shape=(1, 96, 96, 1))
for layer in net().layers:
X = layer(X)
print(layer.__class__.__name__,'output shape: ', X.shape) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
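# Inception block: four parallel paths (1x1 conv; 1x1 -> 3x3 conv; 1x1 -> 5x5 conv; 3x3 max pool -> 1x1 conv) concatenated along the channel dimension.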
class Inception(nn.Layer):
def __init__(self, in_channels, c1, c2, c3, c4, **kwargs):
super(Inception, self).__init__(**kwargs)
self.p1_1 = nn.Conv2D(in_channels, c1, kernel_size=1)
self.p2_1 = nn.Conv2D(in_channels, c2[0], kernel_size=1)
self.p2_2 = nn.Conv2D(c2[0], c2[1], kernel_size=3, padding=1)
self.p3_1 = nn.Conv2D(in_channels, c3[0], kernel_size=1)
self.p3_2 = nn.Conv2D(c3[0], c3[1], kernel_size=5, padding=2)
self.p4_1 = nn.MaxPool2D(kernel_size=3, stride=1, padding=1)
self.p4_2 = nn.Conv2D(in_channels, c4, kernel_size=1)
def forward(self, x):
p1 = F.relu(self.p1_1(x))
p2 = F.relu(self.p2_2(F.relu(self.p2_1(x))))
p3 = F.relu(self.p3_2(F.relu(self.p3_1(x))))
p4 = F.relu(self.p4_2(self.p4_1(x)))
return paddle.concat(x=[p1, p2, p3, p4], axis=1)
b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),
nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2,padding=1))
b2 = nn.Sequential(nn.Conv2D(64, 64, kernel_size=1),
nn.ReLU(),
nn.Conv2D(64, 192, kernel_size=3, padding=1),
nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b3 = nn.Sequential(Inception(192, 64, (96, 128), (16, 32), 32),
Inception(256, 128, (128, 192), (32, 96), 64),
nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b4 = nn.Sequential(Inception(480, 192, (96, 208), (16, 48), 64),
Inception(512, 160, (112, 224), (24, 64), 64),
Inception(512, 128, (128, 256), (24, 64), 64),
Inception(512, 112, (144, 288), (32, 64), 64),
Inception(528, 256, (160, 320), (32, 128), 128),
nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
b5 = nn.Sequential(Inception(832, 256, (160, 320), (32, 128), 128),
Inception(832, 384, (192, 384), (48, 128), 128),
nn.AdaptiveAvgPool2D((1, 1)),
nn.Flatten())
net = nn.Sequential(b1, b2, b3, b4, b5, nn.Linear(1024, 10))
X = paddle.rand(shape=(1, 1, 96, 96))
for layer in net:
X = layer(X)
print(layer.__class__.__name__,'output shape: ', X.shape) |
36 | import tensorflow as tf
from d2l import tensorflow as d2l
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps):
inv = tf.cast(tf.math.rsqrt(moving_var + eps), X.dtype)
inv *= gamma
Y = X * inv + (beta - moving_mean * inv)
return Y
class BatchNorm(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(BatchNorm, self).__init__(**kwargs)
def build(self, input_shape):
weight_shape = [input_shape[-1], ]
self.gamma = self.add_weight(name='gamma', shape=weight_shape, initializer=tf.initializers.ones, trainable=True)
self.beta = self.add_weight(name='beta', shape=weight_shape, initializer=tf.initializers.zeros, trainable=True)
self.moving_mean = self.add_weight(name='moving_mean', shape=weight_shape, initializer=tf.initializers.zeros, trainable=False)
self.moving_variance = self.add_weight(name='moving_variance', shape=weight_shape, initializer=tf.initializers.ones, trainable=False)
super(BatchNorm, self).build(input_shape)
def assign_moving_average(self, variable, value):
momentum = 0.9
delta = variable * momentum + value * (1 - momentum)
return variable.assign(delta)
@tf.function
def call(self, inputs, training):
if training:
axes = list(range(len(inputs.shape) - 1))
batch_mean = tf.reduce_mean(inputs, axes, keepdims=True)
batch_variance = tf.reduce_mean(tf.math.squared_difference(inputs, tf.stop_gradient(batch_mean)), axes, keepdims=True)
batch_mean = tf.squeeze(batch_mean, axes)
batch_variance = tf.squeeze(batch_variance, axes)
mean_update = self.assign_moving_average(self.moving_mean, batch_mean)
variance_update = self.assign_moving_average(self.moving_variance, batch_variance)
self.add_update(mean_update)
self.add_update(variance_update)
mean, variance = batch_mean, batch_variance
else:
mean, variance = self.moving_mean, self.moving_variance
output = batch_norm(inputs, moving_mean=mean, moving_var=variance, beta=self.beta, gamma=self.gamma, eps=1e-5)
return output
def net():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),
BatchNorm(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),
tf.keras.layers.Conv2D(filters=16, kernel_size=5),
BatchNorm(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(120),
BatchNorm(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.Dense(84),
BatchNorm(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.Dense(10)]
)
lr, num_epochs, batch_size = 1.0, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
tf.reshape(net.layers[1].gamma, (-1,)), tf.reshape(net.layers[1].beta, (-1,))
def net():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters=6, kernel_size=5, input_shape=(28, 28, 1)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),
tf.keras.layers.Conv2D(filters=16, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.AvgPool2D(pool_size=2, strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(120),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.Dense(84),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('sigmoid'),
tf.keras.layers.Dense(10),
]) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
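# Batch normalization from scratch: normalize with minibatch statistics during training and with running averages at inference.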
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):
if not is_training:
X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5
else:
assert len(X.shape) in (2, 4)
if len(X.shape) == 2:
mean = paddle.mean(X)
var = paddle.mean(((X - mean) ** 2))
else:
mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)
var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)
X_hat = (X - mean) / (var + eps) ** 0.5
moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
moving_var = momentum * moving_var + (1.0 - momentum) * var
Y = gamma * X_hat + beta
return Y, moving_mean, moving_var
class BatchNorm(nn.Layer):
def __init__(self, num_features, num_dims=4):
super(BatchNorm, self).__init__()
if num_dims == 2:
shape = (1, num_features)
else:
shape = (1, num_features, 1, 1)
self.gamma = self.create_parameter(
attr=None,
shape=shape,
dtype='float32',
is_bias=False,
default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))
self.beta = self.create_parameter(
attr=None,
shape=shape,
dtype='float32',
is_bias=False,
default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))
self.moving_mean = paddle.zeros(shape=shape, dtype='float32')
        self.moving_var = paddle.ones(shape=shape, dtype='float32')
def forward(self, X):
Y, self.moving_mean, self.moving_var = batch_norm(
X, self.gamma, self.beta, self.moving_mean,
self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)
return Y
net = nn.Sequential(
nn.Conv2D(1, 6, kernel_size=5), BatchNorm(6, num_dims=4), nn.Sigmoid(),
nn.MaxPool2D(kernel_size=2, stride=2),
nn.Conv2D(6, 16, kernel_size=5), BatchNorm(16, num_dims=4), nn.Sigmoid(),
nn.MaxPool2D(kernel_size=2, stride=2),
nn.Flatten(), nn.Linear(16 * 4 * 4, 120), BatchNorm(120, num_dims=2), nn.Sigmoid(),
nn.Linear(120, 84), BatchNorm(84, num_dims=2), nn.Sigmoid(),
nn.Linear(84, 10))
lr, num_epochs, batch_size = 1.0, 10, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch6(net, train_iter, test_iter, num_epochs, lr, d2l.try_gpu())
param = net.parameters()
print('gamma:', param[2].numpy().reshape(-1))
print('beta:', param[3].numpy().reshape(-1))
net = nn.Sequential(
nn.Conv2D(1, 6, kernel_size=5), nn.BatchNorm2D(6, momentum=0.1), nn.Sigmoid(),
nn.MaxPool2D(kernel_size=2, stride=2),
nn.Conv2D(6, 16, kernel_size=5), nn.BatchNorm2D(16, momentum=0.1), nn.Sigmoid(),
nn.MaxPool2D(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(256, 120), nn.BatchNorm1D(120, momentum=0.1), nn.Sigmoid(),
nn.Linear(120, 84), nn.BatchNorm1D(84, momentum=0.1), nn.Sigmoid(),
nn.Linear(84, 10)) |
37 | import tensorflow as tf
from d2l import tensorflow as d2l
class Residual(tf.keras.Model):
def __init__(self, num_channels, use_1x1conv=False, strides=1):
super().__init__()
self.conv1 = tf.keras.layers.Conv2D(
num_channels, padding='same', kernel_size=3, strides=strides)
self.conv2 = tf.keras.layers.Conv2D(
num_channels, kernel_size=3, padding='same')
self.conv3 = None
if use_1x1conv:
self.conv3 = tf.keras.layers.Conv2D(
num_channels, kernel_size=1, strides=strides)
self.bn1 = tf.keras.layers.BatchNormalization()
self.bn2 = tf.keras.layers.BatchNormalization()
def call(self, X):
Y = tf.keras.activations.relu(self.bn1(self.conv1(X)))
Y = self.bn2(self.conv2(Y))
if self.conv3 is not None:
X = self.conv3(X)
Y += X
return tf.keras.activations.relu(Y)
blk = Residual(3)
X = tf.random.uniform((4, 6, 6, 3))
Y = blk(X)
Y.shape
blk = Residual(6, use_1x1conv=True, strides=2)
blk(X).shape
b1 = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
class ResnetBlock(tf.keras.layers.Layer):
def __init__(self, num_channels, num_residuals, first_block=False, **kwargs):
super(ResnetBlock, self).__init__(**kwargs)
self.residual_layers = []
for i in range(num_residuals):
if i == 0 and not first_block:
self.residual_layers.append(Residual(num_channels, use_1x1conv=True, strides=2))
else:
self.residual_layers.append(Residual(num_channels))
def call(self, X):
for layer in self.residual_layers.layers:
X = layer(X)
return X
b2 = ResnetBlock(64, 2, first_block=True)
b3 = ResnetBlock(128, 2)
b4 = ResnetBlock(256, 2)
b5 = ResnetBlock(512, 2)
def net():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Activation('relu'),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'),
ResnetBlock(64, 2, first_block=True),
ResnetBlock(128, 2),
ResnetBlock(256, 2),
ResnetBlock(512, 2),
tf.keras.layers.GlobalAvgPool2D(),
tf.keras.layers.Dense(units=10)])
X = tf.random.uniform(shape=(1, 224, 224, 1))
for layer in net().layers:
X = layer(X)
print(layer.__class__.__name__,'output shape: ', X.shape) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
from paddle.nn import functional as F
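# Residual block: two 3x3 convolutions with batch normalization; an optional 1x1 convolution makes the shortcut match the output shape.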
class Residual(nn.Layer):
def __init__(self, input_channels, num_channels, use_1x1conv=False,
strides=1):
super(Residual, self).__init__()
self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)
self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3, padding=1)
if use_1x1conv:
self.conv3 = nn.Conv2D(input_channels, num_channels, kernel_size=1, stride=strides)
else:
self.conv3 = None
self.bn1 = nn.BatchNorm2D(num_channels)
self.bn2 = nn.BatchNorm2D(num_channels)
self.relu = nn.ReLU()
def forward(self, X):
Y = F.relu(self.bn1(self.conv1(X)))
Y = self.bn2(self.conv2(Y))
if self.conv3:
X = self.conv3(X)
Y += X
return F.relu(Y)
blk = Residual(3, 3)
X = paddle.rand([4, 3, 6, 6])
Y = blk(X)
Y.shape
blk = Residual(3, 6, use_1x1conv=True, strides=2)
blk(X).shape
b1 = nn.Sequential(nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2D(64), nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
def resnet_block(input_channels, num_channels, num_residuals, first_block=False):
blk = []
for i in range(num_residuals):
if i == 0 and not first_block:
blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))
else:
blk.append(Residual(num_channels, num_channels))
return blk
b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))
net = nn.Sequential(b1, b2, b3, b4, b5,
nn.AdaptiveAvgPool2D((1, 1)),
nn.Flatten(), nn.Linear(512, 10))
X = paddle.rand(shape=(1, 1, 224, 224))
for layer in net:
X = layer(X)
print(layer.__class__.__name__,'output shape: ', X.shape) |
38 | import tensorflow as tf
from d2l import tensorflow as d2l
class ConvBlock(tf.keras.layers.Layer):
def __init__(self, num_channels):
super(ConvBlock, self).__init__()
self.bn = tf.keras.layers.BatchNormalization()
self.relu = tf.keras.layers.ReLU()
self.conv = tf.keras.layers.Conv2D(filters=num_channels, kernel_size=(3, 3), padding='same')
self.listLayers = [self.bn, self.relu, self.conv]
def call(self, x):
y = x
for layer in self.listLayers.layers:
y = layer(y)
y = tf.keras.layers.concatenate([x,y], axis=-1)
return y
class DenseBlock(tf.keras.layers.Layer):
def __init__(self, num_convs, num_channels):
super(DenseBlock, self).__init__()
self.listLayers = []
for _ in range(num_convs):
self.listLayers.append(ConvBlock(num_channels))
def call(self, x):
for layer in self.listLayers.layers:
x = layer(x)
return x
blk = DenseBlock(2, 10)
X = tf.random.uniform((4, 8, 8, 3))
Y = blk(X)
Y.shape
class TransitionBlock(tf.keras.layers.Layer):
def __init__(self, num_channels, **kwargs):
super(TransitionBlock, self).__init__(**kwargs)
self.batch_norm = tf.keras.layers.BatchNormalization()
self.relu = tf.keras.layers.ReLU()
self.conv = tf.keras.layers.Conv2D(num_channels, kernel_size=1)
self.avg_pool = tf.keras.layers.AvgPool2D(pool_size=2, strides=2)
def call(self, x):
x = self.batch_norm(x)
x = self.relu(x)
x = self.conv(x)
return self.avg_pool(x)
blk = TransitionBlock(10)
blk(Y).shape
def block_1():
return tf.keras.Sequential([
tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same')])
def block_2():
net = block_1()
num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
net.add(DenseBlock(num_convs, growth_rate))
num_channels += num_convs * growth_rate
if i != len(num_convs_in_dense_blocks) - 1:
num_channels //= 2
net.add(TransitionBlock(num_channels))
return net
def net():
net = block_2()
net.add(tf.keras.layers.BatchNormalization())
net.add(tf.keras.layers.ReLU())
net.add(tf.keras.layers.GlobalAvgPool2D())
net.add(tf.keras.layers.Flatten())
net.add(tf.keras.layers.Dense(10))
return net | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn as nn
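# DenseNet convolution block: batch normalization, ReLU, then a 3x3 convolution.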
def conv_block(input_channels, num_channels):
return nn.Sequential(
nn.BatchNorm2D(input_channels), nn.ReLU(),
nn.Conv2D(input_channels, num_channels, kernel_size=3, padding=1))
class DenseBlock(nn.Layer):
def __init__(self, num_convs, input_channels, num_channels):
super(DenseBlock, self).__init__()
layer = []
for i in range(num_convs):
layer.append(conv_block(num_channels * i + input_channels, num_channels))
self.net = nn.Sequential(*layer)
def forward(self, X):
for blk in self.net:
Y = blk(X)
X = paddle.concat(x=[X, Y], axis=1)
return X
blk = DenseBlock(2, 3, 10)
X = paddle.randn([4, 3, 8, 8])
Y = blk(X)
Y.shape
def transition_block(input_channels, num_channels):
return nn.Sequential(
nn.BatchNorm2D(input_channels), nn.ReLU(),
nn.Conv2D(input_channels, num_channels, kernel_size=1),
nn.AvgPool2D(kernel_size=2, stride=2))
blk = transition_block(23, 10)
blk(Y).shape
b1 = nn.Sequential(
nn.Conv2D(1, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2D(64), nn.ReLU(),
nn.MaxPool2D(kernel_size=3, stride=2, padding=1))
num_channels, growth_rate = 64, 32
num_convs_in_dense_blocks = [4, 4, 4, 4]
blks = []
for i, num_convs in enumerate(num_convs_in_dense_blocks):
blks.append(DenseBlock(num_convs, num_channels, growth_rate))
num_channels += num_convs * growth_rate
if i != len(num_convs_in_dense_blocks) - 1:
blks.append(transition_block(num_channels, num_channels // 2))
num_channels = num_channels // 2
net = nn.Sequential(
b1, *blks,
nn.BatchNorm2D(num_channels), nn.ReLU(),
    nn.AdaptiveAvgPool2D((1, 1)),
nn.Flatten(),
nn.Linear(num_channels, 10)) |
39 | %matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
T = 1000
time = tf.range(1, T + 1, dtype=tf.float32)
x = tf.sin(0.01 * time) + tf.random.normal([T], 0, 0.2)
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))
tau = 4
features = tf.Variable(tf.zeros((T - tau, tau)))
for i in range(tau):
features[:, i].assign(x[i: T - tau + i])
labels = tf.reshape(x[tau:], (-1, 1))
batch_size, n_train = 16, 600
train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)
def get_net():
net = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='relu'),
tf.keras.layers.Dense(1)])
return net
loss = tf.keras.losses.MeanSquaredError()
def train(net, train_iter, loss, epochs, lr):
trainer = tf.keras.optimizers.Adam()
for epoch in range(epochs):
for X, y in train_iter:
with tf.GradientTape() as g:
out = net(X)
l = loss(y, out)
params = net.trainable_variables
grads = g.gradient(l, params)
trainer.apply_gradients(zip(grads, params))
net = get_net()
train(net, train_iter, loss, 5, 0.01)
onestep_preds = net(features)
d2l.plot([time, time[tau:]],
[x.numpy(), onestep_preds.numpy()], 'time',
'x', legend=['data', '1-step preds'], xlim=[1, 1000],
figsize=(6, 3))
multistep_preds = tf.Variable(tf.zeros(T))
multistep_preds[:n_train + tau].assign(x[:n_train + tau])
for i in range(n_train + tau, T):
multistep_preds[i].assign(tf.reshape(net(tf.reshape(multistep_preds[i - tau: i], (1, -1))), ()))
d2l.plot([time, time[tau:], time[n_train + tau:]],
[x.numpy(), onestep_preds.numpy(),
multistep_preds[n_train + tau:].numpy()], 'time',
'x', legend=['data', '1-step preds', 'multistep preds'],
xlim=[1, 1000], figsize=(6, 3))
max_steps = 64
features = tf.Variable(tf.zeros((T - tau - max_steps + 1, tau + max_steps)))
for i in range(tau):
features[:, i].assign(x[i: i + T - tau - max_steps + 1].numpy())
for i in range(tau, tau + max_steps):
features[:, i].assign(tf.reshape(net((features[:, i - tau: i])), -1))
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
[features[:, tau + i - 1].numpy() for i in steps], 'time', 'x',
legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],
figsize=(6, 3)) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
T = 1000
time = paddle.arange(1, T + 1, dtype=paddle.float32)
x = paddle.sin(0.01 * time) + paddle.normal(0, 0.2, (T,))
d2l.plot(time, [x], 'time', 'x', xlim=[1, 1000], figsize=(6, 3))
tau = 4
features = paddle.zeros((T - tau, tau))
for i in range(tau):
features[:, i] = x[i: T - tau + i]
labels = x[tau:].reshape((-1, 1))
batch_size, n_train = 16, 600
train_iter = d2l.load_array((features[:n_train], labels[:n_train]), batch_size, is_train=True)
def init_weights(m):
if type(m) == nn.Linear:
nn.initializer.XavierUniform(m.weight)
def get_net():
net = nn.Sequential(nn.Linear(4, 10),
nn.ReLU(),
nn.Linear(10, 1))
net.apply(init_weights)
return net
loss = nn.MSELoss(reduction='none')
def train(net, train_iter, loss, epochs, lr):
trainer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())
for epoch in range(epochs):
        for i, (X, y) in enumerate(train_iter()):
trainer.clear_grad()
l = loss(net(X), y)
l.sum().backward()
trainer.step()
net = get_net()
train(net, train_iter, loss, 5, 0.01)
onestep_preds = net(features)
d2l.plot([time, time[tau:]],
[x.detach().numpy(), onestep_preds.detach().numpy()], 'time',
'x', legend=['data', '1-step preds'], xlim=[1, 1000],
figsize=(6, 3))
multistep_preds = paddle.zeros([T])
multistep_preds[: n_train + tau] = x[: n_train + tau]
for i in range(n_train + tau, T):
multistep_preds[i] = net(multistep_preds[i - tau:i].reshape((1, -1)))
d2l.plot([time, time[tau:], time[n_train + tau:]],
[x.detach().numpy(), onestep_preds.detach().numpy(),
multistep_preds[n_train + tau:].detach().numpy()], 'time',
'x', legend=['data', '1-step preds', 'multistep preds'],
xlim=[1, 1000], figsize=(6, 3))
max_steps = 64
features = paddle.zeros((T - tau - max_steps + 1, tau + max_steps))
for i in range(tau):
features[:, i] = x[i: i + T - tau - max_steps + 1]
for i in range(tau, tau + max_steps):
features[:, i] = net(features[:, i - tau:i]).reshape([-1])
steps = (1, 4, 16, 64)
d2l.plot([time[tau + i - 1: T - max_steps + i] for i in steps],
[features[:, tau + i - 1].detach().numpy() for i in steps], 'time', 'x',
legend=[f'{i}-step preds' for i in steps], xlim=[5, 1000],
figsize=(6, 3)) |
40 | import collections
import re
from d2l import tensorflow as d2l
def tokenize(lines, token='word'):
if token == 'word':
return [line.split() for line in lines]
elif token == 'char':
return [list(line) for line in lines]
else:
        print('ERROR: unknown token type: ' + token)
tokens = tokenize(lines)
for i in range(11):
print(tokens[i])
def load_corpus_time_machine(max_tokens=-1):
lines = read_time_machine()
tokens = tokenize(lines, 'char')
vocab = Vocab(tokens)
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
corpus, vocab = load_corpus_time_machine()
len(corpus), len(vocab) | null | null | import collections
import re
from d2l import paddle as d2l
def tokenize(lines, token='word'):
if token == 'word':
return [line.split() for line in lines]
elif token == 'char':
return [list(line) for line in lines]
else:
        print('ERROR: unknown token type: ' + token)
tokens = tokenize(lines)
for i in range(11):
print(tokens[i])
def load_corpus_time_machine(max_tokens=-1):
lines = read_time_machine()
tokens = tokenize(lines, 'char')
vocab = Vocab(tokens)
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
corpus, vocab = load_corpus_time_machine()
len(corpus), len(vocab) |
41 | import random
import tensorflow as tf
from d2l import tensorflow as d2l
tokens = d2l.tokenize(d2l.read_time_machine())
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)
vocab.token_freqs[:10]
def seq_data_iter_random(corpus, batch_size, num_steps):
corpus = corpus[random.randint(0, num_steps - 1):]
num_subseqs = (len(corpus) - 1) // num_steps
initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
random.shuffle(initial_indices)
def data(pos):
return corpus[pos: pos + num_steps]
num_batches = num_subseqs // batch_size
for i in range(0, batch_size * num_batches, batch_size):
initial_indices_per_batch = initial_indices[i: i + batch_size]
X = [data(j) for j in initial_indices_per_batch]
Y = [data(j + 1) for j in initial_indices_per_batch]
yield tf.constant(X), tf.constant(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
offset = random.randint(0, num_steps)
num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
Xs = tf.constant(corpus[offset: offset + num_tokens])
Ys = tf.constant(corpus[offset + 1: offset + 1 + num_tokens])
Xs = tf.reshape(Xs, (batch_size, -1))
Ys = tf.reshape(Ys, (batch_size, -1))
num_batches = Xs.shape[1] // num_steps
for i in range(0, num_batches * num_steps, num_steps):
X = Xs[:, i: i + num_steps]
Y = Ys[:, i: i + num_steps]
yield X, Y | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import paddle
tokens = d2l.tokenize(d2l.read_time_machine())
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)
vocab.token_freqs[:10]
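# Generate minibatches of subsequences by random sampling; neighboring subsequences in a batch are not adjacent in the corpus.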
def seq_data_iter_random(corpus, batch_size, num_steps):
corpus = corpus[random.randint(0, num_steps - 1):]
num_subseqs = (len(corpus) - 1) // num_steps
initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
random.shuffle(initial_indices)
def data(pos):
return corpus[pos: pos + num_steps]
num_batches = num_subseqs // batch_size
for i in range(0, batch_size * num_batches, batch_size):
initial_indices_per_batch = initial_indices[i: i + batch_size]
X = [data(j) for j in initial_indices_per_batch]
Y = [data(j + 1) for j in initial_indices_per_batch]
yield paddle.to_tensor(X), paddle.to_tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
offset = random.randint(0, num_steps)
num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
Xs = paddle.to_tensor(corpus[offset: offset + num_tokens])
Ys = paddle.to_tensor(corpus[offset + 1: offset + 1 + num_tokens])
Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))
num_batches = Xs.shape[1] // num_steps
for i in range(0, num_steps * num_batches, num_steps):
X = Xs[:, i: i + num_steps]
Y = Ys[:, i: i + num_steps]
yield X, Y |
42 | import tensorflow as tf
from d2l import tensorflow as d2l
X, W_xh = tf.random.normal((3, 1), 0, 1), tf.random.normal((1, 4), 0, 1)
H, W_hh = tf.random.normal((3, 4), 0, 1), tf.random.normal((4, 4), 0, 1)
tf.matmul(X, W_xh) + tf.matmul(H, W_hh)
tf.matmul(tf.concat((X, H), 1), tf.concat((W_xh, W_hh), 0)) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
X, W_xh = paddle.normal(0, 1, (3, 1)), paddle.normal(0, 1, (1, 4))
H, W_hh = paddle.normal(0, 1, (3, 4)), paddle.normal(0, 1, (4, 4))
paddle.matmul(X, W_xh) + paddle.matmul(H, W_hh)
paddle.matmul(paddle.concat((X, H), 1), paddle.concat((W_xh, W_hh), 0)) |
43 | %matplotlib inline
import math
import tensorflow as tf
from d2l import tensorflow as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
train_random_iter, vocab_random_iter = d2l.load_data_time_machine(batch_size, num_steps, use_random_iter=True)
tf.one_hot(tf.constant([0, 2]), len(vocab))
X = tf.reshape(tf.range(10), (2, 5))
tf.one_hot(tf.transpose(X), 28).shape
def get_params(vocab_size, num_hiddens):
num_inputs = num_outputs = vocab_size
def normal(shape):
return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)
W_xh = tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32)
W_hh = tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32)
b_h = tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32)
W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)
b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)
params = [W_xh, W_hh, b_h, W_hq, b_q]
return params
def init_rnn_state(batch_size, num_hiddens):
return (tf.zeros((batch_size, num_hiddens)), )
def rnn(inputs, state, params):
W_xh, W_hh, b_h, W_hq, b_q = params
H, = state
outputs = []
for X in inputs:
X = tf.reshape(X,[-1,W_xh.shape[0]])
H = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(H, W_hh) + b_h)
Y = tf.matmul(H, W_hq) + b_q
outputs.append(Y)
return tf.concat(outputs, axis=0), (H,)
class RNNModelScratch:
def __init__(self, vocab_size, num_hiddens, init_state, forward_fn, get_params):
self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
self.init_state, self.forward_fn = init_state, forward_fn
self.trainable_variables = get_params(vocab_size, num_hiddens)
def __call__(self, X, state):
X = tf.one_hot(tf.transpose(X), self.vocab_size)
X = tf.cast(X, tf.float32)
return self.forward_fn(X, state, self.trainable_variables)
def begin_state(self, batch_size, *args, **kwargs):
return self.init_state(batch_size, self.num_hiddens)
device_name = d2l.try_gpu()._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
num_hiddens = 512
with strategy.scope():
net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)
state = net.begin_state(X.shape[0])
Y, new_state = net(X, state)
Y.shape, len(new_state), new_state[0].shape
def predict_ch8(prefix, num_preds, net, vocab):
state = net.begin_state(batch_size=1, dtype=tf.float32)
outputs = [vocab[prefix[0]]]
get_input = lambda: tf.reshape(tf.constant([outputs[-1]]),
(1, 1)).numpy()
for y in prefix[1:]:
_, state = net(get_input(), state)
outputs.append(vocab[y])
for _ in range(num_preds):
y, state = net(get_input(), state)
outputs.append(int(y.numpy().argmax(axis=1).reshape(1)))
return ''.join([vocab.idx_to_token[i] for i in outputs])
predict_ch8('time traveller ', 10, net, vocab)
def grad_clipping(grads, theta):
theta = tf.constant(theta, dtype=tf.float32)
new_grad = []
for grad in grads:
if isinstance(grad, tf.IndexedSlices):
new_grad.append(tf.convert_to_tensor(grad))
else:
new_grad.append(grad)
norm = tf.math.sqrt(sum((tf.reduce_sum(grad ** 2)).numpy()
for grad in new_grad))
norm = tf.cast(norm, tf.float32)
if tf.greater(norm, theta):
for i, grad in enumerate(new_grad):
new_grad[i] = grad * theta / norm
else:
new_grad = new_grad
return new_grad
def train_epoch_ch8(net, train_iter, loss, updater, use_random_iter):
state, timer = None, d2l.Timer()
metric = d2l.Accumulator(2)
for X, Y in train_iter:
if state is None or use_random_iter:
state = net.begin_state(batch_size=X.shape[0], dtype=tf.float32)
with tf.GradientTape(persistent=True) as g:
y_hat, state = net(X, state)
y = tf.reshape(tf.transpose(Y), (-1))
l = loss(y, y_hat)
params = net.trainable_variables
grads = g.gradient(l, params)
grads = grad_clipping(grads, 1)
updater.apply_gradients(zip(grads, params))
metric.add(l * d2l.size(y), d2l.size(y))
return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, strategy, use_random_iter=False):
with strategy.scope():
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
updater = tf.keras.optimizers.SGD(lr)
animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])
predict = lambda prefix: predict_ch8(prefix, 50, net, vocab)
for epoch in range(num_epochs):
ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, use_random_iter)
if (epoch + 1) % 10 == 0:
animator.add(epoch + 1, [ppl])
device = d2l.try_gpu()._device_name
num_epochs, lr = 500, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, strategy)
with strategy.scope():
net = RNNModelScratch(len(vocab), num_hiddens, init_rnn_state, rnn, get_params)
train_ch8(net, train_iter, vocab_random_iter, lr, num_epochs, strategy, use_random_iter=True) | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import paddle
from paddle import nn
from paddle.nn import functional as F
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
F.one_hot(paddle.to_tensor([0, 2]), len(vocab))
X = paddle.arange(10).reshape((2, 5))
F.one_hot(X.T, 28).shape
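# Initialize the parameters of the RNN from scratch: hidden-layer and output-layer weights with small random values, biases with zeros.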
def get_params(vocab_size, num_hiddens):
num_inputs = num_outputs = vocab_size
def normal(shape):
return paddle.randn(shape=shape)* 0.01
W_xh = normal([num_inputs, num_hiddens])
W_hh = normal([num_hiddens, num_hiddens])
b_h = paddle.zeros(shape=[num_hiddens])
W_hq = normal([num_hiddens, num_outputs])
b_q = paddle.zeros(shape=[num_outputs])
params = [W_xh, W_hh, b_h, W_hq, b_q]
for param in params:
param.stop_gradient=False
return params
def init_rnn_state(batch_size, num_hiddens):
return (paddle.zeros(shape=[batch_size, num_hiddens]), )
def rnn(inputs, state, params):
W_xh, W_hh, b_h, W_hq, b_q = params
H, = state
outputs = []
for X in inputs:
H = paddle.tanh(paddle.mm(X, W_xh) + paddle.mm(H, W_hh) + b_h)
Y = paddle.mm(H, W_hq) + b_q
outputs.append(Y)
return paddle.concat(x=outputs, axis=0), (H,)
class RNNModelScratch:
def __init__(self, vocab_size, num_hiddens, get_params, init_state, forward_fn):
self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
self.params = get_params(vocab_size, num_hiddens)
self.init_state, self.forward_fn = init_state, forward_fn
def __call__(self, X, state):
X = F.one_hot(X.T, self.vocab_size)
return self.forward_fn(X, state, self.params)
def begin_state(self, batch_size):
return self.init_state(batch_size, self.num_hiddens)
num_hiddens = 512
net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)
state = net.begin_state(X.shape[0])
Y, new_state = net(X, state)
Y.shape, len(new_state), new_state[0].shape
def predict_ch8(prefix, num_preds, net, vocab, device):
state = net.begin_state(batch_size=1)
outputs = [vocab[prefix[0]]]
get_input = lambda: paddle.to_tensor(outputs[-1], place=device).reshape((1, 1))
for y in prefix[1:]:
_, state = net(get_input(), state)
outputs.append(vocab[y])
for _ in range(num_preds):
y, state = net(get_input(), state)
outputs.append(int(paddle.reshape(paddle.argmax(y,axis=1),shape=[1])))
return ''.join([vocab.idx_to_token[i] for i in outputs])
predict_ch8('time traveller ', 10, net, vocab, d2l.try_gpu())
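# Clip the gradients so that their global L2 norm does not exceed theta, which helps avoid exploding gradients.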
def grad_clipping(net, theta):
if isinstance(net, nn.Layer):
params = [p for p in net.parameters() if not p.stop_gradient]
else:
params = net.params
norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))
if norm > theta:
with paddle.no_grad():
for param in params:
param.grad.set_value(param.grad * theta / norm)
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
state, timer = None, d2l.Timer()
metric = d2l.Accumulator(2)
for X, Y in train_iter:
if state is None or use_random_iter:
state = net.begin_state(batch_size=X.shape[0])
else:
if isinstance(net, nn.Layer) and not isinstance(state, tuple):
state.stop_gradient=True
else:
for s in state:
s.stop_gradient=True
y = paddle.reshape(Y.T,shape=[-1])
X = paddle.to_tensor(X, place=device)
y = paddle.to_tensor(y, place=device)
y_hat, state = net(X, state)
l = loss(y_hat, y).mean()
if isinstance(updater, paddle.optimizer.Optimizer):
updater.clear_grad()
l.backward()
grad_clipping(net, 1)
updater.step()
else:
l.backward()
grad_clipping(net, 1)
updater(batch_size=1)
metric.add(l * y.numel(), y.numel())
return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', ylabel='perplexity', legend=['train'], xlim=[10, num_epochs])
if isinstance(net, nn.Layer):
updater = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
else:
updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
for epoch in range(num_epochs):
ppl, speed = train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter)
if (epoch + 1) % 10 == 0:
animator.add(epoch + 1, [ppl])
num_epochs, lr = 500, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())
net = RNNModelScratch(len(vocab), num_hiddens, get_params, init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(), use_random_iter=True) |
44 | import tensorflow as tf
from d2l import tensorflow as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
num_hiddens = 256
rnn_cell = tf.keras.layers.SimpleRNNCell(num_hiddens, kernel_initializer='glorot_uniform')
rnn_layer = tf.keras.layers.RNN(rnn_cell, time_major=True, return_sequences=True, return_state=True)
state = rnn_cell.get_initial_state(batch_size=batch_size, dtype=tf.float32)
state.shape
X = tf.random.uniform((num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
Y.shape, len(state_new), state_new[0].shape
class RNNModel(tf.keras.layers.Layer):
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.dense = tf.keras.layers.Dense(vocab_size)
def call(self, inputs, state):
X = tf.one_hot(tf.transpose(inputs), self.vocab_size)
Y, *state = self.rnn(X, state)
output = self.dense(tf.reshape(Y, (-1, Y.shape[-1])))
return output, state
def begin_state(self, *args, **kwargs):
return self.rnn.cell.get_initial_state(*args, **kwargs)
device_name = d2l.try_gpu()._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
with strategy.scope():
net = RNNModel(rnn_layer, vocab_size=len(vocab))
d2l.predict_ch8('time traveller', 10, net, vocab)
num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, strategy) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
from paddle.nn import functional as F
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
num_hiddens = 256
rnn_layer = nn.SimpleRNN(len(vocab), num_hiddens, time_major=True)
state = paddle.zeros(shape=[1, batch_size, num_hiddens])
state.shape
X = paddle.rand(shape=[num_steps, batch_size, len(vocab)])
Y, state_new = rnn_layer(X, state)
Y.shape, state_new.shape
class RNNModel(nn.Layer):
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.num_hiddens = self.rnn.hidden_size
if self.rnn.num_directions==1:
self.num_directions = 1
self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
else:
self.num_directions = 2
self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
def forward(self, inputs, state):
X = F.one_hot(inputs.T, self.vocab_size)
Y, state = self.rnn(X, state)
output = self.linear(Y.reshape((-1, Y.shape[-1])))
return output, state
def begin_state(self, batch_size=1):
if not isinstance(self.rnn, nn.LSTM):
return paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens])
else:
return (paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]),
paddle.zeros(shape=[self.num_directions * self.rnn.num_layers, batch_size, self.num_hiddens]))
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
d2l.predict_ch8('time traveller', 10, net, vocab, device)
num_epochs, lr = 500, 1.0
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device) |
45 | import tensorflow as tf
from d2l import tensorflow as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_params(vocab_size, num_hiddens):
num_inputs = num_outputs = vocab_size
def normal(shape):
return tf.random.normal(shape=shape,stddev=0.01,mean=0,dtype=tf.float32)
def three():
return (tf.Variable(normal((num_inputs, num_hiddens)), dtype=tf.float32), tf.Variable(normal((num_hiddens, num_hiddens)), dtype=tf.float32), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))
W_xz, W_hz, b_z = three()
W_xr, W_hr, b_r = three()
W_xh, W_hh, b_h = three()
W_hq = tf.Variable(normal((num_hiddens, num_outputs)), dtype=tf.float32)
b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)
params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
return params
def init_gru_state(batch_size, num_hiddens):
return (tf.zeros((batch_size, num_hiddens)), )
def gru(inputs, state, params):
W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
H, = state
outputs = []
for X in inputs:
X = tf.reshape(X,[-1,W_xh.shape[0]])
Z = tf.sigmoid(tf.matmul(X, W_xz) + tf.matmul(H, W_hz) + b_z)
R = tf.sigmoid(tf.matmul(X, W_xr) + tf.matmul(H, W_hr) + b_r)
H_tilda = tf.tanh(tf.matmul(X, W_xh) + tf.matmul(R * H, W_hh) + b_h)
H = Z * H + (1 - Z) * H_tilda
Y = tf.matmul(H, W_hq) + b_q
outputs.append(Y)
return tf.concat(outputs, axis=0), (H,)
vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
num_epochs, lr = 500, 1
with strategy.scope():
model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_gru_state, gru, get_params)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
gru_cell = tf.keras.layers.GRUCell(num_hiddens, kernel_initializer='glorot_uniform')
gru_layer = tf.keras.layers.RNN(gru_cell, time_major=True, return_sequences=True, return_state=True)
device_name = d2l.try_gpu()._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
with strategy.scope():
model = d2l.RNNModel(gru_layer, vocab_size=len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn.functional as F
from paddle import nn
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_params(vocab_size, num_hiddens):
num_inputs = num_outputs = vocab_size
def normal(shape):
return paddle.randn(shape=shape)*0.01
def three():
return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))
W_xz, W_hz, b_z = three()
W_xr, W_hr, b_r = three()
W_xh, W_hh, b_h = three()
W_hq = normal((num_hiddens, num_outputs))
b_q = paddle.zeros([num_outputs])
params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]
for param in params:
param.stop_gradient = False
return params
def init_gru_state(batch_size, num_hiddens):
return (paddle.zeros([batch_size, num_hiddens]), )
def gru(inputs, state, params):
W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
H,*_ = state
outputs = []
for X in inputs:
Z = F.sigmoid((X @ W_xz) + (H @ W_hz) + b_z)
R = F.sigmoid((X @ W_xr) + (H @ W_hr) + b_r)
H_tilda = paddle.tanh((X @ W_xh) + ((R * H) @ W_hh) + b_h)
H = Z * H + (1 - Z) * H_tilda
Y = H @ W_hq + b_q
outputs.append(Y)
return paddle.concat(outputs, axis=0), (H,*_)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1.0
model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_params, init_gru_state, gru)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
num_inputs = vocab_size
gru_layer = nn.GRU(num_inputs, num_hiddens, time_major=True)
model = d2l.RNNModel(gru_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) |
46 | import tensorflow as tf
from d2l import tensorflow as d2l
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_lstm_params(vocab_size, num_hiddens):
num_inputs = num_outputs = vocab_size
def normal(shape):
return tf.Variable(tf.random.normal(shape=shape, stddev=0.01, mean=0, dtype=tf.float32))
def three():
return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), tf.Variable(tf.zeros(num_hiddens), dtype=tf.float32))
W_xi, W_hi, b_i = three()
W_xf, W_hf, b_f = three()
W_xo, W_ho, b_o = three()
W_xc, W_hc, b_c = three()
W_hq = normal((num_hiddens, num_outputs))
b_q = tf.Variable(tf.zeros(num_outputs), dtype=tf.float32)
params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]
return params
def init_lstm_state(batch_size, num_hiddens):
return (tf.zeros(shape=(batch_size, num_hiddens)), tf.zeros(shape=(batch_size, num_hiddens)))
def lstm(inputs, state, params):
W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q = params
(H, C) = state
outputs = []
for X in inputs:
X=tf.reshape(X,[-1,W_xi.shape[0]])
I = tf.sigmoid(tf.matmul(X, W_xi) + tf.matmul(H, W_hi) + b_i)
F = tf.sigmoid(tf.matmul(X, W_xf) + tf.matmul(H, W_hf) + b_f)
O = tf.sigmoid(tf.matmul(X, W_xo) + tf.matmul(H, W_ho) + b_o)
C_tilda = tf.tanh(tf.matmul(X, W_xc) + tf.matmul(H, W_hc) + b_c)
C = F * C + I * C_tilda
H = O * tf.tanh(C)
Y = tf.matmul(H, W_hq) + b_q
outputs.append(Y)
return tf.concat(outputs, axis=0), (H,C)
vocab_size, num_hiddens, device_name = len(vocab), 256, d2l.try_gpu()._device_name
num_epochs, lr = 500, 1
strategy = tf.distribute.OneDeviceStrategy(device_name)
with strategy.scope():
model = d2l.RNNModelScratch(len(vocab), num_hiddens, init_lstm_state, lstm, get_lstm_params)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy)
lstm_cell = tf.keras.layers.LSTMCell(num_hiddens, kernel_initializer='glorot_uniform')
lstm_layer = tf.keras.layers.RNN(lstm_cell, time_major=True, return_sequences=True, return_state=True)
device_name = d2l.try_gpu()._device_name
strategy = tf.distribute.OneDeviceStrategy(device_name)
with strategy.scope():
model = d2l.RNNModel(lstm_layer, vocab_size=len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, strategy) | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
import paddle.nn.functional as Function
from paddle import nn
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
def get_lstm_params(vocab_size, num_hiddens):
num_inputs = num_outputs = vocab_size
def normal(shape):
return paddle.randn(shape=shape)*0.01
def three():
return (normal((num_inputs, num_hiddens)), normal((num_hiddens, num_hiddens)), paddle.zeros([num_hiddens]))
W_xi, W_hi, b_i = three()
W_xf, W_hf, b_f = three()
W_xo, W_ho, b_o = three()
W_xc, W_hc, b_c = three()
W_hq = normal((num_hiddens, num_outputs))
b_q = paddle.zeros([num_outputs])
params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]
for param in params:
param.stop_gradient = False
return params
def init_lstm_state(batch_size, num_hiddens):
return (paddle.zeros([batch_size, num_hiddens]), paddle.zeros([batch_size, num_hiddens]))
def lstm(inputs, state, params):
[W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c,
W_hq, b_q] = params
(H, C) = state
outputs = []
for X in inputs:
I = Function.sigmoid((X @ W_xi) + (H @ W_hi) + b_i)
F = Function.sigmoid((X @ W_xf) + (H @ W_hf) + b_f)
O = Function.sigmoid((X @ W_xo) + (H @ W_ho) + b_o)
C_tilda = paddle.tanh((X @ W_xc) + (H @ W_hc) + b_c)
C = F * C + I * C_tilda
H = O * paddle.tanh(C)
Y = (H @ W_hq) + b_q
outputs.append(Y)
return paddle.concat(outputs, axis=0), (H, C)
vocab_size, num_hiddens, device = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1.0
model = d2l.RNNModelScratch(len(vocab), num_hiddens, get_lstm_params, init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
num_inputs = vocab_size
lstm_layer = nn.LSTM(num_inputs, num_hiddens, time_major=True)
model = d2l.RNNModel(lstm_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device) |
47 | import os
import tensorflow as tf
from d2l import tensorflow as d2l
def build_array_nmt(lines, vocab, num_steps):
lines = [vocab[l] for l in lines]
lines = [l + [vocab['<eos>']] for l in lines]
array = tf.constant([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])
valid_len = tf.reduce_sum(
tf.cast(array != vocab['<pad>'], tf.int32), 1)
return array, valid_len
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
print('X:', tf.cast(X, tf.int32))
print('Valid length of X:', X_valid_len)
print('Y:', tf.cast(Y, tf.int32))
print('Valid length of Y:', Y_valid_len)
break | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import os
import paddle
def build_array_nmt(lines, vocab, num_steps):
lines = [vocab[l] for l in lines]
lines = [l + [vocab['<eos>']] for l in lines]
array = paddle.to_tensor([truncate_pad(l, num_steps, vocab['<pad>']) for l in lines])
valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)
return array, valid_len
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
print('X:', X.astype(paddle.int32))
print('Valid length of X:', X_valid_len)
print('Y:', Y.astype(paddle.int32))
print('Valid length of Y:', Y_valid_len)
break |
48 | null | null | null | x = paddle.arange(12)
x.numel()
X = paddle.reshape(x, (3, 4))
paddle.zeros((2, 3, 4))
paddle.ones((2, 3, 4))
paddle.randn((3, 4),'float32')
paddle.to_tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
x = paddle.to_tensor([1.0, 2, 4, 8])
y = paddle.to_tensor([2, 2, 2, 2])
x + y, x - y, x * y, x / y, x**y
paddle.exp(x)
X = paddle.arange(12, dtype='float32').reshape((3, 4))
Y = paddle.to_tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]])
paddle.concat((X, Y), axis=0), paddle.concat((X, Y), axis=1)
a = paddle.reshape(paddle.arange(3), (3, 1))
b = paddle.reshape(paddle.arange(2), (1, 2))
Z = paddle.zeros_like(Y)
Z = X + Y
A = X.numpy()
B = paddle.to_tensor(A)
type(A), type(B)
a = paddle.to_tensor([3.5])
a, a.item(), float(a), int(a) |
49 | null | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
X, y = paddle.to_tensor(inputs.values), paddle.to_tensor(outputs.values) |
50 | null | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
x = paddle.to_tensor([3.0])
y = paddle.to_tensor([2.0])
x + y, x * y, x / y, x**y
x = paddle.arange(4)
A = paddle.reshape(paddle.arange(20), (5, 4))
paddle.transpose(A, perm=[1, 0])
B = paddle.to_tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]])
B == paddle.transpose(B, perm=[1, 0])
X = paddle.reshape(paddle.arange(24), (2, 3, 4))
A = paddle.reshape(paddle.arange(20, dtype=paddle.float32), (5, 4))
B = A.clone()
A, A + B
a = 2
X = paddle.reshape(paddle.arange(24), (2, 3, 4))
a + X, (a * X).shape
x = paddle.arange(4, dtype=paddle.float32)
print(x, x.sum())
A.shape, A.sum()
A.mean(), A.sum() / A.numel()
A.mean(axis=0), A.sum(axis=0) / A.shape[0]
sum_A = paddle.sum(A, axis=1, keepdim=True)
y = paddle.ones(shape=[4], dtype='float32')
x, y, paddle.dot(x, y)
paddle.sum(x * y)
A.shape, x.shape, paddle.mv(A, x)
B = paddle.ones(shape=[4, 3], dtype='float32')
paddle.mm(A, B)
u = paddle.to_tensor([3.0, -4.0])
paddle.norm(u)
paddle.abs(u).sum()
paddle.norm(paddle.ones(shape=[4, 9], dtype='float32')) |
51 | null | null | null | %matplotlib inline
import numpy as np
from matplotlib_inline import backend_inline
from d2l import paddle as d2l
def f(x):
return 3 * x ** 2 - 4 * x
def numerical_lim(f, x, h):
return (f(x + h) - f(x)) / h
h = 0.1
for i in range(5):
print(f'h={h:.5f}, numerical limit={numerical_lim(f, 1, h):.5f}')
h *= 0.1 |
52 | null | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
x = paddle.arange(4, dtype='float32')
x = paddle.to_tensor(x, stop_gradient=False)
y = 2 * paddle.dot(x, x)
y.backward()
x.grad
x.clear_gradient()
y = paddle.sum(x)
y.backward()
x.grad
x.clear_gradient()
y = x * x
paddle.sum(y).backward()
x.grad
x.clear_gradient()
y = x * x
u = y.detach()
z = u * x
paddle.sum(z).backward()
x.grad == u
x.clear_gradient()
paddle.sum(y).backward()
x.grad == 2 * x
def f(a):
b = a * 2
while paddle.norm(b) < 1000:
b = b * 2
if paddle.sum(b) > 0:
c = b
else:
c = 100 * b
return c
a = paddle.to_tensor(paddle.randn(shape=[1]), stop_gradient=False)
d = f(a)
d.backward() |
53 | null | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import numpy as np
import paddle
fair_probs = [1.0 / 6] * 6
paddle.distribution.Multinomial(1, paddle.to_tensor(fair_probs)).sample()
counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()
counts / 1000
counts = paddle.distribution.Multinomial(1000, paddle.to_tensor(fair_probs)).sample()
counts / 1000 |
54 | null | null | null | counts = paddle.distribution.Multinomial(10, paddle.to_tensor(fair_probs)).sample((500,1))
cum_counts = counts.cumsum(axis=0)
cum_counts = cum_counts.squeeze(axis=1)
estimates = cum_counts / cum_counts.sum(axis=1, keepdim=True)
d2l.set_figsize((6, 4.5))
for i in range(6):
d2l.plt.plot(estimates[:, i],
label=("P(die=" + str(i + 1) + ")"))
d2l.plt.axhline(y=0.167, color='black', linestyle='dashed')
d2l.plt.gca().set_xlabel('Groups of experiments')
d2l.plt.gca().set_ylabel('Estimated probability')
d2l.plt.legend()
import warnings
warnings.filterwarnings(action='ignore')
import paddle
help(paddle.ones)
paddle.ones([4], dtype='float32') |
55 | null | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import time
import numpy as np
import paddle
n = 10000
a = paddle.ones([n])
b = paddle.ones([n])
c = paddle.zeros([n])
timer = Timer()
for i in range(n):
c[i] = a[i] + b[i]
x = np.arange(-7, 7, 0.01)
params = [(0, 1), (0, 2), (3, 1)]
d2l.plot(x, [normal(x, mu, sigma) for mu, sigma in params], xlabel='x',
ylabel='p(x)', figsize=(4.5, 2.5), legend=[f'mean {mu}, std {sigma}' for mu, sigma in params]) |
56 | null | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import random
import paddle
def synthetic_data(w, b, num_examples):
X = paddle.normal(0, 1, (num_examples, len(w)))
y = paddle.matmul(X, w) + b
y += paddle.normal(0, 0.01, y.shape)
return X, y.reshape((-1, 1))
true_w = paddle.to_tensor([2, -3.4])
true_b = 4.2
features, labels = synthetic_data(true_w, true_b, 1000)
d2l.set_figsize()
d2l.plt.scatter(features[:, 1].detach().numpy(), labels.detach().numpy(), 1);
def data_iter(batch_size, features, labels):
num_examples = len(features)
indices = list(range(num_examples))
random.shuffle(indices)
for i in range(0, num_examples, batch_size):
batch_indices = paddle.to_tensor(indices[i: min(i + batch_size, num_examples)])
yield features[batch_indices], labels[batch_indices]
batch_size = 10
for X, y in data_iter(batch_size, features, labels):
break
w = paddle.normal(0, 0.01, shape=(2,1))
b = paddle.zeros(shape=[1])
w.stop_gradient = False
b.stop_gradient = False
def linreg(X, w, b):
return paddle.matmul(X, w) + b
def sgd(params, lr, batch_size):
with paddle.no_grad():
for i, param in enumerate(params):
param -= lr * params[i].grad / batch_size
params[i].set_value(param)
params[i].clear_gradient()
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
for X, y in data_iter(batch_size, features, labels):
l = loss(net(X, w, b), y)
l.sum().backward()
sgd([w, b], lr, batch_size)
with paddle.no_grad():
train_l = loss(net(features, w, b), labels) |
57 | null | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import numpy as np
import paddle
true_w = paddle.to_tensor([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)
def load_array(data_arrays, batch_size, is_train=True):
dataset = paddle.io.TensorDataset(data_arrays)
return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train, return_list=True)
batch_size = 10
data_iter = load_array((features, labels), batch_size)
from paddle import nn
net = nn.Sequential(nn.Linear(2, 1))
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(0, 0.01))
bias_attr = paddle.ParamAttr(initializer=None)
net = nn.Sequential(nn.Linear(2, 1, weight_attr=weight_attr, bias_attr=bias_attr))
loss = nn.MSELoss()
trainer = paddle.optimizer.SGD(learning_rate=0.03, parameters=net.parameters())
w = net[0].weight
b = net[0].bias |
58 | null | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import sys
import paddle
from paddle.vision import transforms
d2l.use_svg_display()
trans = transforms.ToTensor()
mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
figsize = (num_cols * scale, num_rows * scale)
_, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
if paddle.is_tensor(img):
ax.imshow(img.numpy())
else:
ax.imshow(img)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
X, y = next(iter(paddle.io.DataLoader(mnist_train, batch_size=18)))
show_images(X.reshape([18, 28, 28]), 2, 9, titles=get_fashion_mnist_labels(y));
batch_size = 256
def get_dataloader_workers():
return 4
train_iter = paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers())
def load_data_fashion_mnist(batch_size, resize=None):
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)
return (paddle.io.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, return_list=True, num_workers=get_dataloader_workers()),
paddle.io.DataLoader(dataset=mnist_test, batch_size=batch_size, return_list=True, shuffle=True, num_workers=get_dataloader_workers())) |
59 | null | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from IPython import display
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs = 784
num_outputs = 10
W = paddle.normal(0, 0.01, shape=(num_inputs, num_outputs))
b = paddle.zeros(shape=(num_outputs,))
W.stop_gradient=False
b.stop_gradient=False
X = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
X.sum(0, keepdim=True), X.sum(1, keepdim=True)
def softmax(X):
X_exp = paddle.exp(X)
partition = X_exp.sum(1, keepdim=True)
return X_exp / partition
X = paddle.normal(0, 1, (2, 5))
X_prob = softmax(X)
X_prob, X_prob.sum(1)
def net(X):
return softmax(paddle.matmul(X.reshape((-1, W.shape[0])), W) + b)
y = paddle.to_tensor([0, 2])
y_hat = paddle.to_tensor([[0.1, 0.3, 0.6], [0.3, 0.2, 0.5]])
y_hat[[0, 1], y]
def cross_entropy(y_hat, y):
return - paddle.log(y_hat[[i for i in range(len(y_hat))], y.squeeze()])
cross_entropy(y_hat, y)
def accuracy(y_hat, y):
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
if len(y_hat.shape) < len(y.shape):
cmp = y_hat.astype(y.dtype) == y.squeeze()
else:
cmp = y_hat.astype(y.dtype) == y
return float(cmp.astype(y.dtype).sum())
def evaluate_accuracy(net, data_iter):
if isinstance(net, paddle.nn.Layer):
net.eval()
metric = Accumulator(2)
with paddle.no_grad():
for X, y in data_iter:
metric.add(accuracy(net(X), y), y.numel())
return metric[0] / metric[1]
def train_epoch_ch3(net, train_iter, loss, updater):
if isinstance(net, paddle.nn.Layer):
net.train()
metric = Accumulator(3)
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, paddle.optimizer.Optimizer):
updater.clear_grad()
l.mean().backward()
updater.step()
else:
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
return metric[0] / metric[2], metric[1] / metric[2] |
60 | null | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
def init_weights(m):
if type(m) == nn.Linear:
nn.initializer.Normal(mean=0.0, std=0.01)(m.weight)
net.apply(init_weights);
loss = nn.CrossEntropyLoss(reduction='none')
trainer = paddle.optimizer.SGD(learning_rate=0.1, parameters=net.parameters()) |
61 | null | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
x = paddle.arange(-8.0, 8.0, 0.1, dtype='float32')
x.stop_gradient = False
y = paddle.nn.functional.relu(x)
d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'relu(x)', figsize=(5, 2.5))
y.backward(paddle.ones_like(x), retain_graph=True)
d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of relu', figsize=(5, 2.5))
y = paddle.nn.functional.sigmoid(x)
d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'sigmoid(x)', figsize=(5, 2.5))
x.clear_gradient()
y.backward(paddle.ones_like(x), retain_graph=True)
d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of sigmoid', figsize=(5, 2.5))
y = paddle.tanh(x)
d2l.plot(x.detach().numpy(), y.detach().numpy(), 'x', 'tanh(x)', figsize=(5, 2.5))
x.clear_gradient()
y.backward(paddle.ones_like(x), retain_graph=True)
d2l.plot(x.detach().numpy(), x.grad.numpy(), 'x', 'grad of tanh', figsize=(5, 2.5)) |
62 | null | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
num_inputs, num_outputs, num_hiddens = 784, 10, 256
W1 = paddle.randn([num_inputs, num_hiddens]) * 0.01
W1.stop_gradient = False
b1 = paddle.zeros([num_hiddens])
b1.stop_gradient = False
W2 = paddle.randn([num_hiddens, num_outputs]) * 0.01
W2.stop_gradient = False
b2 = paddle.zeros([num_outputs])
b2.stop_gradient = False
params = [W1, b1, W2, b2]
def relu(X):
a = paddle.zeros_like(X)
return paddle.maximum(X, a)
def net(X):
X = X.reshape((-1, num_inputs))
H = relu(X@W1 + b1)
return (H@W2 + b2)
loss = nn.CrossEntropyLoss(reduction='none')
num_epochs, lr = 10, 0.1
updater = paddle.optimizer.SGD(learning_rate=lr, parameters=params)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, updater) |
63 | null | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
net = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256),
nn.ReLU(),
nn.Linear(256, 10))
for layer in net:
if type(layer) == nn.Linear:
paddle.nn.initializer.Normal(mean=0.0, std=0.01)(layer.weight)
batch_size, lr, num_epochs = 256, 0.1, 10
loss = nn.CrossEntropyLoss(reduction='none')
trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=lr)
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) |
64 | null | null | null | import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import math
import numpy as np
import paddle
from paddle import nn
true_w, features, poly_features, labels = [paddle.to_tensor(x, dtype=
paddle.float32) for x in [true_w, features, poly_features, labels]]
features[:2], poly_features[:2, :], labels[:2]
def evaluate_loss(net, data_iter, loss):
metric = d2l.Accumulator(2)
for X, y in data_iter:
out = net(X)
y = y.reshape(out.shape)
l = loss(out, y)
metric.add(l.sum(), l.numel())
return metric[0] / metric[1]
def train(train_features, test_features, train_labels, test_labels,
num_epochs=400):
loss = nn.MSELoss()
input_shape = train_features.shape[-1]
net = nn.Sequential(nn.Linear(input_shape, 1, bias_attr=False))
batch_size = min(10, train_labels.shape[0])
train_iter = d2l.load_array(((train_features, train_labels.reshape([-1,1]))), batch_size)
test_iter = d2l.load_array((test_features, test_labels.reshape([-1,1])), batch_size, is_train=False)
trainer = paddle.optimizer.SGD(parameters=net.parameters(), learning_rate=0.01)
animator = d2l.Animator(xlabel='epoch', ylabel='loss', yscale='log', xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=['train', 'test'])
for epoch in range(num_epochs):
d2l.train_epoch_ch3(net, train_iter, loss, trainer)
if epoch == 0 or (epoch + 1) % 20 == 0:
animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))
train(poly_features[:n_train, :2], poly_features[n_train:, :2],
labels[:n_train], labels[n_train:])
train(poly_features[:n_train, :], poly_features[n_train:, :],
labels[:n_train], labels[n_train:], num_epochs=1500) |
65 | null | null | null | %matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
from paddle import nn
n_train, n_test, num_inputs, batch_size = 20, 100, 200, 5
true_w, true_b = paddle.ones((num_inputs, 1)) * 0.01, 0.05
train_data = d2l.synthetic_data(true_w, true_b, n_train)
train_iter = d2l.load_array(train_data, batch_size)
test_data = d2l.synthetic_data(true_w, true_b, n_test)
test_iter = d2l.load_array(test_data, batch_size, is_train=False)
def init_params():
w = paddle.normal(0, 1, shape=(num_inputs, 1))
w.stop_gradient = False
b = paddle.zeros(shape=[1])
b.stop_gradient = False
return [w, b]
def l2_penalty(w):
return paddle.sum(w.pow(2)) / 2
def train(lambd):
w, b = init_params()
net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
num_epochs, lr = 100, 0.003
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])
for epoch in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y) + lambd * l2_penalty(w)
l.sum().backward()
d2l.sgd([w, b], lr, batch_size)
if (epoch + 1) % 5 == 0:
animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss)))
def train_concise(wd):
weight_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))
bias_attr = paddle.framework.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0.0, std=1.0))
net = nn.Sequential(nn.Linear(num_inputs, 1, weight_attr=weight_attr, bias_attr=bias_attr))
loss = nn.MSELoss()
num_epochs, lr = 100, 0.003
trainer = paddle.optimizer.SGD(parameters=net[0].parameters(), learning_rate=lr, weight_decay=wd*1.0)
animator = d2l.Animator(xlabel='epochs', ylabel='loss', yscale='log', xlim=[5, num_epochs], legend=['train', 'test'])
for epoch in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y)
l.backward()
trainer.step()
trainer.clear_grad()
if (epoch + 1) % 5 == 0:
animator.add(epoch + 1, (d2l.evaluate_loss(net, train_iter, loss), d2l.evaluate_loss(net, test_iter, loss))) |
66 | null | null | null | import warnings
warnings.filterwarnings(action='ignore')
import random
import paddle
from paddle import nn
warnings.filterwarnings("ignore", category=DeprecationWarning)
from d2l import paddle as d2l
def dropout_layer(X, dropout):
assert 0 <= dropout <= 1
if dropout == 1:
return paddle.zeros_like(X)
if dropout == 0:
return X
mask = (paddle.to_tensor(paddle.uniform(X.shape)) > dropout).astype('float32')
return mask * X / (1.0 - dropout)
X= paddle.arange(16, dtype = paddle.float32).reshape((2, 8))
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
dropout1, dropout2 = 0.2, 0.5
class Net(nn.Layer):
def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2,
is_training = True):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.training = is_training
self.lin1 = nn.Linear(num_inputs, num_hiddens1)
self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
self.lin3 = nn.Linear(num_hiddens2, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
H1 = self.relu(self.lin1(X.reshape((-1, self.num_inputs))))
if self.training == True:
H1 = dropout_layer(H1, dropout1)
H2 = self.relu(self.lin2(H1))
if self.training == True:
H2 = dropout_layer(H2, dropout2)
out = self.lin3(H2)
return out
net = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2)
num_epochs, lr, batch_size = 10, 0.5, 256
loss = nn.CrossEntropyLoss(reduction='none')
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
trainer = paddle.optimizer.SGD(learning_rate=lr, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(std=0.01))
net = nn.Sequential(nn.Flatten(),
nn.Linear(784, 256, weight_attr=weight_attr),
nn.ReLU(),
nn.Dropout(dropout1),
nn.Linear(256, 256, weight_attr=weight_attr),
nn.ReLU(),
nn.Dropout(dropout2),
nn.Linear(256, 10, weight_attr=weight_attr))
trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer) |
67 | null | null | null | trainer = paddle.optimizer.SGD(learning_rate=0.5, parameters=net.parameters())
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
%matplotlib inline
import warnings
from d2l import paddle as d2l
warnings.filterwarnings("ignore")
import paddle
x = paddle.arange(start=-8.0, end=8.0, step=0.1, dtype='float32')
x.stop_gradient = False
y = paddle.nn.functional.sigmoid(x)
y.backward(paddle.ones_like(x))
d2l.plot(x.detach().numpy(), [y.detach().numpy(), x.grad.numpy()],
legend=['sigmoid', 'gradient'], figsize=(4.5, 2.5))
M = paddle.normal(0, 1, shape=(4,4))
for i in range(100):
M = paddle.mm(M, paddle.normal(0, 1, shape=(4, 4))) |
68 | null | null | null | %matplotlib inline
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
warnings.filterwarnings("ignore", category=DeprecationWarning)
from d2l import paddle as d2l
n_train = train_data.shape[0]
train_features = paddle.to_tensor(all_features[:n_train].values, dtype=paddle.float32)
test_features = paddle.to_tensor(all_features[n_train:].values, dtype=paddle.float32)
train_labels = paddle.to_tensor(
train_data.SalePrice.values.reshape(-1, 1), dtype=paddle.float32)
loss = nn.MSELoss()
in_features = train_features.shape[1]
def get_net():
net = nn.Sequential(nn.Linear(in_features,1))
return net
def log_rmse(net, features, labels):
clipped_preds = paddle.clip(net(features), 1, float('inf'))
rmse = paddle.sqrt(loss(paddle.log(clipped_preds), paddle.log(labels)))
return rmse.item()
def train(net, train_features, train_labels, test_features, test_labels, num_epochs, learning_rate, weight_decay, batch_size):
train_ls, test_ls = [], []
train_iter = d2l.load_array((train_features, train_labels), batch_size)
optimizer = paddle.optimizer.Adam(learning_rate=learning_rate*1.0, parameters=net.parameters(), weight_decay=weight_decay*1.0)
for epoch in range(num_epochs):
for X, y in train_iter:
l = loss(net(X), y)
l.backward()
optimizer.step()
optimizer.clear_grad()
train_ls.append(log_rmse(net, train_features, train_labels))
if test_labels is not None:
test_ls.append(log_rmse(net, test_features, test_labels))
return train_ls, test_ls
def get_k_fold_data(k, i, X, y):
assert k > 1
fold_size = X.shape[0] // k
X_train, y_train = None, None
for j in range(k):
idx = slice(j * fold_size, (j + 1) * fold_size)
X_part, y_part = X[idx, :], y[idx]
if j == i:
X_valid, y_valid = X_part, y_part
elif X_train is None:
X_train, y_train = X_part, y_part
else:
X_train = paddle.concat([X_train, X_part], 0)
y_train = paddle.concat([y_train, y_part], 0)
return X_train, y_train, X_valid, y_valid
def train_and_pred(train_features, test_features, train_labels, test_data, num_epochs, lr, weight_decay, batch_size):
net = get_net()
train_ls, _ = train(net, train_features, train_labels, None, None, num_epochs, lr, weight_decay, batch_size)
d2l.plot(np.arange(1, num_epochs + 1), [train_ls], xlabel='epoch', ylabel='log rmse', xlim=[1, num_epochs], yscale='log')
preds = net(test_features).detach().numpy()
test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False) |
69 | null | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
from paddle.nn import functional as F
net = nn.Sequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
X = paddle.rand([2, 20])
net(X)
class MLP(nn.Layer):
def __init__(self):
super().__init__()
self.hidden = nn.Linear(20, 256)
self.out = nn.Linear(256, 10)
def forward(self, X):
return self.out(F.relu(self.hidden(X)))
net = MLP()
net(X)
class MySequential(nn.Layer):
def __init__(self, *layers):
super(MySequential, self).__init__()
if len(layers) > 0 and isinstance(layers[0], tuple):
for name, layer in layers:
self.add_sublayer(name, layer)
else:
for idx, layer in enumerate(layers):
self.add_sublayer(str(idx), layer)
def forward(self, X):
for layer in self._sub_layers.values():
X = layer(X)
return X
net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))
net(X)
class FixedHiddenMLP(nn.Layer):
def __init__(self):
super().__init__()
self.rand_weight = paddle.rand([20, 20])
self.linear = nn.Linear(20, 20)
def forward(self, X):
X = self.linear(X)
X = F.relu(paddle.tensor.mm(X, self.rand_weight) + 1)
X = self.linear(X)
while X.abs().sum() > 1:
X /= 2
return X.sum()
net = FixedHiddenMLP()
net(X)
class NestMLP(nn.Layer):
def __init__(self):
super().__init__()
self.net = nn.Sequential(nn.Linear(20, 64), nn.ReLU(),
nn.Linear(64, 32), nn.ReLU())
self.linear = nn.Linear(32, 16)
def forward(self, X):
return self.linear(self.net(X))
chimera = nn.Sequential(NestMLP(), nn.Linear(16, 20), FixedHiddenMLP())
chimera(X) |
70 | null | null | null | import warnings
warnings.filterwarnings(action='ignore')
import paddle
from paddle import nn
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = paddle.rand([2, 4])
net(X)
net.state_dict()['2.bias']
def block1():
return nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 4), nn.ReLU())
def block2():
net = nn.Sequential()
for i in range(4):
net.add_sublayer(f'block {i}', block1())
return net
rgnet = nn.Sequential(block2(), nn.Linear(4, 1))
rgnet(X)
def init_normal(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Normal(mean=0.0, std=0.01)(m.weight)
paddle.nn.initializer.Constant(value=0.0)(m.bias)
net.apply(init_normal)
net[0].weight[0],net[0].state_dict()['bias']
def init_constant(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Constant(value=1.0)(m.weight)
paddle.nn.initializer.Constant(value=0.0)(m.bias)
net.apply(init_constant)
net[0].weight[0],net[0].state_dict()['bias']
def xavier(m):
if type(m) == nn.Linear:
paddle.nn.initializer.XavierUniform()(m.weight)
def init_42(m):
if type(m) == nn.Linear:
paddle.nn.initializer.Constant(value=42.0)(m.weight)
net[0].apply(xavier)
net[2].apply(init_42)
def my_init(m):
if type(m) == nn.Linear:
print("Init", *[(name, param.shape) for name, param in m.named_parameters()][0])
paddle.nn.initializer.Uniform(low=-10, high=10)(m.weight)
h = (paddle.abs(m.weight) >= 5).astype(m.weight.dtype)
m.weight.set_value(m.weight * h)
net.apply(my_init)
net[0].weight[:2]
net[0].weight.set_value(net[0].weight.numpy() + 1)
val = net[0].weight.numpy()
val[0, 0] = 42
net[0].weight.set_value(val)
net[0].weight[0]
layer = CenteredLayer()
layer(paddle.to_tensor([1, 2, 3, 4, 5], dtype='float32'))
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer()) |
license: apache-2.0
This is an exact copy of the dataset from the original github repo: https://github.com/WeixiangYAN/CodeTransOcean.git
CodeTransOcean: A Comprehensive Multilingual Benchmark for Code Translation
CodeTransOcean is a large-scale, comprehensive benchmark that supports the largest variety of programming languages for code translation. It consists of three novel multilingual datasets: MultilingualTrans, which supports translation between multiple popular programming languages; NicheTrans, which translates between niche programming languages and popular ones; and LLMTrans, which evaluates the executability of code translated by large language models (LLMs). CodeTransOcean also includes a novel cross-framework dataset, DLTrans, for translating deep learning code across different frameworks.
Datasets
Code
Experiments on the MultilingualTrans, NicheTrans, and DLTrans datasets were run with CodeT5+; the code is in the CodeT5+ file.
Experiments on the LLMTrans dataset were run with GPT-3.5; the code is in the ChatGPT file.
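As a quick-start sketch (not part of the original repository), the data can typically be loaded with the Hugging Face `datasets` library. The repository id, configuration name, and split used below are assumptions made for illustration; check the dataset page for the actual values.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# The repository id ("WeixiangYan/CodeTransOcean"), the configuration name
# ("dltrans"), and the split name are placeholders, not confirmed values.
from datasets import load_dataset

ds = load_dataset("WeixiangYan/CodeTransOcean", "dltrans")
print(ds)                 # shows the available splits
example = ds["train"][0]  # one translation pair; field names depend on the configuration
print(example.keys())
```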
Citation
Please cite the paper if you use the data or code from CodeTransOcean.
@article{yan2023codetransocean,
title={CodeTransOcean: A Comprehensive Multilingual Benchmark for Code Translation},
author={Yan, Weixiang and Tian, Yuchen and Li, Yunzhe and Chen, Qian and Wang, Wen},
journal={arXiv preprint arXiv:2310.04951},
year={2023}
}
Contact
For questions, please feel free to reach out via email at yanweixiang.ywx@gmail.com.