import torch
import torch.nn as nn
class Generator(nn.Module):
    def __init__(self, z_dim=100, img_channels=3):
        super(Generator, self).__init__()
        self.gen = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(z_dim, 512, 4, 1, 0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            # state size. 512 x 4 x 4
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            # state size. 256 x 8 x 8
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            # state size. 128 x 16 x 16
            nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(True),
            # state size. 64 x 32 x 32
            nn.ConvTranspose2d(64, img_channels, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. img_channels x 64 x 64
        )

    def forward(self, input):
        return self.gen(input)
class Discriminator(nn.Module):
    def __init__(self, img_channels=3):
        super(Discriminator, self).__init__()
        self.disc = nn.Sequential(
            # input is img_channels x 64 x 64
            nn.Conv2d(img_channels, 64, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. 64 x 32 x 32
            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. 128 x 16 x 16
            nn.Conv2d(128, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. 256 x 8 x 8
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. 512 x 4 x 4
            nn.Conv2d(512, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        return self.disc(input).view(-1, 1).squeeze(1)
batch_size = 32
latent_vector_size = 100

generator = Generator()
discriminator = Discriminator()

generator.load_state_dict(torch.load('netG.pth', map_location=torch.device('cpu')))
discriminator.load_state_dict(torch.load('netD.pth', map_location=torch.device('cpu')))
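
# The block below is a minimal sampling sketch, not part of the original file:
# it assumes the weights loaded above are used for inference only, so the models
# are switched to eval mode and gradients are disabled. The noise shape
# (batch_size, latent_vector_size, 1, 1) is the 4-D input expected by the first
# ConvTranspose2d layer of the Generator defined above.
generator.eval()
discriminator.eval()

with torch.no_grad():
    # sample a batch of latent vectors and map them to 64x64 images in [-1, 1]
    noise = torch.randn(batch_size, latent_vector_size, 1, 1)
    fake_images = generator(noise)       # shape: (batch_size, 3, 64, 64)
    scores = discriminator(fake_images)  # per-image realism scores in (0, 1)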