import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.nn.utils.spectral_norm as spectral_norm
from models.networks.normalization import FADE
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d

# ResNet block that uses FADE.
# It differs from the ResNet block of SPADE in that
# it takes the feature map as input and learns the skip connection if necessary.
# This is a fairly standard residual-block architecture for unconditional or
# class-conditional GANs.
# The code was inspired by https://github.com/LMescheder/GAN_stability
# and https://github.com/NVlabs/SPADE.
class FADEResnetBlock(nn.Module):
    def __init__(self, fin, fout, opt):
        super().__init__()
        # attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = fin

        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_G:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        fade_config_str = opt.norm_G.replace('spectral', '')
        self.norm_0 = FADE(fade_config_str, fin, fin)
        self.norm_1 = FADE(fade_config_str, fmiddle, fmiddle)
        if self.learned_shortcut:
            self.norm_s = FADE(fade_config_str, fin, fin)

    # Note the resnet block with FADE also takes in |feat|,
    # the feature map as input
    def forward(self, x, feat):
        x_s = self.shortcut(x, feat)

        dx = self.conv_0(self.actvn(self.norm_0(x, feat)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, feat)))

        out = x_s + dx
        return out

    def shortcut(self, x, feat):
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, feat))
        else:
            x_s = x
        return x_s

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)
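
# Usage sketch for FADEResnetBlock: it is driven with both the current
# activations and a matching-resolution feature map. The norm_G string and
# the SimpleNamespace stand-in for the real option object below are
# assumptions; the actual values come from the training options parser.
#
#   from types import SimpleNamespace
#   opt = SimpleNamespace(norm_G='spectralfadesyncbatch3x3')  # assumed format
#   block = FADEResnetBlock(1024, 512, opt)
#   x = torch.randn(1, 1024, 8, 8)     # current decoder activations
#   feat = torch.randn(1, 1024, 8, 8)  # feature map at the same resolution
#   out = block(x, feat)               # -> shape (1, 512, 8, 8)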


class StreamResnetBlock(nn.Module):
    def __init__(self, fin, fout, opt):
        super().__init__()
        # attributes
        self.learned_shortcut = (fin != fout)
        fmiddle = fin

        # create conv layers
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)

        # apply spectral norm if specified
        if 'spectral' in opt.norm_S:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)

        # define normalization layers
        subnorm_type = opt.norm_S.replace('spectral', '')
        if subnorm_type == 'batch':
            self.norm_layer_in = nn.BatchNorm2d(fin, affine=True)
            self.norm_layer_out = nn.BatchNorm2d(fout, affine=True)
            if self.learned_shortcut:
                self.norm_layer_s = nn.BatchNorm2d(fout, affine=True)
        elif subnorm_type == 'syncbatch':
            self.norm_layer_in = SynchronizedBatchNorm2d(fin, affine=True)
            self.norm_layer_out = SynchronizedBatchNorm2d(fout, affine=True)
            if self.learned_shortcut:
                self.norm_layer_s = SynchronizedBatchNorm2d(fout, affine=True)
        elif subnorm_type == 'instance':
            self.norm_layer_in = nn.InstanceNorm2d(fin, affine=False)
            self.norm_layer_out = nn.InstanceNorm2d(fout, affine=False)
            if self.learned_shortcut:
                self.norm_layer_s = nn.InstanceNorm2d(fout, affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)

    def forward(self, x):
        x_s = self.shortcut(x)

        dx = self.actvn(self.norm_layer_in(self.conv_0(x)))
        dx = self.actvn(self.norm_layer_out(self.conv_1(dx)))

        out = x_s + dx
        return out

    def shortcut(self, x):
        if self.learned_shortcut:
            x_s = self.actvn(self.norm_layer_s(self.conv_s(x)))
        else:
            x_s = x
        return x_s

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1)
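
# Usage sketch for StreamResnetBlock: unlike the FADE block it is a plain
# (non-conditional) residual block, so it only takes x. The norm_S value
# below is an assumption; the constructor accepts 'batch', 'syncbatch' or
# 'instance', optionally prefixed with 'spectral'.
#
#   opt = SimpleNamespace(norm_S='spectralinstance')
#   stream_block = StreamResnetBlock(64, 128, opt)
#   y = stream_block(torch.randn(1, 64, 32, 32))  # -> shape (1, 128, 32, 32)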


# ResNet block used in pix2pixHD
# We keep the same architecture as pix2pixHD.
class ResnetBlock(nn.Module):
    def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3):
        super().__init__()

        pw = (kernel_size - 1) // 2
        self.conv_block = nn.Sequential(
            nn.ReflectionPad2d(pw),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)),
            activation,
            nn.ReflectionPad2d(pw),
            norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size))
        )

    def forward(self, x):
        y = self.conv_block(x)
        out = x + y
        return out
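
# Usage sketch for ResnetBlock: norm_layer is expected to be a callable that
# wraps a conv layer in a normalization (e.g. the partial returned by a
# get_nonspade_norm_layer-style helper); the lambda below is a hypothetical
# stand-in for illustration.
#
#   norm_layer = lambda layer: nn.Sequential(layer, nn.InstanceNorm2d(layer.out_channels))
#   res_block = ResnetBlock(256, norm_layer)
#   z = res_block(torch.randn(1, 256, 16, 16))  # -> shape (1, 256, 16, 16)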


# VGG architecture, used for the perceptual loss using a pretrained VGG network
class VGG19(torch.nn.Module):
    def __init__(self, requires_grad=False):
        super().__init__()
        vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        for x in range(2):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(2, 7):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(7, 12):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(12, 21):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(21, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
        return out
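
# Usage sketch for VGG19: the five slices end at relu1_1, relu2_1, relu3_1,
# relu4_1 and relu5_1 of VGG-19, the usual tap points for a perceptual (VGG)
# loss. fake_image and real_image are placeholder tensors here:
#
#   vgg = VGG19().eval()
#   feats_fake = vgg(fake_image)  # list of 5 feature maps
#   feats_real = vgg(real_image)
#   loss = sum(F.l1_loss(f, r.detach())
#              for f, r in zip(feats_fake, feats_real))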