# datnguyentien204's picture
# Upload 1403 files
# 83034b6 verified
import torch.nn as nn
from .DecoderCNN import Decoder_MV, vgg_structures,decoder_stem # DecoderCNN
from .transformer_decoder import TransformerDecoder # TransformerDecoder
# compute channel-wise means and variances of features
def calc_mean_std(feat, eps=1e-5):
    """Return the channel-wise mean and standard deviation of a feature map.

    Args:
        feat: tensor of shape (B, C, H, W).
        eps: small constant added to the variance before the square root,
            for numerical stability.

    Returns:
        A ``(mean, std)`` pair, each of shape (B, C, 1, 1) so they broadcast
        against ``feat``.
    """
    assert len(feat.size()) == 4, 'The shape of feature needs to be a tuple with length 4.'
    batch, channels = feat.size()[:2]
    flat = feat.reshape(batch, channels, -1)
    mean = flat.mean(dim=2).reshape(batch, channels, 1, 1)
    std = (flat.var(dim=2) + eps).sqrt().reshape(batch, channels, 1, 1)
    return mean, std
# normalize features
def mean_variance_norm(feat):
    """Normalize *feat* channel-wise to zero mean and unit variance.

    Args:
        feat: tensor of shape (B, C, H, W).

    Returns:
        Tensor of the same shape with each (B, C) channel whitened by its
        own mean and standard deviation.
    """
    shape = feat.size()
    mean, std = calc_mean_std(feat)
    return (feat - mean.expand(shape)) / std.expand(shape)
########################################## Net ##########################################
class StyTr(nn.Module):
    """Style-transfer network combining a content encoder, a style encoder,
    a transformer fusion module and a CNN decoder, with a frozen VGG-style
    loss network for perceptual losses.

    ``forward`` returns ``(loss_c, loss_s, loss_id_1, loss_id_2, i_cs)``:
    content loss, style loss, image-level identity loss, feature-level
    identity loss, and the stylized image.
    """

    def __init__(self, SEencoder, HEencoder, decoder, transModule, lossNet, alpha, mode):
        super().__init__()
        self.mse_loss = nn.MSELoss()
        self.SEencoder = SEencoder    # content ("structure") encoder
        self.HEencoder = HEencoder    # style encoder; takes alpha/mode for haze-noise adjustment
        self.decoder = decoder
        self.transModule = transModule
        self.alpha = alpha            # forwarded to HEencoder each call
        self.mode = mode              # forwarded to HEencoder each call
        # Slice the loss network into five stages; boundaries correspond to
        # the relu*_1 layers of a VGG19 features module.
        lossNet_layers = list(lossNet.children())
        self.feat_1 = nn.Sequential(*lossNet_layers[:4])     # input   -> relu1_1
        self.feat_2 = nn.Sequential(*lossNet_layers[4:11])   # relu1_1 -> relu2_1
        self.feat_3 = nn.Sequential(*lossNet_layers[11:18])  # relu2_1 -> relu3_1
        self.feat_4 = nn.Sequential(*lossNet_layers[18:31])  # relu3_1 -> relu4_1
        self.feat_5 = nn.Sequential(*lossNet_layers[31:44])  # relu4_1 -> relu5_1 (comment fixed: was mislabeled)
        # Freeze the loss network: it only computes perceptual losses.
        for name in ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5']:
            for param in getattr(self, name).parameters():
                param.requires_grad = False

    def get_interal_feature(self, input):
        """Run *input* through the five frozen stages, returning every stage's output."""
        result = []
        for i in range(5):
            input = getattr(self, 'feat_{:d}'.format(i + 1))(input)
            result.append(input)
        return result

    def calc_content_loss(self, input, target, norm=False):
        """MSE between two feature maps, optionally after mean/variance normalization.

        ``target`` must not require grad: it serves as a fixed reference.
        """
        assert input.size() == target.size(), 'To calculate loss needs the same shape between input and target.'
        assert not target.requires_grad, 'To calculate loss target should not require grad.'
        if not norm:
            return self.mse_loss(input, target)
        return self.mse_loss(mean_variance_norm(input), mean_variance_norm(target))

    def calc_style_loss(self, input, target):
        """MSE between channel-wise (mean, std) statistics of two feature maps."""
        assert input.size() == target.size(), 'To calculate loss needs the same shape between input and target.'
        assert not target.requires_grad, 'To calculate loss target should not require grad.'
        input_mean, input_std = calc_mean_std(input)
        target_mean, target_std = calc_mean_std(target)
        return self.mse_loss(input_mean, target_mean) + \
               self.mse_loss(input_std, target_std)

    def forward(self, i_c, i_s):
        """Stylize content image *i_c* with style image *i_s* and compute all losses.

        Returns:
            (loss_c, loss_s, loss_id_1, loss_id_2, i_cs)
        """
        f_c = self.SEencoder(i_c)
        # Haze input-noise adjustment is applied inside the style encoder.
        f_s = self.HEencoder(i_s, alpha=self.alpha, mode=self.mode)
        f_c, f_c_reso = f_c[0], f_c[2]
        f_s, f_s_reso = f_s[0], f_s[2]  # f_s_reso unused; kept for symmetry with f_c
        f_cs = self.transModule(f_c, f_s)  # content fused with style
        f_cc = self.transModule(f_c, f_c)  # identity: content with itself
        f_ss = self.transModule(f_s, f_s)  # identity: style with itself
        i_cs = self.decoder(f_cs, f_c_reso)
        i_cc = self.decoder(f_cc, f_c_reso)
        i_ss = self.decoder(f_ss, f_c_reso)
        f_c_loss = self.get_interal_feature(i_c)
        f_s_loss = self.get_interal_feature(i_s)
        f_i_cs_loss = self.get_interal_feature(i_cs)
        f_i_cc_loss = self.get_interal_feature(i_cc)
        f_i_ss_loss = self.get_interal_feature(i_ss)
        # Image-level identity loss: self-stylization should reproduce the input.
        loss_id_1 = self.mse_loss(i_cc, i_c) + self.mse_loss(i_ss, i_s)
        # Content loss on the two deepest stages, with normalized features.
        # (Removed dead `loss_c = 0` that the original immediately overwrote.)
        loss_c = self.calc_content_loss(f_i_cs_loss[-2], f_c_loss[-2], norm=True) + \
                 self.calc_content_loss(f_i_cs_loss[-1], f_c_loss[-1], norm=True)
        loss_s, loss_id_2 = 0, 0
        for i in range(1, 5):
            loss_s += self.calc_style_loss(f_i_cs_loss[i], f_s_loss[i])
            loss_id_2 += self.mse_loss(f_i_cc_loss[i], f_c_loss[i]) + self.mse_loss(f_i_ss_loss[i], f_s_loss[i])
        return loss_c, loss_s, loss_id_1, loss_id_2, i_cs