# models/pix2pix_model.py
import datetime

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.utils import save_image

import models.networks as networks
import util.util as util
import util.logging_wandb as wblogging
from .networks.generator_module.schedule import CosineAnnealingWarmUpLR


class Pix2PixModel(torch.nn.Module):
@staticmethod
def modify_commandline_options(parser, is_train):
networks.modify_commandline_options(parser, is_train)
parser.add_argument('--lrG_decay', type=float, default=1e-4, help='learning rate decay')
parser.add_argument('--lrG', type=float, default=12e-4, help='initial learning rate for adam')
parser.add_argument('--save_dir', default=r'/home/share/VAL_ImgTranslations/experiments',
help='Directory to save the model')
parser.add_argument('--content_weight', type=float, default=2.0, help='weight for content reconstruction loss')
parser.add_argument('--save_model_interval', type=int, default=300)
parser.add_argument('--style_weight', type=float, default=3.0, help='weight for style reconstruction loss')
parser.add_argument('--max_iter', type=int, default=700)
parser.add_argument('--n_threads', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--id1_weight', type=float, default=50)
parser.add_argument('--id2_weight', type=float, default=1)
parser.add_argument('--loss_count_interval', type=int, default=200)
return parser
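    # Usage sketch for the options above (hedged: assumes an argparse-style parser
    # and that networks.modify_commandline_options only registers extra arguments):
    #
    #   import argparse
    #   parser = argparse.ArgumentParser()
    #   parser = Pix2PixModel.modify_commandline_options(parser, is_train=True)
    #   opt = parser.parse_args([])  # defaults: lrG=12e-4, max_iter=700, batch_size=1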
def __init__(self, opt):
super().__init__()
self.opt = opt
        self.loss = 0
        self.out = None
        self.content_weight = opt.content_weight
        self.style_weight = opt.style_weight
        self.lrG_decay = opt.lrG_decay
        self.lrG = opt.lrG
        self.loss_count_interval = opt.loss_count_interval
        self.save_model_interval = opt.save_model_interval
        self.n_threads = opt.n_threads
        self.save_dir = opt.save_dir
        self.id1_weight = opt.id1_weight
        self.id2_weight = opt.id2_weight
        self.max_iter = opt.max_iter
        self.epoch = 0
        self.batch_size = opt.batch_size
self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() \
else torch.FloatTensor
self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() \
else torch.ByteTensor
        self.netG, self.netD, self.netD2, self.netE = self.initialize_networks(opt)
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
# set loss functions
if opt.isTrain:
self.criterionGAN = networks.GANLoss(
opt.gan_mode, tensor=self.FloatTensor, opt=self.opt)
self.criterionFeat = torch.nn.L1Loss()
if not opt.no_vgg_loss:
self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids)
if opt.use_vae:
self.KLDLoss = networks.KLDLoss()
    # Entry point for all calls involving a forward pass of the deep networks.
    # Since the DataParallel module can't parallelize custom functions, we
    # branch to different routines based on |mode|.
    def forward(self, data, mode, iters, progress, epochs, images_iters):
input_semantics, real_image = self.preprocess_input(data)
# print("input_semantics: ", input_semantics)
# print("real_image: ", real_image.shape)
if mode == 'generator':
            g_loss, generated = self.compute_generator_loss(
                input_semantics, real_image, iters=iters, progress=progress,
                epoch_model=epochs, images_iter=images_iters)
return g_loss, generated
elif mode == 'discriminator':
            d_loss, d2_loss = self.compute_discriminator_loss(
                input_semantics, real_image, iters=iters)
return d_loss, d2_loss
elif mode == 'encode_only':
z, mu, logvar = self.encode_z(real_image)
return mu, logvar
elif mode == 'inference':
            contents = F.interpolate(real_image, size=(224, 224), mode='bilinear', align_corners=False)
            styles = F.interpolate(input_semantics, size=(224, 224), mode='bilinear', align_corners=False)
            model = self.generate_fake(contents, styles, iters, mode="gen", progress=None)
            with torch.no_grad():
                self.opt.alpha = self.opt.alpha.to(self.device)
                _, _, _, _, output = model(contents, styles)
            # upsample the 224x224 output back to the 256x256 working size (224 * 8/7 = 256)
            fake_image = F.interpolate(output, scale_factor=8 / 7, mode='bilinear', align_corners=False)
            return fake_image
else:
raise ValueError("|mode| is invalid")
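    # Call sketch from a training loop (hedged: `model`, `dataloader`, `epoch`,
    # and `progress` are assumed names from the surrounding trainer, not this file):
    #
    #   for i, data_i in enumerate(dataloader):
    #       g_losses, generated = model(data_i, mode='generator', iters=i,
    #                                   progress=progress, epochs=epoch, images_iters=i)
    #       d_losses, d2_losses = model(data_i, mode='discriminator', iters=i,
    #                                   progress=None, epochs=epoch, images_iters=i)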
def create_optimizers(self, opt):
G_params = list(self.netG.parameters())
if opt.use_vae:
G_params += list(self.netE.parameters())
if opt.isTrain:
D_params = list(self.netD.parameters())
D2_params = list(self.netD2.parameters())
if opt.no_TTUR:
beta1, beta2 = opt.beta1, opt.beta2
G_lr, D_lr = opt.lr, opt.lr
else:
beta1, beta2 = 0, 0.9
G_lr, D_lr = opt.lr / 2, opt.lr * 2
optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2))
optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2))
optimizer_D2 = torch.optim.Adam(D2_params, lr=D_lr, betas=(beta1, beta2))
return optimizer_G, optimizer_D, optimizer_D2
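    # TTUR note: without --no_TTUR, the two time-scale update rule is applied.
    # Assuming opt.lr = 0.0002 (the usual SPADE default, defined in the options
    # code rather than here): G_lr = 0.0001, D_lr = 0.0004, betas = (0, 0.9).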
def save(self, epoch):
util.save_network(self.netG, 'G', epoch, self.opt)
util.save_network(self.netD, 'D', epoch, self.opt)
util.save_network(self.netD2, 'D2', epoch, self.opt)
if self.opt.use_vae:
util.save_network(self.netE, 'E', epoch, self.opt)
############################################################################
# Private helper methods
############################################################################
def initialize_networks(self, opt):
netG = networks.define_G(opt)
netD = networks.define_D(opt) if opt.isTrain else None
netD2 = networks.define_D(opt) if opt.isTrain else None
netE = networks.define_E(opt) if opt.use_vae else None
if not opt.isTrain or opt.continue_train:
netG = util.load_network(netG, 'G', opt.which_epoch, opt)
if opt.isTrain:
netD = util.load_network(netD, 'D', opt.which_epoch, opt)
netD2 = util.load_network(netD2, 'D2', opt.which_epoch, opt)
if opt.use_vae:
netE = util.load_network(netE, 'E', opt.which_epoch, opt)
return netG, netD, netD2, netE
# preprocess the input, such as moving the tensors to GPUs
# and transforming the label map to one-hot encoding (for SIS)
# |data|: dictionary of the input data
def preprocess_input(self, data):
# move to GPU and change data types
if self.opt.task == 'SIS':
data['label'] = data['label'].long()
if self.use_gpu():
data['label'] = data['label'].cuda()
            if 'instance' in data:
                # the instance map may be a non-tensor placeholder when unused
                if torch.is_tensor(data['instance']):
                    data['instance'] = data['instance'].cuda()
            else:
                data['instance'] = None  # no instance map provided
data['image'] = data['image'].cuda()
# create one-hot label map for SIS
if self.opt.task == 'SIS':
label_map = data['label']
bs, _, h, w = label_map.size()
nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \
else self.opt.label_nc
input_label = self.FloatTensor(bs, nc, h, w).zero_()
input_semantics = input_label.scatter_(1, label_map, 1.0)
# concatenate instance map if it exists
if not self.opt.no_instance:
inst_map = data['instance']
instance_edge_map = self.get_edges(inst_map)
input_semantics = torch.cat((input_semantics, instance_edge_map), dim=1)
else:
input_semantics = data['label']
return input_semantics, data['image']
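    # A self-contained sketch of the scatter_-based one-hot encoding used above
    # (toy shapes for illustration; not part of the training pipeline):
    #
    #   import torch
    #   label_map = torch.tensor([[[[0, 1], [1, 0]]]])            # (bs=1, 1, h=2, w=2)
    #   one_hot = torch.zeros(1, 2, 2, 2).scatter_(1, label_map, 1.0)
    #   # channel 0 is 1 where label == 0, channel 1 is 1 where label == 1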
    def compute_generator_loss(self, content, style, iters, progress, epoch_model, images_iter):
        G_losses = {}
        fake_image, loss_StyTr2 = self.generate_fake(content, style, iters=iters, progress=progress,
                                                     epochs=epoch_model, image_iters=images_iter)
        # loss from Discriminator 1 (fake vs. content), weighted by (1 - alpha)
        pred_fake, pred_real_c = self.discriminate(fake_image, content)
        G_losses['GAN'] = self.criterionGAN(pred_fake, True, for_discriminator=False) * (1 - self.opt.alpha)
        # loss from Discriminator 2 (fake vs. style), weighted by alpha
        pred_fake, pred_real = self.discriminate(fake_image, style, which_D='D2')
        G_losses['GAN'] = G_losses['GAN'] + self.criterionGAN(pred_fake, True, for_discriminator=False) * self.opt.alpha
        # unweighted loss from the task discriminator (conditioned on content for SIS)
        if self.opt.task == 'SIS':
            pred_fake, pred_real = self.discriminate(fake_image, style, content)
        else:
            pred_fake, pred_real = self.discriminate(fake_image, style)
        G_losses['GAN'] = G_losses['GAN'] + self.criterionGAN(pred_fake, True, for_discriminator=False)
        # fold in the StyTr2 loss returned by generate_fake
        G_losses['GAN'] = G_losses['GAN'] + loss_StyTr2
if not self.opt.no_ganFeat_loss:
num_D = len(pred_fake)
GAN_Feat_loss = torch.tensor(0.0, dtype=torch.float32, device=self.device)
for i in range(num_D): # for each discriminator
# last output is the final prediction, so we exclude it
num_intermediate_outputs = len(pred_fake[i]) - 1
for j in range(num_intermediate_outputs): # for each layer output
unweighted_loss = self.criterionFeat(
pred_fake[i][j], pred_real[i][j].detach())
GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D
G_losses['GAN_Feat'] = GAN_Feat_loss
if not self.opt.no_vgg_loss:
target = style if self.opt.task == 'SIS' else content
G_losses['VGG'] = self.criterionVGG(fake_image, target) * self.opt.lambda_vgg
return G_losses, fake_image
    def compute_discriminator_loss(self, content, style, iters):
D_losses = {}
D2_losses = {}
contents = F.interpolate(content, size=(224, 224), mode='bilinear', align_corners=False)
styles = F.interpolate(style, size=(224, 224), mode='bilinear', align_corners=False)
        model = self.generate_fake(contents, styles, iters, mode="gen", progress=None)
        with torch.no_grad():
            _, _, _, _, output = model(contents, styles)
        # upsample the 224x224 output back to the 256x256 working size (224 * 8/7 = 256)
        fake_image = F.interpolate(output, scale_factor=8 / 7, mode='bilinear', align_corners=False)
# For Discriminator 1 between fake and content
pred_fake, pred_real = self.discriminate(fake_image, content)
D_losses['D_Fake'] = self.criterionGAN(pred_fake, False, for_discriminator=True) * (1 - self.opt.alpha)
D_losses['D_real'] = self.criterionGAN(pred_real, True, for_discriminator=True)
        pred_fake, pred_real = self.discriminate(fake_image, style, which_D='D2')
D2_losses['D_Fake'] = self.criterionGAN(pred_fake, False, for_discriminator=True) * self.opt.alpha
D2_losses['D_real'] = self.criterionGAN(pred_real, True, for_discriminator=True)
return D_losses, D2_losses
def encode_z(self, real_image):
mu, logvar = self.netE(real_image)
z = self.reparameterize(mu, logvar)
return z, mu, logvar
def custom_epoch(self, curr_index_image):
if curr_index_image <= 5:
return self.max_iter
elif 5 < curr_index_image < 15:
return int(self.max_iter * 0.6)
elif 15 <= curr_index_image < 25:
return int(self.max_iter * 0.55)
elif 25 <= curr_index_image < 40:
return int(self.max_iter * 0.5)
elif 40 <= curr_index_image < 60:
return int(self.max_iter * 0.375)
elif 60 <= curr_index_image < 80:
return int(self.max_iter * 0.25)
elif 80 <= curr_index_image < 120:
return int(self.max_iter * 0.1)
else:
return int(self.max_iter * 0.05)
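    # With the default max_iter = 700, the schedule above yields, per image index:
    #   <= 5: 700,   6-14: 420,   15-24: 385,  25-39: 350,
    #   40-59: 262,  60-79: 175,  80-119: 70,  >= 120: 35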
    def save_generator_weights_by_iters(self, net, label, epoch, iters):
        util.save_generator_by_iter(net, label, epoch, iters, self.opt)
    def generate_fake(self, input_semantics, real_image, iters, mode="train", progress=None, epochs=None, image_iters=None):
        ### generate fake images; "train" mode optimizes the generator on the current image
if mode == "train":
models_Generator = self.netG()
optimizer = torch.optim.Adam([
{'params': models_Generator.SEencoder.parameters()},
{'params': models_Generator.decoder.parameters()},
{'params': models_Generator.transModule.parameters()},
], lr=self.lrG_decay)
scheduler = CosineAnnealingWarmUpLR(optimizer, warmup_step=self.max_iter // 4, max_step=self.max_iter,
min_lr=0)
contents = real_image.to(self.device)
style = input_semantics.to(self.device)
content_images = F.interpolate(contents, size=(224, 224), mode='bilinear', align_corners=False)
style_images = F.interpolate(style, size=(224, 224), mode='bilinear', align_corners=False)
total_loss = 0
            # open the training log file in append mode
            log_file = open("train_log.txt", "a")
with progress:
                task = progress.add_task(
                    f"[green]Iters on one image [{iters}|{self.custom_epoch(iters)}] - Total_Loss: {total_loss}",
                    total=self.custom_epoch(iters)
                )
                total_i = 0
                for i in range(self.custom_epoch(curr_index_image=iters)):
                    # 1-based iteration counter, used by the (disabled) W&B logging below
                    total_i = i + 1
loss_c, loss_s, loss_id_1, loss_id_2, out = models_Generator(content_images, style_images)
self.out = out
loss_all = (self.content_weight * loss_c + self.style_weight * loss_s +
self.id1_weight * loss_id_1 + self.id2_weight * loss_id_2)
                    # total scalar loss for display and logging
                    total_loss = round(float(loss_all.sum().detach().cpu().numpy()), 3)
                    print(f"Loss_model {loss_all.sum().detach().cpu().numpy()} "
                          f"==> Content_Loss {loss_c.sum().detach().cpu().numpy()} "
                          f"==> Style_Loss {loss_s.sum().detach().cpu().numpy()} "
                          f"==> ID_1_Loss {loss_id_1.sum().detach().cpu().numpy()} "
                          f"==> ID_2_Loss {loss_id_2.sum().detach().cpu().numpy()}")
                    #### W&B per-image logging (currently disabled)
# wblogging.upload_all_loss_on_one_images(loss_value=total_loss, iters_all_images=image_iters,
# iters_one_imgs=total_i, epoch=epochs)
# wblogging.upload_l1_loss_on_one_images(
# loss_value=round(float(loss_id_1.sum().cpu().detach().numpy()), 3),
# iters_all_images=image_iters, iters_one_imgs=total_i, epoch=epochs)
# wblogging.upload_l2_loss_on_one_images(
# loss_value=round(float(loss_id_2.sum().cpu().detach().numpy()), 3),
# iters_all_images=image_iters, iters_one_imgs=total_i, epoch=epochs)
# wblogging.upload_content_loss_on_one_images(
# loss_value=round(float(loss_c.sum().cpu().detach().numpy()), 3),
# iters_all_images=image_iters, iters_one_imgs=total_i, epoch=epochs)
# wblogging.upload_style_loss_on_one_images(
# loss_value=round(float(loss_s.sum().cpu().detach().numpy()), 3),
# iters_all_images=image_iters, iters_one_imgs=total_i, epoch=epochs)
current_lr = optimizer.param_groups[0]['lr']
wblogging.upload_lr(current_lr, epoch=epochs)
                    ### end per-image logging
                    # current time, formatted as [SS-MM-HH-dd-mm-yyyy]
                    current_time = datetime.datetime.now().strftime("[%S-%M-%H-%d-%m-%Y]")
                    # append this iteration's losses to the log file
log_file.write(
f"{current_time} Iter: {i}, Total Loss: {total_loss}, Content Loss: {loss_c.sum().cpu().detach().numpy()}, "
f"Style Loss: {loss_s.sum().cpu().detach().numpy()}, ID1 Loss: {loss_id_1.sum().cpu().detach().numpy()}, "
f"ID2 Loss: {loss_id_2.sum().cpu().detach().numpy()}\n")
                    progress.update(task, advance=1,
                                    description=f"[green]Iters on one image [{i + 1}|{self.custom_epoch(iters)}] - Total_Loss: {total_loss}")
# Update parameters
optimizer.zero_grad()
loss_all.backward()
optimizer.step()
scheduler.step()
                    # save generator weights at the final iteration for this image
                    if i + 1 == self.custom_epoch(curr_index_image=iters):
                        self.save_generator_weights_by_iters(self.netG, 'G', epoch=self.epoch, iters=i + 1)
                    if i % 100 == 0:
                        output_name = f'{self.save_dir}/test/{str(i)}.jpg'
                        # stack style, content, and output rows into one preview grid
                        preview = torch.cat((style_images, torch.cat((content_images, out), 0)), 0)
                        save_image(preview, output_name)
                # close the log file once training on this image finishes
                log_file.close()
            #### W&B logging across all images (currently disabled)
# wblogging.upload_loss_all(loss_all=total_loss, iters=image_iters, epoch=epochs)
# wblogging.upload_l1_loss_all(loss_values=round(float(loss_id_1.sum().cpu().detach().numpy()), 3),
# iters_images=image_iters, epoch=epochs)
# wblogging.upload_l2_loss(loss_values=round(float(loss_id_2.sum().cpu().detach().numpy()), 3),
# iters=image_iters, epoch=epochs)
# wblogging.upload_content_loss(loss_c=round(float(loss_c.sum().cpu().detach().numpy()), 3),
# iters=image_iters, epoch=epochs)
# wblogging.upload_style_loss(loss_s=round(float(loss_id_2.sum().cpu().detach().numpy()), 3),
# iters=image_iters, epoch=epochs)
            ### end W&B logging across all images
            # upsample the 224x224 output back to the 256x256 working size (224 * 8/7 = 256)
            out = F.interpolate(self.out, scale_factor=8 / 7, mode='bilinear', align_corners=False)
            # self.loss is the StyTr2 loss term reported back to compute_generator_loss
            return out, self.loss
        # "gen" mode: return the generator for validation / inference
        if mode == "gen":
models_Generator = self.netG()
models_Generator.eval()
models_Generator.to(self.device)
return models_Generator
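    # "gen" mode hands back the generator in eval mode; the inference and
    # discriminator paths above run it under no_grad, e.g.:
    #
    #   model = self.generate_fake(contents, styles, iters, mode="gen", progress=None)
    #   with torch.no_grad():
    #       _, _, _, _, output = model(contents, styles)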
# Given fake and real image, return the prediction of discriminator
# for each fake and real image. The condition is used in SIS.
    def discriminate(self, fake_image, real_image, condition=None, which_D='D1'):
if self.opt.task == 'SIS':
assert condition is not None
fake_concat = torch.cat([condition, fake_image], dim=1)
real_concat = torch.cat([condition, real_image], dim=1)
else:
assert condition is None
fake_concat = fake_image
real_concat = real_image
# In Batch Normalization, the fake and real images are
# recommended to be in the same batch to avoid disparate
# statistics in fake and real images.
# So both fake and real images are fed to D all at once.
fake_and_real = torch.cat([fake_concat, real_concat], dim=0)
        if which_D == 'D1':
            discriminator_out = self.netD(fake_and_real)
        else:
            discriminator_out = self.netD2(fake_and_real)
pred_fake, pred_real = self.divide_pred(discriminator_out)
return pred_fake, pred_real
# Take the prediction of fake and real images from the combined batch
def divide_pred(self, pred):
# the prediction contains the intermediate outputs of multi-scale GAN,
# so it's usually a list
        if isinstance(pred, list):
fake = []
real = []
for p in pred:
fake.append([tensor[:tensor.size(0) // 2] for tensor in p])
real.append([tensor[tensor.size(0) // 2:] for tensor in p])
else:
fake = pred[:pred.size(0) // 2]
real = pred[pred.size(0) // 2:]
return fake, real
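    # Minimal sketch of the split performed above (fake images occupy the first
    # half of the batch, as assembled in discriminate()):
    #
    #   import torch
    #   combined = torch.arange(8).view(8, 1)   # rows 0-3 fake, rows 4-7 real
    #   fake, real = combined[:8 // 2], combined[8 // 2:]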
def get_edges(self, t):
edge = self.ByteTensor(t.size()).zero_()
edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
return edge.float()
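    # Worked example: for t = torch.tensor([[[[1, 1], [1, 2]]]]), the neighbour
    # comparisons above mark every pixel adjacent to the 1/2 boundary, giving
    # edge = [[0, 1], [1, 1]].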
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std) + mu
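    # Reparameterization trick: z = mu + exp(0.5 * logvar) * eps with eps ~ N(0, I),
    # so gradients flow to mu and logvar while z stays stochastic. Sketch:
    #
    #   import torch
    #   mu, logvar = torch.zeros(4), torch.zeros(4)   # a unit Gaussian
    #   z = torch.randn_like(mu) * torch.exp(0.5 * logvar) + mu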
def use_gpu(self):
return len(self.opt.gpu_ids) > 0
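

# End-to-end usage sketch (hedged: `TrainOptions` is assumed by analogy with
# SPADE-style repos and is not defined in this file; not runnable standalone):
#
#   opt = TrainOptions().parse()
#   model = Pix2PixModel(opt)
#   optimizer_G, optimizer_D, optimizer_D2 = model.create_optimizers(opt)
#   ... train, then checkpoint with model.save(epoch)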