import argparse
import os
from pathlib import Path

import torch
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from tensorboardX import SummaryWriter
from torchvision import transforms
from torchvision.utils import save_image
from tqdm import tqdm

from StyTr2.models.DecoderCNN import Decoder_MV, vgg_structures
from StyTr2.models.StyTR import StyTr
from StyTr2.models.schedule import CosineAnnealingWarmUpLR
from StyTr2.models.transformerEncoder import TransformerEncoder
from StyTr2.models.transformer_decoder import TransformerDecoder
from StyTr2.sampler import InfiniteSamplerWrapper

# The ImageFile import is only useful with this flag set; assuming the usual
# guard so PIL tolerates truncated files in large scraped image folders.
ImageFile.LOAD_TRUNCATED_IMAGES = True


def train_transform():
    transform_list = [
        transforms.Resize(size=(512, 512)),
        transforms.RandomCrop(size=(224, 224)),
        transforms.ToTensor()
    ]
    return transforms.Compose(transform_list)


class FlatFolderDataset(data.Dataset):
    """Loads every image under `root`, flat or one directory level deep."""

    def __init__(self, root, transform):
        super(FlatFolderDataset, self).__init__()
        self.root = root
        print(self.root)
        entries = os.listdir(self.root)
        if os.path.isdir(os.path.join(self.root, entries[0])):
            # Nested layout: root/<subdir>/<image>
            self.paths = []
            for dir_name in entries:
                for file_name in os.listdir(os.path.join(self.root, dir_name)):
                    self.paths.append(os.path.join(self.root, dir_name, file_name))
        else:
            # Flat layout: root/<image>
            self.paths = list(Path(self.root).glob('*'))
        self.transform = transform

    def __getitem__(self, index):
        path = self.paths[index]
        img = Image.open(str(path)).convert('RGB')
        img = self.transform(img)
        return img

    def __len__(self):
        return len(self.paths)

    def name(self):
        return 'FlatFolderDataset'


def save_checkpoint(encoder, transModule, decoder, optimizer, scheduler, epoch,
                    log_c, log_s, log_id1, log_id2, log_all, loss_count_interval, save_path):
    # Modules are saved via their state_dicts; plain values are stored as-is.
    checkpoint = {
        'encoder': encoder.state_dict() if encoder is not None else None,
        'transModule': transModule.state_dict() if transModule is not None else None,
        'decoder': decoder.state_dict() if decoder is not None else None,
        'optimizer': optimizer.state_dict() if optimizer is not None else None,
        'scheduler': scheduler.state_dict() if scheduler is not None else None,
        'epoch': epoch,
        'log_c': log_c,
        'log_s': log_s,
        'log_id1': log_id1,
        'log_id2': log_id2,
        'log_all': log_all,
        'loss_count_interval': loss_count_interval
    }
    torch.save(checkpoint, save_path)


parser = argparse.ArgumentParser()

parser.add_argument('--content_dir', default=r'E:\NLP\VAL_Transformers\models\StyTr2\images', type=str,
                    help='Directory path to a batch of content images')
parser.add_argument('--style_dir', default=r'E:\NLP\VAL_Transformers\models\StyTr2\style', type=str,
                    help='Directory path to a batch of style images')
parser.add_argument('--vgg', type=str,
                    default=r'/home/share/VAL_ImageTranslation/models/networks/StyTr2/experiments/vgg_normalised.pth')

parser.add_argument('--save_dir', default='./experiments',
                    help='Directory to save the model')
parser.add_argument('--log_dir', default='./logs',
                    help='Directory to save the log')
parser.add_argument('--lr', type=float, default=5e-4)
parser.add_argument('--lr_decay', type=float, default=1e-4)
parser.add_argument('--max_iter', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--style_weight', type=float, default=10.0)
parser.add_argument('--content_weight', type=float, default=7.0)
parser.add_argument('--n_threads', type=int, default=1)
parser.add_argument('--id1_weight', type=float, default=50)
parser.add_argument('--id2_weight', type=float, default=1)
parser.add_argument('--save_model_interval', type=int, default=3000)
parser.add_argument('--loss_count_interval', type=int, default=400)

args = parser.parse_args()
loss_count_interval = args.loss_count_interval

USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
print(device)

if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)

if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)
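
# Assumption: the script imports SummaryWriter and creates log_dir but never
# constructs a writer; this is a minimal sketch wiring TensorBoard up.
writer = SummaryWriter(log_dir=args.log_dir)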

# Loss network: pretrained normalised VGG, truncated to the layers the
# perceptual (content/style) losses read features from.
vgg = vgg_structures
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:44])

encoder = TransformerEncoder(img_size=224, patch_size=2, in_chans=3, embed_dim=192,
                             depths=[2, 2, 2], nhead=[3, 6, 12], strip_width=[2, 4, 7],
                             drop_path_rate=0., patch_norm=True)
decoder = Decoder_MV(d_model=768, seq_input=True)
transformer_decoder = TransformerDecoder(nlayer=3, d_model=768, nhead=8, mlp_ratio=4,
                                         qkv_bias=False, attn_drop=0., drop=0., drop_path=0.,
                                         act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                                         norm_first=True)

network = StyTr(encoder, decoder, transformer_decoder, vgg)

optimizer = torch.optim.Adam([
    {'params': network.encoder.parameters()},
    {'params': network.decoder.parameters()},
    {'params': network.transModule.parameters()},
], lr=args.lr)  # was lr=args.lr_decay, which silently ignored --lr; assuming --lr is the intended base rate
scheduler = CosineAnnealingWarmUpLR(optimizer, warmup_step=args.max_iter // 4,
                                    max_step=args.max_iter, min_lr=0)

# Running loss histories (checkpointed) and per-interval windows.
log_c, log_s, log_id1, log_id2, log_all = [], [], [], [], []
log_c_temp, log_s_temp, log_id1_temp, log_id2_temp, log_all_temp = [], [], [], [], []

network.train()
network.to(device)

content_tf = train_transform()
style_tf = train_transform()

content_dataset = FlatFolderDataset(args.content_dir, content_tf)
style_dataset = FlatFolderDataset(args.style_dir, style_tf)

content_iter = iter(data.DataLoader(
    content_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(content_dataset),
    num_workers=args.n_threads))
style_iter = iter(data.DataLoader(
    style_dataset, batch_size=args.batch_size,
    sampler=InfiniteSamplerWrapper(style_dataset),
    num_workers=args.n_threads))

if not os.path.exists(args.save_dir + "/test"):
    os.makedirs(args.save_dir + "/test")

for i in tqdm(range(args.max_iter)):
    content_images = next(content_iter).to(device)
    style_images = next(style_iter).to(device)

    loss_c, loss_s, loss_id_1, loss_id_2, out = network(content_images, style_images)
    loss_all = args.content_weight * loss_c + args.style_weight * loss_s \
               + args.id1_weight * loss_id_1 + args.id2_weight * loss_id_2
    print(f"loss_all {loss_all.item():.4f} ==> loss_c {loss_c.item():.4f}"
          f" ==> loss_s {loss_s.item():.4f} ==> loss_id_1 {loss_id_1.item():.4f}"
          f" ==> loss_id_2 {loss_id_2.item():.4f}")

    log_c_temp.append(loss_c.item())
    log_s_temp.append(loss_s.item())
    log_id1_temp.append(loss_id_1.item())
    log_id2_temp.append(loss_id_2.item())
    log_all_temp.append(loss_all.item())

    optimizer.zero_grad()
    loss_all.backward()
    optimizer.step()
    scheduler.step()
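
    # Assumption (not in the original script): log scalars to TensorBoard and,
    # every `loss_count_interval` steps, fold the per-interval windows into the
    # running histories that save_checkpoint persists; without this the
    # log_c/log_s/... lists stay empty in every checkpoint.
    writer.add_scalar('loss/content', loss_c.item(), i + 1)
    writer.add_scalar('loss/style', loss_s.item(), i + 1)
    writer.add_scalar('loss/identity1', loss_id_1.item(), i + 1)
    writer.add_scalar('loss/identity2', loss_id_2.item(), i + 1)
    writer.add_scalar('loss/total', loss_all.item(), i + 1)

    if (i + 1) % loss_count_interval == 0:
        log_c.append(sum(log_c_temp) / len(log_c_temp))
        log_s.append(sum(log_s_temp) / len(log_s_temp))
        log_id1.append(sum(log_id1_temp) / len(log_id1_temp))
        log_id2.append(sum(log_id2_temp) / len(log_id2_temp))
        log_all.append(sum(log_all_temp) / len(log_all_temp))
        log_c_temp, log_s_temp, log_id1_temp, log_id2_temp, log_all_temp = [], [], [], [], []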

    # Every 100 steps, save a grid of [style | content | stylised] images.
    if i % 100 == 0:
        output_name = '{:s}/test/{:s}{:s}'.format(
            args.save_dir, str(i), ".jpg"
        )
        out = torch.cat((content_images, out), 0)
        out = torch.cat((style_images, out), 0)
        save_image(out, output_name)

    # (i + 1) so a checkpoint is written at the end of training rather than
    # only at step 0, which is all the original `i % interval` test hit with
    # the default max_iter == save_model_interval == 3000.
    if (i + 1) % args.save_model_interval == 0:
        save_checkpoint(
            encoder=network.encoder,
            transModule=network.transModule,
            decoder=network.decoder,
            optimizer=optimizer,
            scheduler=scheduler,
            epoch=i,
            log_c=log_c,
            log_s=log_s,
            log_id1=log_id1,
            log_id2=log_id2,
            log_all=log_all,
            loss_count_interval=loss_count_interval,
            save_path=os.path.join(args.save_dir, 'checkpoint_{}_epoch.pkl'.format(i))
        )
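
# Assumption: flush and close the TensorBoard writer introduced above.
writer.close()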