import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.utils import save_image

from models.networks.base_network import BaseNetwork
from .generator_module.StyTR import StyTr
from .generator_module.transformerEncoder import TransformerEncoder
from .generator_module.HEtransformerEncoder import HeTransformerEncoder
from .generator_module.schedule import CosineAnnealingWarmUpLR
from .generator_module.DecoderCNN import Decoder_MV, vgg_structures, decoder_stem  # DecoderCNN
from .generator_module.transformer_decoder import TransformerDecoder


class TSITGenerator(BaseNetwork):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        parser.set_defaults(norm_G='spectralfadesyncbatch3x3')
        parser.add_argument('--num_upsampling_layers',
                            choices=('normal', 'more', 'most'), default='more',
                            help="If 'more', adds an upsampling layer between the two middle resnet blocks. "
                                 "If 'most', also adds one more upsampling + resnet layer at the end of the generator. "
                                 "We only use 'more' as the default setting.")
        parser.add_argument('--lr_decay', type=float, default=1e-4,
                            help='learning rate decay')
        parser.add_argument('--lr_stytr2', type=float, default=5e-4,
                            help='initial learning rate for Adam')
        return parser

    def __init__(self, opt):
        super().__init__()
        # Path to the pretrained (normalised) VGG weights used as the perceptual backbone.
        self.vgg_path = r'models/networks/experiments/vgg_normalised.pth'

        # CNN decoder that maps the fused transformer features back to image space.
        self.CNNdecoder = Decoder_MV(d_model=768, seq_input=True)

        # Hierarchical transformer encoders for the two input streams.
        self.HeTransEncoder = HeTransformerEncoder(
            img_size=224, patch_size=2, in_chans=3, embed_dim=192,
            depths=[2, 2, 2], nhead=[3, 6, 12], strip_width=[2, 4, 7],
            drop_path_rate=0., patch_norm=True)
        self.TransEncoder = TransformerEncoder(
            img_size=224, patch_size=2, in_chans=3, embed_dim=192,
            depths=[2, 2, 2], nhead=[3, 6, 12], strip_width=[2, 4, 7],
            drop_path_rate=0., patch_norm=True)

        # Transformer decoder that fuses content and style tokens.
        self.TransDecoder = TransformerDecoder(
            nlayer=3, d_model=768, nhead=8, mlp_ratio=4, qkv_bias=False,
            attn_drop=0., drop=0., drop_path=0., act_layer=nn.GELU,
            norm_layer=nn.LayerNorm, norm_first=True)

        self.vgg = vgg_structures
        self.content_weight = opt.content_weight
        self.style_weight = opt.style_weight
        self.lr_decay = opt.lr_decay
        self.lr_stytr2 = opt.lr_stytr2
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.iters_count = 0
        self.save_dir = opt.save_dir
        self.opt = opt
        self.mode = opt.mode

    def SetUp_model(self, TransEncoder, HeTransEncoder, CNN_decoder, TransDecoder, vgg, alpha, mode):
        # Load the pretrained VGG weights before assembling the full network.
        vgg.load_state_dict(torch.load(self.vgg_path))
        # vgg = nn.Sequential(*list(vgg.children())[:44])
        network = StyTr(TransEncoder, HeTransEncoder, CNN_decoder, TransDecoder, vgg, alpha, mode)
        network.train()
        network.to(self.device)
        return network

    def forward(self):
        network = self.SetUp_model(self.TransEncoder, self.HeTransEncoder, self.CNNdecoder,
                                   self.TransDecoder, self.vgg, self.opt.alpha, self.mode)
        return network
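

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). It assumes an argparse-style
    # `opt` namespace exposing the fields this class reads (content_weight,
    # style_weight, lr_decay, lr_stytr2, save_dir, alpha, mode); the values
    # below are hypothetical placeholders, not the project's defaults.
    # Building the network also requires the pretrained weights at
    # `models/networks/experiments/vgg_normalised.pth`, and because of the
    # relative imports above this must be run as a module (python -m ...),
    # with the exact module path depending on where this file lives.
    from argparse import Namespace

    opt = Namespace(content_weight=1.0, style_weight=10.0, lr_decay=1e-4,
                    lr_stytr2=5e-4, save_dir='./checkpoints', alpha=1.0,
                    mode='train')
    generator = TSITGenerator(opt)
    network = generator()  # assembles the StyTr network and moves it to the device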