import argparse
import os
import zipfile
from os.path import basename, splitext
from pathlib import Path

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
from torchvision.utils import save_image
from tqdm import tqdm

from models.pix2pix_model import Pix2PixModel
from options.test_options import TestOptions

# Legacy sample-test pipeline, kept for reference:
#
# class Sample_Test_Net(nn.Module):
#     def __init__(self, encoder, decoder, transModule, patch_size=8):
#         super(Sample_Test_Net, self).__init__()
#         self.encoder = encoder
#         self.decoder = decoder
#         self.transModule = transModule
#         self.patch_size = patch_size
#
#     def forward(self, i_c, i_s, arbitrary_input=False):
#         _, _, H, W = i_c.size()
#         self.decoder.img_H = H
#         self.decoder.img_W = W
#         f_c = self.encoder(i_c, arbitrary_input)
#         f_s = self.encoder(i_s, arbitrary_input)
#         f_c, f_c_reso = f_c[0], f_c[2]
#         f_s, f_s_reso = f_s[0], f_s[2]
#         f_cs = self.transModule(f_c, f_s)
#         i_cs = self.decoder(f_cs, f_c_reso)
#         return i_cs
#
#
# def content_style_transTo_pt(i_c_path, i_s_path, i_c_size=None):
#     """Resize images of arbitrary size to the shape of the content image."""
#     i_c_pil = Image.open(i_c_path)
#     i_s_pil = Image.open(i_s_path)
#
#     if i_c_size is not None:
#         i_c_tf = transforms.Compose([
#             transforms.Resize(i_c_size),
#             transforms.ToTensor()
#         ])
#     else:
#         i_c_tf = transforms.Compose([transforms.ToTensor()])
#
#     i_s_size = min(i_c_pil.size[1], i_c_pil.size[0])
#     i_s_tf = transforms.Compose([
#         transforms.Resize(i_s_size),
#         transforms.ToTensor()
#     ])
#
#     i_c_pt = i_c_tf(i_c_pil).unsqueeze(dim=0)
#     i_s_pt = i_s_tf(i_s_pil).unsqueeze(dim=0)
#
#     return i_c_pt, i_s_pt
#
#
# @torch.no_grad()
# def save_transferred_imgs(network, samples_path, img_saved_path, device=torch.device('cpu')):
#     print('Image generation starts:')
#
#     i_c_names = os.listdir(os.path.join(samples_path, 'Content'))
#     i_s_names = os.listdir(os.path.join(samples_path, 'Style'))
#     for i_c_name in tqdm(i_c_names):
#         for i_s_name in tqdm(i_s_names):
#             i_c_path = os.path.join(samples_path, 'Content', i_c_name)
#             i_s_path = os.path.join(samples_path, 'Style', i_s_name)
#             i_c, i_s = content_style_transTo_pt(i_c_path, i_s_path)
#             i_cs = network(i_c.to(device), i_s.to(device), arbitrary_input=True)
#
#             stem_c, suffix_c = os.path.splitext(i_c_name)
#             stem_s, suffix_s = os.path.splitext(i_s_name)
#             # splitext() keeps the leading dot in the suffix, so none is added here
#             output_name = os.path.join(img_saved_path, f'{stem_c}_+_{stem_s}{suffix_c}')
#             save_image(i_cs, output_name)
#
#
# parser = argparse.ArgumentParser()
# # Basic options
# parser.add_argument('--input_dir', type=str, default='./input/Test',
#                     help='Directory path to a batch of content and style images '
#                          'which are loaded in "Content"/"Style" subfolders respectively.')
# parser.add_argument('--output_dir', type=str, default='./output',
#                     help='Directory to save the output image(s)')
# parser.add_argument('--checkpoint_import_path', type=str,
#                     default='./pre_trained_models/checkpoint/checkpoint_40000_epoch.pkl',
#                     help='Directory path to the importing checkpoint')
#
# args = parser.parse_args()

opt = TestOptions().parse()


def test_transform(size, crop):
    """Resize the short edge to `size` (if nonzero), optionally center-crop
    to a `size` x `size` square, then convert to a tensor."""
    transform_list = []
    if size != 0:
        transform_list.append(transforms.Resize(size))
    if crop:
        transform_list.append(transforms.CenterCrop(size))
    transform_list.append(transforms.ToTensor())
    return transforms.Compose(transform_list)


def style_transform(h, w):
    """Center-crop to exactly (h, w) and convert to a tensor."""
    return transforms.Compose([
        transforms.CenterCrop((h, w)),
        transforms.ToTensor(),
    ])
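
# A quick, optional sanity check for the two transforms above (a hedged
# sketch: the 600x400 probe size is arbitrary and not part of the original
# pipeline). test_transform resizes the short edge and center-crops to a
# square; style_transform center-crops to an exact (h, w). Uncomment to run:
#
# probe = Image.new("RGB", (600, 400))
# assert test_transform(512, True)(probe).shape == (3, 512, 512)
# assert style_transform(256, 256)(probe.resize((300, 300))).shape == (3, 256, 256)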

def content_transform():
    """Convert a PIL image straight to a tensor, with no resizing."""
    return transforms.Compose([transforms.ToTensor()])


# Advanced options
content_size = 512
style_size = 512
crop = True            # was the argparse action string 'store_true'; enables CenterCrop
save_ext = '.jpg'
output_path = opt.output_dir
preserve_color = True  # currently unused below
alpha = opt.a          # style/content trade-off; currently unused below
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Either --content or --content_dir should be given.
if opt.content:
    content_paths = [Path(opt.content)]
else:
    content_paths = list(Path(opt.content_dir).glob('*'))

# Either --style or --style_dir should be given.
if opt.style:
    style_paths = [Path(opt.style)]
else:
    style_paths = list(Path(opt.style_dir).glob('*'))

os.makedirs(output_path, exist_ok=True)

# network = torch.load(opt.network_path)  # superseded by Pix2PixModel(opt) below
network = Pix2PixModel(opt)
print(network)
network.eval()
network.to(device)

content_tf = test_transform(content_size, crop)
style_tf = test_transform(style_size, crop)

for content_path in content_paths:
    for style_path in style_paths:
        print(content_path)
        content_tf1 = content_transform()  # prepared but unused; content_tf feeds the network
        content = content_tf(Image.open(content_path).convert("RGB"))
        c, h, w = content.shape  # tensor layout is (C, H, W), not (H, W, C)
        style_tf1 = style_transform(h, w)  # prepared but unused; style_tf feeds the network
        style = style_tf(Image.open(style_path).convert("RGB"))

        style = style.to(device).unsqueeze(0)
        content = content.to(device).unsqueeze(0)

        # The model operates at a fixed 224x224 resolution.
        contents = F.interpolate(content, size=(224, 224), mode='bilinear', align_corners=False)
        styles = F.interpolate(style, size=(224, 224), mode='bilinear', align_corners=False)

        # Querying the model in inference mode returns a callable; applying it
        # to the (content, style) pair yields a 5-tuple whose last element is
        # the stylized image.
        model_out = network(data=None, mode="inference", iters=0, progress=None,
                            epochs=None, images_iters=None)
        with torch.no_grad():
            _, _, _, _, output = model_out(contents, styles)
        print("OUTPUT", output.shape)

        # Upscale the output by 8/7 (224 -> 256) before saving.
        fake_image = F.interpolate(output, scale_factor=8 / 7, mode='bilinear',
                                   align_corners=False)

        output_name = '{:s}/{:s}_stylized_{:s}{:s}'.format(
            output_path,
            splitext(basename(content_path))[0],
            splitext(basename(style_path))[0],
            save_ext)
        save_image(fake_image, output_name)
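
# Optional, hedged sketch: bundle the stylized outputs into a single archive
# using the `zipfile` import above. The archive name 'stylized_results.zip'
# is an assumption, not part of the original pipeline. Uncomment to enable:
#
# archive_path = os.path.join(output_path, 'stylized_results.zip')
# with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as zf:
#     for img_file in sorted(Path(output_path).glob('*' + save_ext)):
#         zf.write(img_file, arcname=img_file.name)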