|
"""Alpha-interpolation test script.

Runs a trained Pix2Pix-style generator over a test dataset at a range of
style-mixing alpha values and saves the outputs to an HTML results page.
"""

# --- Standard library ---
import ntpath
import os
from collections import OrderedDict
# NOTE(review): `process` appears unused below; kept so module-level side
# effects and the public import surface stay unchanged.
from concurrent.futures import process

# --- Third-party ---
import numpy as np
import torch
import torchvision
from tqdm import tqdm

# --- Project-local ---
import data
from models.pix2pix_model import Pix2PixModel
from options.test_options import TestOptions
from util import html
from util.util import mkdir
from util.visualizer import Visualizer
|
|
|
# Parse the test-time options (command line + saved experiment config).
opt = TestOptions().parse()

# Build the dataloader for the configured dataset/phase.
dataloader = data.create_dataloader(opt)

# Instantiate the generator. eval() is skipped for tasks that rely on
# stochastic layers at test time (MMIS / photo2art keep train-mode behavior).
model = Pix2PixModel(opt)
if opt.task != 'MMIS' and opt.dataset_mode != 'photo2art':
    model.eval()

visualizer = Visualizer(opt)

# Results go to an HTML page under <results_dir>/<name>/<phase>_<epoch>.
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir,
                    'Experiment = %s, Phase = %s, Epoch = %s' %
                    (opt.name, opt.phase, opt.which_epoch))

print('Number of images: ', len(dataloader))

# 20 evenly spaced interpolation weights in [0, 1].
alpha_list = torch.linspace(0, 1.0, 20)

# stack=True  -> save one grid image per input containing every alpha;
# stack=False -> save each alpha's output as an individual image.
stack = False

# Maximum number of batches to process (defaults to the whole dataset).
samples = len(dataloader)
|
processed = -1  # index of the batch currently being processed

# For each batch: run inference once per alpha, then save either one
# stacked grid (stack=True) or one image per alpha (stack=False).
for i, data_i in enumerate(tqdm(dataloader)):
    processed += 1
    # Cap the number of processed batches at `samples`. The original test
    # (`processed > samples`) could never fire, because enumerate yields at
    # most len(dataloader) batches and `processed` therefore peaks at
    # samples - 1. `>=` keeps full runs identical while making the cap
    # effective if `samples` is ever lowered.
    if processed >= samples:
        break

    # Grid rows accumulated across alphas: [real, label, gen(a0), gen(a1), ...]
    alpha_imgs = []
    # Paths are per-batch, not per-alpha — hoisted out of the inner loop.
    img_path = data_i['cpath']

    for j, alpha in enumerate(alpha_list):
        opt.alpha = alpha
        # No gradients are needed at test time; no_grad() avoids building
        # the autograd graph and cuts memory use per forward pass.
        with torch.no_grad():
            generated = model(data_i, mode='inference', iters=0,
                              progress=None, epochs=None, images_iters=None)

        if j == 0:
            # Lead the grid with the ground-truth image and the input label.
            alpha_imgs.append(data_i['image'])
            alpha_imgs.append(data_i['label'])
        alpha_imgs.append(generated)

        if not stack:
            # Save every sample of this batch individually, tagged with the
            # current alpha so files for different alphas do not collide.
            for b in range(generated.shape[0]):
                if opt.show_input:
                    if opt.task == 'SIS':
                        visuals = OrderedDict([('input_label', data_i['label'][b]),
                                               ('real_image', data_i['image'][b]),
                                               ('synthesized_image', generated[b])])
                    else:
                        visuals = OrderedDict([('content', data_i['label'][b]),
                                               ('style', data_i['image'][b]),
                                               ('synthesized_image', generated[b])])
                else:
                    visuals = OrderedDict([('synthesized_image', generated[b])])
                visualizer.save_images(webpage, visuals, img_path[b:b + 1],
                                       alpha=opt.alpha)

    if stack:
        # Write a single grid image containing the real image, the label,
        # and the generated output for every alpha. Saving once after the
        # alpha loop produces the same final file as rewriting it on each
        # iteration (as before), but with one disk write instead of twenty.
        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(img_path[0])
        name = os.path.splitext(short_path)[0]
        image_name = '%s.png' % name
        os.makedirs(image_dir, exist_ok=True)
        save_path = os.path.join(image_dir, image_name)
        alpha_stack = torch.cat(alpha_imgs, dim=0)
        im_grid = torchvision.utils.make_grid(alpha_stack,
                                              nrow=len(alpha_imgs) + 2,
                                              padding=0, normalize=True)
        torchvision.utils.save_image(im_grid, save_path)

webpage.save()
|
|