# Test-time inference script: generates alpha-interpolated outputs for each input image.
from concurrent.futures import process
import os
from collections import OrderedDict
# from tkinter import Image
import torch
import torchvision
import ntpath
import data
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
from util.util import mkdir
from util.visualizer import Visualizer
from util import html
from tqdm import tqdm
import numpy as np
# ---- configuration, model, and output-page setup ----
opt = TestOptions().parse()

dataloader = data.create_dataloader(opt)
model = Pix2PixModel(opt)

# MMIS and photo2art keep train-mode behavior at test time; every other
# configuration switches the model into eval mode.
if opt.task != 'MMIS' and opt.dataset_mode != 'photo2art':
    model.eval()

visualizer = Visualizer(opt)

# Results are written under <results_dir>/<name>/<phase>_<which_epoch>.
web_dir = os.path.join(opt.results_dir, opt.name, f'{opt.phase}_{opt.which_epoch}')
webpage = html.HTML(
    web_dir,
    f'Experiment = {opt.name}, Phase = {opt.phase}, Epoch = {opt.which_epoch}')

# test
print('Number of images: ', len(dataloader))

# 20 interpolation weights in [0, 1] -> 20 generated images per input image.
alpha_list = torch.linspace(0, 1.0, 20)
stack = False                   # True: save each input's alphas as one grid image
samples = len(dataloader)       # cap on how many inputs to process
processed = -1
for i, data_i in enumerate(tqdm(dataloader)):
    processed += 1
    # Cap the number of processed inputs at `samples`. (`>=` rather than the
    # original `>`, which let one extra item through when the cap is lowered;
    # with the default `samples = len(dataloader)` this branch never fires,
    # so default behavior is unchanged.)
    if processed >= samples:
        break

    # Generate one output per interpolation weight for the current input.
    alpha_imgs = []
    for j, alpha in enumerate(alpha_list):
        opt.alpha = alpha
        # Signature: model(data, mode, iters, progress, epochs, images_iters)
        generated = model(data_i, mode='inference', iters=0,
                          progress=None, epochs=None, images_iters=None)
        img_path = data_i['cpath']
        if j == 0:
            # Prepend the real image and its label once, so the grid starts
            # with the two reference images.
            alpha_imgs.append(data_i['image'])
            alpha_imgs.append(data_i['label'])
        alpha_imgs.append(generated)

    if stack:
        # Save all alpha variants of this input as a single normalized grid.
        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(img_path[0])
        name = os.path.splitext(short_path)[0]
        image_name = '%s.png' % name
        os.makedirs(image_dir, exist_ok=True)
        save_path = os.path.join(image_dir, image_name)
        alpha_stack = torch.cat(alpha_imgs, dim=0)
        im_grid = torchvision.utils.make_grid(
            alpha_stack, nrow=len(alpha_imgs) + 2, padding=0, normalize=True)
        torchvision.utils.save_image(im_grid, save_path)
    else:
        # NOTE(review): this branch runs once per input, AFTER the alpha loop,
        # so only the batch generated with the final alpha (1.0) is saved —
        # confirm whether per-alpha saving was intended here.
        for b in range(generated.shape[0]):
            if opt.show_input:
                if opt.task == 'SIS':
                    visuals = OrderedDict([('input_label', data_i['label'][b]),
                                           ('real_image', data_i['image'][b]),
                                           ('synthesized_image', generated[b])])
                else:
                    visuals = OrderedDict([('content', data_i['label'][b]),
                                           ('style', data_i['image'][b]),
                                           ('synthesized_image', generated[b])])
            else:
                visuals = OrderedDict([('synthesized_image', generated[b])])
            visualizer.save_images(webpage, visuals, img_path[b:b + 1],
                                   alpha=opt.alpha)

webpage.save()