File size: 4,940 Bytes
83034b6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 |
import sys
from collections import OrderedDict
from rich.progress import Progress, TextColumn, BarColumn, TaskProgressColumn, TimeRemainingColumn
import data
from options.train_options import TrainOptions
from util.iter_counter import IterationCounter
from util.visualizer import Visualizer
from trainers.pix2pix_trainer import Pix2PixTrainer
from util.logging_wandb import init_project,stop
import torch
from torch.distributions import Beta
# parse options
opt = TrainOptions().parse()
# print options to help debugging
print(' '.join(sys.argv))
# load the dataset
dataloader = data.create_dataloader(opt)
# create trainer for our model
trainer = Pix2PixTrainer(opt)
print(trainer)
# create tool for counting iterations
iter_counter = IterationCounter(opt, len(dataloader))
# create tool for visualization
visualizer = Visualizer(opt)
# T: iterations per epoch; used below to schedule the Beta concentration `a`.
T = len(dataloader)
# b: fixed second concentration parameter of the Beta distribution sampled
# each iteration (kept on CPU; only the sample is moved to GPU later).
b = torch.tensor([1], dtype=torch.float32)
# Setup rich progress bar
progress = Progress(
    TextColumn("[bold blue]{task.description}"),
    BarColumn(),
    TaskProgressColumn(),
    TimeRemainingColumn(),
)
## INIT WANDB
# NOTE(review): "results" is presumably the wandb project name — confirm
# against util.logging_wandb.init_project.
init_project("results")
# Seed value only for the initial progress-bar description; overwritten by the
# epoch loop below.
epoch = 0
# ---------------------------------------------------------------------------
# Main training loop.
# Outer loop: epochs, driven by iter_counter.training_epochs().
# Inner loop: batches from the dataloader, capped at
# opt.max_iterations_per_epoch steps per epoch.
# ---------------------------------------------------------------------------
with progress:
    epoch_task = progress.add_task(
        f"[deep_pink4]Total Epochs[{epoch + 1}|{max(iter_counter.training_epochs())}]",
        total=len(iter_counter.training_epochs()))
    for epoch in iter_counter.training_epochs():
        iter_counter.record_epoch_start(epoch)
        img_index = 0
        # Per-epoch sub-task for batch progress. It is removed at epoch end
        # (fix: the original leaked one finished task per epoch, cluttering
        # the display).
        iter_task = progress.add_task(f"[cyan]Images Index [{img_index}]",
                                      total=len(dataloader))
        iteration = 0
        while iteration < len(dataloader):
            # Fix: the original used enumerate(dataloader, start=...) but the
            # index was never read, so `start=` had no effect; iterate directly.
            # This will loop indefinitely if using InfiniteSamplerWrapper —
            # the explicit iteration cap below is what ends the epoch.
            for data_i in dataloader:
                iter_counter.record_one_iteration()
                progress.update(iter_task, advance=1)
                # Schedule the Beta concentration a = exp((t - T/2) / (T/4)),
                # growing over the epoch; opt.alpha is a fresh Beta(a, b)
                # sample each step, moved to GPU for the trainer.
                t = iteration + 1
                a = torch.tensor(((t - (0.5 * T)) / (0.25 * T)),
                                 dtype=torch.float32).exp()
                m = Beta(a, b)
                opt.alpha = m.sample().cuda()

                # Training: one generator step every opt.D_steps_per_G
                # iterations, one discriminator step every iteration.
                if iteration % opt.D_steps_per_G == 0:
                    trainer.run_generator_one_step(data_i, iteration,
                                                   progress=progress, epoch=epoch,
                                                   images_iter=img_index)
                    img_index += 1
                    progress.update(
                        iter_task,
                        description=f"[cyan]Images Index [{img_index}|{len(dataloader)}]")
                trainer.run_discriminator_one_step(data_i, iteration)

                # Periodic console / curve logging of the latest losses.
                if iter_counter.needs_printing():
                    losses = trainer.get_latest_losses()
                    visualizer.print_current_errors(epoch, iter_counter.epoch_iter,
                                                    losses, iter_counter.time_per_iter)
                    visualizer.plot_current_errors(losses, iter_counter.total_steps_so_far)

                # Periodic image dumps; layout depends on the task type.
                if iter_counter.needs_displaying():
                    if opt.task == 'SIS':
                        visuals = OrderedDict([('input_label', data_i['label'][0]),
                                               ('synthesized_image', trainer.get_latest_generated()[0]),
                                               ('real_image', data_i['image'][0])])
                    else:
                        visuals = OrderedDict([('content', data_i['label'][0]),
                                               ('synthesized_image', trainer.get_latest_generated()[0]),
                                               ('style', data_i['image'][0])])
                    visualizer.display_current_results(visuals, epoch,
                                                       iter_counter.total_steps_so_far)

                # Periodic "latest" checkpoint so a crash loses little work.
                if iter_counter.needs_saving():
                    print('saving the latest model (epoch %d, total_steps %d)' %
                          (epoch, iter_counter.total_steps_so_far))
                    trainer.save('latest')
                    iter_counter.record_current_iter()

                iteration += 1
                if iteration >= opt.max_iterations_per_epoch:
                    break
            # NOTE(review): when opt.max_iterations_per_epoch < len(dataloader)
            # the while-condition re-enters the for-loop, restarting dataloader
            # iteration to take one more step at a time — confirm this is the
            # intended resume behavior and not an oversight.
        trainer.update_learning_rate(epoch)
        iter_counter.record_epoch_end()
        # Fix: drop the finished per-epoch task so bars do not accumulate.
        progress.remove_task(iter_task)
        # Fix: advance the epoch bar on completion (the original advanced at
        # epoch start, so the bar hit 100% when the last epoch merely began).
        progress.update(epoch_task, advance=1,
                        description=f"[deep_pink4]Total Epochs[{epoch + 1}|{max(iter_counter.training_epochs())}]")
        # Full checkpoint at configured epoch boundaries and at the final epoch.
        if epoch % opt.save_epoch_freq == 0 or epoch == iter_counter.total_epochs:
            print('saving the model at the end of epoch %d, iters %d' %
                  (epoch, iter_counter.total_steps_so_far))
            trainer.save('latest')
            trainer.save(epoch)
stop()
print('Training was successfully finished.')
|