from models.networks.sync_batchnorm import DataParallelWithCallback
from models.pix2pix_model import Pix2PixModel


class Pix2PixTrainer:
    """
    Trainer that creates the model and optimizers, and uses them to
    update the network weights while reporting losses and the latest
    visuals so training progress can be monitored.
    """

    def __init__(self, opt):
        self.opt = opt
        self.pix2pix_model = Pix2PixModel(opt)
        if len(opt.gpu_ids) > 0:
            self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model,
                                                          device_ids=opt.gpu_ids)
            self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
        else:
            self.pix2pix_model_on_one_gpu = self.pix2pix_model

        self.generated = None
        if opt.isTrain:
            self.optimizer_G, self.optimizer_D, self.optimizer_D2 = \
                self.pix2pix_model_on_one_gpu.create_optimizers(opt)
            self.old_lr = opt.lr

    def run_generator_one_step(self, data, iters, progress, epoch, images_iter):
        # Compute generator losses, backpropagate, and step the optimizer.
        # (The original computed g_loss but never called backward()/step();
        # the update is restored here, matching the discriminator step below.)
        self.optimizer_G.zero_grad()
        g_losses, generated = self.pix2pix_model(
            data, mode='generator', iters=iters, progress=progress,
            epochs=epoch, images_iters=images_iter)
        g_loss = sum(g_losses.values()).mean()
        g_loss.backward()
        self.optimizer_G.step()
        self.g_losses = g_losses
        self.generated = generated

    def run_discriminator_one_step(self, data, iters):
        self.optimizer_D.zero_grad()
        self.optimizer_D2.zero_grad()
        d_losses, d2_losses = self.pix2pix_model(
            data, mode='discriminator', iters=iters, progress=None,
            epochs=None, images_iters=None)

        # for discriminator 1
        d_loss = sum(d_losses.values()).mean()
        d_loss.backward()
        self.optimizer_D.step()
        self.d_losses = d_losses

        # for discriminator 2
        d2_loss = sum(d2_losses.values()).mean()
        d2_loss.backward()
        self.optimizer_D2.step()
        self.d2_losses = d2_losses

    def get_latest_losses(self):
        return {**self.g_losses, **self.d_losses, **self.d2_losses}

    def get_latest_generated(self):
        return self.generated

    def save(self, epoch):
        self.pix2pix_model_on_one_gpu.save(epoch)

    ##################################################################
    # Helper functions
    ##################################################################
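    # Learning-rate schedule implemented below: the rate stays at opt.lr
    # for the first opt.niter epochs, then decays linearly toward zero
    # over the following opt.niter_decay epochs. Worked example with
    # illustrative values (not from the original): lr=0.0002, niter=100,
    # niter_decay=100 -> each epoch past 100 subtracts 0.0002/100 = 2e-6.
    # Unless opt.no_TTUR is set, the two time-scale update rule (TTUR)
    # assigns the generator new_lr / 2 and each discriminator new_lr * 2.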

    def update_learning_rate(self, epoch):
        if epoch > self.opt.niter:
            lrd = self.opt.lr / self.opt.niter_decay
            new_lr = self.old_lr - lrd
        else:
            new_lr = self.old_lr

        if new_lr != self.old_lr:
            if self.opt.no_TTUR:
                new_lr_G = new_lr
                new_lr_D = new_lr
                new_lr_D2 = new_lr
            else:
                new_lr_G = new_lr / 2
                new_lr_D = new_lr * 2
                new_lr_D2 = new_lr * 2

            for param_group in self.optimizer_D.param_groups:
                param_group['lr'] = new_lr_D
            for param_group in self.optimizer_D2.param_groups:
                param_group['lr'] = new_lr_D2
            for param_group in self.optimizer_G.param_groups:
                param_group['lr'] = new_lr_G
            print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
            self.old_lr = new_lr
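

# ----------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original trainer): a
# minimal training loop driving Pix2PixTrainer. `opt` and `dataloader`
# are assumed to come from the surrounding project (option parsing and
# data loading), and the meaning of `progress` here (fraction of total
# training completed) is an assumption.
# ----------------------------------------------------------------------
def _example_training_loop(opt, dataloader):  # hypothetical helper
    trainer = Pix2PixTrainer(opt)
    total_epochs = opt.niter + opt.niter_decay
    total_iters = total_epochs * len(dataloader)
    global_iter = 0
    for epoch in range(total_epochs):
        for images_iter, data in enumerate(dataloader):
            progress = global_iter / float(total_iters)
            trainer.run_generator_one_step(data, global_iter, progress,
                                           epoch, images_iter)
            trainer.run_discriminator_one_step(data, global_iter)
            global_iter += 1
        trainer.update_learning_rate(epoch)
        trainer.save(epoch)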