import torch
import torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class VGG19Feats(torch.nn.Module):
    """Extracts conv1_2 ... conv5_2 feature maps from an ImageNet-pretrained VGG19."""
    def __init__(self, requires_grad=False):
        super().__init__()
        # `pretrained=True` is deprecated since torchvision 0.13; the weights enum
        # loads the same ImageNet-pretrained parameters.
        vgg = torchvision.models.vgg19(
            weights=torchvision.models.VGG19_Weights.IMAGENET1K_V1
        ).to(device)
        vgg_pretrained_features = vgg.features.eval()
        self.requires_grad = requires_grad
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        # Slice boundaries follow torchvision's vgg19().features indexing, so that
        # each slice ends at the conv*_2 layer of the corresponding VGG19 block.
        for x in range(3):       # conv1_1, relu1_1, conv1_2
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(3, 8):    # relu1_2, pool1, conv2_1, relu2_1, conv2_2
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(8, 13):   # relu2_2, pool2, conv3_1, relu3_1, conv3_2
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(13, 22):  # relu3_2 ... pool3 ... conv4_2
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(22, 31):  # relu4_2 ... pool4 ... conv5_2
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not self.requires_grad:
            # Freeze the VGG weights so the loss network is never updated.
            for param in self.parameters():
                param.requires_grad = False
 
    def forward(self, img):
        # Return feature maps from shallow to deep, one per VGG block.
        conv1_2 = self.slice1(img)
        conv2_2 = self.slice2(conv1_2)
        conv3_2 = self.slice3(conv2_2)
        conv4_2 = self.slice4(conv3_2)
        conv5_2 = self.slice5(conv4_2)
        return [conv1_2, conv2_2, conv3_2, conv4_2, conv5_2]


class VGGPerceptualLoss(torch.nn.Module):
    """L1 distance between VGG19 feature maps of two images (perceptual/content loss)."""
    def __init__(self):
        super().__init__()
        self.vgg = VGG19Feats().to(device)
        self.criterion = torch.nn.functional.l1_loss
        # ImageNet channel statistics; registered as buffers so they follow
        # the module across .to(device) calls.
        self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
        # Empirical per-layer weights; the deepest layer (conv5_2) dominates.
        self.weights = [1.0 / 2.6, 1.0 / 4.8, 1.0 / 3.7, 1.0 / 5.6, 10.0 / 1.5]

    def forward(self, input_img, target_img):
        # Inputs are expected as (N, C, H, W) batches scaled to [0, 1].
        # Repeat single-channel (grayscale) inputs across the RGB channels.
        if input_img.shape[1] != 3:
            input_img = input_img.repeat(1, 3, 1, 1)
            target_img = target_img.repeat(1, 3, 1, 1)
        # Normalize with the ImageNet statistics VGG19 was trained on.
        input_img = (input_img - self.mean) / self.std
        target_img = (target_img - self.mean) / self.std

        x_vgg, y_vgg = self.vgg(input_img), self.vgg(target_img)

        loss = {}
        # Weighted sum of L1 distances over the five feature levels.
        loss['pt_c_loss'] = sum(w * self.criterion(x, y)
                                for w, x, y in zip(self.weights, x_vgg, y_vgg))
        # No style term is computed; the key is kept at 0.0 as a placeholder.
        loss['pt_s_loss'] = 0.0

        return loss
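

# Minimal usage sketch (not part of the original file): drives the loss with
# random RGB batches in [0, 1]; the batch and spatial sizes are illustrative
# assumptions, not requirements of the module.
if __name__ == "__main__":
    loss_fn = VGGPerceptualLoss().to(device)
    pred = torch.rand(2, 3, 224, 224, device=device)    # e.g. generator output
    target = torch.rand(2, 3, 224, 224, device=device)  # e.g. ground-truth image
    losses = loss_fn(pred, target)
    print("pt_c_loss:", losses['pt_c_loss'].item(), "pt_s_loss:", losses['pt_s_loss'])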