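"""Utility functions for sRGB/linear color conversion, image layout conversion
between PyTorch tensors and numpy arrays, and video/file helpers."""
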
import math
import os
import subprocess
from typing import List

import PIL.Image
import numpy
import torch
import matplotlib
from torch import Tensor


def is_power2(x):
    return x != 0 and ((x & (x - 1)) == 0)


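# The functions below implement the standard sRGB transfer function: a linear
# segment near black and a 2.4-exponent power curve elsewhere.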
def numpy_srgb_to_linear(x):
    x = numpy.clip(x, 0.0, 1.0)
    return numpy.where(x <= 0.04045, x / 12.92, ((x + 0.055) / 1.055) ** 2.4)


def numpy_linear_to_srgb(x):
    x = numpy.clip(x, 0.0, 1.0)
    return numpy.where(x <= 0.003130804953560372, x * 12.92, 1.055 * (x ** (1.0 / 2.4)) - 0.055)


def torch_srgb_to_linear(x: torch.Tensor):
    x = torch.clip(x, 0.0, 1.0)
    return torch.where(torch.le(x, 0.04045), x / 12.92, ((x + 0.055) / 1.055) ** 2.4)


def torch_linear_to_srgb(x):
    x = torch.clip(x, 0.0, 1.0)
    return torch.where(torch.le(x, 0.003130804953560372), x * 12.92, 1.055 * (x ** (1.0 / 2.4)) - 0.055)


def image_linear_to_srgb(image):
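    # Apply the linear-to-sRGB conversion to the RGB channels only; an alpha
    # channel, if present, is passed through unchanged.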
    assert image.shape[2] == 3 or image.shape[2] == 4
    if image.shape[2] == 3:
        return numpy_linear_to_srgb(image)
    else:
        rgb_image = numpy_linear_to_srgb(image[:, :, 0:3])
        a_image = image[:, :, 3:4]
        return numpy.concatenate((rgb_image, a_image), axis=2)


def image_srgb_to_linear(image):
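    # The inverse of image_linear_to_srgb: convert the RGB channels from sRGB to
    # linear, passing any alpha channel through unchanged.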
    assert image.shape[2] == 3 or image.shape[2] == 4
    if image.shape[2] == 3:
        return numpy_srgb_to_linear(image)
    else:
        rgb_image = numpy_srgb_to_linear(image[:, :, 0:3])
        a_image = image[:, :, 3:4]
        return numpy.concatenate((rgb_image, a_image), axis=2)


def save_rng_state(file_name):
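    # Save the global (CPU) torch RNG state, e.g. so that a run can later be
    # resumed reproducibly via load_rng_state.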
    rng_state = torch.get_rng_state()
    torch_save(rng_state, file_name)


def load_rng_state(file_name):
    rng_state = torch_load(file_name)
    torch.set_rng_state(rng_state)


def grid_change_to_numpy_image(torch_image, num_channels=3):
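    # Visualize a 2-channel "grid change" tensor: the vector magnitude controls
    # brightness and the vector angle selects a hue from the HSV colormap.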
    height = torch_image.shape[1]
    width = torch_image.shape[2]
    size_image = (torch_image[0, :, :] ** 2 + torch_image[1, :, :] ** 2).sqrt().view(height, width, 1).numpy()
    # cm.get_cmap was removed in matplotlib 3.9; the colormap registry replaces it.
    hsv = matplotlib.colormaps['hsv']
    angle_image = hsv(((torch.atan2(
        torch_image[0, :, :].view(height * width),
        torch_image[1, :, :].view(height * width)).view(height, width) + math.pi) / (2 * math.pi)).numpy()) * 3
    numpy_image = size_image * angle_image[:, :, 0:3]
    rgb_image = numpy_linear_to_srgb(numpy_image)
    if num_channels == 3:
        return rgb_image
    elif num_channels == 4:
        return numpy.concatenate([rgb_image, numpy.ones_like(size_image)], axis=2)
    else:
        raise RuntimeError("Unsupported num_channels: " + str(num_channels))


def rgb_to_numpy_image(torch_image: Tensor, min_pixel_value=-1.0, max_pixel_value=1.0):
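    # Convert a CHW torch image with values in [min_pixel_value, max_pixel_value]
    # to an HWC numpy image in sRGB with values in [0, 1].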
    assert torch_image.dim() == 3
    assert torch_image.shape[0] == 3
    height = torch_image.shape[1]
    width = torch_image.shape[2]

    reshaped_image = torch_image.numpy().reshape(3, height * width).transpose().reshape(height, width, 3)
    numpy_image = (reshaped_image - min_pixel_value) / (max_pixel_value - min_pixel_value)
    return numpy_linear_to_srgb(numpy_image)


def rgba_to_numpy_image_greenscreen(torch_image: Tensor,
                                    min_pixel_value=-1.0,
                                    max_pixel_value=1.0,
                                    include_alpha=False):
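    # Composite the RGBA image over a pure green background: scale the RGB
    # channels by alpha, then add (1 - alpha) to the green channel.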
    height = torch_image.shape[1]
    width = torch_image.shape[2]

    reshaped_image = torch_image.numpy().reshape(4, height * width).transpose().reshape(height, width, 4)
    numpy_image = (reshaped_image - min_pixel_value) / (max_pixel_value - min_pixel_value)
    rgb_image = numpy_linear_to_srgb(numpy_image[:, :, 0:3])
    a_image = numpy_image[:, :, 3]
    rgb_image[:, :, 0:3] = rgb_image[:, :, 0:3] * a_image.reshape(a_image.shape[0], a_image.shape[1], 1)
    rgb_image[:, :, 1] = rgb_image[:, :, 1] + (1 - a_image)

    if not include_alpha:
        return rgb_image
    else:
        return numpy.concatenate((rgb_image, numpy.ones_like(numpy_image[:, :, 3:4])), axis=2)


def rgba_to_numpy_image(torch_image: Tensor, min_pixel_value=-1.0, max_pixel_value=1.0):
    assert torch_image.dim() == 3
    assert torch_image.shape[0] == 4
    height = torch_image.shape[1]
    width = torch_image.shape[2]

    reshaped_image = torch_image.numpy().reshape(4, height * width).transpose().reshape(height, width, 4)
    numpy_image = (reshaped_image - min_pixel_value) / (max_pixel_value - min_pixel_value)
    rgb_image = numpy_linear_to_srgb(numpy_image[:, :, 0:3])
    a_image = numpy.clip(numpy_image[:, :, 3], 0.0, 1.0)
    rgba_image = numpy.concatenate((rgb_image, a_image.reshape(height, width, 1)), axis=2)
    return rgba_image


def extract_numpy_image_from_filelike_with_pytorch_layout(file, has_alpha=True, scale=2.0, offset=-1.0):
    try:
        pil_image = PIL.Image.open(file)
    except Exception as e:
        raise RuntimeError("Could not open image file: %s" % file) from e
    return extract_numpy_image_from_PIL_image_with_pytorch_layout(pil_image, has_alpha, scale, offset)


def extract_numpy_image_from_PIL_image_with_pytorch_layout(pil_image, has_alpha=True, scale=2.0, offset=-1.0):
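    # Convert a square PIL image to a CHW numpy array in linear color, mapping
    # pixel values from [0, 1] to [offset, scale + offset] ([-1, 1] by default).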
    if has_alpha:
        num_channel = 4
    else:
        num_channel = 3
    image_size = pil_image.width

    if has_alpha:
        # Search for transparent pixels (alpha == 0) and set them to [0, 0, 0, 0]
        # so that their color does not influence the model.
        for i, px in enumerate(pil_image.getdata()):
            if px[3] <= 0:
                y = i // image_size
                x = i % image_size
                pil_image.putpixel((x, y), (0, 0, 0, 0))

    raw_image = numpy.asarray(pil_image)
    image = (raw_image / 255.0).reshape(image_size, image_size, num_channel)
    image[:, :, 0:3] = numpy_srgb_to_linear(image[:, :, 0:3])
    image = image \
                .reshape(image_size * image_size, num_channel) \
                .transpose() \
                .reshape(num_channel, image_size, image_size) * scale + offset
    return image


def extract_pytorch_image_from_filelike(file, has_alpha=True, scale=2.0, offset=-1.0):
    try:
        pil_image = PIL.Image.open(file)
    except Exception as e:
        raise RuntimeError("Could not open image file: %s" % file) from e
    image = extract_numpy_image_from_PIL_image_with_pytorch_layout(pil_image, has_alpha, scale, offset)
    return torch.from_numpy(image).float()


def extract_pytorch_image_from_PIL_image(pil_image, has_alpha=True, scale=2.0, offset=-1.0):
    image = extract_numpy_image_from_PIL_image_with_pytorch_layout(pil_image, has_alpha, scale, offset)
    return torch.from_numpy(image).float()


def extract_numpy_image_from_filelike(file):
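    # Load an image as an HWC numpy array in linear color with values in [0, 1].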
    pil_image = PIL.Image.open(file)
    image_width = pil_image.width
    image_height = pil_image.height
    if pil_image.mode == "RGBA":
        image = (numpy.asarray(pil_image) / 255.0).reshape(image_height, image_width, 4)
    else:
        image = (numpy.asarray(pil_image) / 255.0).reshape(image_height, image_width, 3)
    image[:, :, 0:3] = numpy_srgb_to_linear(image[:, :, 0:3])
    return image


def convert_avs_to_avi(avs_file, avi_file):
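    # Drive VirtualDub through a temporary script file. The vdub64.exe path below
    # assumes a Chocolatey installation on Windows.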
    os.makedirs(os.path.dirname(avi_file), exist_ok=True)

    with open("temp.vdub", "w") as file:
        file.write("VirtualDub.Open(\"%s\");" % avs_file)
        file.write("VirtualDub.video.SetCompression(\"cvid\", 0, 10000, 0);")
        file.write("VirtualDub.SaveAVI(\"%s\");" % avi_file)
        file.write("VirtualDub.Close();")

    os.system("C:\\ProgramData\\chocolatey\\lib\\virtualdub\\tools\\vdub64.exe /i temp.vdub")

    os.remove("temp.vdub")


def convert_avi_to_mp4(avi_file, mp4_file):
    os.makedirs(os.path.dirname(mp4_file), exist_ok=True)
    # Use ffmpeg's built-in AAC encoder; the external libfaac encoder has been
    # removed from ffmpeg. Paths are quoted so that names with spaces work.
    os.system("ffmpeg -y -i \"%s\" -c:v libx264 -preset slow -crf 22 -c:a aac -b:a 128k \"%s\"" % \
              (avi_file, mp4_file))


def convert_avi_to_webm(avi_file, webm_file):
    os.makedirs(os.path.dirname(webm_file), exist_ok=True)
    os.system("ffmpeg -y -i \"%s\" -vcodec libvpx -qmin 0 -qmax 50 -crf 10 -b:v 1M -acodec libvorbis \"%s\"" % \
              (avi_file, webm_file))


def convert_mp4_to_webm(mp4_file, webm_file):
    os.makedirs(os.path.dirname(webm_file), exist_ok=True)
    os.system("ffmpeg -y -i \"%s\" -vcodec libvpx -qmin 0 -qmax 50 -crf 10 -b:v 1M -acodec libvorbis \"%s\"" % \
              (mp4_file, webm_file))


def create_parent_dir(file_name):
    # os.makedirs("") raises, so skip the call when the file has no directory part.
    parent_dir = os.path.dirname(file_name)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)


def run_command(command_parts: List[str]):
    # subprocess handles arguments containing spaces correctly, which a plain
    # " ".join + os.system would not.
    subprocess.run(command_parts, check=True)


def save_pytorch_image(image, file_name):
    if image.dim() == 4 and image.shape[0] == 1:
        # Remove only the batch dimension; a bare squeeze() would also drop a
        # single-channel dimension.
        image = image.squeeze(0)
    if image.shape[0] == 4:
        numpy_image = rgba_to_numpy_image(image.detach().cpu())
        pil_image = PIL.Image.fromarray(numpy.uint8(numpy.rint(numpy_image * 255.0)), mode='RGBA')
    else:
        numpy_image = rgb_to_numpy_image(image.detach().cpu())
        pil_image = PIL.Image.fromarray(numpy.uint8(numpy.rint(numpy_image * 255.0)), mode='RGB')
    create_parent_dir(file_name)
    pil_image.save(file_name)


def torch_load(file_name):
    with open(file_name, 'rb') as f:
        return torch.load(f)


def torch_save(content, file_name):
    create_parent_dir(file_name)
    with open(file_name, 'wb') as f:
        torch.save(content, f)


def resize_PIL_image(pil_image, size=(256, 256)):
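    # Center-crop the image to a square before resizing so that the aspect ratio
    # is preserved.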
    w, h = pil_image.size
    d = min(w, h)
    r = ((w - d) // 2, (h - d) // 2, (w + d) // 2, (h + d) // 2)
    return pil_image.resize(size, resample=PIL.Image.LANCZOS, box=r)


def extract_PIL_image_from_filelike(file):
    return PIL.Image.open(file)


def convert_output_image_from_torch_to_numpy(output_image):
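    # Accept HWC 2-channel, CHW 4-channel (RGBA), 1-channel (alpha), or
    # 2-channel (grid change) images and convert them to HWC numpy images.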
    if output_image.shape[2] == 2:
        h, w, c = output_image.shape
        output_image = torch.transpose(output_image.reshape(h * w, c), 0, 1).reshape(c, h, w)
    if output_image.shape[0] == 4:
        numpy_image = rgba_to_numpy_image(output_image)
    elif output_image.shape[0] == 1:
        c, h, w = output_image.shape
        alpha_image = torch.cat([output_image.repeat(3, 1, 1) * 2.0 - 1.0, torch.ones(1, h, w)], dim=0)
        numpy_image = rgba_to_numpy_image(alpha_image)
    elif output_image.shape[0] == 2:
        numpy_image = grid_change_to_numpy_image(output_image, num_channels=4)
    else:
        raise RuntimeError("Unsupported # image channels: %d" % output_image.shape[0])
    return numpy_image
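

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module. "character.png" is a
    # hypothetical square RGBA image in the working directory; the round trip
    # loads it into the [-1, 1] CHW tensor layout and writes it back out as a PNG.
    example_image = extract_pytorch_image_from_filelike("character.png", has_alpha=True)
    save_pytorch_image(example_image, "output/character_roundtrip.png")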