Commit 5181cd5
Parent(s): b028a73

Update to diffusers backend

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.
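
At a glance, the change swaps the hand-rolled LDM stack (OmegaConf config, torch.load checkpoint surgery, and the DDIM/PLMS/DPM samplers from the ldm package) for the diffusers library, loading the published stabilityai/stable-diffusion-2 weights directly. A minimal sketch of the new inference path, condensed from the updated app.py below (the fp16 revision and the CUDA device are assumptions matching the Space's GPU runtime, not requirements of the API):

import torch
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

repo_id = "stabilityai/stable-diffusion-2"
# The 768x768 SD2 checkpoint is a v-prediction model, so the scheduler
# must be constructed with prediction_type="v_prediction".
scheduler = EulerDiscreteScheduler.from_pretrained(
    repo_id, subfolder="scheduler", prediction_type="v_prediction")
pipe = DiffusionPipeline.from_pretrained(
    repo_id, torch_dtype=torch.float16, revision="fp16", scheduler=scheduler)
pipe = pipe.to("cuda")

# Seeded generation, mirroring infer() in the diff below.
generator = torch.Generator(device="cuda").manual_seed(1024)
images = pipe("A high tech solarpunk utopia in the Amazon rainforest",
              width=768, height=768, num_inference_steps=25,
              guidance_scale=9, generator=generator).images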
- app.py +39 -286
- configs/stable-diffusion/v2-inference-v.yaml +0 -68
- configs/stable-diffusion/v2-inference.yaml +0 -67
- configs/stable-diffusion/v2-inpainting-inference.yaml +0 -158
- configs/stable-diffusion/v2-midas-inference.yaml +0 -74
- configs/stable-diffusion/x4-upscaling.yaml +0 -76
- environment.yaml +0 -29
- ldm/data/__init__.py +0 -0
- ldm/data/util.py +0 -24
- ldm/models/autoencoder.py +0 -219
- ldm/models/diffusion/__init__.py +0 -0
- ldm/models/diffusion/ddim.py +0 -336
- ldm/models/diffusion/ddpm.py +0 -1796
- ldm/models/diffusion/dpm_solver/__init__.py +0 -1
- ldm/models/diffusion/dpm_solver/dpm_solver.py +0 -1154
- ldm/models/diffusion/dpm_solver/sampler.py +0 -87
- ldm/models/diffusion/plms.py +0 -244
- ldm/models/diffusion/sampling_util.py +0 -22
- ldm/modules/attention.py +0 -331
- ldm/modules/diffusionmodules/__init__.py +0 -0
- ldm/modules/diffusionmodules/model.py +0 -852
- ldm/modules/diffusionmodules/openaimodel.py +0 -786
- ldm/modules/diffusionmodules/upscaling.py +0 -81
- ldm/modules/diffusionmodules/util.py +0 -270
- ldm/modules/distributions/__init__.py +0 -0
- ldm/modules/distributions/distributions.py +0 -92
- ldm/modules/ema.py +0 -80
- ldm/modules/encoders/__init__.py +0 -0
- ldm/modules/encoders/modules.py +0 -213
- ldm/modules/image_degradation/__init__.py +0 -2
- ldm/modules/image_degradation/bsrgan.py +0 -730
- ldm/modules/image_degradation/bsrgan_light.py +0 -651
- ldm/modules/image_degradation/utils/test.png +0 -0
- ldm/modules/image_degradation/utils_image.py +0 -916
- ldm/modules/midas/__init__.py +0 -0
- ldm/modules/midas/api.py +0 -170
- ldm/modules/midas/midas/__init__.py +0 -0
- ldm/modules/midas/midas/base_model.py +0 -16
- ldm/modules/midas/midas/blocks.py +0 -342
- ldm/modules/midas/midas/dpt_depth.py +0 -109
- ldm/modules/midas/midas/midas_net.py +0 -76
- ldm/modules/midas/midas/midas_net_custom.py +0 -128
- ldm/modules/midas/midas/transforms.py +0 -234
- ldm/modules/midas/midas/vit.py +0 -491
- ldm/modules/midas/utils.py +0 -189
- ldm/util.py +0 -197
- requirements.txt +4 -13
- scripts/img2img.py +0 -279
- scripts/streamlit/depth2img.py +0 -158
- scripts/streamlit/inpainting.py +0 -194
app.py
CHANGED
@@ -1,63 +1,21 @@
 import gradio as gr
-import argparse, os
 import cv2
 import torch
+from imwatermark import WatermarkEncoder
 import numpy as np
-from omegaconf import OmegaConf
 from PIL import Image
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange
-from torchvision.utils import make_grid
-from pytorch_lightning import seed_everything
-from torch import autocast
-from contextlib import nullcontext
-from imwatermark import WatermarkEncoder
 import re
-
-from ldm.util import instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
-from ldm.models.diffusion.dpm_solver import DPMSolverSampler
-from huggingface_hub import hf_hub_download
 from datasets import load_dataset
-
-torch.set_grad_enabled(False)
+from diffusers import DiffusionPipeline, EulerDiscreteScheduler
 
 from share_btn import community_icon_html, loading_icon_html, share_js
 
 REPO_ID = "stabilityai/stable-diffusion-2"
-
-CONFIG_PATH = "./configs/stable-diffusion/v2-inference-v.yaml"
-device = "cuda"
-stable_diffusion_2_path = hf_hub_download(repo_id=REPO_ID, filename=CKPT_NAME)
-
-torch.set_grad_enabled(False)
-
-def chunk(it, size):
-    it = iter(it)
-    return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(config, ckpt, verbose=False):
-    print(f"Loading model from {ckpt}")
-    pl_sd = torch.load(ckpt, map_location="cpu")
-    if "global_step" in pl_sd:
-        print(f"Global Step: {pl_sd['global_step']}")
-    sd = pl_sd["state_dict"]
-    model = instantiate_from_config(config.model)
-    m, u = model.load_state_dict(sd, strict=False)
-    if len(m) > 0 and verbose:
-        print("missing keys:")
-        print(m)
-    if len(u) > 0 and verbose:
-        print("unexpected keys:")
-        print(u)
-
-    model.cuda()
-    model.eval()
-    return model
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
+wm = "SDV2"
+wm_encoder = WatermarkEncoder()
+wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
 def put_watermark(img, wm_encoder=None):
     if wm_encoder is not None:
         img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
@@ -65,234 +23,28 @@ def put_watermark(img, wm_encoder=None):
     img = Image.fromarray(img[:, :, ::-1])
     return img
 
-
+repo_id = "stabilityai/stable-diffusion-2"
+scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler", prediction_type="v_prediction")
+pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16", scheduler=scheduler)
+pipe = pipe.to(device)
+pipe.enable_xformers_memory_efficient_attention()
+
+#If you have duplicated this Space or is running locally, you can remove this part
 word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=True)
 word_list = word_list_dataset["train"]['text']
 
-config = OmegaConf.load(CONFIG_PATH)
-model = load_model_from_config(config, stable_diffusion_2_path)
-device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-model = model.to(device)
-
-def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--prompt",
-        type=str,
-        nargs="?",
-        default="a professional photograph of an astronaut riding a triceratops",
-        help="the prompt to render"
-    )
-    parser.add_argument(
-        "--outdir",
-        type=str,
-        nargs="?",
-        help="dir to write results to",
-        default="outputs/txt2img-samples"
-    )
-    parser.add_argument(
-        "--steps",
-        type=int,
-        default=50,
-        help="number of ddim sampling steps",
-    )
-    parser.add_argument(
-        "--plms",
-        action='store_true',
-        help="use plms sampling",
-    )
-    parser.add_argument(
-        "--dpm",
-        action='store_true',
-        help="use DPM (2) sampler",
-    )
-    parser.add_argument(
-        "--fixed_code",
-        action='store_true',
-        help="if enabled, uses the same starting code across all samples ",
-    )
-    parser.add_argument(
-        "--ddim_eta",
-        type=float,
-        default=0.0,
-        help="ddim eta (eta=0.0 corresponds to deterministic sampling",
-    )
-    parser.add_argument(
-        "--n_iter",
-        type=int,
-        default=3,
-        help="sample this often",
-    )
-    parser.add_argument(
-        "--H",
-        type=int,
-        default=512,
-        help="image height, in pixel space",
-    )
-    parser.add_argument(
-        "--W",
-        type=int,
-        default=512,
-        help="image width, in pixel space",
-    )
-    parser.add_argument(
-        "--C",
-        type=int,
-        default=4,
-        help="latent channels",
-    )
-    parser.add_argument(
-        "--f",
-        type=int,
-        default=8,
-        help="downsampling factor, most often 8 or 16",
-    )
-    parser.add_argument(
-        "--n_samples",
-        type=int,
-        default=3,
-        help="how many samples to produce for each given prompt. A.k.a batch size",
-    )
-    parser.add_argument(
-        "--n_rows",
-        type=int,
-        default=0,
-        help="rows in the grid (default: n_samples)",
-    )
-    parser.add_argument(
-        "--scale",
-        type=float,
-        default=9.0,
-        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
-    )
-    parser.add_argument(
-        "--from-file",
-        type=str,
-        help="if specified, load prompts from this file, separated by newlines",
-    )
-    parser.add_argument(
-        "--config",
-        type=str,
-        default="configs/stable-diffusion/v2-inference.yaml",
-        help="path to config which constructs model",
-    )
-    parser.add_argument(
-        "--ckpt",
-        type=str,
-        help="path to checkpoint of model",
-    )
-    parser.add_argument(
-        "--seed",
-        type=int,
-        default=42,
-        help="the seed (for reproducible sampling)",
-    )
-    parser.add_argument(
-        "--precision",
-        type=str,
-        help="evaluate at this precision",
-        choices=["full", "autocast"],
-        default="autocast"
-    )
-    parser.add_argument(
-        "--repeat",
-        type=int,
-        default=1,
-        help="repeat each prompt in file this often",
-    )
-    opt = parser.parse_args()
-    return opt
-
 def infer(prompt, samples, steps, scale, seed):
-
-    opt.seed = seed
-    seed_everything(seed)
-
+    #If you have duplicated this Space or is running locally, you can remove this part
     for filter in word_list:
         if re.search(rf"\b{filter}\b", prompt):
             raise gr.Error("Unsafe content found. Please try again with different prompts.")
-
-
-
-
-
-
-
-    os.makedirs(opt.outdir, exist_ok=True)
-    outpath = opt.outdir
-
-    print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
-    wm = "SDV2"
-    wm_encoder = WatermarkEncoder()
-    wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
-
-    batch_size = opt.n_samples
-    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
-    if not opt.from_file:
-        prompt = opt.prompt
-        assert prompt is not None
-        data = [batch_size * [prompt]]
-    else:
-        print(f"reading prompts from {opt.from_file}")
-        with open(opt.from_file, "r") as f:
-            data = f.read().splitlines()
-            data = [p for p in data for i in range(opt.repeat)]
-            data = list(chunk(data, batch_size))
-    prompt = prompt
-    assert prompt is not None
-    data = [batch_size * [prompt]]
-
-    sample_path = os.path.join(outpath, "samples")
-    os.makedirs(sample_path, exist_ok=True)
-    sample_count = 0
-    base_count = len(os.listdir(sample_path))
-    grid_count = len(os.listdir(outpath)) - 1
-
-    opt.W = 768
-    opt.H = 768
-
-    start_code = None
-    if opt.fixed_code:
-        start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device)
-
-    precision_scope = autocast if opt.precision == "autocast" else nullcontext
-    image_samples = []
-    with torch.no_grad(), \
-        precision_scope("cuda"), \
-        model.ema_scope():
-            all_samples = list()
-            for n in trange(opt.n_iter, desc="Sampling"):
-                for prompts in tqdm(data, desc="data"):
-                    uc = None
-                    if opt.scale != 1.0:
-                        uc = model.get_learned_conditioning(batch_size * [""])
-                    if isinstance(prompts, tuple):
-                        prompts = list(prompts)
-                    c = model.get_learned_conditioning(prompts)
-                    shape = [opt.C, opt.H // opt.f, opt.W // opt.f]
-                    samples, _ = sampler.sample(S=opt.steps,
-                                                conditioning=c,
-                                                batch_size=opt.n_samples,
-                                                shape=shape,
-                                                verbose=False,
-                                                unconditional_guidance_scale=opt.scale,
-                                                unconditional_conditioning=uc,
-                                                eta=opt.ddim_eta,
-                                                x_T=start_code)
-
-                    x_samples = model.decode_first_stage(samples)
-                    x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
-                    for x_sample in x_samples:
-                        x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                        img = Image.fromarray(x_sample.astype(np.uint8))
-                        img = put_watermark(img, wm_encoder)
-                        image_samples.append(img)
-                        base_count += 1
-                        sample_count += 1
-
-                    all_samples.append(x_samples)
-    return image_samples
+    generator = torch.Generator(device=device).manual_seed(seed)
+    images = pipe(prompt, width=768, height=768, num_inference_steps=steps, guidance_scale=scale, num_images_per_prompt=samples, generator=generator).images
+    images_watermarked = []
+    for image in images:
+        image = put_watermark(image, wm_encoder)
+        images_watermarked.append(image)
+    return images_watermarked
 
 css = """
 .gradio-container {
@@ -412,7 +164,8 @@ css = """
 #prompt-container{
     gap: 0;
 }
-#component-
+#component-9{margin-top: -19px}
+.image_duplication{position: absolute; width: 100px; left: 50px}
 """
 
 block = gr.Blocks(css=css)
@@ -421,36 +174,36 @@ examples = [
     [
         'A high tech solarpunk utopia in the Amazon rainforest',
         4,
-
-
+        25,
+        9,
         1024,
     ],
     [
         'A pikachu fine dining with a view to the Eiffel Tower',
         4,
-
-
+        25,
+        9,
         1024,
     ],
     [
         'A mecha robot in a favela in expressionist style',
         4,
-
-
+        25,
+        9,
        1024,
     ],
     [
         'an insect robot preparing a delicious meal',
         4,
-
-
+        25,
+        9,
         1024,
     ],
     [
         "A small cabin on top of a snowy mountain in the style of Disney, artstation",
         4,
-
-
+        25,
+        9,
         1024,
     ],
 ]
@@ -458,7 +211,7 @@ examples = [
 with block:
     gr.HTML(
         """
-            <div style="text-align: center;
+            <div style="text-align: center; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
@@ -504,7 +257,7 @@ with block:
                Stable Diffusion 2 Demo
              </h1>
            </div>
-            <p style="margin-bottom: 10px; font-size: 94
+            <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
              Stable Diffusion 2 is the latest text-to-image model from StabilityAI. <a style="text-decoration: underline;" href="https://huggingface.co/spaces/stabilityai/stable-diffusion-1">Access Stable Diffusion 1 Space here</a><br>For faster generation and API
              access you can try
              <a
@@ -512,7 +265,7 @@ with block:
                style="text-decoration: underline;"
                target="_blank"
                >DreamStudio Beta</a
-            >
+            >. To skip the queue you can <a style="display:inline-block;width: 123px;" href="https://huggingface.co/spaces/stabilityai/stable-diffusion?duplicate=true"><img style="width: 113px;margin-top: -13px;position: absolute;" src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
            </p>
          </div>
        """
@@ -563,7 +316,7 @@ with block:
        loading_icon = gr.HTML(loading_icon_html)
        share_button = gr.Button("Share to community", elem_id="share-btn")
 
-        ex = gr.Examples(examples=examples, fn=infer, inputs=[text, samples, steps, scale, seed], outputs=[gallery
+        ex = gr.Examples(examples=examples, fn=infer, inputs=[text, samples, steps, scale, seed], outputs=[gallery], cache_examples=False)
        ex.dataset.headers = [""]
 
        text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=[gallery])
@@ -578,7 +331,7 @@ with block:
        gr.HTML(
            """
                <div class="footer">
-                    <p>Model by <a href="https://huggingface.co/stabilityai" style="text-decoration: underline;" target="_blank">Stability AI</a> - Gradio Demo by 🤗 Hugging Face
+                    <p>Model by <a href="https://huggingface.co/stabilityai" style="text-decoration: underline;" target="_blank">Stability AI</a> - Gradio Demo by 🤗 Hugging Face using the <a href="https://github.com/huggingface/diffusers" style="text-decoration: underline;" target="_blank">🧨 diffusers library</a>
                </p>
            </div>
            <div class="acknowledgments">
@@ -590,4 +343,4 @@ Despite how impressive being able to turn text into image is, beware to the fact
        """
        )
 
-block.queue(concurrency_count=1, max_size=
+block.queue(concurrency_count=1, max_size=50).launch(max_threads=150)
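Two notes on the new pipeline setup above. First, prediction_type="v_prediction" on the scheduler mirrors parameterization: "v" in the deleted v2-inference-v.yaml below; the 768x768 SD2 checkpoint predicts v rather than epsilon, and omitting the flag silently yields wrong samples. Second, the half-precision weights (revision="fp16", torch_dtype=torch.float16) and pipe.enable_xformers_memory_efficient_attention() assume a CUDA device. A hypothetical CPU fallback, not part of this commit, would load full-precision weights and skip the xformers call:

# Hypothetical CPU fallback (assumption; not in this commit).
if device == "cpu":
    # Defaults to float32 weights; fp16 matmuls are unsupported on most CPUs.
    pipe = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler).to(device)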
configs/stable-diffusion/v2-inference-v.yaml
DELETED
@@ -1,68 +0,0 @@
-model:
-  base_learning_rate: 1.0e-4
-  target: ldm.models.diffusion.ddpm.LatentDiffusion
-  params:
-    parameterization: "v"
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False # we set this to false because this is an inference only config
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        use_checkpoint: True
-        use_fp16: True
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
-        use_spatial_transformer: True
-        use_linear_in_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          #attn_type: "vanilla-xformers"
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
configs/stable-diffusion/v2-inference.yaml
DELETED
@@ -1,67 +0,0 @@
-model:
-  base_learning_rate: 1.0e-4
-  target: ldm.models.diffusion.ddpm.LatentDiffusion
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False # we set this to false because this is an inference only config
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        use_checkpoint: True
-        use_fp16: True
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
-        use_spatial_transformer: True
-        use_linear_in_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          #attn_type: "vanilla-xformers"
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
configs/stable-diffusion/v2-inpainting-inference.yaml
DELETED
@@ -1,158 +0,0 @@
-model:
-  base_learning_rate: 5.0e-05
-  target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: hybrid
-    scale_factor: 0.18215
-    monitor: val/loss_simple_ema
-    finetune_keys: null
-    use_ema: False
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        use_checkpoint: True
-        image_size: 32 # unused
-        in_channels: 9
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
-        use_spatial_transformer: True
-        use_linear_in_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          #attn_type: "vanilla-xformers"
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: [ ]
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
-
-
-data:
-  target: ldm.data.laion.WebDataModuleFromConfig
-  params:
-    tar_base: null # for concat as in LAION-A
-    p_unsafe_threshold: 0.1
-    filter_word_list: "data/filters.yaml"
-    max_pwatermark: 0.45
-    batch_size: 8
-    num_workers: 6
-    multinode: True
-    min_size: 512
-    train:
-      shards:
-        - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-0/{00000..18699}.tar -"
-        - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-1/{00000..18699}.tar -"
-        - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-2/{00000..18699}.tar -"
-        - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-3/{00000..18699}.tar -"
-        - "pipe:aws s3 cp s3://stability-aws/laion-a-native/part-4/{00000..18699}.tar -" #{00000-94333}.tar"
-      shuffle: 10000
-      image_key: jpg
-      image_transforms:
-      - target: torchvision.transforms.Resize
-        params:
-          size: 512
-          interpolation: 3
-      - target: torchvision.transforms.RandomCrop
-        params:
-          size: 512
-      postprocess:
-        target: ldm.data.laion.AddMask
-        params:
-          mode: "512train-large"
-          p_drop: 0.25
-    # NOTE use enough shards to avoid empty validation loops in workers
-    validation:
-      shards:
-        - "pipe:aws s3 cp s3://deep-floyd-s3/datasets/laion_cleaned-part5/{93001..94333}.tar - "
-      shuffle: 0
-      image_key: jpg
-      image_transforms:
-      - target: torchvision.transforms.Resize
-        params:
-          size: 512
-          interpolation: 3
-      - target: torchvision.transforms.CenterCrop
-        params:
-          size: 512
-      postprocess:
-        target: ldm.data.laion.AddMask
-        params:
-          mode: "512train-large"
-          p_drop: 0.25
-
-lightning:
-  find_unused_parameters: True
-  modelcheckpoint:
-    params:
-      every_n_train_steps: 5000
-
-  callbacks:
-    metrics_over_trainsteps_checkpoint:
-      params:
-        every_n_train_steps: 10000
-
-    image_logger:
-      target: main.ImageLogger
-      params:
-        enable_autocast: False
-        disabled: False
-        batch_frequency: 1000
-        max_images: 4
-        increase_log_steps: False
-        log_first_step: False
-        log_images_kwargs:
-          use_ema_scope: False
-          inpaint: False
-          plot_progressive_rows: False
-          plot_diffusion_rows: False
-          N: 4
-          unconditional_guidance_scale: 5.0
-          unconditional_guidance_label: [""]
-          ddim_steps: 50 # todo check these out for depth2img,
-          ddim_eta: 0.0 # todo check these out for depth2img,
-
-  trainer:
-    benchmark: True
-    val_check_interval: 5000000
-    num_sanity_val_steps: 0
-    accumulate_grad_batches: 1
configs/stable-diffusion/v2-midas-inference.yaml
DELETED
@@ -1,74 +0,0 @@
-model:
-  base_learning_rate: 5.0e-07
-  target: ldm.models.diffusion.ddpm.LatentDepth2ImageDiffusion
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: hybrid
-    scale_factor: 0.18215
-    monitor: val/loss_simple_ema
-    finetune_keys: null
-    use_ema: False
-
-    depth_stage_config:
-      target: ldm.modules.midas.api.MiDaSInference
-      params:
-        model_type: "dpt_hybrid"
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        use_checkpoint: True
-        image_size: 32 # unused
-        in_channels: 5
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_head_channels: 64 # need to fix for flash-attn
-        use_spatial_transformer: True
-        use_linear_in_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          #attn_type: "vanilla-xformers"
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: [ ]
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
-
configs/stable-diffusion/x4-upscaling.yaml
DELETED
@@ -1,76 +0,0 @@
-model:
-  base_learning_rate: 1.0e-04
-  target: ldm.models.diffusion.ddpm.LatentUpscaleDiffusion
-  params:
-    parameterization: "v"
-    low_scale_key: "lr"
-    linear_start: 0.0001
-    linear_end: 0.02
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 128
-    channels: 4
-    cond_stage_trainable: false
-    conditioning_key: "hybrid-adm"
-    monitor: val/loss_simple_ema
-    scale_factor: 0.08333
-    use_ema: False
-
-    low_scale_config:
-      target: ldm.modules.diffusionmodules.upscaling.ImageConcatWithNoiseAugmentation
-      params:
-        noise_schedule_config: # image space
-          linear_start: 0.0001
-          linear_end: 0.02
-        max_noise_level: 350
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        use_checkpoint: True
-        num_classes: 1000 # timesteps for noise conditioning (here constant, just need one)
-        image_size: 128
-        in_channels: 7
-        out_channels: 4
-        model_channels: 256
-        attention_resolutions: [ 2,4,8]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 2, 4]
-        disable_self_attentions: [True, True, True, False]
-        disable_middle_self_attn: False
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 1024
-        legacy: False
-        use_linear_in_transformer: True
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        ddconfig:
-          # attn_type: "vanilla-xformers" this model needs efficient attention to be feasible on HR data, also the decoder seems to break in half precision (UNet is fine though)
-          double_z: True
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult: [ 1,2,4 ] # num_down = len(ch_mult)-1
-          num_res_blocks: 2
-          attn_resolutions: [ ]
-          dropout: 0.0
-
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
-      params:
-        freeze: True
-        layer: "penultimate"
-
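The deleted upscaler config has no counterpart in the updated app.py; the demo now covers text-to-image only. For reference, the same checkpoint family is published for diffusers as a separate pipeline; a hedged sketch, assuming the stabilityai/stable-diffusion-x4-upscaler repo and diffusers' StableDiffusionUpscalePipeline class (neither appears in this commit):

from diffusers import StableDiffusionUpscalePipeline

up = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler")
# low_res_img is a PIL image; the pipeline returns it upscaled 4x,
# guided by the text prompt.
hi_res = up(prompt="a white cat", image=low_res_img).images[0]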
environment.yaml
DELETED
@@ -1,29 +0,0 @@
-name: ldm
-channels:
-  - pytorch
-  - defaults
-dependencies:
-  - python=3.8.5
-  - pip=20.3
-  - cudatoolkit=11.3
-  - pytorch=1.12.1
-  - torchvision=0.13.1
-  - numpy=1.23.1
-  - pip:
-    - albumentations==1.3.0
-    - opencv-python==4.6.0.66
-    - imageio==2.9.0
-    - imageio-ffmpeg==0.4.2
-    - pytorch-lightning==1.4.2
-    - omegaconf==2.1.1
-    - test-tube>=0.7.5
-    - streamlit==1.12.1
-    - einops==0.3.0
-    - transformers==4.19.2
-    - webdataset==0.2.5
-    - kornia==0.6
-    - open_clip_torch==2.0.2
-    - invisible-watermark>=0.1.5
-    - streamlit-drawable-canvas==0.8.0
-    - torchmetrics==0.6.0
-    - -e .
ldm/data/__init__.py
DELETED
File without changes
ldm/data/util.py
DELETED
@@ -1,24 +0,0 @@
-import torch
-
-from ldm.modules.midas.api import load_midas_transform
-
-
-class AddMiDaS(object):
-    def __init__(self, model_type):
-        super().__init__()
-        self.transform = load_midas_transform(model_type)
-
-    def pt2np(self, x):
-        x = ((x + 1.0) * .5).detach().cpu().numpy()
-        return x
-
-    def np2pt(self, x):
-        x = torch.from_numpy(x) * 2 - 1.
-        return x
-
-    def __call__(self, sample):
-        # sample['jpg'] is tensor hwc in [-1, 1] at this point
-        x = self.pt2np(sample['jpg'])
-        x = self.transform({"image": x})["image"]
-        sample['midas_in'] = x
-        return sample
ldm/models/autoencoder.py
DELETED
@@ -1,219 +0,0 @@
-import torch
-import pytorch_lightning as pl
-import torch.nn.functional as F
-from contextlib import contextmanager
-
-from ldm.modules.diffusionmodules.model import Encoder, Decoder
-from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
-
-from ldm.util import instantiate_from_config
-from ldm.modules.ema import LitEma
-
-
-class AutoencoderKL(pl.LightningModule):
-    def __init__(self,
-                 ddconfig,
-                 lossconfig,
-                 embed_dim,
-                 ckpt_path=None,
-                 ignore_keys=[],
-                 image_key="image",
-                 colorize_nlabels=None,
-                 monitor=None,
-                 ema_decay=None,
-                 learn_logvar=False
-                 ):
-        super().__init__()
-        self.learn_logvar = learn_logvar
-        self.image_key = image_key
-        self.encoder = Encoder(**ddconfig)
-        self.decoder = Decoder(**ddconfig)
-        self.loss = instantiate_from_config(lossconfig)
-        assert ddconfig["double_z"]
-        self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
-        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
-        self.embed_dim = embed_dim
-        if colorize_nlabels is not None:
-            assert type(colorize_nlabels)==int
-            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
-        if monitor is not None:
-            self.monitor = monitor
-
-        self.use_ema = ema_decay is not None
-        if self.use_ema:
-            self.ema_decay = ema_decay
-            assert 0. < ema_decay < 1.
-            self.model_ema = LitEma(self, decay=ema_decay)
-            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
-        if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
-
-    def init_from_ckpt(self, path, ignore_keys=list()):
-        sd = torch.load(path, map_location="cpu")["state_dict"]
-        keys = list(sd.keys())
-        for k in keys:
-            for ik in ignore_keys:
-                if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
-                    del sd[k]
-        self.load_state_dict(sd, strict=False)
-        print(f"Restored from {path}")
-
-    @contextmanager
-    def ema_scope(self, context=None):
-        if self.use_ema:
-            self.model_ema.store(self.parameters())
-            self.model_ema.copy_to(self)
-            if context is not None:
-                print(f"{context}: Switched to EMA weights")
-        try:
-            yield None
-        finally:
-            if self.use_ema:
-                self.model_ema.restore(self.parameters())
-                if context is not None:
-                    print(f"{context}: Restored training weights")
-
-    def on_train_batch_end(self, *args, **kwargs):
-        if self.use_ema:
-            self.model_ema(self)
-
-    def encode(self, x):
-        h = self.encoder(x)
-        moments = self.quant_conv(h)
-        posterior = DiagonalGaussianDistribution(moments)
-        return posterior
-
-    def decode(self, z):
-        z = self.post_quant_conv(z)
-        dec = self.decoder(z)
-        return dec
-
-    def forward(self, input, sample_posterior=True):
-        posterior = self.encode(input)
-        if sample_posterior:
-            z = posterior.sample()
-        else:
-            z = posterior.mode()
-        dec = self.decode(z)
-        return dec, posterior
-
-    def get_input(self, batch, k):
-        x = batch[k]
-        if len(x.shape) == 3:
-            x = x[..., None]
-        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
-        return x
-
-    def training_step(self, batch, batch_idx, optimizer_idx):
-        inputs = self.get_input(batch, self.image_key)
-        reconstructions, posterior = self(inputs)
-
-        if optimizer_idx == 0:
-            # train encoder+decoder+logvar
-            aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
-                                            last_layer=self.get_last_layer(), split="train")
-            self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
-            return aeloss
-
-        if optimizer_idx == 1:
-            # train the discriminator
-            discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
-                                                last_layer=self.get_last_layer(), split="train")
-
-            self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
-            return discloss
-
-    def validation_step(self, batch, batch_idx):
-        log_dict = self._validation_step(batch, batch_idx)
-        with self.ema_scope():
-            log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
-        return log_dict
-
-    def _validation_step(self, batch, batch_idx, postfix=""):
-        inputs = self.get_input(batch, self.image_key)
-        reconstructions, posterior = self(inputs)
-        aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
-                                        last_layer=self.get_last_layer(), split="val"+postfix)
-
-        discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
-                                            last_layer=self.get_last_layer(), split="val"+postfix)
-
-        self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
-        self.log_dict(log_dict_ae)
-        self.log_dict(log_dict_disc)
-        return self.log_dict
-
-    def configure_optimizers(self):
-        lr = self.learning_rate
-        ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
-            self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
-        if self.learn_logvar:
-            print(f"{self.__class__.__name__}: Learning logvar")
-            ae_params_list.append(self.loss.logvar)
-        opt_ae = torch.optim.Adam(ae_params_list,
-                                  lr=lr, betas=(0.5, 0.9))
-        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
-                                    lr=lr, betas=(0.5, 0.9))
-        return [opt_ae, opt_disc], []
-
-    def get_last_layer(self):
-        return self.decoder.conv_out.weight
-
-    @torch.no_grad()
-    def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
-        log = dict()
-        x = self.get_input(batch, self.image_key)
-        x = x.to(self.device)
-        if not only_inputs:
-            xrec, posterior = self(x)
-            if x.shape[1] > 3:
-                # colorize with random projection
-                assert xrec.shape[1] > 3
-                x = self.to_rgb(x)
-                xrec = self.to_rgb(xrec)
-            log["samples"] = self.decode(torch.randn_like(posterior.sample()))
-            log["reconstructions"] = xrec
-            if log_ema or self.use_ema:
-                with self.ema_scope():
-                    xrec_ema, posterior_ema = self(x)
-                    if x.shape[1] > 3:
-                        # colorize with random projection
-                        assert xrec_ema.shape[1] > 3
-                        xrec_ema = self.to_rgb(xrec_ema)
-                    log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
-                    log["reconstructions_ema"] = xrec_ema
-        log["inputs"] = x
-        return log
-
-    def to_rgb(self, x):
-        assert self.image_key == "segmentation"
-        if not hasattr(self, "colorize"):
-            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
-        x = F.conv2d(x, weight=self.colorize)
-        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
-        return x
-
-
-class IdentityFirstStage(torch.nn.Module):
-    def __init__(self, *args, vq_interface=False, **kwargs):
-        self.vq_interface = vq_interface
-        super().__init__()
-
-    def encode(self, x, *args, **kwargs):
-        return x
-
-    def decode(self, x, *args, **kwargs):
-        return x
-
-    def quantize(self, x, *args, **kwargs):
-        if self.vq_interface:
-            return x, None, [None, None, None]
-        return x
-
-    def forward(self, x, *args, **kwargs):
-        return x
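In the diffusers backend, this class's job (encode to a DiagonalGaussianDistribution, decode from latents scaled by scale_factor 0.18215 per the configs above) is filled by the pipeline's vae component. A rough equivalent through diffusers' AutoencoderKL, as a sketch rather than anything this commit ships:

import torch
from diffusers import AutoencoderKL

# "vae" is the standard subfolder name in the diffusers model layout.
vae = AutoencoderKL.from_pretrained("stabilityai/stable-diffusion-2", subfolder="vae")

scale_factor = 0.18215  # matches scale_factor in the deleted ldm configs
with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)         # image batch in [-1, 1]
    posterior = vae.encode(x).latent_dist   # diagonal Gaussian posterior
    z = posterior.sample() * scale_factor   # latents the UNet denoises
    recon = vae.decode(z / scale_factor).sample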
ldm/models/diffusion/__init__.py
DELETED
File without changes
ldm/models/diffusion/ddim.py
DELETED
@@ -1,336 +0,0 @@
|
|
1 |
-
"""SAMPLING ONLY."""
|
2 |
-
|
3 |
-
import torch
|
4 |
-
import numpy as np
|
5 |
-
from tqdm import tqdm
|
6 |
-
|
7 |
-
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
|
8 |
-
|
9 |
-
|
10 |
-
class DDIMSampler(object):
|
11 |
-
def __init__(self, model, schedule="linear", **kwargs):
|
12 |
-
super().__init__()
|
13 |
-
self.model = model
|
14 |
-
self.ddpm_num_timesteps = model.num_timesteps
|
15 |
-
self.schedule = schedule
|
16 |
-
|
17 |
-
def register_buffer(self, name, attr):
|
18 |
-
if type(attr) == torch.Tensor:
|
19 |
-
if attr.device != torch.device("cuda"):
|
20 |
-
attr = attr.to(torch.device("cuda"))
|
21 |
-
setattr(self, name, attr)
|
22 |
-
|
23 |
-
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
|
24 |
-
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
|
25 |
-
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
|
26 |
-
alphas_cumprod = self.model.alphas_cumprod
|
27 |
-
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
|
28 |
-
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
|
29 |
-
|
30 |
-
self.register_buffer('betas', to_torch(self.model.betas))
|
31 |
-
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
|
32 |
-
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
|
33 |
-
|
34 |
-
# calculations for diffusion q(x_t | x_{t-1}) and others
|
35 |
-
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
|
36 |
-
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
|
37 |
-
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
|
38 |
-
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
|
39 |
-
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
|
40 |
-
|
41 |
-
# ddim sampling parameters
|
42 |
-
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
|
43 |
-
ddim_timesteps=self.ddim_timesteps,
|
44 |
-
eta=ddim_eta,verbose=verbose)
|
45 |
-
self.register_buffer('ddim_sigmas', ddim_sigmas)
|
46 |
-
self.register_buffer('ddim_alphas', ddim_alphas)
|
47 |
-
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
|
48 |
-
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
|
49 |
-
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
|
50 |
-
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
|
51 |
-
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
|
52 |
-
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
|
53 |
-
|
54 |
-
@torch.no_grad()
|
55 |
-
def sample(self,
|
56 |
-
S,
|
57 |
-
batch_size,
|
58 |
-
shape,
|
59 |
-
conditioning=None,
|
60 |
-
callback=None,
|
61 |
-
normals_sequence=None,
|
62 |
-
img_callback=None,
|
63 |
-
quantize_x0=False,
|
64 |
-
eta=0.,
|
65 |
-
mask=None,
|
66 |
-
x0=None,
|
67 |
-
temperature=1.,
|
68 |
-
noise_dropout=0.,
|
69 |
-
score_corrector=None,
|
70 |
-
corrector_kwargs=None,
|
71 |
-
verbose=True,
|
72 |
-
x_T=None,
|
73 |
-
log_every_t=100,
|
74 |
-
unconditional_guidance_scale=1.,
|
75 |
-
unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
|
76 |
-
dynamic_threshold=None,
|
77 |
-
ucg_schedule=None,
|
78 |
-
**kwargs
|
79 |
-
):
|
80 |
-
if conditioning is not None:
|
81 |
-
if isinstance(conditioning, dict):
|
82 |
-
ctmp = conditioning[list(conditioning.keys())[0]]
|
83 |
-
while isinstance(ctmp, list): ctmp = ctmp[0]
|
84 |
-
cbs = ctmp.shape[0]
|
85 |
-
if cbs != batch_size:
|
86 |
-
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
|
87 |
-
|
88 |
-
elif isinstance(conditioning, list):
|
89 |
-
for ctmp in conditioning:
|
90 |
-
if ctmp.shape[0] != batch_size:
|
91 |
-
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
|
92 |
-
|
93 |
-
else:
|
94 |
-
if conditioning.shape[0] != batch_size:
|
95 |
-
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
|
96 |
-
|
97 |
-
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
|
98 |
-
# sampling
|
99 |
-
C, H, W = shape
|
100 |
-
size = (batch_size, C, H, W)
|
101 |
-
print(f'Data shape for DDIM sampling is {size}, eta {eta}')
|
102 |
-
|
103 |
-
samples, intermediates = self.ddim_sampling(conditioning, size,
|
104 |
-
callback=callback,
|
105 |
-
img_callback=img_callback,
|
106 |
-
quantize_denoised=quantize_x0,
|
107 |
-
mask=mask, x0=x0,
|
108 |
-
ddim_use_original_steps=False,
|
109 |
-
noise_dropout=noise_dropout,
|
110 |
-
temperature=temperature,
|
111 |
-
score_corrector=score_corrector,
|
112 |
-
corrector_kwargs=corrector_kwargs,
|
113 |
-
x_T=x_T,
|
114 |
-
log_every_t=log_every_t,
|
115 |
-
unconditional_guidance_scale=unconditional_guidance_scale,
|
116 |
-
unconditional_conditioning=unconditional_conditioning,
|
117 |
-
dynamic_threshold=dynamic_threshold,
|
118 |
-
ucg_schedule=ucg_schedule
|
119 |
-
)
|
120 |
-
return samples, intermediates
|
121 |
-
|
-    @torch.no_grad()
-    def ddim_sampling(self, cond, shape,
-                      x_T=None, ddim_use_original_steps=False,
-                      callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, log_every_t=100,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
-                      ucg_schedule=None):
-        device = self.model.betas.device
-        b = shape[0]
-        if x_T is None:
-            img = torch.randn(shape, device=device)
-        else:
-            img = x_T
-
-        if timesteps is None:
-            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
-        elif timesteps is not None and not ddim_use_original_steps:
-            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
-            timesteps = self.ddim_timesteps[:subset_end]
-
-        intermediates = {'x_inter': [img], 'pred_x0': [img]}
-        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
-        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-        print(f"Running DDIM Sampling with {total_steps} timesteps")
-
-        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
-
-        for i, step in enumerate(iterator):
-            index = total_steps - i - 1
-            ts = torch.full((b,), step, device=device, dtype=torch.long)
-
-            if mask is not None:
-                assert x0 is not None
-                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                img = img_orig * mask + (1. - mask) * img
-
-            if ucg_schedule is not None:
-                assert len(ucg_schedule) == len(time_range)
-                unconditional_guidance_scale = ucg_schedule[i]
-
-            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                      quantize_denoised=quantize_denoised, temperature=temperature,
-                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                      corrector_kwargs=corrector_kwargs,
-                                      unconditional_guidance_scale=unconditional_guidance_scale,
-                                      unconditional_conditioning=unconditional_conditioning,
-                                      dynamic_threshold=dynamic_threshold)
-            img, pred_x0 = outs
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
-
-            if index % log_every_t == 0 or index == total_steps - 1:
-                intermediates['x_inter'].append(img)
-                intermediates['pred_x0'].append(pred_x0)
-
-        return img, intermediates
-
-    @torch.no_grad()
-    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None,
-                      dynamic_threshold=None):
-        b, *_, device = *x.shape, x.device
-
-        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
-            model_output = self.model.apply_model(x, t, c)
-        else:
-            x_in = torch.cat([x] * 2)
-            t_in = torch.cat([t] * 2)
-            if isinstance(c, dict):
-                assert isinstance(unconditional_conditioning, dict)
-                c_in = dict()
-                for k in c:
-                    if isinstance(c[k], list):
-                        c_in[k] = [torch.cat([
-                            unconditional_conditioning[k][i],
-                            c[k][i]]) for i in range(len(c[k]))]
-                    else:
-                        c_in[k] = torch.cat([
-                            unconditional_conditioning[k],
-                            c[k]])
-            elif isinstance(c, list):
-                c_in = list()
-                assert isinstance(unconditional_conditioning, list)
-                for i in range(len(c)):
-                    c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
-            else:
-                c_in = torch.cat([unconditional_conditioning, c])
-            model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
-            model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
-
-        if self.model.parameterization == "v":
-            e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
-        else:
-            e_t = model_output
-
-        if score_corrector is not None:
-            assert self.model.parameterization == "eps", 'not implemented'
-            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
-        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-        # select parameters corresponding to the currently considered timestep
-        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
-        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
-
-        # current prediction for x_0
-        if self.model.parameterization != "v":
-            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
-        else:
-            pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
-
-        if quantize_denoised:
-            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-
-        if dynamic_threshold is not None:
-            raise NotImplementedError()
-
-        # direction pointing to x_t
-        dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-        if noise_dropout > 0.:
-            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-        return x_prev, pred_x0
-
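For reference, `p_sample_ddim` above is the classifier-free-guided DDIM step. With guidance scale $w$ the noise estimate is

$$\tilde\epsilon_\theta = \epsilon_\theta(x_t, \varnothing) + w\,\big(\epsilon_\theta(x_t, c) - \epsilon_\theta(x_t, \varnothing)\big),$$

and the transition to the previous timestep combines the predicted clean sample, a direction term, and fresh noise:

$$x_{t-1} = \sqrt{\bar\alpha_{t-1}}\,\hat x_0 + \sqrt{1 - \bar\alpha_{t-1} - \sigma_t^2}\,\tilde\epsilon_\theta + \sigma_t\,\epsilon, \qquad \hat x_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\tilde\epsilon_\theta}{\sqrt{\bar\alpha_t}},$$

which is exactly `a_prev.sqrt() * pred_x0 + dir_xt + noise`, with $\sigma_t$ controlled by `eta` (deterministic DDIM at $\sigma_t = 0$).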
-    @torch.no_grad()
-    def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
-               unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
-        num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]
-
-        assert t_enc <= num_reference_steps
-        num_steps = t_enc
-
-        if use_original_steps:
-            alphas_next = self.alphas_cumprod[:num_steps]
-            alphas = self.alphas_cumprod_prev[:num_steps]
-        else:
-            alphas_next = self.ddim_alphas[:num_steps]
-            alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
-
-        x_next = x0
-        intermediates = []
-        inter_steps = []
-        for i in tqdm(range(num_steps), desc='Encoding Image'):
-            t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
-            if unconditional_guidance_scale == 1.:
-                noise_pred = self.model.apply_model(x_next, t, c)
-            else:
-                assert unconditional_conditioning is not None
-                e_t_uncond, noise_pred = torch.chunk(
-                    self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
-                                           torch.cat((unconditional_conditioning, c))), 2)
-                noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)
-
-            xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
-            weighted_noise_pred = alphas_next[i].sqrt() * (
-                    (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
-            x_next = xt_weighted + weighted_noise_pred
-            if return_intermediates and i % (
-                    num_steps // return_intermediates) == 0 and i < num_steps - 1:
-                intermediates.append(x_next)
-                inter_steps.append(i)
-            elif return_intermediates and i >= num_steps - 2:
-                intermediates.append(x_next)
-                inter_steps.append(i)
-            if callback: callback(i)
-
-        out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
-        if return_intermediates:
-            out.update({'intermediates': intermediates})
-        return x_next, out
-
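The `encode` loop above runs the deterministic DDIM update in reverse time (DDIM inversion): starting from a clean latent $x_0$, each step applies

$$x_{t+1} = \sqrt{\frac{\bar\alpha_{t+1}}{\bar\alpha_t}}\,x_t + \sqrt{\bar\alpha_{t+1}}\left(\sqrt{\tfrac{1}{\bar\alpha_{t+1}} - 1} - \sqrt{\tfrac{1}{\bar\alpha_t} - 1}\right)\epsilon_\theta(x_t, c),$$

so that `decode` below can walk the chain back down and approximately reconstruct the input.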
-    @torch.no_grad()
-    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
-        # fast, but does not allow for exact reconstruction
-        # t serves as an index to gather the correct alphas
-        if use_original_steps:
-            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
-            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
-        else:
-            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
-            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
-
-        if noise is None:
-            noise = torch.randn_like(x0)
-        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
-                extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
-
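`stochastic_encode` instead jumps directly to noise level $t$ by sampling the closed-form forward marginal

$$q(x_t \mid x_0) = \mathcal N\big(\sqrt{\bar\alpha_t}\,x_0,\ (1-\bar\alpha_t)\,\mathbf I\big), \qquad x_t = \sqrt{\bar\alpha_t}\,x_0 + \sqrt{1-\bar\alpha_t}\,\epsilon,$$

which is fast but, as the comment notes, does not allow exact reconstruction.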
-    @torch.no_grad()
-    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
-               use_original_steps=False, callback=None):
-
-        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
-        timesteps = timesteps[:t_start]
-
-        time_range = np.flip(timesteps)
-        total_steps = timesteps.shape[0]
-        print(f"Running DDIM Sampling with {total_steps} timesteps")
-
-        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
-        x_dec = x_latent
-        for i, step in enumerate(iterator):
-            index = total_steps - i - 1
-            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
-            x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
-                                          unconditional_guidance_scale=unconditional_guidance_scale,
-                                          unconditional_conditioning=unconditional_conditioning)
-            if callback: callback(i)
-        return x_dec
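This hand-rolled sampler is what the move to the diffusers backend retires. As rough orientation, a minimal sketch of the equivalent diffusers call path follows; the model id, prompt, and parameter values are illustrative placeholders, not taken from this commit's app.py.

import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler

# Load the pipeline and swap in a DDIM scheduler to mirror the deleted DDIMSampler.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

image = pipe(
    "a photograph of an astronaut riding a horse",
    num_inference_steps=50,   # DDIM steps (formerly ddim_steps)
    guidance_scale=9.0,       # classifier-free guidance (formerly unconditional_guidance_scale)
    eta=0.0,                  # deterministic DDIM (formerly ddim_eta)
).images[0]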
ldm/models/diffusion/ddpm.py
DELETED
@@ -1,1796 +0,0 @@
-"""
-wild mixture of
-https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
-https://github.com/CompVis/taming-transformers
--- merci
-"""
-
-import torch
-import torch.nn as nn
-import numpy as np
-import pytorch_lightning as pl
-from torch.optim.lr_scheduler import LambdaLR
-from einops import rearrange, repeat
-from contextlib import contextmanager, nullcontext
-from functools import partial
-import itertools
-from tqdm import tqdm
-from torchvision.utils import make_grid
-from pytorch_lightning.utilities.distributed import rank_zero_only
-from omegaconf import ListConfig
-
-from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-__conditioning_keys__ = {'concat': 'c_concat',
-                         'crossattn': 'c_crossattn',
-                         'adm': 'y'}
-
-
-def disabled_train(self, mode=True):
-    """Overwrite model.train with this function to make sure train/eval mode
-    does not change anymore."""
-    return self
-
-
-def uniform_on_device(r1, r2, shape, device):
-    return (r1 - r2) * torch.rand(*shape, device=device) + r2
-
-
-class DDPM(pl.LightningModule):
-    # classic DDPM with Gaussian diffusion, in image space
-    def __init__(self,
-                 unet_config,
-                 timesteps=1000,
-                 beta_schedule="linear",
-                 loss_type="l2",
-                 ckpt_path=None,
-                 ignore_keys=[],
-                 load_only_unet=False,
-                 monitor="val/loss",
-                 use_ema=True,
-                 first_stage_key="image",
-                 image_size=256,
-                 channels=3,
-                 log_every_t=100,
-                 clip_denoised=True,
-                 linear_start=1e-4,
-                 linear_end=2e-2,
-                 cosine_s=8e-3,
-                 given_betas=None,
-                 original_elbo_weight=0.,
-                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
-                 l_simple_weight=1.,
-                 conditioning_key=None,
-                 parameterization="eps",  # all assuming fixed variance schedules
-                 scheduler_config=None,
-                 use_positional_encodings=False,
-                 learn_logvar=False,
-                 logvar_init=0.,
-                 make_it_fit=False,
-                 ucg_training=None,
-                 reset_ema=False,
-                 reset_num_ema_updates=False,
-                 ):
-        super().__init__()
-        assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
-        self.parameterization = parameterization
-        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
-        self.cond_stage_model = None
-        self.clip_denoised = clip_denoised
-        self.log_every_t = log_every_t
-        self.first_stage_key = first_stage_key
-        self.image_size = image_size  # try conv?
-        self.channels = channels
-        self.use_positional_encodings = use_positional_encodings
-        self.model = DiffusionWrapper(unet_config, conditioning_key)
-        count_params(self.model, verbose=True)
-        self.use_ema = use_ema
-        if self.use_ema:
-            self.model_ema = LitEma(self.model)
-            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
-
-        self.use_scheduler = scheduler_config is not None
-        if self.use_scheduler:
-            self.scheduler_config = scheduler_config
-
-        self.v_posterior = v_posterior
-        self.original_elbo_weight = original_elbo_weight
-        self.l_simple_weight = l_simple_weight
-
-        if monitor is not None:
-            self.monitor = monitor
-        self.make_it_fit = make_it_fit
-        if reset_ema: assert exists(ckpt_path)
-        if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
-            if reset_ema:
-                assert self.use_ema
-                print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
-                self.model_ema = LitEma(self.model)
-        if reset_num_ema_updates:
-            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
-            assert self.use_ema
-            self.model_ema.reset_num_updates()
-
-        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
-                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
-
-        self.loss_type = loss_type
-
-        self.learn_logvar = learn_logvar
-        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
-        if self.learn_logvar:
-            self.logvar = nn.Parameter(self.logvar, requires_grad=True)
-
-        self.ucg_training = ucg_training or dict()
-        if self.ucg_training:
-            self.ucg_prng = np.random.RandomState()
-
-    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
-                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-        if exists(given_betas):
-            betas = given_betas
-        else:
-            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
-                                       cosine_s=cosine_s)
-        alphas = 1. - betas
-        alphas_cumprod = np.cumprod(alphas, axis=0)
-        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
-        timesteps, = betas.shape
-        self.num_timesteps = int(timesteps)
-        self.linear_start = linear_start
-        self.linear_end = linear_end
-        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
-        to_torch = partial(torch.tensor, dtype=torch.float32)
-
-        self.register_buffer('betas', to_torch(betas))
-        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
-        # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
-        # calculations for posterior q(x_{t-1} | x_t, x_0)
-        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
-                1. - alphas_cumprod) + self.v_posterior * betas
-        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
-        self.register_buffer('posterior_variance', to_torch(posterior_variance))
-        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
-        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
-        self.register_buffer('posterior_mean_coef1', to_torch(
-            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
-        self.register_buffer('posterior_mean_coef2', to_torch(
-            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
-
-        if self.parameterization == "eps":
-            lvlb_weights = self.betas ** 2 / (
-                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
-        elif self.parameterization == "x0":
-            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
-        elif self.parameterization == "v":
-            lvlb_weights = torch.ones_like(self.betas ** 2 / (
-                    2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)))
-        else:
-            raise NotImplementedError("mu not supported")
-        # TODO how to choose this term
-        lvlb_weights[0] = lvlb_weights[1]
-        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
-        assert not torch.isnan(self.lvlb_weights).all()
-
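The buffers registered above implement the closed-form posterior of the forward chain,

$$q(x_{t-1} \mid x_t, x_0) = \mathcal N\big(\tilde\mu_t(x_t, x_0),\ \tilde\beta_t \mathbf I\big), \qquad \tilde\beta_t = \frac{1-\bar\alpha_{t-1}}{1-\bar\alpha_t}\,\beta_t,$$

$$\tilde\mu_t(x_t, x_0) = \frac{\sqrt{\bar\alpha_{t-1}}\,\beta_t}{1-\bar\alpha_t}\,x_0 + \frac{\sqrt{\alpha_t}\,(1-\bar\alpha_{t-1})}{1-\bar\alpha_t}\,x_t,$$

where the two mean coefficients are `posterior_mean_coef1` and `posterior_mean_coef2`, and `v_posterior` interpolates the variance between $\tilde\beta_t$ and $\beta_t$.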
-    @contextmanager
-    def ema_scope(self, context=None):
-        if self.use_ema:
-            self.model_ema.store(self.model.parameters())
-            self.model_ema.copy_to(self.model)
-            if context is not None:
-                print(f"{context}: Switched to EMA weights")
-        try:
-            yield None
-        finally:
-            if self.use_ema:
-                self.model_ema.restore(self.model.parameters())
-                if context is not None:
-                    print(f"{context}: Restored training weights")
-
-    @torch.no_grad()
-    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
-        sd = torch.load(path, map_location="cpu")
-        if "state_dict" in list(sd.keys()):
-            sd = sd["state_dict"]
-        keys = list(sd.keys())
-        for k in keys:
-            for ik in ignore_keys:
-                if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
-                    del sd[k]
-        if self.make_it_fit:
-            n_params = len([name for name, _ in
-                            itertools.chain(self.named_parameters(),
-                                            self.named_buffers())])
-            for name, param in tqdm(
-                    itertools.chain(self.named_parameters(),
-                                    self.named_buffers()),
-                    desc="Fitting old weights to new weights",
-                    total=n_params
-            ):
-                if not name in sd:
-                    continue
-                old_shape = sd[name].shape
-                new_shape = param.shape
-                assert len(old_shape) == len(new_shape)
-                if len(new_shape) > 2:
-                    # we only modify first two axes
-                    assert new_shape[2:] == old_shape[2:]
-                # assumes first axis corresponds to output dim
-                if not new_shape == old_shape:
-                    new_param = param.clone()
-                    old_param = sd[name]
-                    if len(new_shape) == 1:
-                        for i in range(new_param.shape[0]):
-                            new_param[i] = old_param[i % old_shape[0]]
-                    elif len(new_shape) >= 2:
-                        for i in range(new_param.shape[0]):
-                            for j in range(new_param.shape[1]):
-                                new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]]
-
-                        n_used_old = torch.ones(old_shape[1])
-                        for j in range(new_param.shape[1]):
-                            n_used_old[j % old_shape[1]] += 1
-                        n_used_new = torch.zeros(new_shape[1])
-                        for j in range(new_param.shape[1]):
-                            n_used_new[j] = n_used_old[j % old_shape[1]]
-
-                        n_used_new = n_used_new[None, :]
-                        while len(n_used_new.shape) < len(new_shape):
-                            n_used_new = n_used_new.unsqueeze(-1)
-                        new_param /= n_used_new
-
-                    sd[name] = new_param
-
-        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
-            sd, strict=False)
-        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
-            print(f"Missing Keys:\n {missing}")
-        if len(unexpected) > 0:
-            print(f"\nUnexpected Keys:\n {unexpected}")
-
-    def q_mean_variance(self, x_start, t):
-        """
-        Get the distribution q(x_t | x_0).
-        :param x_start: the [N x C x ...] tensor of noiseless inputs.
-        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
-        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
-        """
-        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
-        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
-        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
-        return mean, variance, log_variance
-
-    def predict_start_from_noise(self, x_t, t, noise):
-        return (
-                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
-                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
-        )
-
-    def predict_start_from_z_and_v(self, x_t, t, v):
-        # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-        # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-        return (
-                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t -
-                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v
-        )
-
-    def predict_eps_from_z_and_v(self, x_t, t, v):
-        return (
-                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v +
-                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t
-        )
-
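The `predict_*` helpers are the usual change of variables for v-prediction: with $x_t = \sqrt{\bar\alpha_t}\,x_0 + \sqrt{1-\bar\alpha_t}\,\epsilon$ and

$$v \equiv \sqrt{\bar\alpha_t}\,\epsilon - \sqrt{1-\bar\alpha_t}\,x_0,$$

one recovers $x_0 = \sqrt{\bar\alpha_t}\,x_t - \sqrt{1-\bar\alpha_t}\,v$ and $\epsilon = \sqrt{\bar\alpha_t}\,v + \sqrt{1-\bar\alpha_t}\,x_t$, which is exactly what the code computes element-wise via `extract_into_tensor`.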
-    def q_posterior(self, x_start, x_t, t):
-        posterior_mean = (
-                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
-                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
-        )
-        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
-        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
-        return posterior_mean, posterior_variance, posterior_log_variance_clipped
-
-    def p_mean_variance(self, x, t, clip_denoised: bool):
-        model_out = self.model(x, t)
-        if self.parameterization == "eps":
-            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
-        elif self.parameterization == "x0":
-            x_recon = model_out
-        if clip_denoised:
-            x_recon.clamp_(-1., 1.)
-
-        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
-        return model_mean, posterior_variance, posterior_log_variance
-
-    @torch.no_grad()
-    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
-        b, *_, device = *x.shape, x.device
-        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
-        noise = noise_like(x.shape, device, repeat_noise)
-        # no noise when t == 0
-        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
-    @torch.no_grad()
-    def p_sample_loop(self, shape, return_intermediates=False):
-        device = self.betas.device
-        b = shape[0]
-        img = torch.randn(shape, device=device)
-        intermediates = [img]
-        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
-            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
-                                clip_denoised=self.clip_denoised)
-            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
-                intermediates.append(img)
-        if return_intermediates:
-            return img, intermediates
-        return img
-
-    @torch.no_grad()
-    def sample(self, batch_size=16, return_intermediates=False):
-        image_size = self.image_size
-        channels = self.channels
-        return self.p_sample_loop((batch_size, channels, image_size, image_size),
-                                  return_intermediates=return_intermediates)
-
-    def q_sample(self, x_start, t, noise=None):
-        noise = default(noise, lambda: torch.randn_like(x_start))
-        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
-                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
-    def get_v(self, x, noise, t):
-        return (
-                extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise -
-                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x
-        )
-
-    def get_loss(self, pred, target, mean=True):
-        if self.loss_type == 'l1':
-            loss = (target - pred).abs()
-            if mean:
-                loss = loss.mean()
-        elif self.loss_type == 'l2':
-            if mean:
-                loss = torch.nn.functional.mse_loss(target, pred)
-            else:
-                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
-        else:
-            raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
-
-        return loss
-
-    def p_losses(self, x_start, t, noise=None):
-        noise = default(noise, lambda: torch.randn_like(x_start))
-        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-        model_out = self.model(x_noisy, t)
-
-        loss_dict = {}
-        if self.parameterization == "eps":
-            target = noise
-        elif self.parameterization == "x0":
-            target = x_start
-        elif self.parameterization == "v":
-            target = self.get_v(x_start, noise, t)
-        else:
-            raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
-
-        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
-
-        log_prefix = 'train' if self.training else 'val'
-
-        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
-        loss_simple = loss.mean() * self.l_simple_weight
-
-        loss_vlb = (self.lvlb_weights[t] * loss).mean()
-        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
-
-        loss = loss_simple + self.original_elbo_weight * loss_vlb
-
-        loss_dict.update({f'{log_prefix}/loss': loss})
-
-        return loss, loss_dict
-
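`p_losses` combines the simple denoising objective with the ELBO reweighting,

$$L = l_{\text{simple}} \cdot \mathbb E\big[\lVert y - \hat y_\theta \rVert^2\big] + \lambda_{\text{elbo}} \cdot \mathbb E\big[w_t\,\lVert y - \hat y_\theta \rVert^2\big],$$

where $y$ is the parameterization-dependent target (noise, $x_0$, or $v$), $w_t$ are the `lvlb_weights`, and $\lambda_{\text{elbo}}$ (`original_elbo_weight`) defaults to 0, so training usually reduces to the simple loss.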
-    def forward(self, x, *args, **kwargs):
-        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
-        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
-        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
-        return self.p_losses(x, t, *args, **kwargs)
-
-    def get_input(self, batch, k):
-        x = batch[k]
-        if len(x.shape) == 3:
-            x = x[..., None]
-        x = rearrange(x, 'b h w c -> b c h w')
-        x = x.to(memory_format=torch.contiguous_format).float()
-        return x
-
-    def shared_step(self, batch):
-        x = self.get_input(batch, self.first_stage_key)
-        loss, loss_dict = self(x)
-        return loss, loss_dict
-
-    def training_step(self, batch, batch_idx):
-        for k in self.ucg_training:
-            p = self.ucg_training[k]["p"]
-            val = self.ucg_training[k]["val"]
-            if val is None:
-                val = ""
-            for i in range(len(batch[k])):
-                if self.ucg_prng.choice(2, p=[1 - p, p]):
-                    batch[k][i] = val
-
-        loss, loss_dict = self.shared_step(batch)
-
-        self.log_dict(loss_dict, prog_bar=True,
-                      logger=True, on_step=True, on_epoch=True)
-
-        self.log("global_step", self.global_step,
-                 prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
-        if self.use_scheduler:
-            lr = self.optimizers().param_groups[0]['lr']
-            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
-
-        return loss
-
-    @torch.no_grad()
-    def validation_step(self, batch, batch_idx):
-        _, loss_dict_no_ema = self.shared_step(batch)
-        with self.ema_scope():
-            _, loss_dict_ema = self.shared_step(batch)
-            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
-        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
-
-    def on_train_batch_end(self, *args, **kwargs):
-        if self.use_ema:
-            self.model_ema(self.model)
-
-    def _get_rows_from_list(self, samples):
-        n_imgs_per_row = len(samples)
-        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
-        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
-        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
-        return denoise_grid
-
-    @torch.no_grad()
-    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
-        log = dict()
-        x = self.get_input(batch, self.first_stage_key)
-        N = min(x.shape[0], N)
-        n_row = min(x.shape[0], n_row)
-        x = x.to(self.device)[:N]
-        log["inputs"] = x
-
-        # get diffusion row
-        diffusion_row = list()
-        x_start = x[:n_row]
-
-        for t in range(self.num_timesteps):
-            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                t = t.to(self.device).long()
-                noise = torch.randn_like(x_start)
-                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-                diffusion_row.append(x_noisy)
-
-        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
-
-        if sample:
-            # get denoise row
-            with self.ema_scope("Plotting"):
-                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
-
-            log["samples"] = samples
-            log["denoise_row"] = self._get_rows_from_list(denoise_row)
-
-        if return_keys:
-            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
-                return log
-            else:
-                return {key: log[key] for key in return_keys}
-        return log
-
-    def configure_optimizers(self):
-        lr = self.learning_rate
-        params = list(self.model.parameters())
-        if self.learn_logvar:
-            params = params + [self.logvar]
-        opt = torch.optim.AdamW(params, lr=lr)
-        return opt
-
-
-class LatentDiffusion(DDPM):
-    """main class"""
-
-    def __init__(self,
-                 first_stage_config,
-                 cond_stage_config,
-                 num_timesteps_cond=None,
-                 cond_stage_key="image",
-                 cond_stage_trainable=False,
-                 concat_mode=True,
-                 cond_stage_forward=None,
-                 conditioning_key=None,
-                 scale_factor=1.0,
-                 scale_by_std=False,
-                 force_null_conditioning=False,
-                 *args, **kwargs):
-        self.force_null_conditioning = force_null_conditioning
-        self.num_timesteps_cond = default(num_timesteps_cond, 1)
-        self.scale_by_std = scale_by_std
-        assert self.num_timesteps_cond <= kwargs['timesteps']
-        # for backwards compatibility after implementation of DiffusionWrapper
-        if conditioning_key is None:
-            conditioning_key = 'concat' if concat_mode else 'crossattn'
-        if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:
-            conditioning_key = None
-        ckpt_path = kwargs.pop("ckpt_path", None)
-        reset_ema = kwargs.pop("reset_ema", False)
-        reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False)
-        ignore_keys = kwargs.pop("ignore_keys", [])
-        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
-        self.concat_mode = concat_mode
-        self.cond_stage_trainable = cond_stage_trainable
-        self.cond_stage_key = cond_stage_key
-        try:
-            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-        except:
-            self.num_downs = 0
-        if not scale_by_std:
-            self.scale_factor = scale_factor
-        else:
-            self.register_buffer('scale_factor', torch.tensor(scale_factor))
-        self.instantiate_first_stage(first_stage_config)
-        self.instantiate_cond_stage(cond_stage_config)
-        self.cond_stage_forward = cond_stage_forward
-        self.clip_denoised = False
-        self.bbox_tokenizer = None
-
-        self.restarted_from_ckpt = False
-        if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys)
-            self.restarted_from_ckpt = True
-            if reset_ema:
-                assert self.use_ema
-                print(
-                    f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
-                self.model_ema = LitEma(self.model)
-        if reset_num_ema_updates:
-            print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
-            assert self.use_ema
-            self.model_ema.reset_num_updates()
-
-    def make_cond_schedule(self, ):
-        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
-        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
-        self.cond_ids[:self.num_timesteps_cond] = ids
-
-    @rank_zero_only
-    @torch.no_grad()
-    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
-        # only for very first batch
-        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
-            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
-            # set rescale weight to 1./std of encodings
-            print("### USING STD-RESCALING ###")
-            x = super().get_input(batch, self.first_stage_key)
-            x = x.to(self.device)
-            encoder_posterior = self.encode_first_stage(x)
-            z = self.get_first_stage_encoding(encoder_posterior).detach()
-            del self.scale_factor
-            self.register_buffer('scale_factor', 1. / z.flatten().std())
-            print(f"setting self.scale_factor to {self.scale_factor}")
-            print("### USING STD-RESCALING ###")
-
-    def register_schedule(self,
-                          given_betas=None, beta_schedule="linear", timesteps=1000,
-                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
-        self.shorten_cond_schedule = self.num_timesteps_cond > 1
-        if self.shorten_cond_schedule:
-            self.make_cond_schedule()
-
-    def instantiate_first_stage(self, config):
-        model = instantiate_from_config(config)
-        self.first_stage_model = model.eval()
-        self.first_stage_model.train = disabled_train
-        for param in self.first_stage_model.parameters():
-            param.requires_grad = False
-
-    def instantiate_cond_stage(self, config):
-        if not self.cond_stage_trainable:
-            if config == "__is_first_stage__":
-                print("Using first stage also as cond stage.")
-                self.cond_stage_model = self.first_stage_model
-            elif config == "__is_unconditional__":
-                print(f"Training {self.__class__.__name__} as an unconditional model.")
-                self.cond_stage_model = None
-                # self.be_unconditional = True
-            else:
-                model = instantiate_from_config(config)
-                self.cond_stage_model = model.eval()
-                self.cond_stage_model.train = disabled_train
-                for param in self.cond_stage_model.parameters():
-                    param.requires_grad = False
-        else:
-            assert config != '__is_first_stage__'
-            assert config != '__is_unconditional__'
-            model = instantiate_from_config(config)
-            self.cond_stage_model = model
-
-    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
-        denoise_row = []
-        for zd in tqdm(samples, desc=desc):
-            denoise_row.append(self.decode_first_stage(zd.to(self.device),
-                                                       force_not_quantize=force_no_decoder_quantization))
-        n_imgs_per_row = len(denoise_row)
-        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
-        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
-        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
-        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
-        return denoise_grid
-
-    def get_first_stage_encoding(self, encoder_posterior):
-        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
-            z = encoder_posterior.sample()
-        elif isinstance(encoder_posterior, torch.Tensor):
-            z = encoder_posterior
-        else:
-            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
-        return self.scale_factor * z
-
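`get_first_stage_encoding` applies the global latent scaling: with encoder $\mathcal E$, decoder $\mathcal D$, and scale $s$ (`scale_factor`),

$$z = s \cdot \mathcal E(x), \qquad \hat x = \mathcal D(z / s),$$

and `scale_by_std` instead sets $s = 1/\operatorname{std}(z)$ from the first training batch (see `on_train_batch_start` above).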
-    def get_learned_conditioning(self, c):
-        if self.cond_stage_forward is None:
-            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
-                c = self.cond_stage_model.encode(c)
-                if isinstance(c, DiagonalGaussianDistribution):
-                    c = c.mode()
-            else:
-                c = self.cond_stage_model(c)
-        else:
-            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
-            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
-        return c
-
-    def meshgrid(self, h, w):
-        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
-        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
-        arr = torch.cat([y, x], dim=-1)
-        return arr
-
-    def delta_border(self, h, w):
-        """
-        :param h: height
-        :param w: width
-        :return: normalized distance to image border,
-        with min distance = 0 at border and max dist = 0.5 at image center
-        """
-        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
-        arr = self.meshgrid(h, w) / lower_right_corner
-        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
-        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
-        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
-        return edge_dist
-
-    def get_weighting(self, h, w, Ly, Lx, device):
-        weighting = self.delta_border(h, w)
-        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
-                               self.split_input_params["clip_max_weight"])
-        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
-        if self.split_input_params["tie_braker"]:
-            L_weighting = self.delta_border(Ly, Lx)
-            L_weighting = torch.clip(L_weighting,
-                                     self.split_input_params["clip_min_tie_weight"],
-                                     self.split_input_params["clip_max_tie_weight"])
-
-            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
-            weighting = weighting * L_weighting
-        return weighting
-
-    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
-        """
-        :param x: img of size (bs, c, h, w)
-        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
-        """
-        bs, nc, h, w = x.shape
-
-        # number of crops in image
-        Ly = (h - kernel_size[0]) // stride[0] + 1
-        Lx = (w - kernel_size[1]) // stride[1] + 1
-
-        if uf == 1 and df == 1:
-            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
-            unfold = torch.nn.Unfold(**fold_params)
-
-            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
-
-            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
-            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
-            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
-
-        elif uf > 1 and df == 1:
-            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
-            unfold = torch.nn.Unfold(**fold_params)
-
-            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
-                                dilation=1, padding=0,
-                                stride=(stride[0] * uf, stride[1] * uf))
-            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
-
-            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
-            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
-            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
-
-        elif df > 1 and uf == 1:
-            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
-            unfold = torch.nn.Unfold(**fold_params)
-
-            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
-                                dilation=1, padding=0,
-                                stride=(stride[0] // df, stride[1] // df))
-            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
-
-            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
-            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
-            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
-
-        else:
-            raise NotImplementedError
-
-        return fold, unfold, normalization, weighting
-
-    @torch.no_grad()
-    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
-                  cond_key=None, return_original_cond=False, bs=None, return_x=False):
-        x = super().get_input(batch, k)
-        if bs is not None:
-            x = x[:bs]
-        x = x.to(self.device)
-        encoder_posterior = self.encode_first_stage(x)
-        z = self.get_first_stage_encoding(encoder_posterior).detach()
-
-        if self.model.conditioning_key is not None and not self.force_null_conditioning:
-            if cond_key is None:
-                cond_key = self.cond_stage_key
-            if cond_key != self.first_stage_key:
-                if cond_key in ['caption', 'coordinates_bbox', "txt"]:
-                    xc = batch[cond_key]
-                elif cond_key in ['class_label', 'cls']:
-                    xc = batch
-                else:
-                    xc = super().get_input(batch, cond_key).to(self.device)
-            else:
-                xc = x
-            if not self.cond_stage_trainable or force_c_encode:
-                if isinstance(xc, dict) or isinstance(xc, list):
-                    c = self.get_learned_conditioning(xc)
-                else:
-                    c = self.get_learned_conditioning(xc.to(self.device))
-            else:
-                c = xc
-            if bs is not None:
-                c = c[:bs]
-
-            if self.use_positional_encodings:
-                pos_x, pos_y = self.compute_latent_shifts(batch)
-                ckey = __conditioning_keys__[self.model.conditioning_key]
-                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
-        else:
-            c = None
-            xc = None
-            if self.use_positional_encodings:
-                pos_x, pos_y = self.compute_latent_shifts(batch)
-                c = {'pos_x': pos_x, 'pos_y': pos_y}
-        out = [z, c]
-        if return_first_stage_outputs:
-            xrec = self.decode_first_stage(z)
-            out.extend([x, xrec])
-        if return_x:
-            out.extend([x])
-        if return_original_cond:
-            out.append(xc)
-        return out
-
-    @torch.no_grad()
-    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
-        if predict_cids:
-            if z.dim() == 4:
-                z = torch.argmax(z.exp(), dim=1).long()
-            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
-            z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
-        z = 1. / self.scale_factor * z
-        return self.first_stage_model.decode(z)
-
-    @torch.no_grad()
-    def encode_first_stage(self, x):
-        return self.first_stage_model.encode(x)
-
-    def shared_step(self, batch, **kwargs):
-        x, c = self.get_input(batch, self.first_stage_key)
-        loss = self(x, c)
-        return loss
-
-    def forward(self, x, c, *args, **kwargs):
-        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
-        if self.model.conditioning_key is not None:
-            assert c is not None
-            if self.cond_stage_trainable:
-                c = self.get_learned_conditioning(c)
-            if self.shorten_cond_schedule:  # TODO: drop this option
-                tc = self.cond_ids[t].to(self.device)
-                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
-        return self.p_losses(x, c, t, *args, **kwargs)
-
-    def apply_model(self, x_noisy, t, cond, return_ids=False):
-        if isinstance(cond, dict):
-            # hybrid case, cond is expected to be a dict
-            pass
-        else:
-            if not isinstance(cond, list):
-                cond = [cond]
-            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
-            cond = {key: cond}
-
-        x_recon = self.model(x_noisy, t, **cond)
-
-        if isinstance(x_recon, tuple) and not return_ids:
-            return x_recon[0]
-        else:
-            return x_recon
-
-    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
-        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
-               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
-    def _prior_bpd(self, x_start):
-        """
-        Get the prior KL term for the variational lower-bound, measured in
-        bits-per-dim.
-        This term can't be optimized, as it only depends on the encoder.
-        :param x_start: the [N x C x ...] tensor of inputs.
-        :return: a batch of [N] KL values (in bits), one per batch element.
-        """
-        batch_size = x_start.shape[0]
-        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
-        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
-        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
-        return mean_flat(kl_prior) / np.log(2.0)
-
-    def p_losses(self, x_start, cond, t, noise=None):
-        noise = default(noise, lambda: torch.randn_like(x_start))
-        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
-        model_output = self.apply_model(x_noisy, t, cond)
-
-        loss_dict = {}
-        prefix = 'train' if self.training else 'val'
-
-        if self.parameterization == "x0":
-            target = x_start
-        elif self.parameterization == "eps":
-            target = noise
-        elif self.parameterization == "v":
-            target = self.get_v(x_start, noise, t)
-        else:
-            raise NotImplementedError()
-
-        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
-        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
-        logvar_t = self.logvar[t].to(self.device)
-        loss = loss_simple / torch.exp(logvar_t) + logvar_t
-        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
-        if self.learn_logvar:
-            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
-            loss_dict.update({'logvar': self.logvar.data.mean()})
-
-        loss = self.l_simple_weight * loss.mean()
-
-        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
-        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
-        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
-        loss += (self.original_elbo_weight * loss_vlb)
-        loss_dict.update({f'{prefix}/loss': loss})
-
-        return loss, loss_dict
-
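`LatentDiffusion.p_losses` additionally applies the (optionally learned) per-timestep log-variance $\gamma_t$ = `logvar[t]`,

$$L_{\text{simple}}' = e^{-\gamma_t}\,L_{\text{simple}} + \gamma_t,$$

which with the fixed default $\gamma_t = 0$ (`logvar_init`) collapses back to the plain simple loss.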
921 |
-
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
|
922 |
-
return_x0=False, score_corrector=None, corrector_kwargs=None):
|
923 |
-
t_in = t
|
924 |
-
model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
|
925 |
-
|
926 |
-
if score_corrector is not None:
|
927 |
-
assert self.parameterization == "eps"
|
928 |
-
model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
|
929 |
-
|
930 |
-
if return_codebook_ids:
|
931 |
-
model_out, logits = model_out
|
932 |
-
|
933 |
-
if self.parameterization == "eps":
|
934 |
-
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
|
935 |
-
elif self.parameterization == "x0":
|
936 |
-
x_recon = model_out
|
937 |
-
else:
|
938 |
-
raise NotImplementedError()
|
939 |
-
|
940 |
-
if clip_denoised:
|
941 |
-
x_recon.clamp_(-1., 1.)
|
942 |
-
if quantize_denoised:
|
943 |
-
x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
|
944 |
-
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
|
945 |
-
if return_codebook_ids:
|
946 |
-
return model_mean, posterior_variance, posterior_log_variance, logits
|
947 |
-
elif return_x0:
|
948 |
-
return model_mean, posterior_variance, posterior_log_variance, x_recon
|
949 |
-
else:
|
950 |
-
return model_mean, posterior_variance, posterior_log_variance
|
951 |
-
|
952 |
-
@torch.no_grad()
|
953 |
-
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
|
954 |
-
return_codebook_ids=False, quantize_denoised=False, return_x0=False,
|
955 |
-
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
|
956 |
-
b, *_, device = *x.shape, x.device
|
957 |
-
outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
|
958 |
-
return_codebook_ids=return_codebook_ids,
|
959 |
-
quantize_denoised=quantize_denoised,
|
960 |
-
return_x0=return_x0,
|
961 |
-
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
|
962 |
-
if return_codebook_ids:
|
963 |
-
raise DeprecationWarning("Support dropped.")
|
964 |
-
model_mean, _, model_log_variance, logits = outputs
|
965 |
-
elif return_x0:
|
966 |
-
model_mean, _, model_log_variance, x0 = outputs
|
967 |
-
else:
|
968 |
-
model_mean, _, model_log_variance = outputs
|
969 |
-
|
970 |
-
noise = noise_like(x.shape, device, repeat_noise) * temperature
|
971 |
-
if noise_dropout > 0.:
|
972 |
-
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
|
973 |
-
# no noise when t == 0
|
974 |
-
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
|
975 |
-
|
976 |
-
if return_codebook_ids:
|
977 |
-
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
|
978 |
-
if return_x0:
|
979 |
-
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
|
980 |
-
else:
|
981 |
-
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
|
982 |
-
|
983 |
-
@torch.no_grad()
|
984 |
-
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
|
985 |
-
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
|
986 |
-
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
|
987 |
-
log_every_t=None):
|
988 |
-
if not log_every_t:
|
989 |
-
log_every_t = self.log_every_t
|
990 |
-
timesteps = self.num_timesteps
|
991 |
-
if batch_size is not None:
|
992 |
-
b = batch_size if batch_size is not None else shape[0]
|
993 |
-
shape = [batch_size] + list(shape)
|
994 |
-
else:
|
995 |
-
b = batch_size = shape[0]
|
996 |
-
if x_T is None:
|
997 |
-
img = torch.randn(shape, device=self.device)
|
998 |
-
else:
|
999 |
-
img = x_T
|
1000 |
-
intermediates = []
|
1001 |
-
if cond is not None:
|
1002 |
-
if isinstance(cond, dict):
|
1003 |
-
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
|
1004 |
-
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
|
1005 |
-
else:
|
1006 |
-
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
|
1007 |
-
|
1008 |
-
if start_T is not None:
|
1009 |
-
timesteps = min(timesteps, start_T)
|
1010 |
-
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
|
1011 |
-
total=timesteps) if verbose else reversed(
|
1012 |
-
range(0, timesteps))
|
1013 |
-
if type(temperature) == float:
|
1014 |
-
temperature = [temperature] * timesteps
|
1015 |
-
|
1016 |
-
for i in iterator:
|
1017 |
-
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
|
1018 |
-
if self.shorten_cond_schedule:
|
1019 |
-
assert self.model.conditioning_key != 'hybrid'
|
1020 |
-
tc = self.cond_ids[ts].to(cond.device)
|
1021 |
-
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
|
1022 |
-
|
1023 |
-
img, x0_partial = self.p_sample(img, cond, ts,
|
1024 |
-
clip_denoised=self.clip_denoised,
|
1025 |
-
quantize_denoised=quantize_denoised, return_x0=True,
|
1026 |
-
temperature=temperature[i], noise_dropout=noise_dropout,
|
1027 |
-
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
|
1028 |
-
if mask is not None:
|
1029 |
-
assert x0 is not None
|
1030 |
-
img_orig = self.q_sample(x0, ts)
|
1031 |
-
img = img_orig * mask + (1. - mask) * img
|
1032 |
-
|
1033 |
-
if i % log_every_t == 0 or i == timesteps - 1:
|
1034 |
-
intermediates.append(x0_partial)
|
1035 |
-
if callback: callback(i)
|
1036 |
-
if img_callback: img_callback(img, i)
|
1037 |
-
return img, intermediates
|
1038 |
-
|
1039 |
-
-    @torch.no_grad()
-    def p_sample_loop(self, cond, shape, return_intermediates=False,
-                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, start_T=None,
-                      log_every_t=None):
-
-        if not log_every_t:
-            log_every_t = self.log_every_t
-        device = self.betas.device
-        b = shape[0]
-        if x_T is None:
-            img = torch.randn(shape, device=device)
-        else:
-            img = x_T
-
-        intermediates = [img]
-        if timesteps is None:
-            timesteps = self.num_timesteps
-
-        if start_T is not None:
-            timesteps = min(timesteps, start_T)
-        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
-            range(0, timesteps))
-
-        if mask is not None:
-            assert x0 is not None
-            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match
-
-        for i in iterator:
-            ts = torch.full((b,), i, device=device, dtype=torch.long)
-            if self.shorten_cond_schedule:
-                assert self.model.conditioning_key != 'hybrid'
-                tc = self.cond_ids[ts].to(cond.device)
-                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
-            img = self.p_sample(img, cond, ts,
-                                clip_denoised=self.clip_denoised,
-                                quantize_denoised=quantize_denoised)
-            if mask is not None:
-                img_orig = self.q_sample(x0, ts)
-                img = img_orig * mask + (1. - mask) * img
-
-            if i % log_every_t == 0 or i == timesteps - 1:
-                intermediates.append(img)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
-
-        if return_intermediates:
-            return img, intermediates
-        return img
-
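A minimal sketch of the mask blend that `p_sample_loop` and `progressive_denoising` apply above, assuming 4-D latents; `q_sample_like` is an illustrative stand-in for the model's `q_sample`, not a function from this file:

    import torch

    def blend_known_region(img, x0, mask, ts, q_sample_like):
        # mask == 1 keeps the (re-noised) known content, mask == 0 keeps the sample
        img_orig = q_sample_like(x0, ts)          # x0 re-noised to timestep ts
        return img_orig * mask + (1. - mask) * img

    img = torch.randn(2, 4, 64, 64)               # current sample x_t
    x0 = torch.randn(2, 4, 64, 64)                # known clean latent
    mask = torch.ones(2, 1, 64, 64)               # 1 = region to keep
    ts = torch.full((2,), 500, dtype=torch.long)
    out = blend_known_region(img, x0, mask, ts, lambda x, t: x)  # identity q_sample for demo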
-    @torch.no_grad()
-    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
-               verbose=True, timesteps=None, quantize_denoised=False,
-               mask=None, x0=None, shape=None, **kwargs):
-        if shape is None:
-            shape = (batch_size, self.channels, self.image_size, self.image_size)
-        if cond is not None:
-            if isinstance(cond, dict):
-                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
-            else:
-                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-        return self.p_sample_loop(cond,
-                                  shape,
-                                  return_intermediates=return_intermediates, x_T=x_T,
-                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
-                                  mask=mask, x0=x0)
-
-    @torch.no_grad()
-    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
-        if ddim:
-            ddim_sampler = DDIMSampler(self)
-            shape = (self.channels, self.image_size, self.image_size)
-            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
-                                                         shape, cond, verbose=False, **kwargs)
-
-        else:
-            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
-                                                 return_intermediates=True, **kwargs)
-
-        return samples, intermediates
-
-    @torch.no_grad()
-    def get_unconditional_conditioning(self, batch_size, null_label=None):
-        if null_label is not None:
-            xc = null_label
-            if isinstance(xc, ListConfig):
-                xc = list(xc)
-            if isinstance(xc, dict) or isinstance(xc, list):
-                c = self.get_learned_conditioning(xc)
-            else:
-                if hasattr(xc, "to"):
-                    xc = xc.to(self.device)
-                c = self.get_learned_conditioning(xc)
-        else:
-            if self.cond_stage_key in ["class_label", "cls"]:
-                xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
-                return self.get_learned_conditioning(xc)
-            else:
-                raise NotImplementedError("todo")
-        if isinstance(c, list):  # in case the encoder gives us a list
-            for i in range(len(c)):
-                c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
-        else:
-            c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
-        return c
-
-    @torch.no_grad()
-    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
-                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
-                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
-                   use_ema_scope=True,
-                   **kwargs):
-        ema_scope = self.ema_scope if use_ema_scope else nullcontext
-        use_ddim = ddim_steps is not None
-
-        log = dict()
-        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
-                                           return_first_stage_outputs=True,
-                                           force_c_encode=True,
-                                           return_original_cond=True,
-                                           bs=N)
-        N = min(x.shape[0], N)
-        n_row = min(x.shape[0], n_row)
-        log["inputs"] = x
-        log["reconstruction"] = xrec
-        if self.model.conditioning_key is not None:
-            if hasattr(self.cond_stage_model, "decode"):
-                xc = self.cond_stage_model.decode(c)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ["caption", "txt"]:
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ['class_label', "cls"]:
-                try:
-                    xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
-                    log['conditioning'] = xc
-                except KeyError:
-                    # probably no "human_label" in batch
-                    pass
-            elif isimage(xc):
-                log["conditioning"] = xc
-            if ismap(xc):
-                log["original_conditioning"] = self.to_rgb(xc)
-
-        if plot_diffusion_rows:
-            # get diffusion row
-            diffusion_row = list()
-            z_start = z[:n_row]
-            for t in range(self.num_timesteps):
-                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                    t = t.to(self.device).long()
-                    noise = torch.randn_like(z_start)
-                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
-                    diffusion_row.append(self.decode_first_stage(z_noisy))
-
-            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
-            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-            log["diffusion_row"] = diffusion_grid
-
-        if sample:
-            # get denoise row
-            with ema_scope("Sampling"):
-                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                         ddim_steps=ddim_steps, eta=ddim_eta)
-                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
-            x_samples = self.decode_first_stage(samples)
-            log["samples"] = x_samples
-            if plot_denoise_rows:
-                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                log["denoise_row"] = denoise_grid
-
-            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
-                    self.first_stage_model, IdentityFirstStage):
-                # also display when quantizing x0 while sampling
-                with ema_scope("Plotting Quantized Denoised"):
-                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                             ddim_steps=ddim_steps, eta=ddim_eta,
-                                                             quantize_denoised=True)
-                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
-                    #                                      quantize_denoised=True)
-                x_samples = self.decode_first_stage(samples.to(self.device))
-                log["samples_x0_quantized"] = x_samples
-
-        if unconditional_guidance_scale > 1.0:
-            uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)
-            if self.model.conditioning_key == "crossattn-adm":
-                uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]}
-            with ema_scope("Sampling with classifier-free guidance"):
-                samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                 ddim_steps=ddim_steps, eta=ddim_eta,
-                                                 unconditional_guidance_scale=unconditional_guidance_scale,
-                                                 unconditional_conditioning=uc,
-                                                 )
-                x_samples_cfg = self.decode_first_stage(samples_cfg)
-                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
-        if inpaint:
-            # make a simple center square
-            b, h, w = z.shape[0], z.shape[2], z.shape[3]
-            mask = torch.ones(N, h, w).to(self.device)
-            # zeros will be filled in
-            mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
-            mask = mask[:, None, ...]
-            with ema_scope("Plotting Inpaint"):
-                samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
-                                             ddim_steps=ddim_steps, x0=z[:N], mask=mask)
-            x_samples = self.decode_first_stage(samples.to(self.device))
-            log["samples_inpainting"] = x_samples
-            log["mask"] = mask
-
-            # outpaint
-            mask = 1. - mask
-            with ema_scope("Plotting Outpaint"):
-                samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
-                                             ddim_steps=ddim_steps, x0=z[:N], mask=mask)
-            x_samples = self.decode_first_stage(samples.to(self.device))
-            log["samples_outpainting"] = x_samples
-
-        if plot_progressive_rows:
-            with ema_scope("Plotting Progressives"):
-                img, progressives = self.progressive_denoising(c,
-                                                               shape=(self.channels, self.image_size, self.image_size),
-                                                               batch_size=N)
-            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
-            log["progressive_row"] = prog_row
-
-        if return_keys:
-            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
-                return log
-            else:
-                return {key: log[key] for key in return_keys}
-        return log
-
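The diffusion row above is built with the closed-form forward process x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * noise, where a_t is the cumulative alpha product. A self-contained sketch, assuming a standard linear beta schedule (the schedule here is an illustrative assumption, not read from the model config):

    import torch

    T = 1000
    betas = torch.linspace(1e-4, 2e-2, T)          # assumed linear schedule
    alphas_cumprod = torch.cumprod(1. - betas, dim=0)

    def q_sample(x_start, t, noise):
        a = alphas_cumprod[t].view(-1, 1, 1, 1)    # broadcast per-sample a_t
        return a.sqrt() * x_start + (1. - a).sqrt() * noise

    x0 = torch.randn(4, 4, 32, 32)
    t = torch.full((4,), 500, dtype=torch.long)
    x_t = q_sample(x0, t, torch.randn_like(x0))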
-    def configure_optimizers(self):
-        lr = self.learning_rate
-        params = list(self.model.parameters())
-        if self.cond_stage_trainable:
-            print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
-            params = params + list(self.cond_stage_model.parameters())
-        if self.learn_logvar:
-            print('Diffusion model optimizing logvar')
-            params.append(self.logvar)
-        opt = torch.optim.AdamW(params, lr=lr)
-        if self.use_scheduler:
-            assert 'target' in self.scheduler_config
-            scheduler = instantiate_from_config(self.scheduler_config)
-
-            print("Setting up LambdaLR scheduler...")
-            scheduler = [
-                {
-                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
-                    'interval': 'step',
-                    'frequency': 1
-                }]
-            return [opt], scheduler
-        return opt
-
-    @torch.no_grad()
-    def to_rgb(self, x):
-        x = x.float()
-        if not hasattr(self, "colorize"):
-            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
-        x = nn.functional.conv2d(x, weight=self.colorize)
-        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
-        return x
-
-
-class DiffusionWrapper(pl.LightningModule):
-    def __init__(self, diff_model_config, conditioning_key):
-        super().__init__()
-        self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False)
-        self.diffusion_model = instantiate_from_config(diff_model_config)
-        self.conditioning_key = conditioning_key
-        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
-
-    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None):
-        if self.conditioning_key is None:
-            out = self.diffusion_model(x, t)
-        elif self.conditioning_key == 'concat':
-            xc = torch.cat([x] + c_concat, dim=1)
-            out = self.diffusion_model(xc, t)
-        elif self.conditioning_key == 'crossattn':
-            if not self.sequential_cross_attn:
-                cc = torch.cat(c_crossattn, 1)
-            else:
-                cc = c_crossattn
-            out = self.diffusion_model(x, t, context=cc)
-        elif self.conditioning_key == 'hybrid':
-            xc = torch.cat([x] + c_concat, dim=1)
-            cc = torch.cat(c_crossattn, 1)
-            out = self.diffusion_model(xc, t, context=cc)
-        elif self.conditioning_key == 'hybrid-adm':
-            assert c_adm is not None
-            xc = torch.cat([x] + c_concat, dim=1)
-            cc = torch.cat(c_crossattn, 1)
-            out = self.diffusion_model(xc, t, context=cc, y=c_adm)
-        elif self.conditioning_key == 'crossattn-adm':
-            assert c_adm is not None
-            cc = torch.cat(c_crossattn, 1)
-            out = self.diffusion_model(x, t, context=cc, y=c_adm)
-        elif self.conditioning_key == 'adm':
-            cc = c_crossattn[0]
-            out = self.diffusion_model(x, t, y=cc)
-        else:
-            raise NotImplementedError()
-
-        return out
-
-
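A small sketch of the 'hybrid' branch of the dispatch above: concat conditioning widens the UNet input, cross-attention conditioning is passed as `context`. The dummy `unet` and the channel counts are illustrative assumptions, not part of the deleted file:

    import torch

    def hybrid_forward(unet, x, t, c_concat, c_crossattn):
        xc = torch.cat([x] + c_concat, dim=1)   # e.g. 4 latent + 5 concat channels
        cc = torch.cat(c_crossattn, dim=1)      # e.g. text encoder hidden states
        return unet(xc, t, context=cc)

    unet = lambda xc, t, context: xc[:, :4]     # dummy model, only for a shape demo
    out = hybrid_forward(unet, torch.randn(1, 4, 64, 64), torch.tensor([10]),
                         [torch.randn(1, 5, 64, 64)], [torch.randn(1, 77, 1024)])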
-class LatentUpscaleDiffusion(LatentDiffusion):
-    def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs):
-        super().__init__(*args, **kwargs)
-        # assumes that neither the cond_stage nor the low_scale_model contain trainable params
-        assert not self.cond_stage_trainable
-        self.instantiate_low_stage(low_scale_config)
-        self.low_scale_key = low_scale_key
-        self.noise_level_key = noise_level_key
-
-    def instantiate_low_stage(self, config):
-        model = instantiate_from_config(config)
-        self.low_scale_model = model.eval()
-        self.low_scale_model.train = disabled_train
-        for param in self.low_scale_model.parameters():
-            param.requires_grad = False
-
-    @torch.no_grad()
-    def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False):
-        if not log_mode:
-            z, c = super().get_input(batch, k, force_c_encode=True, bs=bs)
-        else:
-            z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
-                                                  force_c_encode=True, return_original_cond=True, bs=bs)
-        x_low = batch[self.low_scale_key][:bs]
-        x_low = rearrange(x_low, 'b h w c -> b c h w')
-        x_low = x_low.to(memory_format=torch.contiguous_format).float()
-        zx, noise_level = self.low_scale_model(x_low)
-        if self.noise_level_key is not None:
-            # get noise level from batch instead, e.g. when extracting a custom noise level for bsr
-            raise NotImplementedError('TODO')
-
-        all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level}
-        if log_mode:
-            # TODO: maybe disable if too expensive
-            x_low_rec = self.low_scale_model.decode(zx)
-            return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level
-        return z, all_conds
-
-    @torch.no_grad()
-    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
-                   plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True,
-                   unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True,
-                   **kwargs):
-        ema_scope = self.ema_scope if use_ema_scope else nullcontext
-        use_ddim = ddim_steps is not None
-
-        log = dict()
-        z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N,
-                                                                          log_mode=True)
-        N = min(x.shape[0], N)
-        n_row = min(x.shape[0], n_row)
-        log["inputs"] = x
-        log["reconstruction"] = xrec
-        log["x_lr"] = x_low
-        log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec
-        if self.model.conditioning_key is not None:
-            if hasattr(self.cond_stage_model, "decode"):
-                xc = self.cond_stage_model.decode(c)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ["caption", "txt"]:
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ['class_label', 'cls']:
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
-                log['conditioning'] = xc
-            elif isimage(xc):
-                log["conditioning"] = xc
-            if ismap(xc):
-                log["original_conditioning"] = self.to_rgb(xc)
-
-        if plot_diffusion_rows:
-            # get diffusion row
-            diffusion_row = list()
-            z_start = z[:n_row]
-            for t in range(self.num_timesteps):
-                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                    t = t.to(self.device).long()
-                    noise = torch.randn_like(z_start)
-                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
-                    diffusion_row.append(self.decode_first_stage(z_noisy))
-
-            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
-            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-            log["diffusion_row"] = diffusion_grid
-
-        if sample:
-            # get denoise row
-            with ema_scope("Sampling"):
-                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                         ddim_steps=ddim_steps, eta=ddim_eta)
-                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
-            x_samples = self.decode_first_stage(samples)
-            log["samples"] = x_samples
-            if plot_denoise_rows:
-                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                log["denoise_row"] = denoise_grid
-
-        if unconditional_guidance_scale > 1.0:
-            uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label)
-            # TODO explore better "unconditional" choices for the other keys
-            # maybe guide away from empty text label and highest noise level and maximally degraded zx?
-            uc = dict()
-            for k in c:
-                if k == "c_crossattn":
-                    assert isinstance(c[k], list) and len(c[k]) == 1
-                    uc[k] = [uc_tmp]
-                elif k == "c_adm":  # todo: only run with text-based guidance?
-                    assert isinstance(c[k], torch.Tensor)
-                    # uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level
-                    uc[k] = c[k]
-                elif isinstance(c[k], list):
-                    uc[k] = [c[k][i] for i in range(len(c[k]))]
-                else:
-                    uc[k] = c[k]
-
-            with ema_scope("Sampling with classifier-free guidance"):
-                samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
-                                                 ddim_steps=ddim_steps, eta=ddim_eta,
-                                                 unconditional_guidance_scale=unconditional_guidance_scale,
-                                                 unconditional_conditioning=uc,
-                                                 )
-                x_samples_cfg = self.decode_first_stage(samples_cfg)
-                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
-        if plot_progressive_rows:
-            with ema_scope("Plotting Progressives"):
-                img, progressives = self.progressive_denoising(c,
-                                                               shape=(self.channels, self.image_size, self.image_size),
-                                                               batch_size=N)
-            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
-            log["progressive_row"] = prog_row
-
-        return log
-
-
-class LatentFinetuneDiffusion(LatentDiffusion):
-    """
-    Basis for different finetunes, such as inpainting or depth2image
-    To disable finetuning mode, set finetune_keys to None
-    """
-
-    def __init__(self,
-                 concat_keys: tuple,
-                 finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
-                                "model_ema.diffusion_modelinput_blocks00weight"
-                                ),
-                 keep_finetune_dims=4,
-                 # if model was trained without concat mode before and we would like to keep these channels
-                 c_concat_log_start=None,  # to log reconstruction of c_concat codes
-                 c_concat_log_end=None,
-                 *args, **kwargs
-                 ):
-        ckpt_path = kwargs.pop("ckpt_path", None)
-        ignore_keys = kwargs.pop("ignore_keys", list())
-        super().__init__(*args, **kwargs)
-        self.finetune_keys = finetune_keys
-        self.concat_keys = concat_keys
-        self.keep_dims = keep_finetune_dims
-        self.c_concat_log_start = c_concat_log_start
-        self.c_concat_log_end = c_concat_log_end
-        if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
-        if exists(ckpt_path):
-            self.init_from_ckpt(ckpt_path, ignore_keys)
-
-    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
-        sd = torch.load(path, map_location="cpu")
-        if "state_dict" in list(sd.keys()):
-            sd = sd["state_dict"]
-        keys = list(sd.keys())
-        for k in keys:
-            for ik in ignore_keys:
-                if k.startswith(ik):
-                    print("Deleting key {} from state_dict.".format(k))
-                    del sd[k]
-
-            # make it explicit, finetune by including extra input channels
-            if exists(self.finetune_keys) and k in self.finetune_keys:
-                new_entry = None
-                for name, param in self.named_parameters():
-                    if name in self.finetune_keys:
-                        print(
-                            f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
-                        new_entry = torch.zeros_like(param)  # zero init
-                assert exists(new_entry), 'did not find matching parameter to modify'
-                new_entry[:, :self.keep_dims, ...] = sd[k]
-                sd[k] = new_entry
-
-        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
-            sd, strict=False)
-        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
-        if len(missing) > 0:
-            print(f"Missing Keys: {missing}")
-        if len(unexpected) > 0:
-            print(f"Unexpected Keys: {unexpected}")
-
-    @torch.no_grad()
-    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
-                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
-                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
-                   use_ema_scope=True,
-                   **kwargs):
-        ema_scope = self.ema_scope if use_ema_scope else nullcontext
-        use_ddim = ddim_steps is not None
-
-        log = dict()
-        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
-        c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
-        N = min(x.shape[0], N)
-        n_row = min(x.shape[0], n_row)
-        log["inputs"] = x
-        log["reconstruction"] = xrec
-        if self.model.conditioning_key is not None:
-            if hasattr(self.cond_stage_model, "decode"):
-                xc = self.cond_stage_model.decode(c)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ["caption", "txt"]:
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
-                log["conditioning"] = xc
-            elif self.cond_stage_key in ['class_label', 'cls']:
-                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
-                log['conditioning'] = xc
-            elif isimage(xc):
-                log["conditioning"] = xc
-            if ismap(xc):
-                log["original_conditioning"] = self.to_rgb(xc)
-
-        if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
-            log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
-
-        if plot_diffusion_rows:
-            # get diffusion row
-            diffusion_row = list()
-            z_start = z[:n_row]
-            for t in range(self.num_timesteps):
-                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
-                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
-                    t = t.to(self.device).long()
-                    noise = torch.randn_like(z_start)
-                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
-                    diffusion_row.append(self.decode_first_stage(z_noisy))
-
-            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
-            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
-            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
-            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
-            log["diffusion_row"] = diffusion_grid
-
-        if sample:
-            # get denoise row
-            with ema_scope("Sampling"):
-                samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
-                                                         batch_size=N, ddim=use_ddim,
-                                                         ddim_steps=ddim_steps, eta=ddim_eta)
-                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
-            x_samples = self.decode_first_stage(samples)
-            log["samples"] = x_samples
-            if plot_denoise_rows:
-                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
-                log["denoise_row"] = denoise_grid
-
-        if unconditional_guidance_scale > 1.0:
-            uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
-            uc_cat = c_cat
-            uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
-            with ema_scope("Sampling with classifier-free guidance"):
-                samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
-                                                 batch_size=N, ddim=use_ddim,
-                                                 ddim_steps=ddim_steps, eta=ddim_eta,
-                                                 unconditional_guidance_scale=unconditional_guidance_scale,
-                                                 unconditional_conditioning=uc_full,
-                                                 )
-                x_samples_cfg = self.decode_first_stage(samples_cfg)
-                log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
-
-        return log
-
-
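A sketch of the weight surgery in `init_from_ckpt` above: an input convolution trained on 4 latent channels is zero-padded so the finetuned model accepts extra concat channels while initially ignoring them. The shapes are illustrative assumptions, not read from a checkpoint:

    import torch

    old_w = torch.randn(320, 4, 3, 3)     # pretrained input conv, 4 latent channels
    new_w = torch.zeros(320, 9, 3, 3)     # 4 latent + 5 new concat channels, zero init
    new_w[:, :4, ...] = old_w             # keep the original keep_finetune_dims channels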
-class LatentInpaintDiffusion(LatentFinetuneDiffusion):
-    """
-    can either run as pure inpainting model (only concat mode) or with mixed conditionings,
-    e.g. mask as concat and text via cross-attn.
-    To disable finetuning mode, set finetune_keys to None
-    """
-
-    def __init__(self,
-                 concat_keys=("mask", "masked_image"),
-                 masked_image_key="masked_image",
-                 *args, **kwargs
-                 ):
-        super().__init__(concat_keys, *args, **kwargs)
-        self.masked_image_key = masked_image_key
-        assert self.masked_image_key in concat_keys
-
-    @torch.no_grad()
-    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
-        # note: restricted to non-trainable encoders currently
-        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
-        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
-                                              force_c_encode=True, return_original_cond=True, bs=bs)
-
-        assert exists(self.concat_keys)
-        c_cat = list()
-        for ck in self.concat_keys:
-            cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
-            if bs is not None:
-                cc = cc[:bs]
-                cc = cc.to(self.device)
-            bchw = z.shape
-            if ck != self.masked_image_key:
-                cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
-            else:
-                cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
-            c_cat.append(cc)
-        c_cat = torch.cat(c_cat, dim=1)
-        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
-        if return_first_stage_outputs:
-            return z, all_conds, x, xrec, xc
-        return z, all_conds
-
-    @torch.no_grad()
-    def log_images(self, *args, **kwargs):
-        log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
-        log["masked_image"] = rearrange(args[0]["masked_image"],
-                                        'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
-        return log
-
-
-class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion):
-    """
-    condition on monocular depth estimation
-    """
-
-    def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs):
-        super().__init__(concat_keys=concat_keys, *args, **kwargs)
-        self.depth_model = instantiate_from_config(depth_stage_config)
-        self.depth_stage_key = concat_keys[0]
-
-    @torch.no_grad()
-    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
-        # note: restricted to non-trainable encoders currently
-        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img'
-        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
-                                              force_c_encode=True, return_original_cond=True, bs=bs)
-
-        assert exists(self.concat_keys)
-        assert len(self.concat_keys) == 1
-        c_cat = list()
-        for ck in self.concat_keys:
-            cc = batch[ck]
-            if bs is not None:
-                cc = cc[:bs]
-                cc = cc.to(self.device)
-            cc = self.depth_model(cc)
-            cc = torch.nn.functional.interpolate(
-                cc,
-                size=z.shape[2:],
-                mode="bicubic",
-                align_corners=False,
-            )
-            # TODO: think about this. ideally rescale by some global values
-            depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
-                                                                                           keepdim=True)
-            cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1.
-            c_cat.append(cc)
-        c_cat = torch.cat(c_cat, dim=1)
-        all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
-        if return_first_stage_outputs:
-            return z, all_conds, x, xrec, xc
-        return z, all_conds
-
-    @torch.no_grad()
-    def log_images(self, *args, **kwargs):
-        log = super().log_images(*args, **kwargs)
-        depth = self.depth_model(args[0][self.depth_stage_key])
-        depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \
-            torch.amax(depth, dim=[1, 2, 3], keepdim=True)
-        log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1.
-        return log
-
-
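A sketch of the per-sample depth rescaling in `get_input` above, mapping arbitrary-scale depth predictions to [-1, 1] before concatenation; the input tensor is a made-up example and the 0.001 epsilon matches the code:

    import torch

    depth = torch.rand(2, 1, 64, 64) * 50.    # stand-in for MiDaS output
    d_min = torch.amin(depth, dim=[1, 2, 3], keepdim=True)
    d_max = torch.amax(depth, dim=[1, 2, 3], keepdim=True)
    depth_norm = 2. * (depth - d_min) / (d_max - d_min + 0.001) - 1.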
-class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion):
-    """
-    condition on low-res image (and optionally on some spatial noise augmentation)
-    """
-    def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None,
-                 low_scale_config=None, low_scale_key=None, *args, **kwargs):
-        super().__init__(concat_keys=concat_keys, *args, **kwargs)
-        self.reshuffle_patch_size = reshuffle_patch_size
-        self.low_scale_model = None
-        if low_scale_config is not None:
-            print("Initializing a low-scale model")
-            assert exists(low_scale_key)
-            self.instantiate_low_stage(low_scale_config)
-            self.low_scale_key = low_scale_key
-
-    def instantiate_low_stage(self, config):
-        model = instantiate_from_config(config)
-        self.low_scale_model = model.eval()
-        self.low_scale_model.train = disabled_train
-        for param in self.low_scale_model.parameters():
-            param.requires_grad = False
-
-    @torch.no_grad()
-    def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
-        # note: restricted to non-trainable encoders currently
-        assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft'
-        z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
-                                              force_c_encode=True, return_original_cond=True, bs=bs)
-
-        assert exists(self.concat_keys)
-        assert len(self.concat_keys) == 1
-        # optionally make spatial noise_level here
-        c_cat = list()
-        noise_level = None
-        for ck in self.concat_keys:
-            cc = batch[ck]
-            cc = rearrange(cc, 'b h w c -> b c h w')
-            if exists(self.reshuffle_patch_size):
-                assert isinstance(self.reshuffle_patch_size, int)
-                cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
-                               p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size)
-            if bs is not None:
-                cc = cc[:bs]
-                cc = cc.to(self.device)
-            if exists(self.low_scale_model) and ck == self.low_scale_key:
-                cc, noise_level = self.low_scale_model(cc)
-            c_cat.append(cc)
-        c_cat = torch.cat(c_cat, dim=1)
-        if exists(noise_level):
-            all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level}
-        else:
-            all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
-        if return_first_stage_outputs:
-            return z, all_conds, x, xrec, xc
-        return z, all_conds
-
-    @torch.no_grad()
-    def log_images(self, *args, **kwargs):
-        log = super().log_images(*args, **kwargs)
-        log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w')
-        return log
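A sketch of the optional patch reshuffle used by `get_input` above: einops folds p1 x p2 spatial patches into channels so the low-res conditioning matches the latent resolution. The tensor size and patch size are illustrative:

    import torch
    from einops import rearrange

    cc = torch.randn(1, 3, 256, 256)
    cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', p1=2, p2=2)
    print(cc.shape)  # torch.Size([1, 12, 128, 128])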
ldm/models/diffusion/dpm_solver/__init__.py
DELETED
@@ -1 +0,0 @@
-from .sampler import DPMSolverSampler

ldm/models/diffusion/dpm_solver/dpm_solver.py
DELETED
@@ -1,1154 +0,0 @@
-import torch
-import torch.nn.functional as F
-import math
-from tqdm import tqdm
-
-
-class NoiseScheduleVP:
-    def __init__(
-            self,
-            schedule='discrete',
-            betas=None,
-            alphas_cumprod=None,
-            continuous_beta_0=0.1,
-            continuous_beta_1=20.,
-    ):
-        """Create a wrapper class for the forward SDE (VP type).
-        ***
-        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
-                We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
-        ***
-        The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
-        We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
-        Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
-            log_alpha_t = self.marginal_log_mean_coeff(t)
-            sigma_t = self.marginal_std(t)
-            lambda_t = self.marginal_lambda(t)
-        Moreover, as lambda(t) is an invertible function, we also support its inverse function:
-            t = self.inverse_lambda(lambda_t)
-        ===============================================================
-        We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
-        1. For discrete-time DPMs:
-            For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
-                t_i = (i + 1) / N
-            e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
-            We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
-            Args:
-                betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
-                alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
-            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
-            **Important**: Please pay special attention to the args for `alphas_cumprod`:
-                The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
-                    q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
-                Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
-                    alpha_{t_n} = \sqrt{\hat{alpha_n}},
-                and
-                    log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
-        2. For continuous-time DPMs:
-            We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
-            schedule are the default settings in DDPM and improved-DDPM:
-            Args:
-                beta_min: A `float` number. The smallest beta for the linear schedule.
-                beta_max: A `float` number. The largest beta for the linear schedule.
-                cosine_s: A `float` number. The hyperparameter in the cosine schedule.
-                cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
-                T: A `float` number. The ending time of the forward process.
-        ===============================================================
-        Args:
-            schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
-                    'linear' or 'cosine' for continuous-time DPMs.
-        Returns:
-            A wrapper object of the forward SDE (VP type).
-
-        ===============================================================
-        Example:
-        # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
-        >>> ns = NoiseScheduleVP('discrete', betas=betas)
-        # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
-        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
-        # For continuous-time DPMs (VPSDE), linear schedule:
-        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
-        """
-
-        if schedule not in ['discrete', 'linear', 'cosine']:
-            raise ValueError(
-                "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(
-                    schedule))
-
-        self.schedule = schedule
-        if schedule == 'discrete':
-            if betas is not None:
-                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
-            else:
-                assert alphas_cumprod is not None
-                log_alphas = 0.5 * torch.log(alphas_cumprod)
-            self.total_N = len(log_alphas)
-            self.T = 1.
-            self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
-            self.log_alpha_array = log_alphas.reshape((1, -1,))
-        else:
-            self.total_N = 1000
-            self.beta_0 = continuous_beta_0
-            self.beta_1 = continuous_beta_1
-            self.cosine_s = 0.008
-            self.cosine_beta_max = 999.
-            self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (
-                    1. + self.cosine_s) / math.pi - self.cosine_s
-            self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
-            self.schedule = schedule
-            if schedule == 'cosine':
-                # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
-                # Note that T = 0.9946 may not be the optimal setting. However, we find it works well.
-                self.T = 0.9946
-            else:
-                self.T = 1.
-
-    def marginal_log_mean_coeff(self, t):
-        """
-        Compute log(alpha_t) of a given continuous-time label t in [0, T].
-        """
-        if self.schedule == 'discrete':
-            return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),
-                                  self.log_alpha_array.to(t.device)).reshape((-1))
-        elif self.schedule == 'linear':
-            return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
-        elif self.schedule == 'cosine':
-            log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
-            log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
-            return log_alpha_t
-
-    def marginal_alpha(self, t):
-        """
-        Compute alpha_t of a given continuous-time label t in [0, T].
-        """
-        return torch.exp(self.marginal_log_mean_coeff(t))
-
-    def marginal_std(self, t):
-        """
-        Compute sigma_t of a given continuous-time label t in [0, T].
-        """
-        return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
-
-    def marginal_lambda(self, t):
-        """
-        Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
-        """
-        log_mean_coeff = self.marginal_log_mean_coeff(t)
-        log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
-        return log_mean_coeff - log_std
-
-    def inverse_lambda(self, lamb):
-        """
-        Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
-        """
-        if self.schedule == 'linear':
-            tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
-            Delta = self.beta_0 ** 2 + tmp
-            return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
-        elif self.schedule == 'discrete':
-            log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
-            t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),
-                               torch.flip(self.t_array.to(lamb.device), [1]))
-            return t.reshape((-1,))
-        else:
-            log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
-            t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (
-                    1. + self.cosine_s) / math.pi - self.cosine_s
-            t = t_fn(log_alpha)
-            return t
-
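A small usage sketch for the schedule wrapper above, assuming the class (and its `interpolate_fn` helper from the same file) is importable, and assuming a standard DDPM linear beta schedule for illustration; lambda_t = log(alpha_t) - log(sigma_t) is the half-logSNR that DPM-Solver steps over:

    import torch

    betas = torch.linspace(1e-4, 2e-2, 1000)       # assumed linear schedule
    ns = NoiseScheduleVP('discrete', betas=betas)
    t = torch.tensor([0.5])
    alpha_t, sigma_t = ns.marginal_alpha(t), ns.marginal_std(t)
    lambda_t = ns.marginal_lambda(t)               # log(alpha_t) - log(sigma_t)
    t_back = ns.inverse_lambda(lambda_t)           # recovers t up to interpolation error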
def model_wrapper(
|
162 |
-
model,
|
163 |
-
noise_schedule,
|
164 |
-
model_type="noise",
|
165 |
-
model_kwargs={},
|
166 |
-
guidance_type="uncond",
|
167 |
-
condition=None,
|
168 |
-
unconditional_condition=None,
|
169 |
-
guidance_scale=1.,
|
170 |
-
classifier_fn=None,
|
171 |
-
classifier_kwargs={},
|
172 |
-
):
|
173 |
-
"""Create a wrapper function for the noise prediction model.
|
174 |
-
DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
|
175 |
-
firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
|
176 |
-
We support four types of the diffusion model by setting `model_type`:
|
177 |
-
1. "noise": noise prediction model. (Trained by predicting noise).
|
178 |
-
2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
|
179 |
-
3. "v": velocity prediction model. (Trained by predicting the velocity).
|
180 |
-
The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].
|
181 |
-
[1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
|
182 |
-
arXiv preprint arXiv:2202.00512 (2022).
|
183 |
-
[2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
|
184 |
-
arXiv preprint arXiv:2210.02303 (2022).
|
185 |
-
|
186 |
-
4. "score": marginal score function. (Trained by denoising score matching).
|
187 |
-
Note that the score function and the noise prediction model follows a simple relationship:
|
188 |
-
```
|
189 |
-
noise(x_t, t) = -sigma_t * score(x_t, t)
|
190 |
-
```
|
191 |
-
We support three types of guided sampling by DPMs by setting `guidance_type`:
|
192 |
-
1. "uncond": unconditional sampling by DPMs.
|
193 |
-
The input `model` has the following format:
|
194 |
-
``
|
195 |
-
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
|
196 |
-
``
|
197 |
-
2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
|
198 |
-
The input `model` has the following format:
|
199 |
-
``
|
200 |
-
model(x, t_input, **model_kwargs) -> noise | x_start | v | score
|
201 |
-
``
|
202 |
-
The input `classifier_fn` has the following format:
|
203 |
-
``
|
204 |
-
classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
|
205 |
-
``
|
206 |
-
[3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
|
207 |
-
in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
|
208 |
-
3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
|
209 |
-
The input `model` has the following format:
|
210 |
-
``
|
211 |
-
model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
|
212 |
-
``
|
213 |
-
And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
|
214 |
-
[4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
|
215 |
-
arXiv preprint arXiv:2207.12598 (2022).
|
216 |
-
|
217 |
-
The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
|
218 |
-
or continuous-time labels (i.e. epsilon to T).
|
219 |
-
We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
|
220 |
-
``
|
221 |
-
def model_fn(x, t_continuous) -> noise:
|
222 |
-
t_input = get_model_input_time(t_continuous)
|
223 |
-
return noise_pred(model, x, t_input, **model_kwargs)
|
224 |
-
``
|
225 |
-
where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
|
226 |
-
===============================================================
|
227 |
-
Args:
|
228 |
-
model: A diffusion model with the corresponding format described above.
|
229 |
-
noise_schedule: A noise schedule object, such as NoiseScheduleVP.
|
230 |
-
model_type: A `str`. The parameterization type of the diffusion model.
|
231 |
-
"noise" or "x_start" or "v" or "score".
|
232 |
-
model_kwargs: A `dict`. A dict for the other inputs of the model function.
|
233 |
-
guidance_type: A `str`. The type of the guidance for sampling.
|
234 |
-
"uncond" or "classifier" or "classifier-free".
|
235 |
-
condition: A pytorch tensor. The condition for the guided sampling.
|
236 |
-
Only used for "classifier" or "classifier-free" guidance type.
|
237 |
-
unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
|
238 |
-
Only used for "classifier-free" guidance type.
|
239 |
-
guidance_scale: A `float`. The scale for the guided sampling.
|
240 |
-
classifier_fn: A classifier function. Only used for the classifier guidance.
|
241 |
-
classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
|
242 |
-
Returns:
|
243 |
-
A noise prediction model that accepts the noised data and the continuous time as the inputs.
|
244 |
-
"""

    def get_model_input_time(t_continuous):
        """
        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
        For continuous-time DPMs, we just use `t_continuous`.
        """
        if noise_schedule.schedule == 'discrete':
            return (t_continuous - 1. / noise_schedule.total_N) * 1000.
        else:
            return t_continuous

    def noise_pred_fn(x, t_continuous, cond=None):
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        t_input = get_model_input_time(t_continuous)
        if cond is None:
            output = model(x, t_input, **model_kwargs)
        else:
            output = model(x, t_input, cond, **model_kwargs)
        if model_type == "noise":
            return output
        elif model_type == "x_start":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
        elif model_type == "v":
            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
        elif model_type == "score":
            sigma_t = noise_schedule.marginal_std(t_continuous)
            dims = x.dim()
            return -expand_dims(sigma_t, dims) * output

    def cond_grad_fn(x, t_input):
        """
        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
        """
        with torch.enable_grad():
            x_in = x.detach().requires_grad_(True)
            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
            return torch.autograd.grad(log_prob.sum(), x_in)[0]

    def model_fn(x, t_continuous):
        """
        The noise prediction model function that is used for DPM-Solver.
        """
        if t_continuous.reshape((-1,)).shape[0] == 1:
            t_continuous = t_continuous.expand((x.shape[0]))
        if guidance_type == "uncond":
            return noise_pred_fn(x, t_continuous)
        elif guidance_type == "classifier":
            assert classifier_fn is not None
            t_input = get_model_input_time(t_continuous)
            cond_grad = cond_grad_fn(x, t_input)
            sigma_t = noise_schedule.marginal_std(t_continuous)
            noise = noise_pred_fn(x, t_continuous)
            return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
        elif guidance_type == "classifier-free":
            if guidance_scale == 1. or unconditional_condition is None:
                return noise_pred_fn(x, t_continuous, cond=condition)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t_continuous] * 2)
                c_in = torch.cat([unconditional_condition, condition])
                noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
                return noise_uncond + guidance_scale * (noise - noise_uncond)

    # "score" is included here because `noise_pred_fn` above supports it as documented.
    assert model_type in ["noise", "x_start", "v", "score"]
    assert guidance_type in ["uncond", "classifier", "classifier-free"]
    return model_fn

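# A minimal usage sketch for `model_wrapper` (illustrative only; `unet`, `c`,
# `uc` and `alphas_cumprod` are hypothetical stand-ins for a conditional
# noise-prediction network, its conditioning tensors and its schedule):
#
#     ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
#     model_fn = model_wrapper(
#         lambda x, t, cond: unet(x, t, cond),
#         ns,
#         model_type="noise",
#         guidance_type="classifier-free",
#         condition=c,
#         unconditional_condition=uc,
#         guidance_scale=7.5,
#     )
#     # `model_fn(x, t_continuous)` now always returns the (guided) predicted noise.
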
class DPM_Solver:
    def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):
        """Construct a DPM-Solver.
        We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0").
        If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).
        If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).
            In such a case, we further support the "dynamic thresholding" in [1] when `thresholding` is True.
            The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.
        Args:
            model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):
                ``
                def model_fn(x, t_continuous):
                    return noise
                ``
            noise_schedule: A noise schedule object, such as NoiseScheduleVP.
            predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.
            thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1].
            max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.

        [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b.
        """
        self.model = model_fn
        self.noise_schedule = noise_schedule
        self.predict_x0 = predict_x0
        self.thresholding = thresholding
        self.max_val = max_val

    def noise_prediction_fn(self, x, t):
        """
        Return the noise prediction model.
        """
        return self.model(x, t)

    def data_prediction_fn(self, x, t):
        """
        Return the data prediction model (with thresholding).
        """
        noise = self.noise_prediction_fn(x, t)
        dims = x.dim()
        alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
        x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
        if self.thresholding:
            p = 0.995  # A hyperparameter in the paper of "Imagen" [1].
            s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
            s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
            x0 = torch.clamp(x0, -s, s) / s
        return x0

    def model_fn(self, x, t):
        """
        Convert the model to the noise prediction model or the data prediction model.
        """
        if self.predict_x0:
            return self.data_prediction_fn(x, t)
        else:
            return self.noise_prediction_fn(x, t)
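
    # Dynamic thresholding, sketched on a toy batch (illustrative only): for each
    # sample, take the 99.5th percentile s of |x0|; keep s at least max_val, then
    # clip x0 to [-s, s] and rescale by s, which pulls over-saturated guided
    # predictions back into range.
    #
    #     x0 = torch.randn(2, 3, 8, 8) * 3.0          # hypothetical over-saturated x0
    #     s = torch.quantile(x0.abs().reshape(2, -1), 0.995, dim=1)
    #     s = torch.maximum(s, torch.ones_like(s))    # max_val = 1.
    #     s = s[:, None, None, None]
    #     x0 = torch.clamp(x0, -s, s) / s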

    def get_time_steps(self, skip_type, t_T, t_0, N, device):
        """Compute the intermediate time steps for sampling.
        Args:
            skip_type: A `str`. The type for the spacing of the time steps. We support three types:
                - 'logSNR': uniform logSNR for the time steps.
                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            N: An `int`. The total number of the spacing of the time steps.
            device: A torch device.
        Returns:
            A pytorch tensor of the time steps, with the shape (N + 1,).
        """
        if skip_type == 'logSNR':
            lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
            lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
            logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
            return self.noise_schedule.inverse_lambda(logSNR_steps)
        elif skip_type == 'time_uniform':
            return torch.linspace(t_T, t_0, N + 1).to(device)
        elif skip_type == 'time_quadratic':
            t_order = 2
            t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device)
            return t
        else:
            raise ValueError(
                "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type))
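
    # For example (illustrative): with t_T = 1.0, t_0 = 1e-3 and N = 4,
    # 'time_uniform' gives steps roughly [1.0, 0.75, 0.50, 0.25, 0.001] (linear
    # in t), while 'time_quadratic' spaces them linearly in sqrt(t), so the
    # steps cluster near t_0 where the ODE changes fastest.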

    def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
        """
        Get the order of each step for sampling by the singlestep DPM-Solver.
        We combine DPM-Solver-1, 2 and 3 to use all the function evaluations, which is named "DPM-Solver-fast".
        Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:
            - If order == 1:
                We take `steps` of DPM-Solver-1 (i.e. DDIM).
            - If order == 2:
                - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.
                - If steps % 2 == 0, we use K steps of DPM-Solver-2.
                - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.
            - If order == 3:
                - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
                - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.
                - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.
                - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.
        ============================================
        Args:
            order: An `int`. The max order for the solver (2 or 3).
            steps: An `int`. The total number of function evaluations (NFE).
            skip_type: A `str`. The type for the spacing of the time steps. We support three types:
                - 'logSNR': uniform logSNR for the time steps.
                - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolution data**.)
                - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolution data.)
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            device: A torch device.
        Returns:
            orders: A list of the solver order for each step.
        """
        if order == 3:
            K = steps // 3 + 1
            if steps % 3 == 0:
                orders = [3, ] * (K - 2) + [2, 1]
            elif steps % 3 == 1:
                orders = [3, ] * (K - 1) + [1]
            else:
                orders = [3, ] * (K - 1) + [2]
        elif order == 2:
            if steps % 2 == 0:
                K = steps // 2
                orders = [2, ] * K
            else:
                K = steps // 2 + 1
                orders = [2, ] * (K - 1) + [1]
        elif order == 1:
            K = 1
            orders = [1, ] * steps
        else:
            raise ValueError("'order' must be '1' or '2' or '3'.")
        if skip_type == 'logSNR':
            # To reproduce the results in the DPM-Solver paper
            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
        else:
            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
                torch.cumsum(torch.tensor([0, ] + orders), dim=0).to(device)]  # `dim=0` is required by torch.cumsum
        return timesteps_outer, orders
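
    # Worked example (illustrative): steps = 20, order = 3 gives
    # K = 20 // 3 + 1 = 7 and steps % 3 == 2, so orders = [3, 3, 3, 3, 3, 3, 2]
    # -- six DPM-Solver-3 steps plus one DPM-Solver-2 step, 20 NFE in total.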

    def denoise_to_zero_fn(self, x, s):
        """
        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
        """
        return self.data_prediction_fn(x, s)

    def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
        """
        DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        if self.predict_x0:
            phi_1 = torch.expm1(-h)
            if model_s is None:
                model_s = self.model_fn(x, s)
            x_t = (
                    expand_dims(sigma_t / sigma_s, dims) * x
                    - expand_dims(alpha_t * phi_1, dims) * model_s
            )
            if return_intermediate:
                return x_t, {'model_s': model_s}
            else:
                return x_t
        else:
            phi_1 = torch.expm1(h)
            if model_s is None:
                model_s = self.model_fn(x, s)
            x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                    - expand_dims(sigma_t * phi_1, dims) * model_s
            )
            if return_intermediate:
                return x_t, {'model_s': model_s}
            else:
                return x_t

    def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,
                                            solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-2 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the second-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 0.5
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        s1 = ns.inverse_lambda(lambda_s1)
        log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(
            s1), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)
        alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)

        if self.predict_x0:
            phi_11 = torch.expm1(-r1 * h)
            phi_1 = torch.expm1(-h)

            if model_s is None:
                model_s = self.model_fn(x, s)
            x_s1 = (
                    expand_dims(sigma_s1 / sigma_s, dims) * x
                    - expand_dims(alpha_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (
                                model_s1 - model_s)
                )
        else:
            phi_11 = torch.expm1(r1 * h)
            phi_1 = torch.expm1(h)

            if model_s is None:
                model_s = self.model_fn(x, s)
            x_s1 = (
                    expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s1 * phi_11, dims) * model_s
            )
            model_s1 = self.model_fn(x_s1, s1)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)
                )
        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1}
        else:
            return x_t

    def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,
                                           return_intermediate=False, solver_type='dpm_solver'):
        """
        Singlestep solver DPM-Solver-3 from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            r1: A `float`. The hyperparameter of the third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
            model_s: A pytorch tensor. The model function evaluated at time `s`.
                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
            model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).
                If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        if r1 is None:
            r1 = 1. / 3.
        if r2 is None:
            r2 = 2. / 3.
        ns = self.noise_schedule
        dims = x.dim()
        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
        h = lambda_t - lambda_s
        lambda_s1 = lambda_s + r1 * h
        lambda_s2 = lambda_s + r2 * h
        s1 = ns.inverse_lambda(lambda_s1)
        s2 = ns.inverse_lambda(lambda_s2)
        log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(
            s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)
        sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(
            s2), ns.marginal_std(t)
        alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)

        if self.predict_x0:
            phi_11 = torch.expm1(-r1 * h)
            phi_12 = torch.expm1(-r2 * h)
            phi_1 = torch.expm1(-h)
            phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.
            phi_2 = phi_1 / h + 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                        expand_dims(sigma_s1 / sigma_s, dims) * x
                        - expand_dims(alpha_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                    expand_dims(sigma_s2 / sigma_s, dims) * x
                    - expand_dims(alpha_s2 * phi_12, dims) * model_s
                    + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                        expand_dims(sigma_t / sigma_s, dims) * x
                        - expand_dims(alpha_t * phi_1, dims) * model_s
                        + expand_dims(alpha_t * phi_2, dims) * D1
                        - expand_dims(alpha_t * phi_3, dims) * D2
                )
        else:
            phi_11 = torch.expm1(r1 * h)
            phi_12 = torch.expm1(r2 * h)
            phi_1 = torch.expm1(h)
            phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.
            phi_2 = phi_1 / h - 1.
            phi_3 = phi_2 / h - 0.5

            if model_s is None:
                model_s = self.model_fn(x, s)
            if model_s1 is None:
                x_s1 = (
                        expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x
                        - expand_dims(sigma_s1 * phi_11, dims) * model_s
                )
                model_s1 = self.model_fn(x_s1, s1)
            x_s2 = (
                    expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x
                    - expand_dims(sigma_s2 * phi_12, dims) * model_s
                    - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)
            )
            model_s2 = self.model_fn(x_s2, s2)
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)
                )
            elif solver_type == 'taylor':
                D1_0 = (1. / r1) * (model_s1 - model_s)
                D1_1 = (1. / r2) * (model_s2 - model_s)
                D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)
                D2 = 2. * (D1_1 - D1_0) / (r2 - r1)
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x
                        - expand_dims(sigma_t * phi_1, dims) * model_s
                        - expand_dims(sigma_t * phi_2, dims) * D1
                        - expand_dims(sigma_t * phi_3, dims) * D2
                )

        if return_intermediate:
            return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}
        else:
            return x_t

    def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"):
        """
        Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
            model_prev_list: A list of pytorch tensors. The previously computed model values.
            t_prev_list: A list of pytorch tensors. The previous times, each with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if solver_type not in ['dpm_solver', 'taylor']:
            raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type))
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_1, model_prev_0 = model_prev_list
        t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(
            t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0 = h_0 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        if self.predict_x0:
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(sigma_t / sigma_prev_0, dims) * x
                        - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                        - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(sigma_t / sigma_prev_0, dims) * x
                        - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                        + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0
                )
        else:
            if solver_type == 'dpm_solver':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                        - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                        - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0
                )
            elif solver_type == 'taylor':
                x_t = (
                        expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                        - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                        - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0
                )
        return x_t

    def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):
        """
        Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
            model_prev_list: A list of pytorch tensors. The previously computed model values.
            t_prev_list: A list of pytorch tensors. The previous times, each with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        ns = self.noise_schedule
        dims = x.dim()
        model_prev_2, model_prev_1, model_prev_0 = model_prev_list
        t_prev_2, t_prev_1, t_prev_0 = t_prev_list
        lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(
            t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)
        log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
        sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
        alpha_t = torch.exp(log_alpha_t)

        h_1 = lambda_prev_1 - lambda_prev_2
        h_0 = lambda_prev_0 - lambda_prev_1
        h = lambda_t - lambda_prev_0
        r0, r1 = h_0 / h, h_1 / h
        D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)
        D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)
        D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)
        D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)
        if self.predict_x0:
            x_t = (
                    expand_dims(sigma_t / sigma_prev_0, dims) * x
                    - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0
                    + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1
                    - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2
            )
        else:
            x_t = (
                    expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
                    - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0
                    - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1
                    - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2
            )
        return x_t

    def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None,
                                     r2=None):
        """
        Singlestep DPM-Solver with the order `order` from time `s` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `s`.
            s: A pytorch tensor. The starting time, with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
            r1: A `float`. The hyperparameter of the second-order or third-order solver.
            r2: A `float`. The hyperparameter of the third-order solver.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)
        elif order == 2:
            return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,
                                                            solver_type=solver_type, r1=r1)
        elif order == 3:
            return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,
                                                           solver_type=solver_type, r1=r1, r2=r2)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))

    def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):
        """
        Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.
        Args:
            x: A pytorch tensor. The initial value at time `t_prev_list[-1]`.
            model_prev_list: A list of pytorch tensors. The previously computed model values.
            t_prev_list: A list of pytorch tensors. The previous times, each with the shape (x.shape[0],).
            t: A pytorch tensor. The ending time, with the shape (x.shape[0],).
            order: An `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
        Returns:
            x_t: A pytorch tensor. The approximated solution at time `t`.
        """
        if order == 1:
            return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])
        elif order == 2:
            return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        elif order == 3:
            return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)
        else:
            raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order))

    def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,
                            solver_type='dpm_solver'):
        """
        The adaptive step size solver based on singlestep DPM-Solver.
        Args:
            x: A pytorch tensor. The initial value at time `t_T`.
            order: An `int`. The (higher) order of the solver. We only support order == 2 or 3.
            t_T: A `float`. The starting time of the sampling (default is T).
            t_0: A `float`. The ending time of the sampling (default is epsilon).
            h_init: A `float`. The initial step size (for logSNR).
            atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, following [1].
            rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.
            theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, following [1].
            t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the
                current time and `t_0` is less than `t_err`. The default setting is 1e-5.
            solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.
                The type slightly impacts the performance. We recommend the 'dpm_solver' type.
        Returns:
            x_0: A pytorch tensor. The approximated solution at time `t_0`.
        [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021.
        """
        ns = self.noise_schedule
        s = t_T * torch.ones((x.shape[0],)).to(x)
        lambda_s = ns.marginal_lambda(s)
        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
        h = h_init * torch.ones_like(s).to(x)
        x_prev = x
        nfe = 0
        if order == 2:
            r1 = 0.5
            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                               solver_type=solver_type,
                                                                                               **kwargs)
        elif order == 3:
            r1, r2 = 1. / 3., 2. / 3.
            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,
                                                                                    return_intermediate=True,
                                                                                    solver_type=solver_type)
            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,
                                                                                              solver_type=solver_type,
                                                                                              **kwargs)
        else:
            raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order))
        while torch.abs((s - t_0)).mean() > t_err:
            t = ns.inverse_lambda(lambda_s + h)
            x_lower, lower_noise_kwargs = lower_update(x, s, t)
            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
            E = norm_fn((x_higher - x_lower) / delta).max()
            if torch.all(E <= 1.):
                x = x_higher
                s = t
                x_prev = x_lower
                lambda_s = ns.marginal_lambda(s)
            h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)
            nfe += order
        print('adaptive solver nfe', nfe)
        return x

    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
               method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
               atol=0.0078, rtol=0.05,
               ):
        """
        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
        =====================================================
        We support the following algorithms for both the noise prediction model and the data prediction model:
        - 'singlestep':
            Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
            We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
            The total number of function evaluations (NFE) == `steps`.
            Given a fixed NFE == `steps`, the sampling procedure is:
                - If `order` == 1:
                    - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
                - If `order` == 2:
                    - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
                    - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
                    - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                - If `order` == 3:
                    - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
                    - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
                    - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
                    - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
        - 'multistep':
            Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
            We initialize the first `order` values by lower-order multistep solvers.
            Given a fixed NFE == `steps`, the sampling procedure is:
                Denote K = steps.
                - If `order` == 1:
                    - We use K steps of DPM-Solver-1 (i.e. DDIM).
                - If `order` == 2:
                    - We first use 1 step of DPM-Solver-1, then (K - 1) steps of multistep DPM-Solver-2.
                - If `order` == 3:
                    - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
        - 'singlestep_fixed':
            Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
            We use singlestep DPM-Solver-`order` for `order` = 1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
        - 'adaptive':
            Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
            We ignore `steps` and use the adaptive step size DPM-Solver with a higher order of `order`.
            You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
            (NFE) and the sample quality.
                - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
                - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
        =====================================================
        Some advice on choosing the algorithm:
            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
                Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
                e.g.
                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
                                skip_type='time_uniform', method='singlestep')
            - For **guided sampling with large guidance scale** by DPMs:
                Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
                e.g.
                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
                                skip_type='time_uniform', method='multistep')
        We support three types of `skip_type`:
            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolution images**.
            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolution images**.
            - 'time_quadratic': quadratic time for the time steps.
        =====================================================
        Args:
            x: A pytorch tensor. The initial value at time `t_start`,
                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
            steps: An `int`. The total number of function evaluations (NFE).
            t_start: A `float`. The starting time of the sampling.
                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
            t_end: A `float`. The ending time of the sampling.
                If `t_end` is None, we use 1. / self.noise_schedule.total_N,
                e.g. if total_N == 1000, we have `t_end` == 1e-3.
                For discrete-time DPMs:
                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
                For continuous-time DPMs:
                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
            order: An `int`. The order of DPM-Solver.
            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
                This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
                score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID when sampling
                from diffusion models by diffusion SDEs for low-resolution images (such as CIFAR-10).
                However, we observed that it does not matter for high-resolution images. As it needs
                an additional NFE, we do not recommend it for high-resolution images.
            lower_order_final: A `bool`. Whether to use lower-order solvers at the final steps.
                Only valid for `method=multistep` and `steps < 15`. We empirically find that
                this trick is key to stabilizing the sampling by DPM-Solver with very few steps
                (especially for steps <= 10), so we recommend setting it to `True`.
            solver_type: A `str`. The Taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
            atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
            rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
        Returns:
            x_end: A pytorch tensor. The approximated solution at time `t_end`.
        """
        t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
        t_T = self.noise_schedule.T if t_start is None else t_start
        device = x.device
        if method == 'adaptive':
            with torch.no_grad():
                x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,
                                             solver_type=solver_type)
        elif method == 'multistep':
            assert steps >= order
            timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
            assert timesteps.shape[0] - 1 == steps
            with torch.no_grad():
                vec_t = timesteps[0].expand((x.shape[0]))
                model_prev_list = [self.model_fn(x, vec_t)]
                t_prev_list = [vec_t]
                # Init the first `order` values by lower-order multistep DPM-Solver.
                for init_order in tqdm(range(1, order), desc="DPM init order"):
                    vec_t = timesteps[init_order].expand(x.shape[0])
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order,
                                                         solver_type=solver_type)
                    model_prev_list.append(self.model_fn(x, vec_t))
                    t_prev_list.append(vec_t)
                # Compute the remaining values by `order`-th order multistep DPM-Solver.
                for step in tqdm(range(order, steps + 1), desc="DPM multistep"):
                    vec_t = timesteps[step].expand(x.shape[0])
                    if lower_order_final and steps < 15:
                        step_order = min(order, steps + 1 - step)
                    else:
                        step_order = order
                    x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order,
                                                         solver_type=solver_type)
                    for i in range(order - 1):
                        t_prev_list[i] = t_prev_list[i + 1]
                        model_prev_list[i] = model_prev_list[i + 1]
                    t_prev_list[-1] = vec_t
                    # We do not need to evaluate the final model value.
                    if step < steps:
                        model_prev_list[-1] = self.model_fn(x, vec_t)
        elif method in ['singlestep', 'singlestep_fixed']:
            if method == 'singlestep':
                timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order,
                                                                                              skip_type=skip_type,
                                                                                              t_T=t_T, t_0=t_0,
                                                                                              device=device)
            elif method == 'singlestep_fixed':
                K = steps // order
                orders = [order, ] * K
                timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)
            for i, order in enumerate(orders):
                t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]
                timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(),
                                                      N=order, device=device)
                lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)
                vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])
                h = lambda_inner[-1] - lambda_inner[0]
                r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h
                r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h
                x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)
        if denoise_to_zero:
            x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
        return x
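
# A minimal end-to-end sketch of this module's intended pipeline (illustrative;
# `unet`, `alphas_cumprod`, `c` and `uc` are hypothetical stand-ins):
#
#     ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
#     model_fn = model_wrapper(lambda x, t, cond: unet(x, t, cond), ns,
#                              guidance_type="classifier-free",
#                              condition=c, unconditional_condition=uc,
#                              guidance_scale=7.5)
#     dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True)
#     x_T = torch.randn(4, 4, 64, 64)                  # start from Gaussian noise
#     x_0 = dpm_solver.sample(x_T, steps=20, order=2,
#                             skip_type='time_uniform', method='multistep')
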
#############################################################
# other utility functions
#############################################################

def interpolate_fn(x, xp, yp):
    """
    A piecewise linear function y = f(x), using xp and yp as keypoints.
    We implement f(x) in a differentiable way (i.e. applicable for autograd).
    The function f(x) is well-defined for the whole x-axis. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
    Args:
        x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
        yp: PyTorch tensor with shape [C, K].
    Returns:
        The function values f(x), with shape [N, C].
    """
    N, K = x.shape[0], xp.shape[1]
    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
    x_idx = torch.argmin(x_indices, dim=2)
    cand_start_idx = x_idx - 1
    start_idx = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(1, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
    start_idx2 = torch.where(
        torch.eq(x_idx, 0),
        torch.tensor(0, device=x.device),
        torch.where(
            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
        ),
    )
    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
    return cand

def expand_dims(v, dims):
    """
    Expand the tensor `v` to the dim `dims`.
    Args:
        `v`: a PyTorch tensor with shape [N].
        `dims`: an `int`.
    Returns:
        a PyTorch tensor with shape [N, 1, 1, ..., 1], where the total dimension is `dims`.
    """
    return v[(...,) + (None,) * (dims - 1)]
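
# Quick sanity checks for the utilities above (illustrative):
#
#     v = torch.tensor([1., 2.])
#     expand_dims(v, 4).shape                         # torch.Size([2, 1, 1, 1])
#
#     xp = torch.tensor([[0., 1., 2.]])               # keypoints, C = 1, K = 3
#     yp = torch.tensor([[0., 10., 20.]])
#     interpolate_fn(torch.tensor([[0.5]]), xp, yp)   # tensor([[5.]])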
ldm/models/diffusion/dpm_solver/sampler.py
DELETED
@@ -1,87 +0,0 @@
-"""SAMPLING ONLY."""
-import torch
-
-from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
-
-
-MODEL_TYPES = {
-    "eps": "noise",
-    "v": "v"
-}
-
-
-class DPMSolverSampler(object):
-    def __init__(self, model, **kwargs):
-        super().__init__()
-        self.model = model
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
-        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
-
-    def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
-                attr = attr.to(torch.device("cuda"))
-        setattr(self, name, attr)
-
-    @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-               **kwargs
-               ):
-        if conditioning is not None:
-            if isinstance(conditioning, dict):
-                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
-                if cbs != batch_size:
-                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-            else:
-                if conditioning.shape[0] != batch_size:
-                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-        # sampling
-        C, H, W = shape
-        size = (batch_size, C, H, W)
-
-        print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')
-
-        device = self.model.betas.device
-        if x_T is None:
-            img = torch.randn(size, device=device)
-        else:
-            img = x_T
-
-        ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
-
-        model_fn = model_wrapper(
-            lambda x, t, c: self.model.apply_model(x, t, c),
-            ns,
-            model_type=MODEL_TYPES[self.model.parameterization],
-            guidance_type="classifier-free",
-            condition=conditioning,
-            unconditional_condition=unconditional_conditioning,
-            guidance_scale=unconditional_guidance_scale,
-        )
-
-        dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
-        x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True)
-
-        return x.to(device), None
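
Note: this hand-rolled DPM-Solver sampler is superseded by the scheduler classes that ship with diffusers. A minimal sketch of the same order-2 multistep DPM-Solver sampling on the new backend (not part of this commit; assumes the `diffusers` package and a CUDA device):

```python
# Minimal sketch: order-2 multistep DPM-Solver via diffusers, standing in for
# the deleted DPMSolverSampler (method="multistep", order=2 above).
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", torch_dtype=torch.float16
).to("cuda")
# solver_order=2 is the scheduler default, matching order=2 in the old sampler.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("a photograph of an astronaut riding a horse",
             num_inference_steps=25).images[0]
```
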
ldm/models/diffusion/plms.py
DELETED
@@ -1,244 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-import numpy as np
-from tqdm import tqdm
-from functools import partial
-
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
-from ldm.models.diffusion.sampling_util import norm_thresholding
-
-
-class PLMSSampler(object):
-    def __init__(self, model, schedule="linear", **kwargs):
-        super().__init__()
-        self.model = model
-        self.ddpm_num_timesteps = model.num_timesteps
-        self.schedule = schedule
-
-    def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != torch.device("cuda"):
-                attr = attr.to(torch.device("cuda"))
-        setattr(self, name, attr)
-
-    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-        if ddim_eta != 0:
-            raise ValueError('ddim_eta must be 0 for PLMS')
-        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
-        alphas_cumprod = self.model.alphas_cumprod
-        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
-
-        self.register_buffer('betas', to_torch(self.model.betas))
-        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
-        # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
-        # ddim sampling parameters
-        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
-                                                                                   ddim_timesteps=self.ddim_timesteps,
-                                                                                   eta=ddim_eta,verbose=verbose)
-        self.register_buffer('ddim_sigmas', ddim_sigmas)
-        self.register_buffer('ddim_alphas', ddim_alphas)
-        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
-        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
-        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
-            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
-                        1 - self.alphas_cumprod / self.alphas_cumprod_prev))
-        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
-    @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
-               dynamic_threshold=None,
-               **kwargs
-               ):
-        if conditioning is not None:
-            if isinstance(conditioning, dict):
-                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
-                if cbs != batch_size:
-                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-            else:
-                if conditioning.shape[0] != batch_size:
-                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
-        # sampling
-        C, H, W = shape
-        size = (batch_size, C, H, W)
-        print(f'Data shape for PLMS sampling is {size}')
-
-        samples, intermediates = self.plms_sampling(conditioning, size,
-                                                    callback=callback,
-                                                    img_callback=img_callback,
-                                                    quantize_denoised=quantize_x0,
-                                                    mask=mask, x0=x0,
-                                                    ddim_use_original_steps=False,
-                                                    noise_dropout=noise_dropout,
-                                                    temperature=temperature,
-                                                    score_corrector=score_corrector,
-                                                    corrector_kwargs=corrector_kwargs,
-                                                    x_T=x_T,
-                                                    log_every_t=log_every_t,
-                                                    unconditional_guidance_scale=unconditional_guidance_scale,
-                                                    unconditional_conditioning=unconditional_conditioning,
-                                                    dynamic_threshold=dynamic_threshold,
-                                                    )
-        return samples, intermediates
-
-    @torch.no_grad()
-    def plms_sampling(self, cond, shape,
-                      x_T=None, ddim_use_original_steps=False,
-                      callback=None, timesteps=None, quantize_denoised=False,
-                      mask=None, x0=None, img_callback=None, log_every_t=100,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None,
-                      dynamic_threshold=None):
-        device = self.model.betas.device
-        b = shape[0]
-        if x_T is None:
-            img = torch.randn(shape, device=device)
-        else:
-            img = x_T
-
-        if timesteps is None:
-            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
-        elif timesteps is not None and not ddim_use_original_steps:
-            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
-            timesteps = self.ddim_timesteps[:subset_end]
-
-        intermediates = {'x_inter': [img], 'pred_x0': [img]}
-        time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
-        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-        print(f"Running PLMS Sampling with {total_steps} timesteps")
-
-        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
-        old_eps = []
-
-        for i, step in enumerate(iterator):
-            index = total_steps - i - 1
-            ts = torch.full((b,), step, device=device, dtype=torch.long)
-            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
-
-            if mask is not None:
-                assert x0 is not None
-                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                img = img_orig * mask + (1. - mask) * img
-
-            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                      quantize_denoised=quantize_denoised, temperature=temperature,
-                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                      corrector_kwargs=corrector_kwargs,
-                                      unconditional_guidance_scale=unconditional_guidance_scale,
-                                      unconditional_conditioning=unconditional_conditioning,
-                                      old_eps=old_eps, t_next=ts_next,
-                                      dynamic_threshold=dynamic_threshold)
-            img, pred_x0, e_t = outs
-            old_eps.append(e_t)
-            if len(old_eps) >= 4:
-                old_eps.pop(0)
-            if callback: callback(i)
-            if img_callback: img_callback(pred_x0, i)
-
-            if index % log_every_t == 0 or index == total_steps - 1:
-                intermediates['x_inter'].append(img)
-                intermediates['pred_x0'].append(pred_x0)
-
-        return img, intermediates
-
-    @torch.no_grad()
-    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
-                      dynamic_threshold=None):
-        b, *_, device = *x.shape, x.device
-
-        def get_model_output(x, t):
-            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
-                e_t = self.model.apply_model(x, t, c)
-            else:
-                x_in = torch.cat([x] * 2)
-                t_in = torch.cat([t] * 2)
-                c_in = torch.cat([unconditional_conditioning, c])
-                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
-                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
-            if score_corrector is not None:
-                assert self.model.parameterization == "eps"
-                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
-            return e_t
-
-        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
-        def get_x_prev_and_pred_x0(e_t, index):
-            # select parameters corresponding to the currently considered timestep
-            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
-            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
-            # current prediction for x_0
-            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
-            if quantize_denoised:
-                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-            if dynamic_threshold is not None:
-                pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
-            # direction pointing to x_t
-            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-            if noise_dropout > 0.:
-                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-            return x_prev, pred_x0
-
-        e_t = get_model_output(x, t)
-        if len(old_eps) == 0:
-            # Pseudo Improved Euler (2nd order)
-            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
-            e_t_next = get_model_output(x_prev, t_next)
-            e_t_prime = (e_t + e_t_next) / 2
-        elif len(old_eps) == 1:
-            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (3 * e_t - old_eps[-1]) / 2
-        elif len(old_eps) == 2:
-            # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
-        elif len(old_eps) >= 3:
-            # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
-        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
-        return x_prev, pred_x0, e_t
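
Note: PLMS is the pseudo linear multistep method from the PNDM paper, so on the diffusers backend it corresponds to `PNDMScheduler` with the Runge-Kutta warm-up steps skipped. A minimal sketch (not part of this commit; assumes the `diffusers` package):

```python
# Minimal sketch: the diffusers counterpart of the deleted PLMSSampler.
# PNDM with skip_prk_steps=True reduces to the pseudo linear multistep
# (Adams-Bashforth style) update implemented in p_sample_plms above.
from diffusers import StableDiffusionPipeline, PNDMScheduler

pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config,
                                           skip_prk_steps=True)
```
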
ldm/models/diffusion/sampling_util.py
DELETED
@@ -1,22 +0,0 @@
-import torch
-import numpy as np
-
-
-def append_dims(x, target_dims):
-    """Appends dimensions to the end of a tensor until it has target_dims dimensions.
-    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
-    dims_to_append = target_dims - x.ndim
-    if dims_to_append < 0:
-        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
-    return x[(...,) + (None,) * dims_to_append]
-
-
-def norm_thresholding(x0, value):
-    s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
-    return x0 * (value / s)
-
-
-def spatial_norm_thresholding(x0, value):
-    # b c h w
-    s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
-    return x0 * (value / s)
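
Note: for reference, `norm_thresholding` rescales each sample so its RMS norm never exceeds `value`. A self-contained sketch of the same computation (not part of this commit; assumes only torch):

```python
# Minimal sketch of what norm_thresholding(x0, value) computes, with value=1.0.
import torch

x0 = torch.randn(2, 4, 64, 64) * 3.0                # deliberately large latents
rms = x0.pow(2).flatten(1).mean(1).sqrt()           # per-sample RMS norm, shape (2,)
x0_thresh = x0 * (1.0 / rms.clamp(min=1.0))[:, None, None, None]
# Each sample's RMS is now min(rms, 1.0); samples already below 1.0 are untouched.
assert x0_thresh.pow(2).flatten(1).mean(1).sqrt().max() <= 1.0 + 1e-5
```
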
ldm/modules/attention.py
DELETED
@@ -1,331 +0,0 @@
-from inspect import isfunction
-import math
-import torch
-import torch.nn.functional as F
-from torch import nn, einsum
-from einops import rearrange, repeat
-from typing import Optional, Any
-
-from ldm.modules.diffusionmodules.util import checkpoint
-
-
-try:
-    import xformers
-    import xformers.ops
-    XFORMERS_IS_AVAILBLE = True
-except:
-    XFORMERS_IS_AVAILBLE = False
-
-
-def exists(val):
-    return val is not None
-
-
-def uniq(arr):
-    return{el: True for el in arr}.keys()
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-def max_neg_value(t):
-    return -torch.finfo(t.dtype).max
-
-
-def init_(tensor):
-    dim = tensor.shape[-1]
-    std = 1 / math.sqrt(dim)
-    tensor.uniform_(-std, std)
-    return tensor
-
-
-# feedforward
-class GEGLU(nn.Module):
-    def __init__(self, dim_in, dim_out):
-        super().__init__()
-        self.proj = nn.Linear(dim_in, dim_out * 2)
-
-    def forward(self, x):
-        x, gate = self.proj(x).chunk(2, dim=-1)
-        return x * F.gelu(gate)
-
-
-class FeedForward(nn.Module):
-    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
-        super().__init__()
-        inner_dim = int(dim * mult)
-        dim_out = default(dim_out, dim)
-        project_in = nn.Sequential(
-            nn.Linear(dim, inner_dim),
-            nn.GELU()
-        ) if not glu else GEGLU(dim, inner_dim)
-
-        self.net = nn.Sequential(
-            project_in,
-            nn.Dropout(dropout),
-            nn.Linear(inner_dim, dim_out)
-        )
-
-    def forward(self, x):
-        return self.net(x)
-
-
-def zero_module(module):
-    """
-    Zero out the parameters of a module and return it.
-    """
-    for p in module.parameters():
-        p.detach().zero_()
-    return module
-
-
-def Normalize(in_channels):
-    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
-
-class SpatialSelfAttention(nn.Module):
-    def __init__(self, in_channels):
-        super().__init__()
-        self.in_channels = in_channels
-
-        self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.k = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.v = torch.nn.Conv2d(in_channels,
-                                 in_channels,
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0)
-        self.proj_out = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=1,
-                                        stride=1,
-                                        padding=0)
-
-    def forward(self, x):
-        h_ = x
-        h_ = self.norm(h_)
-        q = self.q(h_)
-        k = self.k(h_)
-        v = self.v(h_)
-
-        # compute attention
-        b,c,h,w = q.shape
-        q = rearrange(q, 'b c h w -> b (h w) c')
-        k = rearrange(k, 'b c h w -> b c (h w)')
-        w_ = torch.einsum('bij,bjk->bik', q, k)
-
-        w_ = w_ * (int(c)**(-0.5))
-        w_ = torch.nn.functional.softmax(w_, dim=2)
-
-        # attend to values
-        v = rearrange(v, 'b c h w -> b c (h w)')
-        w_ = rearrange(w_, 'b i j -> b j i')
-        h_ = torch.einsum('bij,bjk->bik', v, w_)
-        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
-        h_ = self.proj_out(h_)
-
-        return x+h_
-
-
-class CrossAttention(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):
-        super().__init__()
-        inner_dim = dim_head * heads
-        context_dim = default(context_dim, query_dim)
-
-        self.scale = dim_head ** -0.5
-        self.heads = heads
-
-        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
-        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
-        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
-
-        self.to_out = nn.Sequential(
-            nn.Linear(inner_dim, query_dim),
-            nn.Dropout(dropout)
-        )
-
-    def forward(self, x, context=None, mask=None):
-        h = self.heads
-
-        q = self.to_q(x)
-        context = default(context, x)
-        k = self.to_k(context)
-        v = self.to_v(context)
-
-        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-
-        sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
-        del q, k
-
-        if exists(mask):
-            mask = rearrange(mask, 'b ... -> b (...)')
-            max_neg_value = -torch.finfo(sim.dtype).max
-            mask = repeat(mask, 'b j -> (b h) () j', h=h)
-            sim.masked_fill_(~mask, max_neg_value)
-
-        # attention, what we cannot get enough of
-        sim = sim.softmax(dim=-1)
-
-        out = einsum('b i j, b j d -> b i d', sim, v)
-        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
-        return self.to_out(out)
-
-
-class MemoryEfficientCrossAttention(nn.Module):
-    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):
-        super().__init__()
-        print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
-              f"{heads} heads.")
-        inner_dim = dim_head * heads
-        context_dim = default(context_dim, query_dim)
-
-        self.heads = heads
-        self.dim_head = dim_head
-
-        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
-        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
-        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
-
-        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
-        self.attention_op: Optional[Any] = None
-
-    def forward(self, x, context=None, mask=None):
-        q = self.to_q(x)
-        context = default(context, x)
-        k = self.to_k(context)
-        v = self.to_v(context)
-
-        b, _, _ = q.shape
-        q, k, v = map(
-            lambda t: t.unsqueeze(3)
-            .reshape(b, t.shape[1], self.heads, self.dim_head)
-            .permute(0, 2, 1, 3)
-            .reshape(b * self.heads, t.shape[1], self.dim_head)
-            .contiguous(),
-            (q, k, v),
-        )
-
-        # actually compute the attention, what we cannot get enough of
-        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
-
-        if exists(mask):
-            raise NotImplementedError
-        out = (
-            out.unsqueeze(0)
-            .reshape(b, self.heads, out.shape[1], self.dim_head)
-            .permute(0, 2, 1, 3)
-            .reshape(b, out.shape[1], self.heads * self.dim_head)
-        )
-        return self.to_out(out)
-
-
-class BasicTransformerBlock(nn.Module):
-    ATTENTION_MODES = {
-        "softmax": CrossAttention,  # vanilla attention
-        "softmax-xformers": MemoryEfficientCrossAttention
-    }
-    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
-                 disable_self_attn=False):
-        super().__init__()
-        attn_mode = "softmax-xformers" if XFORMERS_IS_AVAILBLE else "softmax"
-        assert attn_mode in self.ATTENTION_MODES
-        attn_cls = self.ATTENTION_MODES[attn_mode]
-        self.disable_self_attn = disable_self_attn
-        self.attn1 = attn_cls(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
-                              context_dim=context_dim if self.disable_self_attn else None)  # is a self-attention if not self.disable_self_attn
-        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
-        self.attn2 = attn_cls(query_dim=dim, context_dim=context_dim,
-                              heads=n_heads, dim_head=d_head, dropout=dropout)  # is self-attn if context is none
-        self.norm1 = nn.LayerNorm(dim)
-        self.norm2 = nn.LayerNorm(dim)
-        self.norm3 = nn.LayerNorm(dim)
-        self.checkpoint = checkpoint
-
-    def forward(self, x, context=None):
-        return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
-
-    def _forward(self, x, context=None):
-        x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
-        x = self.attn2(self.norm2(x), context=context) + x
-        x = self.ff(self.norm3(x)) + x
-        return x
-
-
-class SpatialTransformer(nn.Module):
-    """
-    Transformer block for image-like data.
-    First, project the input (aka embedding)
-    and reshape to b, t, d.
-    Then apply standard transformer action.
-    Finally, reshape to image
-    NEW: use_linear for more efficiency instead of the 1x1 convs
-    """
-    def __init__(self, in_channels, n_heads, d_head,
-                 depth=1, dropout=0., context_dim=None,
-                 disable_self_attn=False, use_linear=False,
-                 use_checkpoint=True):
-        super().__init__()
-        if exists(context_dim) and not isinstance(context_dim, list):
-            context_dim = [context_dim]
-        self.in_channels = in_channels
-        inner_dim = n_heads * d_head
-        self.norm = Normalize(in_channels)
-        if not use_linear:
-            self.proj_in = nn.Conv2d(in_channels,
-                                     inner_dim,
-                                     kernel_size=1,
-                                     stride=1,
-                                     padding=0)
-        else:
-            self.proj_in = nn.Linear(in_channels, inner_dim)
-
-        self.transformer_blocks = nn.ModuleList(
-            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
-                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)
-                for d in range(depth)]
-        )
-        if not use_linear:
-            self.proj_out = zero_module(nn.Conv2d(inner_dim,
-                                                  in_channels,
-                                                  kernel_size=1,
-                                                  stride=1,
-                                                  padding=0))
-        else:
-            self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
-        self.use_linear = use_linear
-
-    def forward(self, x, context=None):
-        # note: if no context is given, cross-attention defaults to self-attention
-        if not isinstance(context, list):
-            context = [context]
-        b, c, h, w = x.shape
-        x_in = x
-        x = self.norm(x)
-        if not self.use_linear:
-            x = self.proj_in(x)
-        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
-        if self.use_linear:
-            x = self.proj_in(x)
-        for i, block in enumerate(self.transformer_blocks):
-            x = block(x, context=context[i])
-        if self.use_linear:
-            x = self.proj_out(x)
-        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
-        if not self.use_linear:
-            x = self.proj_out(x)
-        return x + x_in
-
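
Note: the `CrossAttention` / `MemoryEfficientCrossAttention` switch above (including the xformers availability check) is built into the diffusers backend, which toggles the same memory-efficient path with one call. A minimal sketch (not part of this commit; assumes the `diffusers` package):

```python
# Minimal sketch: memory-efficient attention on the diffusers backend.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
# Same xformers kernel the deleted MemoryEfficientCrossAttention wrapped;
# requires the xformers package to be installed.
pipe.enable_xformers_memory_efficient_attention()
# Alternative that needs no extra dependency: chunk the attention computation.
pipe.enable_attention_slicing()
```
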
ldm/modules/diffusionmodules/__init__.py
DELETED
File without changes
ldm/modules/diffusionmodules/model.py
DELETED
@@ -1,852 +0,0 @@
|
|
1 |
-
# pytorch_diffusion + derived encoder decoder
|
2 |
-
import math
|
3 |
-
import torch
|
4 |
-
import torch.nn as nn
|
5 |
-
import numpy as np
|
6 |
-
from einops import rearrange
|
7 |
-
from typing import Optional, Any
|
8 |
-
|
9 |
-
from ldm.modules.attention import MemoryEfficientCrossAttention
|
10 |
-
|
11 |
-
try:
|
12 |
-
import xformers
|
13 |
-
import xformers.ops
|
14 |
-
XFORMERS_IS_AVAILBLE = True
|
15 |
-
except:
|
16 |
-
XFORMERS_IS_AVAILBLE = False
|
17 |
-
print("No module 'xformers'. Proceeding without it.")
|
18 |
-
|
19 |
-
|
20 |
-
def get_timestep_embedding(timesteps, embedding_dim):
|
21 |
-
"""
|
22 |
-
This matches the implementation in Denoising Diffusion Probabilistic Models:
|
23 |
-
From Fairseq.
|
24 |
-
Build sinusoidal embeddings.
|
25 |
-
This matches the implementation in tensor2tensor, but differs slightly
|
26 |
-
from the description in Section 3.5 of "Attention Is All You Need".
|
27 |
-
"""
|
28 |
-
assert len(timesteps.shape) == 1
|
29 |
-
|
30 |
-
half_dim = embedding_dim // 2
|
31 |
-
emb = math.log(10000) / (half_dim - 1)
|
32 |
-
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
|
33 |
-
emb = emb.to(device=timesteps.device)
|
34 |
-
emb = timesteps.float()[:, None] * emb[None, :]
|
35 |
-
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
|
36 |
-
if embedding_dim % 2 == 1: # zero pad
|
37 |
-
emb = torch.nn.functional.pad(emb, (0,1,0,0))
|
38 |
-
return emb
|
39 |
-
|
40 |
-
|
41 |
-
def nonlinearity(x):
|
42 |
-
# swish
|
43 |
-
return x*torch.sigmoid(x)
|
44 |
-
|
45 |
-
|
46 |
-
def Normalize(in_channels, num_groups=32):
|
47 |
-
return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
|
48 |
-
|
49 |
-
|
50 |
-
class Upsample(nn.Module):
|
51 |
-
def __init__(self, in_channels, with_conv):
|
52 |
-
super().__init__()
|
53 |
-
self.with_conv = with_conv
|
54 |
-
if self.with_conv:
|
55 |
-
self.conv = torch.nn.Conv2d(in_channels,
|
56 |
-
in_channels,
|
57 |
-
kernel_size=3,
|
58 |
-
stride=1,
|
59 |
-
padding=1)
|
60 |
-
|
61 |
-
def forward(self, x):
|
62 |
-
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
|
63 |
-
if self.with_conv:
|
64 |
-
x = self.conv(x)
|
65 |
-
return x
|
66 |
-
|
67 |
-
|
68 |
-
class Downsample(nn.Module):
|
69 |
-
def __init__(self, in_channels, with_conv):
|
70 |
-
super().__init__()
|
71 |
-
self.with_conv = with_conv
|
72 |
-
if self.with_conv:
|
73 |
-
# no asymmetric padding in torch conv, must do it ourselves
|
74 |
-
self.conv = torch.nn.Conv2d(in_channels,
|
75 |
-
in_channels,
|
76 |
-
kernel_size=3,
|
77 |
-
stride=2,
|
78 |
-
padding=0)
|
79 |
-
|
80 |
-
def forward(self, x):
|
81 |
-
if self.with_conv:
|
82 |
-
pad = (0,1,0,1)
|
83 |
-
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
|
84 |
-
x = self.conv(x)
|
85 |
-
else:
|
86 |
-
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
|
87 |
-
return x
|
88 |
-
|
89 |
-
|
90 |
-
class ResnetBlock(nn.Module):
|
91 |
-
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
|
92 |
-
dropout, temb_channels=512):
|
93 |
-
super().__init__()
|
94 |
-
self.in_channels = in_channels
|
95 |
-
out_channels = in_channels if out_channels is None else out_channels
|
96 |
-
self.out_channels = out_channels
|
97 |
-
self.use_conv_shortcut = conv_shortcut
|
98 |
-
|
99 |
-
self.norm1 = Normalize(in_channels)
|
100 |
-
self.conv1 = torch.nn.Conv2d(in_channels,
|
101 |
-
out_channels,
|
102 |
-
kernel_size=3,
|
103 |
-
stride=1,
|
104 |
-
padding=1)
|
105 |
-
if temb_channels > 0:
|
106 |
-
self.temb_proj = torch.nn.Linear(temb_channels,
|
107 |
-
out_channels)
|
108 |
-
self.norm2 = Normalize(out_channels)
|
109 |
-
self.dropout = torch.nn.Dropout(dropout)
|
110 |
-
self.conv2 = torch.nn.Conv2d(out_channels,
|
111 |
-
out_channels,
|
112 |
-
kernel_size=3,
|
113 |
-
stride=1,
|
114 |
-
padding=1)
|
115 |
-
if self.in_channels != self.out_channels:
|
116 |
-
if self.use_conv_shortcut:
|
117 |
-
self.conv_shortcut = torch.nn.Conv2d(in_channels,
|
118 |
-
out_channels,
|
119 |
-
kernel_size=3,
|
120 |
-
stride=1,
|
121 |
-
padding=1)
|
122 |
-
else:
|
123 |
-
self.nin_shortcut = torch.nn.Conv2d(in_channels,
|
124 |
-
out_channels,
|
125 |
-
kernel_size=1,
|
126 |
-
stride=1,
|
127 |
-
padding=0)
|
128 |
-
|
129 |
-
def forward(self, x, temb):
|
130 |
-
h = x
|
131 |
-
h = self.norm1(h)
|
132 |
-
h = nonlinearity(h)
|
133 |
-
h = self.conv1(h)
|
134 |
-
|
135 |
-
if temb is not None:
|
136 |
-
h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
|
137 |
-
|
138 |
-
h = self.norm2(h)
|
139 |
-
h = nonlinearity(h)
|
140 |
-
h = self.dropout(h)
|
141 |
-
h = self.conv2(h)
|
142 |
-
|
143 |
-
if self.in_channels != self.out_channels:
|
144 |
-
if self.use_conv_shortcut:
|
145 |
-
x = self.conv_shortcut(x)
|
146 |
-
else:
|
147 |
-
x = self.nin_shortcut(x)
|
148 |
-
|
149 |
-
return x+h
|
150 |
-
|
151 |
-
|
152 |
-
class AttnBlock(nn.Module):
|
153 |
-
def __init__(self, in_channels):
|
154 |
-
super().__init__()
|
155 |
-
self.in_channels = in_channels
|
156 |
-
|
157 |
-
self.norm = Normalize(in_channels)
|
158 |
-
self.q = torch.nn.Conv2d(in_channels,
|
159 |
-
in_channels,
|
160 |
-
kernel_size=1,
|
161 |
-
stride=1,
|
162 |
-
padding=0)
|
163 |
-
self.k = torch.nn.Conv2d(in_channels,
|
164 |
-
in_channels,
|
165 |
-
kernel_size=1,
|
166 |
-
stride=1,
|
167 |
-
padding=0)
|
168 |
-
self.v = torch.nn.Conv2d(in_channels,
|
169 |
-
in_channels,
|
170 |
-
kernel_size=1,
|
171 |
-
stride=1,
|
172 |
-
padding=0)
|
173 |
-
self.proj_out = torch.nn.Conv2d(in_channels,
|
174 |
-
in_channels,
|
175 |
-
kernel_size=1,
|
176 |
-
stride=1,
|
177 |
-
padding=0)
|
178 |
-
|
179 |
-
def forward(self, x):
|
180 |
-
h_ = x
|
181 |
-
h_ = self.norm(h_)
|
182 |
-
q = self.q(h_)
|
183 |
-
k = self.k(h_)
|
184 |
-
v = self.v(h_)
|
185 |
-
|
186 |
-
# compute attention
|
187 |
-
b,c,h,w = q.shape
|
188 |
-
q = q.reshape(b,c,h*w)
|
189 |
-
q = q.permute(0,2,1) # b,hw,c
|
190 |
-
k = k.reshape(b,c,h*w) # b,c,hw
|
191 |
-
w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
|
192 |
-
w_ = w_ * (int(c)**(-0.5))
|
193 |
-
w_ = torch.nn.functional.softmax(w_, dim=2)
|
194 |
-
|
195 |
-
# attend to values
|
196 |
-
v = v.reshape(b,c,h*w)
|
197 |
-
w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
|
198 |
-
h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
|
199 |
-
h_ = h_.reshape(b,c,h,w)
|
200 |
-
|
201 |
-
h_ = self.proj_out(h_)
|
202 |
-
|
203 |
-
return x+h_
|
204 |
-
|
205 |
-
class MemoryEfficientAttnBlock(nn.Module):
|
206 |
-
"""
|
207 |
-
Uses xformers efficient implementation,
|
208 |
-
see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
|
209 |
-
Note: this is a single-head self-attention operation
|
210 |
-
"""
|
211 |
-
#
|
212 |
-
def __init__(self, in_channels):
|
213 |
-
super().__init__()
|
214 |
-
self.in_channels = in_channels
|
215 |
-
|
216 |
-
self.norm = Normalize(in_channels)
|
217 |
-
self.q = torch.nn.Conv2d(in_channels,
|
218 |
-
in_channels,
|
219 |
-
kernel_size=1,
|
220 |
-
stride=1,
|
221 |
-
padding=0)
|
222 |
-
self.k = torch.nn.Conv2d(in_channels,
|
223 |
-
in_channels,
|
224 |
-
kernel_size=1,
|
225 |
-
stride=1,
|
226 |
-
padding=0)
|
227 |
-
self.v = torch.nn.Conv2d(in_channels,
|
228 |
-
in_channels,
|
229 |
-
kernel_size=1,
|
230 |
-
stride=1,
|
231 |
-
padding=0)
|
232 |
-
self.proj_out = torch.nn.Conv2d(in_channels,
|
233 |
-
in_channels,
|
234 |
-
kernel_size=1,
|
235 |
-
stride=1,
|
236 |
-
padding=0)
|
237 |
-
self.attention_op: Optional[Any] = None
|
238 |
-
|
239 |
-
def forward(self, x):
|
240 |
-
h_ = x
|
241 |
-
h_ = self.norm(h_)
|
242 |
-
q = self.q(h_)
|
243 |
-
k = self.k(h_)
|
244 |
-
v = self.v(h_)
|
245 |
-
|
246 |
-
# compute attention
|
247 |
-
B, C, H, W = q.shape
|
248 |
-
q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))
|
249 |
-
|
250 |
-
q, k, v = map(
|
251 |
-
lambda t: t.unsqueeze(3)
|
252 |
-
.reshape(B, t.shape[1], 1, C)
|
253 |
-
.permute(0, 2, 1, 3)
|
254 |
-
.reshape(B * 1, t.shape[1], C)
|
255 |
-
.contiguous(),
|
256 |
-
(q, k, v),
|
257 |
-
)
|
258 |
-
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
|
259 |
-
|
260 |
-
out = (
|
261 |
-
out.unsqueeze(0)
|
262 |
-
.reshape(B, 1, out.shape[1], C)
|
263 |
-
.permute(0, 2, 1, 3)
|
264 |
-
.reshape(B, out.shape[1], C)
|
265 |
-
)
|
266 |
-
out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
|
267 |
-
out = self.proj_out(out)
|
268 |
-
return x+out
|
269 |
-
|
270 |
-
|
271 |
-
class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
|
272 |
-
def forward(self, x, context=None, mask=None):
|
273 |
-
b, c, h, w = x.shape
|
274 |
-
x = rearrange(x, 'b c h w -> b (h w) c')
|
275 |
-
out = super().forward(x, context=context, mask=mask)
|
276 |
-
out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c)
|
277 |
-
return x + out
|
278 |
-
|
279 |
-
|
280 |
-
def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
|
281 |
-
assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
|
282 |
-
if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
|
283 |
-
attn_type = "vanilla-xformers"
|
284 |
-
print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
|
285 |
-
if attn_type == "vanilla":
|
286 |
-
assert attn_kwargs is None
|
287 |
-
return AttnBlock(in_channels)
|
288 |
-
elif attn_type == "vanilla-xformers":
|
289 |
-
print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
|
290 |
-
return MemoryEfficientAttnBlock(in_channels)
|
291 |
-
elif type == "memory-efficient-cross-attn":
|
292 |
-
attn_kwargs["query_dim"] = in_channels
|
293 |
-
return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
|
294 |
-
elif attn_type == "none":
|
295 |
-
return nn.Identity(in_channels)
|
296 |
-
else:
|
297 |
-
raise NotImplementedError()
|
298 |
-
|
299 |
-
|
300 |
-
class Model(nn.Module):
|
301 |
-
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
|
302 |
-
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
|
303 |
-
resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
|
304 |
-
super().__init__()
|
305 |
-
if use_linear_attn: attn_type = "linear"
|
306 |
-
self.ch = ch
|
307 |
-
self.temb_ch = self.ch*4
|
308 |
-
self.num_resolutions = len(ch_mult)
|
309 |
-
self.num_res_blocks = num_res_blocks
|
310 |
-
self.resolution = resolution
|
311 |
-
self.in_channels = in_channels
|
312 |
-
|
313 |
-
self.use_timestep = use_timestep
|
314 |
-
if self.use_timestep:
|
315 |
-
# timestep embedding
|
316 |
-
self.temb = nn.Module()
|
317 |
-
self.temb.dense = nn.ModuleList([
|
318 |
-
torch.nn.Linear(self.ch,
|
319 |
-
self.temb_ch),
|
320 |
-
torch.nn.Linear(self.temb_ch,
|
321 |
-
self.temb_ch),
|
322 |
-
])
|
323 |
-
|
324 |
-
# downsampling
|
325 |
-
self.conv_in = torch.nn.Conv2d(in_channels,
|
326 |
-
self.ch,
|
327 |
-
kernel_size=3,
|
328 |
-
stride=1,
|
329 |
-
padding=1)
|
330 |
-
|
331 |
-
curr_res = resolution
|
332 |
-
in_ch_mult = (1,)+tuple(ch_mult)
|
333 |
-
self.down = nn.ModuleList()
|
334 |
-
for i_level in range(self.num_resolutions):
|
335 |
-
block = nn.ModuleList()
|
336 |
-
attn = nn.ModuleList()
|
337 |
-
block_in = ch*in_ch_mult[i_level]
|
338 |
-
block_out = ch*ch_mult[i_level]
|
339 |
-
for i_block in range(self.num_res_blocks):
|
340 |
-
block.append(ResnetBlock(in_channels=block_in,
|
341 |
-
out_channels=block_out,
|
342 |
-
temb_channels=self.temb_ch,
|
343 |
-
dropout=dropout))
|
344 |
-
block_in = block_out
|
345 |
-
if curr_res in attn_resolutions:
|
346 |
-
attn.append(make_attn(block_in, attn_type=attn_type))
|
347 |
-
down = nn.Module()
|
348 |
-
down.block = block
|
349 |
-
down.attn = attn
|
350 |
-
if i_level != self.num_resolutions-1:
|
351 |
-
down.downsample = Downsample(block_in, resamp_with_conv)
|
352 |
-
curr_res = curr_res // 2
|
353 |
-
self.down.append(down)
|
354 |
-
|
355 |
-
# middle
|
356 |
-
self.mid = nn.Module()
|
357 |
-
self.mid.block_1 = ResnetBlock(in_channels=block_in,
|
358 |
-
out_channels=block_in,
|
359 |
-
temb_channels=self.temb_ch,
|
360 |
-
dropout=dropout)
|
361 |
-
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
|
362 |
-
self.mid.block_2 = ResnetBlock(in_channels=block_in,
|
363 |
-
out_channels=block_in,
|
364 |
-
temb_channels=self.temb_ch,
|
365 |
-
dropout=dropout)
|
366 |
-
|
367 |
-
# upsampling
|
368 |
-
self.up = nn.ModuleList()
|
369 |
-
for i_level in reversed(range(self.num_resolutions)):
|
370 |
-
block = nn.ModuleList()
|
371 |
-
attn = nn.ModuleList()
|
372 |
-
block_out = ch*ch_mult[i_level]
|
373 |
-
skip_in = ch*ch_mult[i_level]
|
374 |
-
for i_block in range(self.num_res_blocks+1):
|
375 |
-
if i_block == self.num_res_blocks:
|
376 |
-
skip_in = ch*in_ch_mult[i_level]
|
377 |
-
block.append(ResnetBlock(in_channels=block_in+skip_in,
|
378 |
-
out_channels=block_out,
|
379 |
-
temb_channels=self.temb_ch,
|
380 |
-
dropout=dropout))
|
381 |
-
block_in = block_out
|
382 |
-
if curr_res in attn_resolutions:
|
383 |
-
attn.append(make_attn(block_in, attn_type=attn_type))
|
384 |
-
up = nn.Module()
|
385 |
-
up.block = block
|
386 |
-
up.attn = attn
|
387 |
-
if i_level != 0:
|
388 |
-
up.upsample = Upsample(block_in, resamp_with_conv)
|
389 |
-
curr_res = curr_res * 2
|
390 |
-
self.up.insert(0, up) # prepend to get consistent order
|
391 |
-
|
392 |
-
# end
|
393 |
-
self.norm_out = Normalize(block_in)
|
394 |
-
self.conv_out = torch.nn.Conv2d(block_in,
|
395 |
-
out_ch,
|
396 |
-
kernel_size=3,
|
397 |
-
stride=1,
|
398 |
-
padding=1)
|
399 |
-
|
400 |
-
def forward(self, x, t=None, context=None):
|
401 |
-
#assert x.shape[2] == x.shape[3] == self.resolution
|
402 |
-
if context is not None:
|
403 |
-
# assume aligned context, cat along channel axis
|
404 |
-
x = torch.cat((x, context), dim=1)
|
405 |
-
if self.use_timestep:
|
406 |
-
# timestep embedding
|
407 |
-
assert t is not None
|
408 |
-
temb = get_timestep_embedding(t, self.ch)
|
409 |
-
temb = self.temb.dense[0](temb)
|
410 |
-
temb = nonlinearity(temb)
|
411 |
-
temb = self.temb.dense[1](temb)
|
412 |
-
else:
|
413 |
-
temb = None
|
414 |
-
|
415 |
-
# downsampling
|
416 |
-
hs = [self.conv_in(x)]
|
417 |
-
for i_level in range(self.num_resolutions):
|
418 |
-
for i_block in range(self.num_res_blocks):
|
419 |
-
h = self.down[i_level].block[i_block](hs[-1], temb)
|
420 |
-
if len(self.down[i_level].attn) > 0:
|
421 |
-
h = self.down[i_level].attn[i_block](h)
|
422 |
-
hs.append(h)
|
423 |
-
if i_level != self.num_resolutions-1:
|
424 |
-
hs.append(self.down[i_level].downsample(hs[-1]))
|
425 |
-
|
426 |
-
# middle
|
427 |
-
h = hs[-1]
|
428 |
-
h = self.mid.block_1(h, temb)
|
429 |
-
h = self.mid.attn_1(h)
|
430 |
-
h = self.mid.block_2(h, temb)
|
431 |
-
|
432 |
-
# upsampling
|
433 |
-
for i_level in reversed(range(self.num_resolutions)):
|
434 |
-
for i_block in range(self.num_res_blocks+1):
|
435 |
-
h = self.up[i_level].block[i_block](
|
436 |
-
torch.cat([h, hs.pop()], dim=1), temb)
|
437 |
-
if len(self.up[i_level].attn) > 0:
|
438 |
-
h = self.up[i_level].attn[i_block](h)
|
439 |
-
if i_level != 0:
|
440 |
-
h = self.up[i_level].upsample(h)
|
441 |
-
|
442 |
-
# end
|
443 |
-
h = self.norm_out(h)
|
444 |
-
h = nonlinearity(h)
|
445 |
-
h = self.conv_out(h)
|
446 |
-
return h
|
447 |
-
|
448 |
-
def get_last_layer(self):
|
449 |
-
return self.conv_out.weight
|
450 |
-
|
451 |
-
|
452 |
-
class Encoder(nn.Module):
|
453 |
-
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
|
454 |
-
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
|
455 |
-
resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
|
456 |
-
**ignore_kwargs):
|
457 |
-
super().__init__()
|
458 |
-
if use_linear_attn: attn_type = "linear"
|
459 |
-
self.ch = ch
|
460 |
-
self.temb_ch = 0
|
461 |
-
self.num_resolutions = len(ch_mult)
|
462 |
-
self.num_res_blocks = num_res_blocks
|
463 |
-
self.resolution = resolution
|
464 |
-
self.in_channels = in_channels
|
465 |
-
|
466 |
-
# downsampling
|
467 |
-
self.conv_in = torch.nn.Conv2d(in_channels,
|
468 |
-
self.ch,
|
469 |
-
kernel_size=3,
|
470 |
-
stride=1,
|
471 |
-
padding=1)
|
472 |
-
|
473 |
-
curr_res = resolution
|
474 |
-
in_ch_mult = (1,)+tuple(ch_mult)
|
475 |
-
self.in_ch_mult = in_ch_mult
|
476 |
-
self.down = nn.ModuleList()
|
477 |
-
for i_level in range(self.num_resolutions):
|
478 |
-
block = nn.ModuleList()
|
479 |
-
attn = nn.ModuleList()
|
480 |
-
block_in = ch*in_ch_mult[i_level]
|
481 |
-
block_out = ch*ch_mult[i_level]
|
482 |
-
for i_block in range(self.num_res_blocks):
|
483 |
-
block.append(ResnetBlock(in_channels=block_in,
|
484 |
-
out_channels=block_out,
|
485 |
-
temb_channels=self.temb_ch,
|
486 |
-
dropout=dropout))
|
487 |
-
block_in = block_out
|
488 |
-
if curr_res in attn_resolutions:
|
489 |
-
attn.append(make_attn(block_in, attn_type=attn_type))
|
490 |
-
down = nn.Module()
|
491 |
-
down.block = block
|
492 |
-
down.attn = attn
|
493 |
-
if i_level != self.num_resolutions-1:
|
494 |
-
down.downsample = Downsample(block_in, resamp_with_conv)
|
495 |
-
curr_res = curr_res // 2
|
496 |
-
self.down.append(down)
|
497 |
-
|
498 |
-
# middle
|
499 |
-
self.mid = nn.Module()
|
500 |
-
self.mid.block_1 = ResnetBlock(in_channels=block_in,
|
501 |
-
out_channels=block_in,
|
502 |
-
temb_channels=self.temb_ch,
|
503 |
-
dropout=dropout)
|
504 |
-
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
|
505 |
-
self.mid.block_2 = ResnetBlock(in_channels=block_in,
|
506 |
-
out_channels=block_in,
|
507 |
-
temb_channels=self.temb_ch,
|
508 |
-
dropout=dropout)
|
509 |
-
|
510 |
-
# end
|
511 |
-
self.norm_out = Normalize(block_in)
|
512 |
-
self.conv_out = torch.nn.Conv2d(block_in,
|
513 |
-
2*z_channels if double_z else z_channels,
|
514 |
-
kernel_size=3,
|
515 |
-
stride=1,
|
516 |
-
padding=1)
|
517 |
-
|
518 |
-
def forward(self, x):
|
519 |
-
# timestep embedding
|
520 |
-
temb = None
|
521 |
-
|
522 |
-
# downsampling
|
523 |
-
hs = [self.conv_in(x)]
|
524 |
-
for i_level in range(self.num_resolutions):
|
525 |
-
for i_block in range(self.num_res_blocks):
|
526 |
-
h = self.down[i_level].block[i_block](hs[-1], temb)
|
527 |
-
if len(self.down[i_level].attn) > 0:
|
528 |
-
h = self.down[i_level].attn[i_block](h)
|
529 |
-
hs.append(h)
|
530 |
-
if i_level != self.num_resolutions-1:
|
531 |
-
hs.append(self.down[i_level].downsample(hs[-1]))
|
532 |
-
|
533 |
-
# middle
|
534 |
-
h = hs[-1]
|
535 |
-
h = self.mid.block_1(h, temb)
|
536 |
-
h = self.mid.attn_1(h)
|
537 |
-
h = self.mid.block_2(h, temb)
|
538 |
-
|
539 |
-
# end
|
540 |
-
h = self.norm_out(h)
|
541 |
-
h = nonlinearity(h)
|
542 |
-
h = self.conv_out(h)
|
543 |
-
return h
|
544 |
-
|
545 |
-
|
546 |
-
class Decoder(nn.Module):
|
547 |
-
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
|
548 |
-
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
|
549 |
-
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
|
550 |
-
attn_type="vanilla", **ignorekwargs):
|
551 |
-
super().__init__()
|
552 |
-
if use_linear_attn: attn_type = "linear"
|
553 |
-
self.ch = ch
|
554 |
-
self.temb_ch = 0
|
555 |
-
self.num_resolutions = len(ch_mult)
|
556 |
-
self.num_res_blocks = num_res_blocks
|
557 |
-
self.resolution = resolution
|
558 |
-
self.in_channels = in_channels
|
559 |
-
self.give_pre_end = give_pre_end
|
560 |
-
self.tanh_out = tanh_out
|
561 |
-
|
562 |
-
# compute in_ch_mult, block_in and curr_res at lowest res
|
563 |
-
in_ch_mult = (1,)+tuple(ch_mult)
|
564 |
-
block_in = ch*ch_mult[self.num_resolutions-1]
|
565 |
-
curr_res = resolution // 2**(self.num_resolutions-1)
|
566 |
-
self.z_shape = (1,z_channels,curr_res,curr_res)
|
567 |
-
print("Working with z of shape {} = {} dimensions.".format(
|
568 |
-
self.z_shape, np.prod(self.z_shape)))
|
569 |
-
|
570 |
-
# z to block_in
|
571 |
-
self.conv_in = torch.nn.Conv2d(z_channels,
|
572 |
-
block_in,
|
573 |
-
kernel_size=3,
|
574 |
-
stride=1,
|
575 |
-
padding=1)
|
576 |
-
|
577 |
-
# middle
|
578 |
-
self.mid = nn.Module()
|
579 |
-
self.mid.block_1 = ResnetBlock(in_channels=block_in,
|
580 |
-
out_channels=block_in,
|
581 |
-
temb_channels=self.temb_ch,
|
582 |
-
dropout=dropout)
|
583 |
-
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
|
584 |
-
self.mid.block_2 = ResnetBlock(in_channels=block_in,
|
585 |
-
out_channels=block_in,
|
586 |
-
temb_channels=self.temb_ch,
|
587 |
-
dropout=dropout)
|
588 |
-
|
589 |
-
# upsampling
|
590 |
-
self.up = nn.ModuleList()
|
591 |
-
for i_level in reversed(range(self.num_resolutions)):
|
592 |
-
block = nn.ModuleList()
|
593 |
-
attn = nn.ModuleList()
|
594 |
-
block_out = ch*ch_mult[i_level]
|
595 |
-
for i_block in range(self.num_res_blocks+1):
|
596 |
-
block.append(ResnetBlock(in_channels=block_in,
|
597 |
-
out_channels=block_out,
|
598 |
-
temb_channels=self.temb_ch,
|
599 |
-
dropout=dropout))
|
600 |
-
block_in = block_out
|
601 |
-
if curr_res in attn_resolutions:
|
602 |
-
attn.append(make_attn(block_in, attn_type=attn_type))
|
603 |
-
up = nn.Module()
|
604 |
-
up.block = block
|
605 |
-
up.attn = attn
|
606 |
-
if i_level != 0:
|
607 |
-
up.upsample = Upsample(block_in, resamp_with_conv)
|
608 |
-
curr_res = curr_res * 2
|
609 |
-
self.up.insert(0, up) # prepend to get consistent order
|
610 |
-
|
611 |
-
# end
|
612 |
-
self.norm_out = Normalize(block_in)
|
613 |
-
self.conv_out = torch.nn.Conv2d(block_in,
|
614 |
-
out_ch,
|
615 |
-
kernel_size=3,
|
616 |
-
stride=1,
|
617 |
-
padding=1)
|
618 |
-
|
619 |
-
def forward(self, z):
|
620 |
-
#assert z.shape[1:] == self.z_shape[1:]
|
621 |
-
self.last_z_shape = z.shape
|
622 |
-
|
623 |
-
# timestep embedding
|
624 |
-
temb = None
|
625 |
-
|
626 |
-
# z to block_in
|
627 |
-
h = self.conv_in(z)
|
628 |
-
|
629 |
-
# middle
|
630 |
-
h = self.mid.block_1(h, temb)
|
631 |
-
h = self.mid.attn_1(h)
|
632 |
-
h = self.mid.block_2(h, temb)
|
633 |
-
|
634 |
-
# upsampling
|
635 |
-
for i_level in reversed(range(self.num_resolutions)):
|
636 |
-
for i_block in range(self.num_res_blocks+1):
|
637 |
-
h = self.up[i_level].block[i_block](h, temb)
|
638 |
-
if len(self.up[i_level].attn) > 0:
|
639 |
-
h = self.up[i_level].attn[i_block](h)
|
640 |
-
if i_level != 0:
|
641 |
-
h = self.up[i_level].upsample(h)
|
642 |
-
|
643 |
-
# end
|
644 |
-
if self.give_pre_end:
|
645 |
-
return h
|
646 |
-
|
647 |
-
h = self.norm_out(h)
|
648 |
-
h = nonlinearity(h)
|
649 |
-
h = self.conv_out(h)
|
650 |
-
if self.tanh_out:
|
651 |
-
h = torch.tanh(h)
|
652 |
-
return h
|
653 |
-
|
654 |
-
|
655 |
-
class SimpleDecoder(nn.Module):
|
656 |
-
def __init__(self, in_channels, out_channels, *args, **kwargs):
|
657 |
-
super().__init__()
|
658 |
-
self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
|
659 |
-
ResnetBlock(in_channels=in_channels,
|
660 |
-
out_channels=2 * in_channels,
|
661 |
-
temb_channels=0, dropout=0.0),
|
662 |
-
ResnetBlock(in_channels=2 * in_channels,
|
663 |
-
out_channels=4 * in_channels,
|
664 |
-
temb_channels=0, dropout=0.0),
|
665 |
-
ResnetBlock(in_channels=4 * in_channels,
|
666 |
-
out_channels=2 * in_channels,
|
667 |
-
temb_channels=0, dropout=0.0),
|
668 |
-
nn.Conv2d(2*in_channels, in_channels, 1),
|
669 |
-
Upsample(in_channels, with_conv=True)])
|
670 |
-
# end
|
671 |
-
self.norm_out = Normalize(in_channels)
|
672 |
-
self.conv_out = torch.nn.Conv2d(in_channels,
|
673 |
-
out_channels,
|
674 |
-
kernel_size=3,
|
675 |
-
stride=1,
|
676 |
-
padding=1)
|
677 |
-
|
678 |
-
def forward(self, x):
|
679 |
-
for i, layer in enumerate(self.model):
|
680 |
-
if i in [1,2,3]:
|
681 |
-
x = layer(x, None)
|
682 |
-
else:
|
683 |
-
x = layer(x)
|
684 |
-
|
685 |
-
h = self.norm_out(x)
|
686 |
-
h = nonlinearity(h)
|
687 |
-
x = self.conv_out(h)
|
688 |
-
return x
|
689 |
-
|
690 |
-
|
691 |
-
class UpsampleDecoder(nn.Module):
|
692 |
-
def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
|
693 |
-
ch_mult=(2,2), dropout=0.0):
|
694 |
-
super().__init__()
|
695 |
-
# upsampling
|
696 |
-
self.temb_ch = 0
|
697 |
-
self.num_resolutions = len(ch_mult)
|
698 |
-
self.num_res_blocks = num_res_blocks
|
699 |
-
block_in = in_channels
|
700 |
-
curr_res = resolution // 2 ** (self.num_resolutions - 1)
|
701 |
-
self.res_blocks = nn.ModuleList()
|
702 |
-
self.upsample_blocks = nn.ModuleList()
|
703 |
-
for i_level in range(self.num_resolutions):
|
704 |
-
res_block = []
|
705 |
-
block_out = ch * ch_mult[i_level]
|
706 |
-
for i_block in range(self.num_res_blocks + 1):
|
707 |
-
res_block.append(ResnetBlock(in_channels=block_in,
|
708 |
-
out_channels=block_out,
|
709 |
-
temb_channels=self.temb_ch,
|
710 |
-
dropout=dropout))
|
711 |
-
block_in = block_out
|
712 |
-
self.res_blocks.append(nn.ModuleList(res_block))
|
713 |
-
if i_level != self.num_resolutions - 1:
|
714 |
-
self.upsample_blocks.append(Upsample(block_in, True))
|
715 |
-
curr_res = curr_res * 2
|
716 |
-
|
717 |
-
# end
|
718 |
-
-        self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
-                                        out_channels,
-                                        kernel_size=3,
-                                        stride=1,
-                                        padding=1)
-
-    def forward(self, x):
-        # upsampling
-        h = x
-        for k, i_level in enumerate(range(self.num_resolutions)):
-            for i_block in range(self.num_res_blocks + 1):
-                h = self.res_blocks[i_level][i_block](h, None)
-            if i_level != self.num_resolutions - 1:
-                h = self.upsample_blocks[k](h)
-        h = self.norm_out(h)
-        h = nonlinearity(h)
-        h = self.conv_out(h)
-        return h
-
-
-class LatentRescaler(nn.Module):
-    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
-        super().__init__()
-        # residual block, interpolate, residual block
-        self.factor = factor
-        self.conv_in = nn.Conv2d(in_channels,
-                                 mid_channels,
-                                 kernel_size=3,
-                                 stride=1,
-                                 padding=1)
-        self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                     out_channels=mid_channels,
-                                                     temb_channels=0,
-                                                     dropout=0.0) for _ in range(depth)])
-        self.attn = AttnBlock(mid_channels)
-        self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                     out_channels=mid_channels,
-                                                     temb_channels=0,
-                                                     dropout=0.0) for _ in range(depth)])
-
-        self.conv_out = nn.Conv2d(mid_channels,
-                                  out_channels,
-                                  kernel_size=1,
-                                  )
-
-    def forward(self, x):
-        x = self.conv_in(x)
-        for block in self.res_block1:
-            x = block(x, None)
-        x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
-        x = self.attn(x)
-        for block in self.res_block2:
-            x = block(x, None)
-        x = self.conv_out(x)
-        return x
-
-
-class MergedRescaleEncoder(nn.Module):
-    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
-                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
-                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
-        super().__init__()
-        intermediate_chn = ch * ch_mult[-1]
-        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
-                               z_channels=intermediate_chn, double_z=False, resolution=resolution,
-                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
-                               out_ch=None)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
-                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
-
-    def forward(self, x):
-        x = self.encoder(x)
-        x = self.rescaler(x)
-        return x
-
-
-class MergedRescaleDecoder(nn.Module):
-    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
-                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
-        super().__init__()
-        tmp_chn = z_channels*ch_mult[-1]
-        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
-                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
-                               ch_mult=ch_mult, resolution=resolution, ch=ch)
-        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
-                                       out_channels=tmp_chn, depth=rescale_module_depth)
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Upsampler(nn.Module):
-    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
-        super().__init__()
-        assert out_size >= in_size
-        num_blocks = int(np.log2(out_size//in_size))+1
-        factor_up = 1.+ (out_size % in_size)
-        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
-        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
-                                       out_channels=in_channels)
-        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
-                               attn_resolutions=[], in_channels=None, ch=in_channels,
-                               ch_mult=[ch_mult for _ in range(num_blocks)])
-
-    def forward(self, x):
-        x = self.rescaler(x)
-        x = self.decoder(x)
-        return x
-
-
-class Resize(nn.Module):
-    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
-        super().__init__()
-        self.with_conv = learned
-        self.mode = mode
-        if self.with_conv:
-            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
-            raise NotImplementedError()
-            assert in_channels is not None
-            # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(in_channels,
-                                        in_channels,
-                                        kernel_size=4,
-                                        stride=2,
-                                        padding=1)
-
-    def forward(self, x, scale_factor=1.0):
-        if scale_factor==1.0:
-            return x
-        else:
-            x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
-        return x
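For reference, the fractional resize at the heart of LatentRescaler.forward above is a plain interpolate call to a rounded target size; a minimal standalone sketch (the 4-channel 32x32 latent is an assumed toy size, not a value from this repo):

import torch
import torch.nn.functional as F

# Rescale a latent by a fractional factor, mirroring LatentRescaler.forward's
# interpolate step: the target size is the rounded product of size and factor.
x = torch.randn(1, 4, 32, 32)   # assumed toy latent
factor = 1.5
x = F.interpolate(x, size=(int(round(x.shape[2] * factor)),
                           int(round(x.shape[3] * factor))))
print(x.shape)  # torch.Size([1, 4, 48, 48])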
ldm/modules/diffusionmodules/openaimodel.py
DELETED
@@ -1,786 +0,0 @@
-from abc import abstractmethod
-import math
-
-import numpy as np
-import torch as th
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ldm.modules.diffusionmodules.util import (
-    checkpoint,
-    conv_nd,
-    linear,
-    avg_pool_nd,
-    zero_module,
-    normalization,
-    timestep_embedding,
-)
-from ldm.modules.attention import SpatialTransformer
-from ldm.util import exists
-
-
-# dummy replace
-def convert_module_to_f16(x):
-    pass
-
-def convert_module_to_f32(x):
-    pass
-
-
-## go
-class AttentionPool2d(nn.Module):
-    """
-    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
-    """
-
-    def __init__(
-        self,
-        spacial_dim: int,
-        embed_dim: int,
-        num_heads_channels: int,
-        output_dim: int = None,
-    ):
-        super().__init__()
-        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
-        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
-        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
-        self.num_heads = embed_dim // num_heads_channels
-        self.attention = QKVAttention(self.num_heads)
-
-    def forward(self, x):
-        b, c, *_spatial = x.shape
-        x = x.reshape(b, c, -1)  # NC(HW)
-        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
-        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
-        x = self.qkv_proj(x)
-        x = self.attention(x)
-        x = self.c_proj(x)
-        return x[:, :, 0]
-
-
-class TimestepBlock(nn.Module):
-    """
-    Any module where forward() takes timestep embeddings as a second argument.
-    """
-
-    @abstractmethod
-    def forward(self, x, emb):
-        """
-        Apply the module to `x` given `emb` timestep embeddings.
-        """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
-    """
-    A sequential module that passes timestep embeddings to the children that
-    support it as an extra input.
-    """
-
-    def forward(self, x, emb, context=None):
-        for layer in self:
-            if isinstance(layer, TimestepBlock):
-                x = layer(x, emb)
-            elif isinstance(layer, SpatialTransformer):
-                x = layer(x, context)
-            else:
-                x = layer(x)
-        return x
-
-
-class Upsample(nn.Module):
-    """
-    An upsampling layer with an optional convolution.
-    :param channels: channels in the inputs and outputs.
-    :param use_conv: a bool determining if a convolution is applied.
-    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
-                 upsampling occurs in the inner-two dimensions.
-    """
-
-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-        self.use_conv = use_conv
-        self.dims = dims
-        if use_conv:
-            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
-
-    def forward(self, x):
-        assert x.shape[1] == self.channels
-        if self.dims == 3:
-            x = F.interpolate(
-                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
-            )
-        else:
-            x = F.interpolate(x, scale_factor=2, mode="nearest")
-        if self.use_conv:
-            x = self.conv(x)
-        return x
-
-class TransposedUpsample(nn.Module):
-    'Learned 2x upsampling without padding'
-    def __init__(self, channels, out_channels=None, ks=5):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-
-        self.up = nn.ConvTranspose2d(self.channels, self.out_channels, kernel_size=ks, stride=2)
-
-    def forward(self, x):
-        return self.up(x)
-
-
-class Downsample(nn.Module):
-    """
-    A downsampling layer with an optional convolution.
-    :param channels: channels in the inputs and outputs.
-    :param use_conv: a bool determining if a convolution is applied.
-    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
-                 downsampling occurs in the inner-two dimensions.
-    """
-
-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-        self.use_conv = use_conv
-        self.dims = dims
-        stride = 2 if dims != 3 else (1, 2, 2)
-        if use_conv:
-            self.op = conv_nd(
-                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
-            )
-        else:
-            assert self.channels == self.out_channels
-            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
-    def forward(self, x):
-        assert x.shape[1] == self.channels
-        return self.op(x)
-
-
-class ResBlock(TimestepBlock):
-    """
-    A residual block that can optionally change the number of channels.
-    :param channels: the number of input channels.
-    :param emb_channels: the number of timestep embedding channels.
-    :param dropout: the rate of dropout.
-    :param out_channels: if specified, the number of out channels.
-    :param use_conv: if True and out_channels is specified, use a spatial
-        convolution instead of a smaller 1x1 convolution to change the
-        channels in the skip connection.
-    :param dims: determines if the signal is 1D, 2D, or 3D.
-    :param use_checkpoint: if True, use gradient checkpointing on this module.
-    :param up: if True, use this block for upsampling.
-    :param down: if True, use this block for downsampling.
-    """
-
-    def __init__(
-        self,
-        channels,
-        emb_channels,
-        dropout,
-        out_channels=None,
-        use_conv=False,
-        use_scale_shift_norm=False,
-        dims=2,
-        use_checkpoint=False,
-        up=False,
-        down=False,
-    ):
-        super().__init__()
-        self.channels = channels
-        self.emb_channels = emb_channels
-        self.dropout = dropout
-        self.out_channels = out_channels or channels
-        self.use_conv = use_conv
-        self.use_checkpoint = use_checkpoint
-        self.use_scale_shift_norm = use_scale_shift_norm
-
-        self.in_layers = nn.Sequential(
-            normalization(channels),
-            nn.SiLU(),
-            conv_nd(dims, channels, self.out_channels, 3, padding=1),
-        )
-
-        self.updown = up or down
-
-        if up:
-            self.h_upd = Upsample(channels, False, dims)
-            self.x_upd = Upsample(channels, False, dims)
-        elif down:
-            self.h_upd = Downsample(channels, False, dims)
-            self.x_upd = Downsample(channels, False, dims)
-        else:
-            self.h_upd = self.x_upd = nn.Identity()
-
-        self.emb_layers = nn.Sequential(
-            nn.SiLU(),
-            linear(
-                emb_channels,
-                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
-            ),
-        )
-        self.out_layers = nn.Sequential(
-            normalization(self.out_channels),
-            nn.SiLU(),
-            nn.Dropout(p=dropout),
-            zero_module(
-                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
-            ),
-        )
-
-        if self.out_channels == channels:
-            self.skip_connection = nn.Identity()
-        elif use_conv:
-            self.skip_connection = conv_nd(
-                dims, channels, self.out_channels, 3, padding=1
-            )
-        else:
-            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
-    def forward(self, x, emb):
-        """
-        Apply the block to a Tensor, conditioned on a timestep embedding.
-        :param x: an [N x C x ...] Tensor of features.
-        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
-        :return: an [N x C x ...] Tensor of outputs.
-        """
-        return checkpoint(
-            self._forward, (x, emb), self.parameters(), self.use_checkpoint
-        )
-
-    def _forward(self, x, emb):
-        if self.updown:
-            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
-            h = in_rest(x)
-            h = self.h_upd(h)
-            x = self.x_upd(x)
-            h = in_conv(h)
-        else:
-            h = self.in_layers(x)
-        emb_out = self.emb_layers(emb).type(h.dtype)
-        while len(emb_out.shape) < len(h.shape):
-            emb_out = emb_out[..., None]
-        if self.use_scale_shift_norm:
-            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
-            scale, shift = th.chunk(emb_out, 2, dim=1)
-            h = out_norm(h) * (1 + scale) + shift
-            h = out_rest(h)
-        else:
-            h = h + emb_out
-            h = self.out_layers(h)
-        return self.skip_connection(x) + h
-
-
-class AttentionBlock(nn.Module):
-    """
-    An attention block that allows spatial positions to attend to each other.
-    Originally ported from here, but adapted to the N-d case.
-    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
-    """
-
-    def __init__(
-        self,
-        channels,
-        num_heads=1,
-        num_head_channels=-1,
-        use_checkpoint=False,
-        use_new_attention_order=False,
-    ):
-        super().__init__()
-        self.channels = channels
-        if num_head_channels == -1:
-            self.num_heads = num_heads
-        else:
-            assert (
-                channels % num_head_channels == 0
-            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
-            self.num_heads = channels // num_head_channels
-        self.use_checkpoint = use_checkpoint
-        self.norm = normalization(channels)
-        self.qkv = conv_nd(1, channels, channels * 3, 1)
-        if use_new_attention_order:
-            # split qkv before split heads
-            self.attention = QKVAttention(self.num_heads)
-        else:
-            # split heads before split qkv
-            self.attention = QKVAttentionLegacy(self.num_heads)
-
-        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
-    def forward(self, x):
-        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
-        #return pt_checkpoint(self._forward, x)  # pytorch
-
-    def _forward(self, x):
-        b, c, *spatial = x.shape
-        x = x.reshape(b, c, -1)
-        qkv = self.qkv(self.norm(x))
-        h = self.attention(qkv)
-        h = self.proj_out(h)
-        return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
-    """
-    A counter for the `thop` package to count the operations in an
-    attention operation.
-    Meant to be used like:
-        macs, params = thop.profile(
-            model,
-            inputs=(inputs, timestamps),
-            custom_ops={QKVAttention: QKVAttention.count_flops},
-        )
-    """
-    b, c, *spatial = y[0].shape
-    num_spatial = int(np.prod(spatial))
-    # We perform two matmuls with the same number of ops.
-    # The first computes the weight matrix, the second computes
-    # the combination of the value vectors.
-    matmul_ops = 2 * b * (num_spatial ** 2) * c
-    model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
-    """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
-    """
-
-    def __init__(self, n_heads):
-        super().__init__()
-        self.n_heads = n_heads
-
-    def forward(self, qkv):
-        """
-        Apply QKV attention.
-        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
-        :return: an [N x (H * C) x T] tensor after attention.
-        """
-        bs, width, length = qkv.shape
-        assert width % (3 * self.n_heads) == 0
-        ch = width // (3 * self.n_heads)
-        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
-        scale = 1 / math.sqrt(math.sqrt(ch))
-        weight = th.einsum(
-            "bct,bcs->bts", q * scale, k * scale
-        )  # More stable with f16 than dividing afterwards
-        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v)
-        return a.reshape(bs, -1, length)
-
-    @staticmethod
-    def count_flops(model, _x, y):
-        return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
-    """
-    A module which performs QKV attention and splits in a different order.
-    """
-
-    def __init__(self, n_heads):
-        super().__init__()
-        self.n_heads = n_heads
-
-    def forward(self, qkv):
-        """
-        Apply QKV attention.
-        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
-        :return: an [N x (H * C) x T] tensor after attention.
-        """
-        bs, width, length = qkv.shape
-        assert width % (3 * self.n_heads) == 0
-        ch = width // (3 * self.n_heads)
-        q, k, v = qkv.chunk(3, dim=1)
-        scale = 1 / math.sqrt(math.sqrt(ch))
-        weight = th.einsum(
-            "bct,bcs->bts",
-            (q * scale).view(bs * self.n_heads, ch, length),
-            (k * scale).view(bs * self.n_heads, ch, length),
-        )  # More stable with f16 than dividing afterwards
-        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
-        return a.reshape(bs, -1, length)
-
-    @staticmethod
-    def count_flops(model, _x, y):
-        return count_flops_attn(model, _x, y)
-
-
-class UNetModel(nn.Module):
-    """
-    The full UNet model with attention and timestep embedding.
-    :param in_channels: channels in the input Tensor.
-    :param model_channels: base channel count for the model.
-    :param out_channels: channels in the output Tensor.
-    :param num_res_blocks: number of residual blocks per downsample.
-    :param attention_resolutions: a collection of downsample rates at which
-        attention will take place. May be a set, list, or tuple.
-        For example, if this contains 4, then at 4x downsampling, attention
-        will be used.
-    :param dropout: the dropout probability.
-    :param channel_mult: channel multiplier for each level of the UNet.
-    :param conv_resample: if True, use learned convolutions for upsampling and
-        downsampling.
-    :param dims: determines if the signal is 1D, 2D, or 3D.
-    :param num_classes: if specified (as an int), then this model will be
-        class-conditional with `num_classes` classes.
-    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
-    :param num_heads: the number of attention heads in each attention layer.
-    :param num_heads_channels: if specified, ignore num_heads and instead use
-                               a fixed channel width per attention head.
-    :param num_heads_upsample: works with num_heads to set a different number
-                               of heads for upsampling. Deprecated.
-    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
-    :param resblock_updown: use residual blocks for up/downsampling.
-    :param use_new_attention_order: use a different attention pattern for potentially
-                                    increased efficiency.
-    """
-
-    def __init__(
-        self,
-        image_size,
-        in_channels,
-        model_channels,
-        out_channels,
-        num_res_blocks,
-        attention_resolutions,
-        dropout=0,
-        channel_mult=(1, 2, 4, 8),
-        conv_resample=True,
-        dims=2,
-        num_classes=None,
-        use_checkpoint=False,
-        use_fp16=False,
-        num_heads=-1,
-        num_head_channels=-1,
-        num_heads_upsample=-1,
-        use_scale_shift_norm=False,
-        resblock_updown=False,
-        use_new_attention_order=False,
-        use_spatial_transformer=False,    # custom transformer support
-        transformer_depth=1,              # custom transformer support
-        context_dim=None,                 # custom transformer support
-        n_embed=None,                     # custom support for prediction of discrete ids into codebook of first stage vq model
-        legacy=True,
-        disable_self_attentions=None,
-        num_attention_blocks=None,
-        disable_middle_self_attn=False,
-        use_linear_in_transformer=False,
-    ):
-        super().__init__()
-        if use_spatial_transformer:
-            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
-
-        if context_dim is not None:
-            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
-            from omegaconf.listconfig import ListConfig
-            if type(context_dim) == ListConfig:
-                context_dim = list(context_dim)
-
-        if num_heads_upsample == -1:
-            num_heads_upsample = num_heads
-
-        if num_heads == -1:
-            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
-
-        if num_head_channels == -1:
-            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
-        self.image_size = image_size
-        self.in_channels = in_channels
-        self.model_channels = model_channels
-        self.out_channels = out_channels
-        if isinstance(num_res_blocks, int):
-            self.num_res_blocks = len(channel_mult) * [num_res_blocks]
-        else:
-            if len(num_res_blocks) != len(channel_mult):
-                raise ValueError("provide num_res_blocks either as an int (globally constant) or "
-                                 "as a list/tuple (per-level) with the same length as channel_mult")
-            self.num_res_blocks = num_res_blocks
-        if disable_self_attentions is not None:
-            # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
-            assert len(disable_self_attentions) == len(channel_mult)
-        if num_attention_blocks is not None:
-            assert len(num_attention_blocks) == len(self.num_res_blocks)
-            assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
-            print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
-                  f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
-                  f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
-                  f"attention will still not be set.")
-
-        self.attention_resolutions = attention_resolutions
-        self.dropout = dropout
-        self.channel_mult = channel_mult
-        self.conv_resample = conv_resample
-        self.num_classes = num_classes
-        self.use_checkpoint = use_checkpoint
-        self.dtype = th.float16 if use_fp16 else th.float32
-        self.num_heads = num_heads
-        self.num_head_channels = num_head_channels
-        self.num_heads_upsample = num_heads_upsample
-        self.predict_codebook_ids = n_embed is not None
-
-        time_embed_dim = model_channels * 4
-        self.time_embed = nn.Sequential(
-            linear(model_channels, time_embed_dim),
-            nn.SiLU(),
-            linear(time_embed_dim, time_embed_dim),
-        )
-
-        if self.num_classes is not None:
-            if isinstance(self.num_classes, int):
-                self.label_emb = nn.Embedding(num_classes, time_embed_dim)
-            elif self.num_classes == "continuous":
-                print("setting up linear c_adm embedding layer")
-                self.label_emb = nn.Linear(1, time_embed_dim)
-            else:
-                raise ValueError()
-
-        self.input_blocks = nn.ModuleList(
-            [
-                TimestepEmbedSequential(
-                    conv_nd(dims, in_channels, model_channels, 3, padding=1)
-                )
-            ]
-        )
-        self._feature_size = model_channels
-        input_block_chans = [model_channels]
-        ch = model_channels
-        ds = 1
-        for level, mult in enumerate(channel_mult):
-            for nr in range(self.num_res_blocks[level]):
-                layers = [
-                    ResBlock(
-                        ch,
-                        time_embed_dim,
-                        dropout,
-                        out_channels=mult * model_channels,
-                        dims=dims,
-                        use_checkpoint=use_checkpoint,
-                        use_scale_shift_norm=use_scale_shift_norm,
-                    )
-                ]
-                ch = mult * model_channels
-                if ds in attention_resolutions:
-                    if num_head_channels == -1:
-                        dim_head = ch // num_heads
-                    else:
-                        num_heads = ch // num_head_channels
-                        dim_head = num_head_channels
-                    if legacy:
-                        #num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-                    if exists(disable_self_attentions):
-                        disabled_sa = disable_self_attentions[level]
-                    else:
-                        disabled_sa = False
-
-                    if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
-                        layers.append(
-                            AttentionBlock(
-                                ch,
-                                use_checkpoint=use_checkpoint,
-                                num_heads=num_heads,
-                                num_head_channels=dim_head,
-                                use_new_attention_order=use_new_attention_order,
-                            ) if not use_spatial_transformer else SpatialTransformer(
-                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
-                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
-                                use_checkpoint=use_checkpoint
-                            )
-                        )
-                self.input_blocks.append(TimestepEmbedSequential(*layers))
-                self._feature_size += ch
-                input_block_chans.append(ch)
-            if level != len(channel_mult) - 1:
-                out_ch = ch
-                self.input_blocks.append(
-                    TimestepEmbedSequential(
-                        ResBlock(
-                            ch,
-                            time_embed_dim,
-                            dropout,
-                            out_channels=out_ch,
-                            dims=dims,
-                            use_checkpoint=use_checkpoint,
-                            use_scale_shift_norm=use_scale_shift_norm,
-                            down=True,
-                        )
-                        if resblock_updown
-                        else Downsample(
-                            ch, conv_resample, dims=dims, out_channels=out_ch
-                        )
-                    )
-                )
-                ch = out_ch
-                input_block_chans.append(ch)
-                ds *= 2
-                self._feature_size += ch
-
-        if num_head_channels == -1:
-            dim_head = ch // num_heads
-        else:
-            num_heads = ch // num_head_channels
-            dim_head = num_head_channels
-        if legacy:
-            #num_heads = 1
-            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-        self.middle_block = TimestepEmbedSequential(
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-            AttentionBlock(
-                ch,
-                use_checkpoint=use_checkpoint,
-                num_heads=num_heads,
-                num_head_channels=dim_head,
-                use_new_attention_order=use_new_attention_order,
-            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
-                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
-                disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
-                use_checkpoint=use_checkpoint
-            ),
-            ResBlock(
-                ch,
-                time_embed_dim,
-                dropout,
-                dims=dims,
-                use_checkpoint=use_checkpoint,
-                use_scale_shift_norm=use_scale_shift_norm,
-            ),
-        )
-        self._feature_size += ch
-
-        self.output_blocks = nn.ModuleList([])
-        for level, mult in list(enumerate(channel_mult))[::-1]:
-            for i in range(self.num_res_blocks[level] + 1):
-                ich = input_block_chans.pop()
-                layers = [
-                    ResBlock(
-                        ch + ich,
-                        time_embed_dim,
-                        dropout,
-                        out_channels=model_channels * mult,
-                        dims=dims,
-                        use_checkpoint=use_checkpoint,
-                        use_scale_shift_norm=use_scale_shift_norm,
-                    )
-                ]
-                ch = model_channels * mult
-                if ds in attention_resolutions:
-                    if num_head_channels == -1:
-                        dim_head = ch // num_heads
-                    else:
-                        num_heads = ch // num_head_channels
-                        dim_head = num_head_channels
-                    if legacy:
-                        #num_heads = 1
-                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
-                    if exists(disable_self_attentions):
-                        disabled_sa = disable_self_attentions[level]
-                    else:
-                        disabled_sa = False
-
-                    if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
-                        layers.append(
-                            AttentionBlock(
-                                ch,
-                                use_checkpoint=use_checkpoint,
-                                num_heads=num_heads_upsample,
-                                num_head_channels=dim_head,
-                                use_new_attention_order=use_new_attention_order,
-                            ) if not use_spatial_transformer else SpatialTransformer(
-                                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
-                                disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
-                                use_checkpoint=use_checkpoint
-                            )
-                        )
-                if level and i == self.num_res_blocks[level]:
-                    out_ch = ch
-                    layers.append(
-                        ResBlock(
-                            ch,
-                            time_embed_dim,
-                            dropout,
-                            out_channels=out_ch,
-                            dims=dims,
-                            use_checkpoint=use_checkpoint,
-                            use_scale_shift_norm=use_scale_shift_norm,
-                            up=True,
-                        )
-                        if resblock_updown
-                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
-                    )
-                    ds //= 2
-                self.output_blocks.append(TimestepEmbedSequential(*layers))
-                self._feature_size += ch
-
-        self.out = nn.Sequential(
-            normalization(ch),
-            nn.SiLU(),
-            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
-        )
-        if self.predict_codebook_ids:
-            self.id_predictor = nn.Sequential(
-                normalization(ch),
-                conv_nd(dims, model_channels, n_embed, 1),
-                # nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
-            )
-
-    def convert_to_fp16(self):
-        """
-        Convert the torso of the model to float16.
-        """
-        self.input_blocks.apply(convert_module_to_f16)
-        self.middle_block.apply(convert_module_to_f16)
-        self.output_blocks.apply(convert_module_to_f16)
-
-    def convert_to_fp32(self):
-        """
-        Convert the torso of the model to float32.
-        """
-        self.input_blocks.apply(convert_module_to_f32)
-        self.middle_block.apply(convert_module_to_f32)
-        self.output_blocks.apply(convert_module_to_f32)
-
-    def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
-        """
-        Apply the model to an input batch.
-        :param x: an [N x C x ...] Tensor of inputs.
-        :param timesteps: a 1-D batch of timesteps.
-        :param context: conditioning plugged in via crossattn
-        :param y: an [N] Tensor of labels, if class-conditional.
-        :return: an [N x C x ...] Tensor of outputs.
-        """
-        assert (y is not None) == (
-            self.num_classes is not None
-        ), "must specify y if and only if the model is class-conditional"
-        hs = []
-        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
-        emb = self.time_embed(t_emb)
-
-        if self.num_classes is not None:
-            assert y.shape[0] == x.shape[0]
-            emb = emb + self.label_emb(y)
-
-        h = x.type(self.dtype)
-        for module in self.input_blocks:
-            h = module(h, emb, context)
-            hs.append(h)
-        h = self.middle_block(h, emb, context)
-        for module in self.output_blocks:
-            h = th.cat([h, hs.pop()], dim=1)
-            h = module(h, emb, context)
-        h = h.type(x.dtype)
-        if self.predict_codebook_ids:
-            return self.id_predictor(h)
-        else:
-            return self.out(h)
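As a sanity check on the attention math deleted above: the two-sided 1/sqrt(sqrt(ch)) scaling in QKVAttentionLegacy is algebraically the same as the usual 1/sqrt(ch) applied once to the logits, just friendlier to float16; a small sketch with assumed toy shapes:

import math
import torch

bs_heads, ch, length = 8, 64, 16      # assumed toy sizes: batch*heads, head dim, tokens
q = torch.randn(bs_heads, ch, length)
k = torch.randn(bs_heads, ch, length)

scale = 1 / math.sqrt(math.sqrt(ch))
w1 = torch.einsum("bct,bcs->bts", q * scale, k * scale)  # scale q and k separately
w2 = torch.einsum("bct,bcs->bts", q, k) / math.sqrt(ch)  # scale the logits once
print(torch.allclose(w1, w2, atol=1e-5))  # True: identical attention logits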
ldm/modules/diffusionmodules/upscaling.py
DELETED
@@ -1,81 +0,0 @@
-import torch
-import torch.nn as nn
-import numpy as np
-from functools import partial
-
-from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule
-from ldm.util import default
-
-
-class AbstractLowScaleModel(nn.Module):
-    # for concatenating a downsampled image to the latent representation
-    def __init__(self, noise_schedule_config=None):
-        super(AbstractLowScaleModel, self).__init__()
-        if noise_schedule_config is not None:
-            self.register_schedule(**noise_schedule_config)
-
-    def register_schedule(self, beta_schedule="linear", timesteps=1000,
-                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-        betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
-                                   cosine_s=cosine_s)
-        alphas = 1. - betas
-        alphas_cumprod = np.cumprod(alphas, axis=0)
-        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
-
-        timesteps, = betas.shape
-        self.num_timesteps = int(timesteps)
-        self.linear_start = linear_start
-        self.linear_end = linear_end
-        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
-
-        to_torch = partial(torch.tensor, dtype=torch.float32)
-
-        self.register_buffer('betas', to_torch(betas))
-        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
-
-        # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
-        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
-        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
-        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
-
-    def q_sample(self, x_start, t, noise=None):
-        noise = default(noise, lambda: torch.randn_like(x_start))
-        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
-                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
-
-    def forward(self, x):
-        return x, None
-
-    def decode(self, x):
-        return x
-
-
-class SimpleImageConcat(AbstractLowScaleModel):
-    # no noise level conditioning
-    def __init__(self):
-        super(SimpleImageConcat, self).__init__(noise_schedule_config=None)
-        self.max_noise_level = 0
-
-    def forward(self, x):
-        # fix to constant noise level
-        return x, torch.zeros(x.shape[0], device=x.device).long()
-
-
-class ImageConcatWithNoiseAugmentation(AbstractLowScaleModel):
-    def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False):
-        super().__init__(noise_schedule_config=noise_schedule_config)
-        self.max_noise_level = max_noise_level
-
-    def forward(self, x, noise_level=None):
-        if noise_level is None:
-            noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
-        else:
-            assert isinstance(noise_level, torch.Tensor)
-        z = self.q_sample(x, noise_level)
-        return z, noise_level
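The q_sample above is the closed-form forward-diffusion step x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps; a self-contained sketch using the same squared-sqrt "linear" beta schedule that make_beta_schedule builds (the toy latent shapes are assumptions):

import torch

T = 1000
betas = torch.linspace(1e-4 ** 0.5, 2e-2 ** 0.5, T, dtype=torch.float64) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0).float()

def q_sample(x_start, t, noise):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    a = alphas_cumprod[t].view(-1, 1, 1, 1)
    return a.sqrt() * x_start + (1.0 - a).sqrt() * noise

x0 = torch.randn(2, 4, 8, 8)           # assumed toy latents
t = torch.randint(0, T, (2,))          # one noise level per sample
xt = q_sample(x0, t, torch.randn_like(x0))
print(xt.shape)  # torch.Size([2, 4, 8, 8])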
ldm/modules/diffusionmodules/util.py
DELETED
@@ -1,270 +0,0 @@
-# adopted from
-# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
-# and
-# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-# and
-# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
-#
-# thanks!
-
-
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from ldm.util import instantiate_from_config
-
-
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-    if schedule == "linear":
-        betas = (
-                torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
-        )
-
-    elif schedule == "cosine":
-        timesteps = (
-                torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
-        )
-        alphas = timesteps / (1 + cosine_s) * np.pi / 2
-        alphas = torch.cos(alphas).pow(2)
-        alphas = alphas / alphas[0]
-        betas = 1 - alphas[1:] / alphas[:-1]
-        betas = np.clip(betas, a_min=0, a_max=0.999)
-
-    elif schedule == "sqrt_linear":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
-    elif schedule == "sqrt":
-        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
-    else:
-        raise ValueError(f"schedule '{schedule}' unknown.")
-    return betas.numpy()
-
-
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
-    if ddim_discr_method == 'uniform':
-        c = num_ddpm_timesteps // num_ddim_timesteps
-        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
-    elif ddim_discr_method == 'quad':
-        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
-    else:
-        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
-    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
-    # add one to get the final alpha values right (the ones from first scale to data during sampling)
-    steps_out = ddim_timesteps + 1
-    if verbose:
-        print(f'Selected timesteps for ddim sampler: {steps_out}')
-    return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
-    # select alphas for computing the variance schedule
-    alphas = alphacums[ddim_timesteps]
-    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-    # according to the formula provided in https://arxiv.org/abs/2010.02502
-    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
-    if verbose:
-        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
-        print(f'For the chosen value of eta, which is {eta}, '
-              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
-    return sigmas, alphas, alphas_prev
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
-    """
-    Create a beta schedule that discretizes the given alpha_t_bar function,
-    which defines the cumulative product of (1-beta) over time from t = [0,1].
-    :param num_diffusion_timesteps: the number of betas to produce.
-    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
-                      produces the cumulative product of (1-beta) up to that
-                      part of the diffusion process.
-    :param max_beta: the maximum beta to use; use values lower than 1 to
-                     prevent singularities.
-    """
-    betas = []
-    for i in range(num_diffusion_timesteps):
-        t1 = i / num_diffusion_timesteps
-        t2 = (i + 1) / num_diffusion_timesteps
-        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
-    return np.array(betas)
-
-
-def extract_into_tensor(a, t, x_shape):
-    b, *_ = t.shape
-    out = a.gather(-1, t)
-    return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
-def checkpoint(func, inputs, params, flag):
-    """
-    Evaluate a function without caching intermediate activations, allowing for
-    reduced memory at the expense of extra compute in the backward pass.
-    :param func: the function to evaluate.
-    :param inputs: the argument sequence to pass to `func`.
-    :param params: a sequence of parameters `func` depends on but does not
-                   explicitly take as arguments.
-    :param flag: if False, disable gradient checkpointing.
-    """
-    if flag:
-        args = tuple(inputs) + tuple(params)
-        return CheckpointFunction.apply(func, len(inputs), *args)
-    else:
-        return func(*inputs)
-
-
-class CheckpointFunction(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, run_function, length, *args):
-        ctx.run_function = run_function
-        ctx.input_tensors = list(args[:length])
-        ctx.input_params = list(args[length:])
-        ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(),
-                                   "dtype": torch.get_autocast_gpu_dtype(),
-                                   "cache_enabled": torch.is_autocast_cache_enabled()}
-        with torch.no_grad():
-            output_tensors = ctx.run_function(*ctx.input_tensors)
-        return output_tensors
-
-    @staticmethod
-    def backward(ctx, *output_grads):
-        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
-        with torch.enable_grad(), \
-                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
-            # Fixes a bug where the first op in run_function modifies the
-            # Tensor storage in place, which is not allowed for detach()'d
-            # Tensors.
-            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-            output_tensors = ctx.run_function(*shallow_copies)
-        input_grads = torch.autograd.grad(
-            output_tensors,
-            ctx.input_tensors + ctx.input_params,
-            output_grads,
-            allow_unused=True,
-        )
-        del ctx.input_tensors
-        del ctx.input_params
-        del output_tensors
-        return (None, None) + input_grads
-
-
-def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
-    """
-    Create sinusoidal timestep embeddings.
-    :param timesteps: a 1-D Tensor of N indices, one per batch element.
-                      These may be fractional.
-    :param dim: the dimension of the output.
-    :param max_period: controls the minimum frequency of the embeddings.
-    :return: an [N x dim] Tensor of positional embeddings.
-    """
-    if not repeat_only:
-        half = dim // 2
-        freqs = torch.exp(
-            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
-        ).to(device=timesteps.device)
-        args = timesteps[:, None].float() * freqs[None]
-        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
-        if dim % 2:
-            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
-    else:
-        embedding = repeat(timesteps, 'b -> b d', d=dim)
-    return embedding
-
-
-def zero_module(module):
-    """
-    Zero out the parameters of a module and return it.
-    """
-    for p in module.parameters():
-        p.detach().zero_()
-    return module
-
-
-def scale_module(module, scale):
-    """
-    Scale the parameters of a module and return it.
-    """
-    for p in module.parameters():
-        p.detach().mul_(scale)
-    return module
-
-
-def mean_flat(tensor):
-    """
-    Take the mean over all non-batch dimensions.
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels):
-    """
-    Make a standard normalization layer.
-    :param channels: number of input channels.
-    :return: an nn.Module for normalization.
-    """
-    return GroupNorm32(32, channels)
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
-    def forward(self, x):
-        return x * torch.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
-    def forward(self, x):
-        return super().forward(x.float()).type(x.dtype)
-
-def conv_nd(dims, *args, **kwargs):
-    """
-    Create a 1D, 2D, or 3D convolution module.
-    """
-    if dims == 1:
-        return nn.Conv1d(*args, **kwargs)
-    elif dims == 2:
-        return nn.Conv2d(*args, **kwargs)
-    elif dims == 3:
-        return nn.Conv3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
-    """
-    Create a linear module.
-    """
-    return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
-    """
-    Create a 1D, 2D, or 3D average pooling module.
-    """
-    if dims == 1:
-        return nn.AvgPool1d(*args, **kwargs)
-    elif dims == 2:
-        return nn.AvgPool2d(*args, **kwargs)
-    elif dims == 3:
-        return nn.AvgPool3d(*args, **kwargs)
-    raise ValueError(f"unsupported dimensions: {dims}")
-
-
-class HybridConditioner(nn.Module):
-
-    def __init__(self, c_concat_config, c_crossattn_config):
-        super().__init__()
-        self.concat_conditioner = instantiate_from_config(c_concat_config)
-        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
-    def forward(self, c_concat, c_crossattn):
-        c_concat = self.concat_conditioner(c_concat)
-        c_crossattn = self.crossattn_conditioner(c_crossattn)
-        return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
-
-
-def noise_like(shape, device, repeat=False):
-    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
-    noise = lambda: torch.randn(shape, device=device)
-    return repeat_noise() if repeat else noise()
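The sinusoidal timestep_embedding above is the transformer positional encoding applied to diffusion timesteps; a minimal standalone version (even dim assumed, so the odd-dim zero-padding branch is omitted):

import math
import torch

def timestep_embedding(timesteps, dim, max_period=10000):
    # Frequencies decay geometrically from 1 down to 1/max_period.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) *
                      torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

emb = timestep_embedding(torch.tensor([0, 10, 999]), dim=320)
print(emb.shape)  # torch.Size([3, 320])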
ldm/modules/distributions/__init__.py
DELETED
File without changes
ldm/modules/distributions/distributions.py
DELETED
@@ -1,92 +0,0 @@
-import torch
-import numpy as np
-
-
-class AbstractDistribution:
-    def sample(self):
-        raise NotImplementedError()
-
-    def mode(self):
-        raise NotImplementedError()
-
-
-class DiracDistribution(AbstractDistribution):
-    def __init__(self, value):
-        self.value = value
-
-    def sample(self):
-        return self.value
-
-    def mode(self):
-        return self.value
-
-
-class DiagonalGaussianDistribution(object):
-    def __init__(self, parameters, deterministic=False):
-        self.parameters = parameters
-        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
-        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
-        self.deterministic = deterministic
-        self.std = torch.exp(0.5 * self.logvar)
-        self.var = torch.exp(self.logvar)
-        if self.deterministic:
-            self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
-
-    def sample(self):
-        x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
-        return x
-
-    def kl(self, other=None):
-        if self.deterministic:
-            return torch.Tensor([0.])
-        else:
-            if other is None:
-                return 0.5 * torch.sum(torch.pow(self.mean, 2)
-                                       + self.var - 1.0 - self.logvar,
-                                       dim=[1, 2, 3])
-            else:
-                return 0.5 * torch.sum(
-                    torch.pow(self.mean - other.mean, 2) / other.var
-                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
-                    dim=[1, 2, 3])
-
-    def nll(self, sample, dims=[1,2,3]):
-        if self.deterministic:
-            return torch.Tensor([0.])
-        logtwopi = np.log(2.0 * np.pi)
-        return 0.5 * torch.sum(
-            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
-            dim=dims)
-
-    def mode(self):
-        return self.mean
-
-
-def normal_kl(mean1, logvar1, mean2, logvar2):
-    """
-    source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
-    Compute the KL divergence between two gaussians.
-    Shapes are automatically broadcasted, so batches can be compared to
-    scalars, among other use cases.
-    """
-    tensor = None
-    for obj in (mean1, logvar1, mean2, logvar2):
-        if isinstance(obj, torch.Tensor):
-            tensor = obj
-            break
-    assert tensor is not None, "at least one argument must be a Tensor"
-
-    # Force variances to be Tensors. Broadcasting helps convert scalars to
-    # Tensors, but it does not work for torch.exp().
-    logvar1, logvar2 = [
-        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
-        for x in (logvar1, logvar2)
-    ]
-
-    return 0.5 * (
-        -1.0
-        + logvar2
-        - logvar1
-        + torch.exp(logvar1 - logvar2)
-        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
-    )
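The `DiagonalGaussianDistribution` above is the VAE posterior: `sample()` is the reparameterization trick, and `kl()` with `other=None` is the closed-form KL against a standard normal, 0.5 * sum(mean^2 + var - 1 - logvar). A short sanity check in plain PyTorch (illustrative, not part of the commit):

import torch

# pack mean and log-variance along dim 1, as the deleted class expects
mean = torch.zeros(2, 4, 8, 8)
logvar = torch.zeros(2, 4, 8, 8)           # std = 1 everywhere
parameters = torch.cat([mean, logvar], dim=1)

m, lv = torch.chunk(parameters, 2, dim=1)
std = torch.exp(0.5 * lv)
x = m + std * torch.randn_like(m)          # reparameterized sample

kl = 0.5 * torch.sum(m.pow(2) + lv.exp() - 1.0 - lv, dim=[1, 2, 3])
print(kl)                                  # zeros: N(0, 1) against itself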
ldm/modules/ema.py
DELETED
@@ -1,80 +0,0 @@
-import torch
-from torch import nn
-
-
-class LitEma(nn.Module):
-    def __init__(self, model, decay=0.9999, use_num_upates=True):
-        super().__init__()
-        if decay < 0.0 or decay > 1.0:
-            raise ValueError('Decay must be between 0 and 1')
-
-        self.m_name2s_name = {}
-        self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
-        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
-                             else torch.tensor(-1, dtype=torch.int))
-
-        for name, p in model.named_parameters():
-            if p.requires_grad:
-                # remove as '.'-character is not allowed in buffers
-                s_name = name.replace('.', '')
-                self.m_name2s_name.update({name: s_name})
-                self.register_buffer(s_name, p.clone().detach().data)
-
-        self.collected_params = []
-
-    def reset_num_updates(self):
-        del self.num_updates
-        self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))
-
-    def forward(self, model):
-        decay = self.decay
-
-        if self.num_updates >= 0:
-            self.num_updates += 1
-            decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
-
-        one_minus_decay = 1.0 - decay
-
-        with torch.no_grad():
-            m_param = dict(model.named_parameters())
-            shadow_params = dict(self.named_buffers())
-
-            for key in m_param:
-                if m_param[key].requires_grad:
-                    sname = self.m_name2s_name[key]
-                    shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
-                    shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
-                else:
-                    assert not key in self.m_name2s_name
-
-    def copy_to(self, model):
-        m_param = dict(model.named_parameters())
-        shadow_params = dict(self.named_buffers())
-        for key in m_param:
-            if m_param[key].requires_grad:
-                m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
-            else:
-                assert not key in self.m_name2s_name
-
-    def store(self, parameters):
-        """
-        Save the current parameters for restoring later.
-        Args:
-            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-                temporarily stored.
-        """
-        self.collected_params = [param.clone() for param in parameters]
-
-    def restore(self, parameters):
-        """
-        Restore the parameters stored with the `store` method.
-        Useful to validate the model with EMA parameters without affecting the
-        original optimization process. Store the parameters before the
-        `copy_to` method. After validation (or model saving), use this to
-        restore the former parameters.
-        Args:
-            parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-                updated with the stored parameters.
-        """
-        for c_param, param in zip(self.collected_params, parameters):
-            param.data.copy_(c_param.data)
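`LitEma` keeps shadow copies of all trainable parameters as buffers and warms the decay up via min(decay, (1 + n) / (10 + n)). The intended evaluation pattern, reconstructed from the `store`/`copy_to`/`restore` docstrings above (a minimal sketch that assumes the `LitEma` class from this diff is in scope):

import torch
from torch import nn

model = nn.Linear(4, 4)              # stand-in for the diffusion model
ema = LitEma(model, decay=0.9999)

# after every optimizer step, fold the live weights into the shadow copy
ema(model)

# evaluate with EMA weights without disturbing training
ema.store(model.parameters())        # stash the live weights
ema.copy_to(model)                   # swap in the EMA shadow weights
# ... run validation or save a checkpoint here ...
ema.restore(model.parameters())      # put the live weights back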
ldm/modules/encoders/__init__.py
DELETED
File without changes
ldm/modules/encoders/modules.py
DELETED
@@ -1,213 +0,0 @@
-import torch
-import torch.nn as nn
-from torch.utils.checkpoint import checkpoint
-
-from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel
-
-import open_clip
-from ldm.util import default, count_params
-
-
-class AbstractEncoder(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def encode(self, *args, **kwargs):
-        raise NotImplementedError
-
-
-class IdentityEncoder(AbstractEncoder):
-
-    def encode(self, x):
-        return x
-
-
-class ClassEmbedder(nn.Module):
-    def __init__(self, embed_dim, n_classes=1000, key='class', ucg_rate=0.1):
-        super().__init__()
-        self.key = key
-        self.embedding = nn.Embedding(n_classes, embed_dim)
-        self.n_classes = n_classes
-        self.ucg_rate = ucg_rate
-
-    def forward(self, batch, key=None, disable_dropout=False):
-        if key is None:
-            key = self.key
-        # this is for use in crossattn
-        c = batch[key][:, None]
-        if self.ucg_rate > 0. and not disable_dropout:
-            mask = 1. - torch.bernoulli(torch.ones_like(c) * self.ucg_rate)
-            c = mask * c + (1-mask) * torch.ones_like(c)*(self.n_classes-1)
-            c = c.long()
-        c = self.embedding(c)
-        return c
-
-    def get_unconditional_conditioning(self, bs, device="cuda"):
-        uc_class = self.n_classes - 1  # 1000 classes --> 0 ... 999, one extra class for ucg (class 1000)
-        uc = torch.ones((bs,), device=device) * uc_class
-        uc = {self.key: uc}
-        return uc
-
-
-def disabled_train(self, mode=True):
-    """Overwrite model.train with this function to make sure train/eval mode
-    does not change anymore."""
-    return self
-
-
-class FrozenT5Embedder(AbstractEncoder):
-    """Uses the T5 transformer encoder for text"""
-    def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True):  # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
-        super().__init__()
-        self.tokenizer = T5Tokenizer.from_pretrained(version)
-        self.transformer = T5EncoderModel.from_pretrained(version)
-        self.device = device
-        self.max_length = max_length   # TODO: typical value?
-        if freeze:
-            self.freeze()
-
-    def freeze(self):
-        self.transformer = self.transformer.eval()
-        #self.train = disabled_train
-        for param in self.parameters():
-            param.requires_grad = False
-
-    def forward(self, text):
-        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
-                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
-        tokens = batch_encoding["input_ids"].to(self.device)
-        outputs = self.transformer(input_ids=tokens)
-
-        z = outputs.last_hidden_state
-        return z
-
-    def encode(self, text):
-        return self(text)
-
-
-class FrozenCLIPEmbedder(AbstractEncoder):
-    """Uses the CLIP transformer encoder for text (from huggingface)"""
-    LAYERS = [
-        "last",
-        "pooled",
-        "hidden"
-    ]
-    def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
-                 freeze=True, layer="last", layer_idx=None):  # clip-vit-base-patch32
-        super().__init__()
-        assert layer in self.LAYERS
-        self.tokenizer = CLIPTokenizer.from_pretrained(version)
-        self.transformer = CLIPTextModel.from_pretrained(version)
-        self.device = device
-        self.max_length = max_length
-        if freeze:
-            self.freeze()
-        self.layer = layer
-        self.layer_idx = layer_idx
-        if layer == "hidden":
-            assert layer_idx is not None
-            assert 0 <= abs(layer_idx) <= 12
-
-    def freeze(self):
-        self.transformer = self.transformer.eval()
-        #self.train = disabled_train
-        for param in self.parameters():
-            param.requires_grad = False
-
-    def forward(self, text):
-        batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
-                                        return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
-        tokens = batch_encoding["input_ids"].to(self.device)
-        outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden")
-        if self.layer == "last":
-            z = outputs.last_hidden_state
-        elif self.layer == "pooled":
-            z = outputs.pooler_output[:, None, :]
-        else:
-            z = outputs.hidden_states[self.layer_idx]
-        return z
-
-    def encode(self, text):
-        return self(text)
-
-
-class FrozenOpenCLIPEmbedder(AbstractEncoder):
-    """
-    Uses the OpenCLIP transformer encoder for text
-    """
-    LAYERS = [
-        #"pooled",
-        "last",
-        "penultimate"
-    ]
-    def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
-                 freeze=True, layer="last"):
-        super().__init__()
-        assert layer in self.LAYERS
-        model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
-        del model.visual
-        self.model = model
-
-        self.device = device
-        self.max_length = max_length
-        if freeze:
-            self.freeze()
-        self.layer = layer
-        if self.layer == "last":
-            self.layer_idx = 0
-        elif self.layer == "penultimate":
-            self.layer_idx = 1
-        else:
-            raise NotImplementedError()
-
-    def freeze(self):
-        self.model = self.model.eval()
-        for param in self.parameters():
-            param.requires_grad = False
-
-    def forward(self, text):
-        tokens = open_clip.tokenize(text)
-        z = self.encode_with_transformer(tokens.to(self.device))
-        return z
-
-    def encode_with_transformer(self, text):
-        x = self.model.token_embedding(text)  # [batch_size, n_ctx, d_model]
-        x = x + self.model.positional_embedding
-        x = x.permute(1, 0, 2)  # NLD -> LND
-        x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
-        x = x.permute(1, 0, 2)  # LND -> NLD
-        x = self.model.ln_final(x)
-        return x
-
-    def text_transformer_forward(self, x: torch.Tensor, attn_mask = None):
-        for i, r in enumerate(self.model.transformer.resblocks):
-            if i == len(self.model.transformer.resblocks) - self.layer_idx:
-                break
-            if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
-                x = checkpoint(r, x, attn_mask)
-            else:
-                x = r(x, attn_mask=attn_mask)
-        return x
-
-    def encode(self, text):
-        return self(text)
-
-
-class FrozenCLIPT5Encoder(AbstractEncoder):
-    def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda",
-                 clip_max_length=77, t5_max_length=77):
-        super().__init__()
-        self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
-        self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length)
-        print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, "
-              f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params.")
-
-    def encode(self, text):
-        return self(text)
-
-    def forward(self, text):
-        clip_z = self.clip_encoder.encode(text)
-        t5_z = self.t5_encoder.encode(text)
-        return [clip_z, t5_z]
-
-
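With this file removed, the Space no longer builds prompt embeddings itself; the diffusers pipeline ships its own text encoder. For reference, the core of `FrozenCLIPEmbedder(layer="last")` reduces to the standard `transformers` calls already visible in the deleted imports (a minimal CPU-only sketch, not part of the commit):

import torch
from transformers import CLIPTokenizer, CLIPTextModel

version = "openai/clip-vit-large-patch14"
tokenizer = CLIPTokenizer.from_pretrained(version)
text_model = CLIPTextModel.from_pretrained(version).eval()

batch = tokenizer(["a photograph of an astronaut riding a horse"],
                  truncation=True, max_length=77,
                  padding="max_length", return_tensors="pt")

with torch.no_grad():
    # last_hidden_state is the `z` that FrozenCLIPEmbedder returned
    z = text_model(input_ids=batch["input_ids"]).last_hidden_state

print(z.shape)  # torch.Size([1, 77, 768]) for this checkpoint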
ldm/modules/image_degradation/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
-from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
ldm/modules/image_degradation/bsrgan.py
DELETED
@@ -1,730 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-# --------------------------------------------
-# Super-Resolution
-# --------------------------------------------
-#
-# Kai Zhang (cskaizhang@gmail.com)
-# https://github.com/cszn
-# From 2019/03--2021/08
-# --------------------------------------------
-"""
-
-import numpy as np
-import cv2
-import torch
-
-from functools import partial
-import random
-from scipy import ndimage
-import scipy
-import scipy.stats as ss
-from scipy.interpolate import interp2d
-from scipy.linalg import orth
-import albumentations
-
-import ldm.modules.image_degradation.utils_image as util
-
-
-def modcrop_np(img, sf):
-    '''
-    Args:
-        img: numpy image, WxH or WxHxC
-        sf: scale factor
-    Return:
-        cropped image
-    '''
-    w, h = img.shape[:2]
-    im = np.copy(img)
-    return im[:w - w % sf, :h - h % sf, ...]
-
-
-"""
-# --------------------------------------------
-# anisotropic Gaussian kernels
-# --------------------------------------------
-"""
-
-
-def analytic_kernel(k):
-    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
-    k_size = k.shape[0]
-    # Calculate the big kernels size
-    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
-    # Loop over the small kernel to fill the big one
-    for r in range(k_size):
-        for c in range(k_size):
-            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
-    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
-    crop = k_size // 2
-    cropped_big_k = big_k[crop:-crop, crop:-crop]
-    # Normalize to 1
-    return cropped_big_k / cropped_big_k.sum()
-
-
-def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
-    """ generate an anisotropic Gaussian kernel
-    Args:
-        ksize : e.g., 15, kernel size
-        theta : [0,  pi], rotation angle range
-        l1    : [0.1,50], scaling of eigenvalues
-        l2    : [0.1,l1], scaling of eigenvalues
-        If l1 = l2, will get an isotropic Gaussian kernel.
-    Returns:
-        k     : kernel
-    """
-
-    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
-    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
-    D = np.array([[l1, 0], [0, l2]])
-    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
-    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
-
-    return k
-
-
-def gm_blur_kernel(mean, cov, size=15):
-    center = size / 2.0 + 0.5
-    k = np.zeros([size, size])
-    for y in range(size):
-        for x in range(size):
-            cy = y - center + 1
-            cx = x - center + 1
-            k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
-
-    k = k / np.sum(k)
-    return k
-
-
-def shift_pixel(x, sf, upper_left=True):
-    """shift pixel for super-resolution with different scale factors
-    Args:
-        x: WxHxC or WxH
-        sf: scale factor
-        upper_left: shift direction
-    """
-    h, w = x.shape[:2]
-    shift = (sf - 1) * 0.5
-    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
-    if upper_left:
-        x1 = xv + shift
-        y1 = yv + shift
-    else:
-        x1 = xv - shift
-        y1 = yv - shift
-
-    x1 = np.clip(x1, 0, w - 1)
-    y1 = np.clip(y1, 0, h - 1)
-
-    if x.ndim == 2:
-        x = interp2d(xv, yv, x)(x1, y1)
-    if x.ndim == 3:
-        for i in range(x.shape[-1]):
-            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
-
-    return x
-
-
-def blur(x, k):
-    '''
-    x: image, NxcxHxW
-    k: kernel, Nx1xhxw
-    '''
-    n, c = x.shape[:2]
-    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
-    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
-    k = k.repeat(1, c, 1, 1)
-    k = k.view(-1, 1, k.shape[2], k.shape[3])
-    x = x.view(1, -1, x.shape[2], x.shape[3])
-    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
-    x = x.view(n, c, x.shape[2], x.shape[3])
-
-    return x
-
-
-def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
-    """"
-    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
-    # Kai Zhang
-    # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
-    # max_var = 2.5 * sf
-    """
-    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
-    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
-    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
-    theta = np.random.rand() * np.pi  # random theta
-    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
-
-    # Set COV matrix using Lambdas and Theta
-    LAMBDA = np.diag([lambda_1, lambda_2])
-    Q = np.array([[np.cos(theta), -np.sin(theta)],
-                  [np.sin(theta), np.cos(theta)]])
-    SIGMA = Q @ LAMBDA @ Q.T
-    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
-
-    # Set expectation position (shifting kernel for aligned image)
-    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
-    MU = MU[None, None, :, None]
-
-    # Create meshgrid for Gaussian
-    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
-    Z = np.stack([X, Y], 2)[:, :, :, None]
-
-    # Calcualte Gaussian for every pixel of the kernel
-    ZZ = Z - MU
-    ZZ_t = ZZ.transpose(0, 1, 3, 2)
-    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
-
-    # shift the kernel so it will be centered
-    # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
-
-    # Normalize the kernel and return
-    # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
-    kernel = raw_kernel / np.sum(raw_kernel)
-    return kernel
-
-
-def fspecial_gaussian(hsize, sigma):
-    hsize = [hsize, hsize]
-    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
-    std = sigma
-    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
-    arg = -(x * x + y * y) / (2 * std * std)
-    h = np.exp(arg)
-    h[h < scipy.finfo(float).eps * h.max()] = 0
-    sumh = h.sum()
-    if sumh != 0:
-        h = h / sumh
-    return h
-
-
-def fspecial_laplacian(alpha):
-    alpha = max([0, min([alpha, 1])])
-    h1 = alpha / (alpha + 1)
-    h2 = (1 - alpha) / (alpha + 1)
-    h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
-    h = np.array(h)
-    return h
-
-
-def fspecial(filter_type, *args, **kwargs):
-    '''
-    python code from:
-    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
-    '''
-    if filter_type == 'gaussian':
-        return fspecial_gaussian(*args, **kwargs)
-    if filter_type == 'laplacian':
-        return fspecial_laplacian(*args, **kwargs)
-
-
-"""
-# --------------------------------------------
-# degradation models
-# --------------------------------------------
-"""
-
-
-def bicubic_degradation(x, sf=3):
-    '''
-    Args:
-        x: HxWxC image, [0, 1]
-        sf: down-scale factor
-    Return:
-        bicubicly downsampled LR image
-    '''
-    x = util.imresize_np(x, scale=1 / sf)
-    return x
-
-
-def srmd_degradation(x, k, sf=3):
-    ''' blur + bicubic downsampling
-    Args:
-        x: HxWxC image, [0, 1]
-        k: hxw, double
-        sf: down-scale factor
-    Return:
-        downsampled LR image
-    Reference:
-        @inproceedings{zhang2018learning,
-          title={Learning a single convolutional super-resolution network for multiple degradations},
-          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
-          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-          pages={3262--3271},
-          year={2018}
-        }
-    '''
-    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
-    x = bicubic_degradation(x, sf=sf)
-    return x
-
-
-def dpsr_degradation(x, k, sf=3):
-    ''' bicubic downsampling + blur
-    Args:
-        x: HxWxC image, [0, 1]
-        k: hxw, double
-        sf: down-scale factor
-    Return:
-        downsampled LR image
-    Reference:
-        @inproceedings{zhang2019deep,
-          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
-          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
-          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-          pages={1671--1681},
-          year={2019}
-        }
-    '''
-    x = bicubic_degradation(x, sf=sf)
-    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
-    return x
-
-
-def classical_degradation(x, k, sf=3):
-    ''' blur + downsampling
-    Args:
-        x: HxWxC image, [0, 1]/[0, 255]
-        k: hxw, double
-        sf: down-scale factor
-    Return:
-        downsampled LR image
-    '''
-    x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
-    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
-    st = 0
-    return x[st::sf, st::sf, ...]
-
-
-def add_sharpening(img, weight=0.5, radius=50, threshold=10):
-    """USM sharpening. borrowed from real-ESRGAN
-    Input image: I; Blurry image: B.
-    1. K = I + weight * (I - B)
-    2. Mask = 1 if abs(I - B) > threshold, else: 0
-    3. Blur mask:
-    4. Out = Mask * K + (1 - Mask) * I
-    Args:
-        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
-        weight (float): Sharp weight. Default: 1.
-        radius (float): Kernel size of Gaussian blur. Default: 50.
-        threshold (int):
-    """
-    if radius % 2 == 0:
-        radius += 1
-    blur = cv2.GaussianBlur(img, (radius, radius), 0)
-    residual = img - blur
-    mask = np.abs(residual) * 255 > threshold
-    mask = mask.astype('float32')
-    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
-
-    K = img + weight * residual
-    K = np.clip(K, 0, 1)
-    return soft_mask * K + (1 - soft_mask) * img
-
-
-def add_blur(img, sf=4):
-    wd2 = 4.0 + sf
-    wd = 2.0 + 0.2 * sf
-    if random.random() < 0.5:
-        l1 = wd2 * random.random()
-        l2 = wd2 * random.random()
-        k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
-    else:
-        k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
-    img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
-
-    return img
-
-
-def add_resize(img, sf=4):
-    rnum = np.random.rand()
-    if rnum > 0.8:  # up
-        sf1 = random.uniform(1, 2)
-    elif rnum < 0.7:  # down
-        sf1 = random.uniform(0.5 / sf, 1)
-    else:
-        sf1 = 1.0
-    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
-    img = np.clip(img, 0.0, 1.0)
-
-    return img
-
-
-# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-#     noise_level = random.randint(noise_level1, noise_level2)
-#     rnum = np.random.rand()
-#     if rnum > 0.6:  # add color Gaussian noise
-#         img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-#     elif rnum < 0.4:  # add grayscale Gaussian noise
-#         img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-#     else:  # add  noise
-#         L = noise_level2 / 255.
-#         D = np.diag(np.random.rand(3))
-#         U = orth(np.random.rand(3, 3))
-#         conv = np.dot(np.dot(np.transpose(U), D), U)
-#         img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-#     img = np.clip(img, 0.0, 1.0)
-#     return img
-
-def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-    noise_level = random.randint(noise_level1, noise_level2)
-    rnum = np.random.rand()
-    if rnum > 0.6:  # add color Gaussian noise
-        img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-    elif rnum < 0.4:  # add grayscale Gaussian noise
-        img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-    else:  # add  noise
-        L = noise_level2 / 255.
-        D = np.diag(np.random.rand(3))
-        U = orth(np.random.rand(3, 3))
-        conv = np.dot(np.dot(np.transpose(U), D), U)
-        img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-    img = np.clip(img, 0.0, 1.0)
-    return img
-
-
-def add_speckle_noise(img, noise_level1=2, noise_level2=25):
-    noise_level = random.randint(noise_level1, noise_level2)
-    img = np.clip(img, 0.0, 1.0)
-    rnum = random.random()
-    if rnum > 0.6:
-        img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-    elif rnum < 0.4:
-        img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-    else:
-        L = noise_level2 / 255.
-        D = np.diag(np.random.rand(3))
-        U = orth(np.random.rand(3, 3))
-        conv = np.dot(np.dot(np.transpose(U), D), U)
-        img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-    img = np.clip(img, 0.0, 1.0)
-    return img
-
-
-def add_Poisson_noise(img):
-    img = np.clip((img * 255.0).round(), 0, 255) / 255.
-    vals = 10 ** (2 * random.random() + 2.0)  # [2, 4]
-    if random.random() < 0.5:
-        img = np.random.poisson(img * vals).astype(np.float32) / vals
-    else:
-        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
-        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
-        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
-        img += noise_gray[:, :, np.newaxis]
-    img = np.clip(img, 0.0, 1.0)
-    return img
-
-
-def add_JPEG_noise(img):
-    quality_factor = random.randint(30, 95)
-    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
-    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
-    img = cv2.imdecode(encimg, 1)
-    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
-    return img
-
-
-def random_crop(lq, hq, sf=4, lq_patchsize=64):
-    h, w = lq.shape[:2]
-    rnd_h = random.randint(0, h - lq_patchsize)
-    rnd_w = random.randint(0, w - lq_patchsize)
-    lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
-
-    rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
-    hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
-    return lq, hq
-
-
-def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
-    """
-    This is the degradation model of BSRGAN from the paper
-    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-    ----------
-    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
-    sf: scale factor
-    isp_model: camera ISP model
-    Returns
-    -------
-    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
-    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
-    """
-    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-    sf_ori = sf
-
-    h1, w1 = img.shape[:2]
-    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
-    h, w = img.shape[:2]
-
-    if h < lq_patchsize * sf or w < lq_patchsize * sf:
-        raise ValueError(f'img size ({h1}X{w1}) is too small!')
-
-    hq = img.copy()
-
-    if sf == 4 and random.random() < scale2_prob:  # downsample1
-        if np.random.rand() < 0.5:
-            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
-                             interpolation=random.choice([1, 2, 3]))
-        else:
-            img = util.imresize_np(img, 1 / 2, True)
-        img = np.clip(img, 0.0, 1.0)
-        sf = 2
-
-    shuffle_order = random.sample(range(7), 7)
-    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-    if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
-    for i in shuffle_order:
-
-        if i == 0:
-            img = add_blur(img, sf=sf)
-
-        elif i == 1:
-            img = add_blur(img, sf=sf)
-
-        elif i == 2:
-            a, b = img.shape[1], img.shape[0]
-            # downsample2
-            if random.random() < 0.75:
-                sf1 = random.uniform(1, 2 * sf)
-                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
-                                 interpolation=random.choice([1, 2, 3]))
-            else:
-                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
-                k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
-                img = img[0::sf, 0::sf, ...]  # nearest downsampling
-            img = np.clip(img, 0.0, 1.0)
-
-        elif i == 3:
-            # downsample3
-            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
-            img = np.clip(img, 0.0, 1.0)
-
-        elif i == 4:
-            # add Gaussian noise
-            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
-
-        elif i == 5:
-            # add JPEG noise
-            if random.random() < jpeg_prob:
-                img = add_JPEG_noise(img)
-
-        elif i == 6:
-            # add processed camera sensor noise
-            if random.random() < isp_prob and isp_model is not None:
-                with torch.no_grad():
-                    img, hq = isp_model.forward(img.copy(), hq)
-
-    # add final JPEG compression noise
-    img = add_JPEG_noise(img)
-
-    # random crop
-    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
-
-    return img, hq
-
-
-# todo no isp_model?
-def degradation_bsrgan_variant(image, sf=4, isp_model=None):
-    """
-    This is the degradation model of BSRGAN from the paper
-    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-    ----------
-    sf: scale factor
-    isp_model: camera ISP model
-    Returns
-    -------
-    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
-    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
-    """
-    image = util.uint2single(image)
-    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-    sf_ori = sf
-
-    h1, w1 = image.shape[:2]
-    image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
-    h, w = image.shape[:2]
-
-    hq = image.copy()
-
-    if sf == 4 and random.random() < scale2_prob:  # downsample1
-        if np.random.rand() < 0.5:
-            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
-                               interpolation=random.choice([1, 2, 3]))
-        else:
-            image = util.imresize_np(image, 1 / 2, True)
-        image = np.clip(image, 0.0, 1.0)
-        sf = 2
-
-    shuffle_order = random.sample(range(7), 7)
-    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-    if idx1 > idx2:  # keep downsample3 last
-        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
-    for i in shuffle_order:
-
-        if i == 0:
-            image = add_blur(image, sf=sf)
-
-        elif i == 1:
-            image = add_blur(image, sf=sf)
-
-        elif i == 2:
-            a, b = image.shape[1], image.shape[0]
-            # downsample2
-            if random.random() < 0.75:
-                sf1 = random.uniform(1, 2 * sf)
-                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
-                                   interpolation=random.choice([1, 2, 3]))
-            else:
-                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
-                k_shifted = shift_pixel(k, sf)
-                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
-                image = image[0::sf, 0::sf, ...]  # nearest downsampling
-            image = np.clip(image, 0.0, 1.0)
-
-        elif i == 3:
-            # downsample3
-            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
-            image = np.clip(image, 0.0, 1.0)
-
-        elif i == 4:
-            # add Gaussian noise
-            image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
-
-        elif i == 5:
-            # add JPEG noise
-            if random.random() < jpeg_prob:
-                image = add_JPEG_noise(image)
-
-        # elif i == 6:
-        #     # add processed camera sensor noise
-        #     if random.random() < isp_prob and isp_model is not None:
-        #         with torch.no_grad():
-        #             img, hq = isp_model.forward(img.copy(), hq)
-
-    # add final JPEG compression noise
-    image = add_JPEG_noise(image)
-    image = util.single2uint(image)
-    example = {"image":image}
-    return example
-
-
-# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...
-def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
-    """
-    This is an extended degradation model by combining
-    the degradation models of BSRGAN and Real-ESRGAN
-    ----------
-    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
-    sf: scale factor
-    use_shuffle: the degradation shuffle
-    use_sharp: sharpening the img
-    Returns
-    -------
-    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
-    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
-    """
-
-    h1, w1 = img.shape[:2]
-    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
-    h, w = img.shape[:2]
-
-    if h < lq_patchsize * sf or w < lq_patchsize * sf:
-        raise ValueError(f'img size ({h1}X{w1}) is too small!')
-
-    if use_sharp:
-        img = add_sharpening(img)
-    hq = img.copy()
-
-    if random.random() < shuffle_prob:
-        shuffle_order = random.sample(range(13), 13)
-    else:
-        shuffle_order = list(range(13))
-        # local shuffle for noise, JPEG is always the last one
-        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
-        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
-
-    poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
-
-    for i in shuffle_order:
-        if i == 0:
-            img = add_blur(img, sf=sf)
-        elif i == 1:
-            img = add_resize(img, sf=sf)
-        elif i == 2:
-            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
-        elif i == 3:
-            if random.random() < poisson_prob:
-                img = add_Poisson_noise(img)
-        elif i == 4:
-            if random.random() < speckle_prob:
-                img = add_speckle_noise(img)
-        elif i == 5:
-            if random.random() < isp_prob and isp_model is not None:
-                with torch.no_grad():
-                    img, hq = isp_model.forward(img.copy(), hq)
-        elif i == 6:
-            img = add_JPEG_noise(img)
-        elif i == 7:
-            img = add_blur(img, sf=sf)
-        elif i == 8:
-            img = add_resize(img, sf=sf)
-        elif i == 9:
-            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
-        elif i == 10:
-            if random.random() < poisson_prob:
-                img = add_Poisson_noise(img)
-        elif i == 11:
-            if random.random() < speckle_prob:
-                img = add_speckle_noise(img)
-        elif i == 12:
-            if random.random() < isp_prob and isp_model is not None:
-                with torch.no_grad():
-                    img, hq = isp_model.forward(img.copy(), hq)
-        else:
-            print('check the shuffle!')
-
-    # resize to desired size
-    img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
-                     interpolation=random.choice([1, 2, 3]))
-
-    # add final JPEG compression noise
-    img = add_JPEG_noise(img)
-
-    # random crop
-    img, hq = random_crop(img, hq, sf, lq_patchsize)
-
-    return img, hq
-
-
-if __name__ == '__main__':
-    print("hey")
-    img = util.imread_uint('utils/test.png', 3)
-    print(img)
-    img = util.uint2single(img)
-    print(img)
-    img = img[:448, :448]
-    h = img.shape[0] // 4
-    print("resizing to", h)
-    sf = 4
-    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
-    for i in range(20):
-        print(i)
-        img_lq = deg_fn(img)
-        print(img_lq)
-        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"]
-        print(img_lq.shape)
-        print("bicubic", img_lq_bicubic.shape)
-        print(img_hq.shape)
-        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                interpolation=0)
-        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                        interpolation=0)
-        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
-        util.imsave(img_concat, str(i) + '.png')
-
-
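The pipeline above amounts to: blur with a random Gaussian kernel, downsample, add noise, JPEG-compress, in a shuffled order. A compact self-contained sketch of the blur-and-downsample core (illustrative only, not part of the commit; the kernel is built the same way as `fspecial_gaussian` and the strided slicing mirrors `classical_degradation`):

import numpy as np
from scipy import ndimage

def gaussian_kernel(size=15, sigma=2.0):
    # centered grid, normalized to sum to 1, as in fspecial_gaussian
    half = (size - 1) / 2.0
    x, y = np.meshgrid(np.arange(-half, half + 1), np.arange(-half, half + 1))
    h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
    return h / h.sum()

img = np.random.rand(64, 64, 3).astype(np.float32)  # toy HxWxC image in [0, 1]
k = gaussian_kernel(15, 2.0)

sf = 4
blurred = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='wrap')
lr = blurred[0::sf, 0::sf, ...]                     # nearest downsampling
print(lr.shape)                                     # (16, 16, 3)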
ldm/modules/image_degradation/bsrgan_light.py
DELETED
@@ -1,651 +0,0 @@
-# -*- coding: utf-8 -*-
-import numpy as np
-import cv2
-import torch
-
-from functools import partial
-import random
-from scipy import ndimage
-import scipy
-import scipy.stats as ss
-from scipy.interpolate import interp2d
-from scipy.linalg import orth
-import albumentations
-
-import ldm.modules.image_degradation.utils_image as util
-
-"""
-# --------------------------------------------
-# Super-Resolution
-# --------------------------------------------
-#
-# Kai Zhang (cskaizhang@gmail.com)
-# https://github.com/cszn
-# From 2019/03--2021/08
-# --------------------------------------------
-"""
-
-def modcrop_np(img, sf):
-    '''
-    Args:
-        img: numpy image, WxH or WxHxC
-        sf: scale factor
-    Return:
-        cropped image
-    '''
-    w, h = img.shape[:2]
-    im = np.copy(img)
-    return im[:w - w % sf, :h - h % sf, ...]
-
-
-"""
-# --------------------------------------------
-# anisotropic Gaussian kernels
-# --------------------------------------------
-"""
-
-
-def analytic_kernel(k):
-    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
-    k_size = k.shape[0]
-    # Calculate the big kernels size
-    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
-    # Loop over the small kernel to fill the big one
-    for r in range(k_size):
-        for c in range(k_size):
-            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
-    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
-    crop = k_size // 2
-    cropped_big_k = big_k[crop:-crop, crop:-crop]
-    # Normalize to 1
-    return cropped_big_k / cropped_big_k.sum()
-
-
-def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
-    """ generate an anisotropic Gaussian kernel
-    Args:
-        ksize : e.g., 15, kernel size
-        theta : [0,  pi], rotation angle range
-        l1    : [0.1,50], scaling of eigenvalues
-        l2    : [0.1,l1], scaling of eigenvalues
-        If l1 = l2, will get an isotropic Gaussian kernel.
-    Returns:
-        k     : kernel
-    """
-
-    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
-    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
-    D = np.array([[l1, 0], [0, l2]])
-    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
-    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
-
-    return k
-
-
-def gm_blur_kernel(mean, cov, size=15):
-    center = size / 2.0 + 0.5
-    k = np.zeros([size, size])
-    for y in range(size):
-        for x in range(size):
-            cy = y - center + 1
-            cx = x - center + 1
-            k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
-
-    k = k / np.sum(k)
-    return k
-
-
-def shift_pixel(x, sf, upper_left=True):
-    """shift pixel for super-resolution with different scale factors
-    Args:
-        x: WxHxC or WxH
-        sf: scale factor
-        upper_left: shift direction
-    """
-    h, w = x.shape[:2]
-    shift = (sf - 1) * 0.5
-    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
-    if upper_left:
-        x1 = xv + shift
-        y1 = yv + shift
-    else:
-        x1 = xv - shift
-        y1 = yv - shift
-
-    x1 = np.clip(x1, 0, w - 1)
-    y1 = np.clip(y1, 0, h - 1)
-
-    if x.ndim == 2:
-        x = interp2d(xv, yv, x)(x1, y1)
-    if x.ndim == 3:
-        for i in range(x.shape[-1]):
-            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
-
-    return x
-
-
-def blur(x, k):
-    '''
-    x: image, NxcxHxW
-    k: kernel, Nx1xhxw
-    '''
-    n, c = x.shape[:2]
-    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
-    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
-    k = k.repeat(1, c, 1, 1)
-    k = k.view(-1, 1, k.shape[2], k.shape[3])
-    x = x.view(1, -1, x.shape[2], x.shape[3])
-    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
-    x = x.view(n, c, x.shape[2], x.shape[3])
-
-    return x
-
-
-def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
-    """"
-    # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
-    # Kai Zhang
-    # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
-    # max_var = 2.5 * sf
-    """
-    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
-    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
-    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
-    theta = np.random.rand() * np.pi  # random theta
-    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
-
-    # Set COV matrix using Lambdas and Theta
-    LAMBDA = np.diag([lambda_1, lambda_2])
-    Q = np.array([[np.cos(theta), -np.sin(theta)],
-                  [np.sin(theta), np.cos(theta)]])
-    SIGMA = Q @ LAMBDA @ Q.T
-    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
-
-    # Set expectation position (shifting kernel for aligned image)
-    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
-    MU = MU[None, None, :, None]
-
-    # Create meshgrid for Gaussian
-    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
-    Z = np.stack([X, Y], 2)[:, :, :, None]
-
-    # Calcualte Gaussian for every pixel of the kernel
-    ZZ = Z - MU
-    ZZ_t = ZZ.transpose(0, 1, 3, 2)
-    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
-
-    # shift the kernel so it will be centered
-    # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
-
-    # Normalize the kernel and return
-    # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
-    kernel = raw_kernel / np.sum(raw_kernel)
-    return kernel
-
-
-def fspecial_gaussian(hsize, sigma):
-    hsize = [hsize, hsize]
-    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
-    std = sigma
-    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
-    arg = -(x * x + y * y) / (2 * std * std)
-    h = np.exp(arg)
-    h[h < scipy.finfo(float).eps * h.max()] = 0
-    sumh = h.sum()
-    if sumh != 0:
-        h = h / sumh
-    return h
-
-
-def fspecial_laplacian(alpha):
-    alpha = max([0, min([alpha, 1])])
-    h1 = alpha / (alpha + 1)
-    h2 = (1 - alpha) / (alpha + 1)
-    h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
-    h = np.array(h)
-    return h
-
-
-def fspecial(filter_type, *args, **kwargs):
-    '''
-    python code from:
-    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
-    '''
-    if filter_type == 'gaussian':
-        return fspecial_gaussian(*args, **kwargs)
-    if filter_type == 'laplacian':
-        return fspecial_laplacian(*args, **kwargs)
-
-
-"""
-# --------------------------------------------
-# degradation models
-# --------------------------------------------
-"""
-
-
-def bicubic_degradation(x, sf=3):
-    '''
-    Args:
-        x: HxWxC image, [0, 1]
-        sf: down-scale factor
-    Return:
-        bicubicly downsampled LR image
-    '''
-    x = util.imresize_np(x, scale=1 / sf)
-    return x
-
-
-def srmd_degradation(x, k, sf=3):
-    ''' blur + bicubic downsampling
-    Args:
-        x: HxWxC image, [0, 1]
-        k: hxw, double
-        sf: down-scale factor
-    Return:
-        downsampled LR image
-    Reference:
-        @inproceedings{zhang2018learning,
-          title={Learning a single convolutional super-resolution network for multiple degradations},
-          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
-          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-          pages={3262--3271},
-          year={2018}
-        }
-    '''
-    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
-    x = bicubic_degradation(x, sf=sf)
-    return x
-
-
-def dpsr_degradation(x, k, sf=3):
-    ''' bicubic downsampling + blur
-    Args:
-        x: HxWxC image, [0, 1]
-        k: hxw, double
-        sf: down-scale factor
-    Return:
-        downsampled LR image
-    Reference:
-        @inproceedings{zhang2019deep,
-          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
-          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
-          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-          pages={1671--1681},
-          year={2019}
-        }
-    '''
-    x = bicubic_degradation(x, sf=sf)
-    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
-    return x
-
-
-def classical_degradation(x, k, sf=3):
-    ''' blur + downsampling
-    Args:
-        x: HxWxC image, [0, 1]/[0, 255]
-        k: hxw, double
-        sf: down-scale factor
-    Return:
-        downsampled LR image
-    '''
-    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
-    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
-    st = 0
-    return x[st::sf, st::sf, ...]
-
-
-def add_sharpening(img, weight=0.5, radius=50, threshold=10):
-    """USM sharpening. borrowed from real-ESRGAN
-    Input image: I; Blurry image: B.
-    1. K = I + weight * (I - B)
-    2. Mask = 1 if abs(I - B) > threshold, else: 0
-    3. Blur mask:
-    4. Out = Mask * K + (1 - Mask) * I
-    Args:
-        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
-        weight (float): Sharp weight. Default: 1.
-        radius (float): Kernel size of Gaussian blur. Default: 50.
-        threshold (int):
-    """
-    if radius % 2 == 0:
-        radius += 1
-    blur = cv2.GaussianBlur(img, (radius, radius), 0)
-    residual = img - blur
-    mask = np.abs(residual) * 255 > threshold
-    mask = mask.astype('float32')
-    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
-
-    K = img + weight * residual
-    K = np.clip(K, 0, 1)
-    return soft_mask * K + (1 - soft_mask) * img
-
-
-def add_blur(img, sf=4):
-    wd2 = 4.0 + sf
-    wd = 2.0 + 0.2 * sf
-
-    wd2 = wd2/4
-    wd = wd/4
-
-    if random.random() < 0.5:
-        l1 = wd2 * random.random()
-        l2 = wd2 * random.random()
-        k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
-    else:
-        k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
-    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
-
-    return img
-
-
-def add_resize(img, sf=4):
-    rnum = np.random.rand()
-    if rnum > 0.8:  # up
-        sf1 = random.uniform(1, 2)
-    elif rnum < 0.7:  # down
-        sf1 = random.uniform(0.5 / sf, 1)
-    else:
-        sf1 = 1.0
-    img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
-    img = np.clip(img, 0.0, 1.0)
-
-    return img
-
-
-# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-#     noise_level = random.randint(noise_level1, noise_level2)
-#     rnum = np.random.rand()
-#     if rnum > 0.6:  # add color Gaussian noise
-#         img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-#     elif rnum < 0.4:  # add grayscale Gaussian noise
-#         img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-#     else:  # add  noise
-#         L = noise_level2 / 255.
-#         D = np.diag(np.random.rand(3))
-#         U = orth(np.random.rand(3, 3))
-#         conv = np.dot(np.dot(np.transpose(U), D), U)
-#         img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-#     img = np.clip(img, 0.0, 1.0)
-#     return img
-
-def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-    noise_level = random.randint(noise_level1, noise_level2)
|
374 |
-
rnum = np.random.rand()
|
375 |
-
if rnum > 0.6: # add color Gaussian noise
|
376 |
-
img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
377 |
-
elif rnum < 0.4: # add grayscale Gaussian noise
|
378 |
-
img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
379 |
-
else: # add noise
|
380 |
-
L = noise_level2 / 255.
|
381 |
-
D = np.diag(np.random.rand(3))
|
382 |
-
U = orth(np.random.rand(3, 3))
|
383 |
-
conv = np.dot(np.dot(np.transpose(U), D), U)
|
384 |
-
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
385 |
-
img = np.clip(img, 0.0, 1.0)
|
386 |
-
return img
|
387 |
-
|
388 |
-
|
389 |
-
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
|
390 |
-
noise_level = random.randint(noise_level1, noise_level2)
|
391 |
-
img = np.clip(img, 0.0, 1.0)
|
392 |
-
rnum = random.random()
|
393 |
-
if rnum > 0.6:
|
394 |
-
img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
|
395 |
-
elif rnum < 0.4:
|
396 |
-
img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
|
397 |
-
else:
|
398 |
-
L = noise_level2 / 255.
|
399 |
-
D = np.diag(np.random.rand(3))
|
400 |
-
U = orth(np.random.rand(3, 3))
|
401 |
-
conv = np.dot(np.dot(np.transpose(U), D), U)
|
402 |
-
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
|
403 |
-
img = np.clip(img, 0.0, 1.0)
|
404 |
-
return img
|
405 |
-
|
406 |
-
|
407 |
-
def add_Poisson_noise(img):
|
408 |
-
img = np.clip((img * 255.0).round(), 0, 255) / 255.
|
409 |
-
vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
|
410 |
-
if random.random() < 0.5:
|
411 |
-
img = np.random.poisson(img * vals).astype(np.float32) / vals
|
412 |
-
else:
|
413 |
-
img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
|
414 |
-
img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
|
415 |
-
noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
|
416 |
-
img += noise_gray[:, :, np.newaxis]
|
417 |
-
img = np.clip(img, 0.0, 1.0)
|
418 |
-
return img
|
419 |
-
|
420 |
-
|
421 |
-
def add_JPEG_noise(img):
|
422 |
-
quality_factor = random.randint(80, 95)
|
423 |
-
img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
|
424 |
-
result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
|
425 |
-
img = cv2.imdecode(encimg, 1)
|
426 |
-
img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
|
427 |
-
return img
|
428 |
-
|
429 |
-
|
430 |
-
def random_crop(lq, hq, sf=4, lq_patchsize=64):
|
431 |
-
h, w = lq.shape[:2]
|
432 |
-
rnd_h = random.randint(0, h - lq_patchsize)
|
433 |
-
rnd_w = random.randint(0, w - lq_patchsize)
|
434 |
-
lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
|
435 |
-
|
436 |
-
rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
|
437 |
-
hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
|
438 |
-
return lq, hq
|
439 |
-
|
440 |
-
|
441 |
-
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
|
442 |
-
"""
|
443 |
-
This is the degradation model of BSRGAN from the paper
|
444 |
-
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
|
445 |
-
----------
|
446 |
-
img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
|
447 |
-
sf: scale factor
|
448 |
-
isp_model: camera ISP model
|
449 |
-
Returns
|
450 |
-
-------
|
451 |
-
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
|
452 |
-
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
|
453 |
-
"""
|
454 |
-
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
|
455 |
-
sf_ori = sf
|
456 |
-
|
457 |
-
h1, w1 = img.shape[:2]
|
458 |
-
img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
|
459 |
-
h, w = img.shape[:2]
|
460 |
-
|
461 |
-
if h < lq_patchsize * sf or w < lq_patchsize * sf:
|
462 |
-
raise ValueError(f'img size ({h1}X{w1}) is too small!')
|
463 |
-
|
464 |
-
hq = img.copy()
|
465 |
-
|
466 |
-
if sf == 4 and random.random() < scale2_prob: # downsample1
|
467 |
-
if np.random.rand() < 0.5:
|
468 |
-
img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
|
469 |
-
interpolation=random.choice([1, 2, 3]))
|
470 |
-
else:
|
471 |
-
img = util.imresize_np(img, 1 / 2, True)
|
472 |
-
img = np.clip(img, 0.0, 1.0)
|
473 |
-
sf = 2
|
474 |
-
|
475 |
-
shuffle_order = random.sample(range(7), 7)
|
476 |
-
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
|
477 |
-
if idx1 > idx2: # keep downsample3 last
|
478 |
-
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
|
479 |
-
|
480 |
-
for i in shuffle_order:
|
481 |
-
|
482 |
-
if i == 0:
|
483 |
-
img = add_blur(img, sf=sf)
|
484 |
-
|
485 |
-
elif i == 1:
|
486 |
-
img = add_blur(img, sf=sf)
|
487 |
-
|
488 |
-
elif i == 2:
|
489 |
-
a, b = img.shape[1], img.shape[0]
|
490 |
-
# downsample2
|
491 |
-
if random.random() < 0.75:
|
492 |
-
sf1 = random.uniform(1, 2 * sf)
|
493 |
-
img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
|
494 |
-
interpolation=random.choice([1, 2, 3]))
|
495 |
-
else:
|
496 |
-
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
|
497 |
-
k_shifted = shift_pixel(k, sf)
|
498 |
-
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
|
499 |
-
img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
|
500 |
-
img = img[0::sf, 0::sf, ...] # nearest downsampling
|
501 |
-
img = np.clip(img, 0.0, 1.0)
|
502 |
-
|
503 |
-
elif i == 3:
|
504 |
-
# downsample3
|
505 |
-
img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
|
506 |
-
img = np.clip(img, 0.0, 1.0)
|
507 |
-
|
508 |
-
elif i == 4:
|
509 |
-
# add Gaussian noise
|
510 |
-
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
|
511 |
-
|
512 |
-
elif i == 5:
|
513 |
-
# add JPEG noise
|
514 |
-
if random.random() < jpeg_prob:
|
515 |
-
img = add_JPEG_noise(img)
|
516 |
-
|
517 |
-
elif i == 6:
|
518 |
-
# add processed camera sensor noise
|
519 |
-
if random.random() < isp_prob and isp_model is not None:
|
520 |
-
with torch.no_grad():
|
521 |
-
img, hq = isp_model.forward(img.copy(), hq)
|
522 |
-
|
523 |
-
# add final JPEG compression noise
|
524 |
-
img = add_JPEG_noise(img)
|
525 |
-
|
526 |
-
# random crop
|
527 |
-
img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
|
528 |
-
|
529 |
-
return img, hq
|
530 |
-
|
531 |
-
|
532 |
-
# todo no isp_model?
|
533 |
-
def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False):
|
534 |
-
"""
|
535 |
-
This is the degradation model of BSRGAN from the paper
|
536 |
-
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
|
537 |
-
----------
|
538 |
-
sf: scale factor
|
539 |
-
isp_model: camera ISP model
|
540 |
-
Returns
|
541 |
-
-------
|
542 |
-
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
|
543 |
-
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
|
544 |
-
"""
|
545 |
-
image = util.uint2single(image)
|
546 |
-
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
|
547 |
-
sf_ori = sf
|
548 |
-
|
549 |
-
h1, w1 = image.shape[:2]
|
550 |
-
image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
|
551 |
-
h, w = image.shape[:2]
|
552 |
-
|
553 |
-
hq = image.copy()
|
554 |
-
|
555 |
-
if sf == 4 and random.random() < scale2_prob: # downsample1
|
556 |
-
if np.random.rand() < 0.5:
|
557 |
-
image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
|
558 |
-
interpolation=random.choice([1, 2, 3]))
|
559 |
-
else:
|
560 |
-
image = util.imresize_np(image, 1 / 2, True)
|
561 |
-
image = np.clip(image, 0.0, 1.0)
|
562 |
-
sf = 2
|
563 |
-
|
564 |
-
shuffle_order = random.sample(range(7), 7)
|
565 |
-
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
|
566 |
-
if idx1 > idx2: # keep downsample3 last
|
567 |
-
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
|
568 |
-
|
569 |
-
for i in shuffle_order:
|
570 |
-
|
571 |
-
if i == 0:
|
572 |
-
image = add_blur(image, sf=sf)
|
573 |
-
|
574 |
-
# elif i == 1:
|
575 |
-
# image = add_blur(image, sf=sf)
|
576 |
-
|
577 |
-
if i == 0:
|
578 |
-
pass
|
579 |
-
|
580 |
-
elif i == 2:
|
581 |
-
a, b = image.shape[1], image.shape[0]
|
582 |
-
# downsample2
|
583 |
-
if random.random() < 0.8:
|
584 |
-
sf1 = random.uniform(1, 2 * sf)
|
585 |
-
image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
|
586 |
-
interpolation=random.choice([1, 2, 3]))
|
587 |
-
else:
|
588 |
-
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
|
589 |
-
k_shifted = shift_pixel(k, sf)
|
590 |
-
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
|
591 |
-
image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
|
592 |
-
image = image[0::sf, 0::sf, ...] # nearest downsampling
|
593 |
-
|
594 |
-
image = np.clip(image, 0.0, 1.0)
|
595 |
-
|
596 |
-
elif i == 3:
|
597 |
-
# downsample3
|
598 |
-
image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
|
599 |
-
image = np.clip(image, 0.0, 1.0)
|
600 |
-
|
601 |
-
elif i == 4:
|
602 |
-
# add Gaussian noise
|
603 |
-
image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
|
604 |
-
|
605 |
-
elif i == 5:
|
606 |
-
# add JPEG noise
|
607 |
-
if random.random() < jpeg_prob:
|
608 |
-
image = add_JPEG_noise(image)
|
609 |
-
#
|
610 |
-
# elif i == 6:
|
611 |
-
# # add processed camera sensor noise
|
612 |
-
# if random.random() < isp_prob and isp_model is not None:
|
613 |
-
# with torch.no_grad():
|
614 |
-
# img, hq = isp_model.forward(img.copy(), hq)
|
615 |
-
|
616 |
-
# add final JPEG compression noise
|
617 |
-
image = add_JPEG_noise(image)
|
618 |
-
image = util.single2uint(image)
|
619 |
-
if up:
|
620 |
-
image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC) # todo: random, as above? want to condition on it then
|
621 |
-
example = {"image": image}
|
622 |
-
return example
|
623 |
-
|
624 |
-
|
625 |
-
|
626 |
-
|
627 |
-
if __name__ == '__main__':
|
628 |
-
print("hey")
|
629 |
-
img = util.imread_uint('utils/test.png', 3)
|
630 |
-
img = img[:448, :448]
|
631 |
-
h = img.shape[0] // 4
|
632 |
-
print("resizing to", h)
|
633 |
-
sf = 4
|
634 |
-
deg_fn = partial(degradation_bsrgan_variant, sf=sf)
|
635 |
-
for i in range(20):
|
636 |
-
print(i)
|
637 |
-
img_hq = img
|
638 |
-
img_lq = deg_fn(img)["image"]
|
639 |
-
img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
|
640 |
-
print(img_lq)
|
641 |
-
img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
|
642 |
-
print(img_lq.shape)
|
643 |
-
print("bicubic", img_lq_bicubic.shape)
|
644 |
-
print(img_hq.shape)
|
645 |
-
lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
|
646 |
-
interpolation=0)
|
647 |
-
lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
|
648 |
-
(int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
|
649 |
-
interpolation=0)
|
650 |
-
img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
|
651 |
-
util.imsave(img_concat, str(i) + '.png')
|
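Note on the deleted pipeline above: degradation_bsrgan_variant was the entry point the superresolution data loader called — a uint8 HR image goes in, a randomly shuffled blur / resize / Gaussian-noise / JPEG chain runs, and a uint8 LR image comes back in an {"image": ...} dict. A minimal standalone sketch of that call path (the file name is hypothetical; util refers to the sibling utils_image module deleted further below):

import numpy as np
# hq: HxWx3 uint8 RGB image, read with the deleted utils_image helper
hq = util.imread_uint('example.png', n_channels=3)
example = degradation_bsrgan_variant(hq, sf=4)   # runs the shuffled degradation chain
lq = example["image"]                            # uint8 LR image, roughly H/4 x W/4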
ldm/modules/image_degradation/utils/test.png
DELETED
Binary file (441 kB)
ldm/modules/image_degradation/utils_image.py
DELETED
@@ -1,916 +0,0 @@

import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
from datetime import datetime
#import matplotlib.pyplot as plt   # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py


os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"


'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/twhui/SRGAN-pyTorch
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''


IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def get_timestamp():
    return datetime.now().strftime('%y%m%d-%H%M%S')


def imshow(x, title=None, cbar=False, figsize=None):
    plt.figure(figsize=figsize)
    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()


def surf(Z, cmap='rainbow', figsize=None):
    plt.figure(figsize=figsize)
    ax3 = plt.axes(projection='3d')

    w, h = Z.shape[:2]
    xx = np.arange(0,w,1)
    yy = np.arange(0,h,1)
    X, Y = np.meshgrid(xx, yy)
    ax3.plot_surface(X,Y,Z,cmap=cmap)
    #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
    plt.show()


'''
# --------------------------------------------
# get image pathes
# --------------------------------------------
'''


def get_image_paths(dataroot):
    paths = None  # return None if dataroot is None
    if dataroot is not None:
        paths = sorted(_get_paths_from_images(dataroot))
    return paths


def _get_paths_from_images(path):
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = []
    for dirpath, _, fnames in sorted(os.walk(path)):
        for fname in sorted(fnames):
            if is_image_file(fname):
                img_path = os.path.join(dirpath, fname)
                images.append(img_path)
    assert images, '{:s} has no valid image file'.format(path)
    return images


'''
# --------------------------------------------
# split large images into small images
# --------------------------------------------
'''


def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
    w, h = img.shape[:2]
    patches = []
    if w > p_max and h > p_max:
        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int))
        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int))
        w1.append(w-p_size)
        h1.append(h-p_size)
        # print(w1)
        # print(h1)
        for i in w1:
            for j in h1:
                patches.append(img[i:i+p_size, j:j+p_size,:])
    else:
        patches.append(img)

    return patches


def imssave(imgs, img_path):
    """
    imgs: list, N images of size WxHxC
    """
    img_name, ext = os.path.splitext(os.path.basename(img_path))

    for i, img in enumerate(imgs):
        if img.ndim == 3:
            img = img[:, :, [2, 1, 0]]
        new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
        cv2.imwrite(new_path, img)


def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
    """
    split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
    and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
    will be splitted.
    Args:
        original_dataroot:
        taget_dataroot:
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    paths = get_image_paths(original_dataroot)
    for img_path in paths:
        # img_name, ext = os.path.splitext(os.path.basename(img_path))
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path)))
        #if original_dataroot == taget_dataroot:
        #del img_path

'''
# --------------------------------------------
# makedir
# --------------------------------------------
'''


def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def mkdirs(paths):
    if isinstance(paths, str):
        mkdir(paths)
    else:
        for path in paths:
            mkdir(path)


def mkdir_and_rename(path):
    if os.path.exists(path):
        new_name = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(new_name))
        os.rename(path, new_name)
    os.makedirs(path)


'''
# --------------------------------------------
# read image from path
# opencv is fast, but read BGR numpy image
# --------------------------------------------
'''


# --------------------------------------------
# get uint8 image of size HxWxn_channles (RGB)
# --------------------------------------------
def imread_uint(path, n_channels=3):
    #  input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img


# --------------------------------------------
# matlab's imwrite
# --------------------------------------------
def imsave(img, img_path):
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]
    cv2.imwrite(img_path, img)

def imwrite(img, img_path):
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]
    cv2.imwrite(img_path, img)



# --------------------------------------------
# get single image of size HxWxn_channles (BGR)
# --------------------------------------------
def read_img(path):
    # read image by cv2
    # return: Numpy float32, HWC, BGR, [0,1]
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img


'''
# --------------------------------------------
# image format conversion
# --------------------------------------------
# numpy(single) <--->  numpy(unit)
# numpy(single) <--->  tensor
# numpy(unit)   <--->  tensor
# --------------------------------------------
'''


# --------------------------------------------
# numpy(single) [0, 1] <--->  numpy(unit)
# --------------------------------------------


def uint2single(img):

    return np.float32(img/255.)


def single2uint(img):

    return np.uint8((img.clip(0, 1)*255.).round())


def uint162single(img):

    return np.float32(img/65535.)


def single2uint16(img):

    return np.uint16((img.clip(0, 1)*65535.).round())


# --------------------------------------------
# numpy(unit) (HxWxC or HxW) <--->  tensor
# --------------------------------------------


# convert uint to 4-dimensional torch tensor
def uint2tensor4(img):
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)


# convert uint to 3-dimensional torch tensor
def uint2tensor3(img):
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)


# convert 2/3/4-dimensional torch tensor to uint
def tensor2uint(img):
    img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if img.ndim == 3:
        img = np.transpose(img, (1, 2, 0))
    return np.uint8((img*255.0).round())


# --------------------------------------------
# numpy(single) (HxWxC) <--->  tensor
# --------------------------------------------


# convert single (HxWxC) to 3-dimensional torch tensor
def single2tensor3(img):
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()


# convert single (HxWxC) to 4-dimensional torch tensor
def single2tensor4(img):
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)


# convert torch tensor to single
def tensor2single(img):
    img = img.data.squeeze().float().cpu().numpy()
    if img.ndim == 3:
        img = np.transpose(img, (1, 2, 0))

    return img

# convert torch tensor to single
def tensor2single3(img):
    img = img.data.squeeze().float().cpu().numpy()
    if img.ndim == 3:
        img = np.transpose(img, (1, 2, 0))
    elif img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    return img


def single2tensor5(img):
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)


def single32tensor5(img):
    return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)


def single42tensor4(img):
    return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()


# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        img_np = (img_np * 255.0).round()
        # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
    return img_np.astype(out_type)


'''
# --------------------------------------------
# Augmentation, flipe and/or rotate
# --------------------------------------------
# The following two are enough.
# (1) augmet_img: numpy image of WxHxC or WxH
# (2) augment_img_tensor4: tensor image 1xCxWxH
# --------------------------------------------
'''


def augment_img(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    if mode == 0:
        return img
    elif mode == 1:
        return np.flipud(np.rot90(img))
    elif mode == 2:
        return np.flipud(img)
    elif mode == 3:
        return np.rot90(img, k=3)
    elif mode == 4:
        return np.flipud(np.rot90(img, k=2))
    elif mode == 5:
        return np.rot90(img)
    elif mode == 6:
        return np.rot90(img, k=2)
    elif mode == 7:
        return np.flipud(np.rot90(img, k=3))


def augment_img_tensor4(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    if mode == 0:
        return img
    elif mode == 1:
        return img.rot90(1, [2, 3]).flip([2])
    elif mode == 2:
        return img.flip([2])
    elif mode == 3:
        return img.rot90(3, [2, 3])
    elif mode == 4:
        return img.rot90(2, [2, 3]).flip([2])
    elif mode == 5:
        return img.rot90(1, [2, 3])
    elif mode == 6:
        return img.rot90(2, [2, 3])
    elif mode == 7:
        return img.rot90(3, [2, 3]).flip([2])


def augment_img_tensor(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)

    return img_tensor.type_as(img)


def augment_img_np3(img, mode=0):
    if mode == 0:
        return img
    elif mode == 1:
        return img.transpose(1, 0, 2)
    elif mode == 2:
        return img[::-1, :, :]
    elif mode == 3:
        img = img[::-1, :, :]
        img = img.transpose(1, 0, 2)
        return img
    elif mode == 4:
        return img[:, ::-1, :]
    elif mode == 5:
        img = img[:, ::-1, :]
        img = img.transpose(1, 0, 2)
        return img
    elif mode == 6:
        img = img[:, ::-1, :]
        img = img[::-1, :, :]
        return img
    elif mode == 7:
        img = img[:, ::-1, :]
        img = img[::-1, :, :]
        img = img.transpose(1, 0, 2)
        return img


def augment_imgs(img_list, hflip=True, rot=True):
    # horizontal flip OR rotate
    hflip = hflip and random.random() < 0.5
    vflip = rot and random.random() < 0.5
    rot90 = rot and random.random() < 0.5

    def _augment(img):
        if hflip:
            img = img[:, ::-1, :]
        if vflip:
            img = img[::-1, :, :]
        if rot90:
            img = img.transpose(1, 0, 2)
        return img

    return [_augment(img) for img in img_list]


'''
# --------------------------------------------
# modcrop and shave
# --------------------------------------------
'''


def modcrop(img_in, scale):
    # img_in: Numpy, HWC or HW
    img = np.copy(img_in)
    if img.ndim == 2:
        H, W = img.shape
        H_r, W_r = H % scale, W % scale
        img = img[:H - H_r, :W - W_r]
    elif img.ndim == 3:
        H, W, C = img.shape
        H_r, W_r = H % scale, W % scale
        img = img[:H - H_r, :W - W_r, :]
    else:
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    return img


def shave(img_in, border=0):
    # img_in: Numpy, HWC or HW
    img = np.copy(img_in)
    h, w = img.shape[:2]
    img = img[border:h-border, border:w-border]
    return img


'''
# --------------------------------------------
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# --------------------------------------------
'''


def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)


def channel_convert(in_c, tar_type, img_list):
    # conversion among BGR, gray and y
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in gray_list]
    elif in_c == 3 and tar_type == 'y':  # BGR to y
        y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(img, axis=2) for img in y_list]
    elif in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    else:
        return img_list


'''
# --------------------------------------------
# metric, PSNR and SSIM
# --------------------------------------------
'''


# --------------------------------------------
# PSNR
# --------------------------------------------
def calculate_psnr(img1, img2, border=0):
    # img1 and img2 have range [0, 255]
    #img1 = img1.squeeze()
    #img2 = img2.squeeze()
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h-border, border:w-border]
    img2 = img2[border:h-border, border:w-border]

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    mse = np.mean((img1 - img2)**2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))


# --------------------------------------------
# SSIM
# --------------------------------------------
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]
    '''
    #img1 = img1.squeeze()
    #img2 = img2.squeeze()
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h-border, border:w-border]
    img2 = img2[border:h-border, border:w-border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            ssims = []
            for i in range(3):
                ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')


def ssim(img1, img2):
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2

    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())

    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()


'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
'''


# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    absx = torch.abs(x)
    absx2 = absx**2
    absx3 = absx**3
    return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
        (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))


def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)

    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)


# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    # Now the scale should be the same for H and W
    # input: img: pytorch tensor, CHW or HW [0,1]
    # output: CHW or HW [0,1] w/o round
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(0)
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2


# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img.unsqueeze_(2)

    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'

    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.

    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)

    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)

    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])

    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)

    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()

    return out_2.numpy()


if __name__ == '__main__':
    print('---')
    # img = imread_uint('test.bmp', 3)
    # img = uint2single(img)
    # img_bicubic = imresize_np(img, 1/4)
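Among the helpers deleted above, calculate_psnr implements the usual PSNR = 20 * log10(255 / sqrt(MSE)) for [0, 255] images. A quick self-contained sanity check of that formula (a sketch, not part of the original file):

import math
import numpy as np

a = np.zeros((8, 8), dtype=np.float64)
b = np.full((8, 8), 10.0)                       # uniform error of 10 -> MSE = 100
mse = np.mean((a - b) ** 2)
print(20 * math.log10(255.0 / math.sqrt(mse)))  # ~28.13 dB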
ldm/modules/midas/__init__.py
DELETED
File without changes
ldm/modules/midas/api.py
DELETED
@@ -1,170 +0,0 @@

# based on https://github.com/isl-org/MiDaS

import cv2
import torch
import torch.nn as nn
from torchvision.transforms import Compose

from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
from ldm.modules.midas.midas.midas_net import MidasNet
from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet


ISL_PATHS = {
    "dpt_large": "/fsx/robin/midas_models/dpt_large-midas-2f21e586.pt",  # TODO: adapt
    "dpt_hybrid": "/fsx/robin/midas_models/dpt_hybrid-midas-501f0c75.pt",  # TODO: adapt
    "midas_v21": "",
    "midas_v21_small": "",
}


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def load_midas_transform(model_type):
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load transform only
    if model_type == "dpt_large":  # DPT-Large
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_hybrid":  # DPT-Hybrid
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "midas_v21":
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    elif model_type == "midas_v21_small":
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    else:
        assert False, f"model_type '{model_type}' not implemented, use: --model_type large"

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    return transform
71 |
-
|
72 |
-
|
73 |
-
def load_model(model_type):
|
74 |
-
# https://github.com/isl-org/MiDaS/blob/master/run.py
|
75 |
-
# load network
|
76 |
-
model_path = ISL_PATHS[model_type]
|
77 |
-
if model_type == "dpt_large": # DPT-Large
|
78 |
-
model = DPTDepthModel(
|
79 |
-
path=model_path,
|
80 |
-
backbone="vitl16_384",
|
81 |
-
non_negative=True,
|
82 |
-
)
|
83 |
-
net_w, net_h = 384, 384
|
84 |
-
resize_mode = "minimal"
|
85 |
-
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
|
86 |
-
|
87 |
-
elif model_type == "dpt_hybrid": # DPT-Hybrid
|
88 |
-
model = DPTDepthModel(
|
89 |
-
path=model_path,
|
90 |
-
backbone="vitb_rn50_384",
|
91 |
-
non_negative=True,
|
92 |
-
)
|
93 |
-
net_w, net_h = 384, 384
|
94 |
-
resize_mode = "minimal"
|
95 |
-
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
|
96 |
-
|
97 |
-
elif model_type == "midas_v21":
|
98 |
-
model = MidasNet(model_path, non_negative=True)
|
99 |
-
net_w, net_h = 384, 384
|
100 |
-
resize_mode = "upper_bound"
|
101 |
-
normalization = NormalizeImage(
|
102 |
-
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
|
103 |
-
)
|
104 |
-
|
105 |
-
elif model_type == "midas_v21_small":
|
106 |
-
model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
|
107 |
-
non_negative=True, blocks={'expand': True})
|
108 |
-
net_w, net_h = 256, 256
|
109 |
-
resize_mode = "upper_bound"
|
110 |
-
normalization = NormalizeImage(
|
111 |
-
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
|
112 |
-
)
|
113 |
-
|
114 |
-
else:
|
115 |
-
print(f"model_type '{model_type}' not implemented, use: --model_type large")
|
116 |
-
assert False
|
117 |
-
|
118 |
-
transform = Compose(
|
119 |
-
[
|
120 |
-
Resize(
|
121 |
-
net_w,
|
122 |
-
net_h,
|
123 |
-
resize_target=None,
|
124 |
-
keep_aspect_ratio=True,
|
125 |
-
ensure_multiple_of=32,
|
126 |
-
resize_method=resize_mode,
|
127 |
-
image_interpolation_method=cv2.INTER_CUBIC,
|
128 |
-
),
|
129 |
-
normalization,
|
130 |
-
PrepareForNet(),
|
131 |
-
]
|
132 |
-
)
|
133 |
-
|
134 |
-
return model.eval(), transform
|
135 |
-
|
136 |
-
|
137 |
-
class MiDaSInference(nn.Module):
|
138 |
-
MODEL_TYPES_TORCH_HUB = [
|
139 |
-
"DPT_Large",
|
140 |
-
"DPT_Hybrid",
|
141 |
-
"MiDaS_small"
|
142 |
-
]
|
143 |
-
MODEL_TYPES_ISL = [
|
144 |
-
"dpt_large",
|
145 |
-
"dpt_hybrid",
|
146 |
-
"midas_v21",
|
147 |
-
"midas_v21_small",
|
148 |
-
]
|
149 |
-
|
150 |
-
def __init__(self, model_type):
|
151 |
-
super().__init__()
|
152 |
-
assert (model_type in self.MODEL_TYPES_ISL)
|
153 |
-
model, _ = load_model(model_type)
|
154 |
-
self.model = model
|
155 |
-
self.model.train = disabled_train
|
156 |
-
|
157 |
-
def forward(self, x):
|
158 |
-
# x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
|
159 |
-
# NOTE: we expect that the correct transform has been called during dataloading.
|
160 |
-
with torch.no_grad():
|
161 |
-
prediction = self.model(x)
|
162 |
-
prediction = torch.nn.functional.interpolate(
|
163 |
-
prediction.unsqueeze(1),
|
164 |
-
size=x.shape[2:],
|
165 |
-
mode="bicubic",
|
166 |
-
align_corners=False,
|
167 |
-
)
|
168 |
-
assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
|
169 |
-
return prediction
|
170 |
-
|
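
Note on the removed wrapper above: `MiDaSInference` freezes the network (its `train` is overwritten with `disabled_train`) and `load_midas_transform` builds the matching preprocessing. A minimal sketch of how the two were typically wired together, assuming the pre-migration `ldm` package is importable and the hard-coded `ISL_PATHS` checkpoint files exist (they are placeholders here):

# Sketch only, not part of this commit.
import numpy as np
import torch
from ldm.modules.midas.api import MiDaSInference, load_midas_transform

model = MiDaSInference("dpt_hybrid")            # wraps load_model(); requires the ISL_PATHS checkpoint
transform = load_midas_transform("dpt_hybrid")  # Resize -> NormalizeImage -> PrepareForNet

image = np.random.rand(480, 640, 3)             # HWC float in 0..1, as forward() documents
sample = transform({"image": image})            # dict in, dict out; "image" becomes CHW float32
x = torch.from_numpy(sample["image"]).unsqueeze(0)

depth = model(x)                                # B x 1 x H' x W', interpolated back to the input size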
ldm/modules/midas/midas/__init__.py
DELETED
File without changes

ldm/modules/midas/midas/base_model.py
DELETED
@@ -1,16 +0,0 @@
-import torch
-
-
-class BaseModel(torch.nn.Module):
-    def load(self, path):
-        """Load model from file.
-
-        Args:
-            path (str): file path
-        """
-        parameters = torch.load(path, map_location=torch.device('cpu'))
-
-        if "optimizer" in parameters:
-            parameters = parameters["model"]
-
-        self.load_state_dict(parameters)
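
The `load()` above accepts two checkpoint layouts: a bare state dict, or a training checkpoint that stores the weights under a "model" key alongside an "optimizer" key. A small sketch of both shapes (illustrative filenames):

# Sketch only, not part of this commit.
import torch
import torch.nn as nn

net = nn.Linear(4, 2)
torch.save(net.state_dict(), "plain.pt")               # bare state_dict: loaded directly
torch.save({"model": net.state_dict(),
            "optimizer": {}}, "train_ckpt.pt")         # training checkpoint: unwrapped via the "optimizer" check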

ldm/modules/midas/midas/blocks.py
DELETED
@@ -1,342 +0,0 @@
-import torch
-import torch.nn as nn
-
-from .vit import (
-    _make_pretrained_vitb_rn50_384,
-    _make_pretrained_vitl16_384,
-    _make_pretrained_vitb16_384,
-    forward_vit,
-)
-
-def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
-    if backbone == "vitl16_384":
-        pretrained = _make_pretrained_vitl16_384(
-            use_pretrained, hooks=hooks, use_readout=use_readout
-        )
-        scratch = _make_scratch(
-            [256, 512, 1024, 1024], features, groups=groups, expand=expand
-        )  # ViT-L/16 - 85.0% Top1 (backbone)
-    elif backbone == "vitb_rn50_384":
-        pretrained = _make_pretrained_vitb_rn50_384(
-            use_pretrained,
-            hooks=hooks,
-            use_vit_only=use_vit_only,
-            use_readout=use_readout,
-        )
-        scratch = _make_scratch(
-            [256, 512, 768, 768], features, groups=groups, expand=expand
-        )  # ViT-H/16 - 85.0% Top1 (backbone)
-    elif backbone == "vitb16_384":
-        pretrained = _make_pretrained_vitb16_384(
-            use_pretrained, hooks=hooks, use_readout=use_readout
-        )
-        scratch = _make_scratch(
-            [96, 192, 384, 768], features, groups=groups, expand=expand
-        )  # ViT-B/16 - 84.6% Top1 (backbone)
-    elif backbone == "resnext101_wsl":
-        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
-        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # efficientnet_lite3
-    elif backbone == "efficientnet_lite3":
-        pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
-        scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand)  # efficientnet_lite3
-    else:
-        print(f"Backbone '{backbone}' not implemented")
-        assert False
-
-    return pretrained, scratch
-
-
-def _make_scratch(in_shape, out_shape, groups=1, expand=False):
-    scratch = nn.Module()
-
-    out_shape1 = out_shape
-    out_shape2 = out_shape
-    out_shape3 = out_shape
-    out_shape4 = out_shape
-    if expand==True:
-        out_shape1 = out_shape
-        out_shape2 = out_shape*2
-        out_shape3 = out_shape*4
-        out_shape4 = out_shape*8
-
-    scratch.layer1_rn = nn.Conv2d(
-        in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-    )
-    scratch.layer2_rn = nn.Conv2d(
-        in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-    )
-    scratch.layer3_rn = nn.Conv2d(
-        in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-    )
-    scratch.layer4_rn = nn.Conv2d(
-        in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
-    )
-
-    return scratch
-
-
-def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
-    efficientnet = torch.hub.load(
-        "rwightman/gen-efficientnet-pytorch",
-        "tf_efficientnet_lite3",
-        pretrained=use_pretrained,
-        exportable=exportable
-    )
-    return _make_efficientnet_backbone(efficientnet)
-
-
-def _make_efficientnet_backbone(effnet):
-    pretrained = nn.Module()
-
-    pretrained.layer1 = nn.Sequential(
-        effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
-    )
-    pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
-    pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
-    pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
-
-    return pretrained
-
-
-def _make_resnet_backbone(resnet):
-    pretrained = nn.Module()
-    pretrained.layer1 = nn.Sequential(
-        resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
-    )
-
-    pretrained.layer2 = resnet.layer2
-    pretrained.layer3 = resnet.layer3
-    pretrained.layer4 = resnet.layer4
-
-    return pretrained
-
-
-def _make_pretrained_resnext101_wsl(use_pretrained):
-    resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
-    return _make_resnet_backbone(resnet)
-
-
-
-class Interpolate(nn.Module):
-    """Interpolation module.
-    """
-
-    def __init__(self, scale_factor, mode, align_corners=False):
-        """Init.
-
-        Args:
-            scale_factor (float): scaling
-            mode (str): interpolation mode
-        """
-        super(Interpolate, self).__init__()
-
-        self.interp = nn.functional.interpolate
-        self.scale_factor = scale_factor
-        self.mode = mode
-        self.align_corners = align_corners
-
-    def forward(self, x):
-        """Forward pass.
-
-        Args:
-            x (tensor): input
-
-        Returns:
-            tensor: interpolated data
-        """
-
-        x = self.interp(
-            x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
-        )
-
-        return x
-
-
-class ResidualConvUnit(nn.Module):
-    """Residual convolution module.
-    """
-
-    def __init__(self, features):
-        """Init.
-
-        Args:
-            features (int): number of features
-        """
-        super().__init__()
-
-        self.conv1 = nn.Conv2d(
-            features, features, kernel_size=3, stride=1, padding=1, bias=True
-        )
-
-        self.conv2 = nn.Conv2d(
-            features, features, kernel_size=3, stride=1, padding=1, bias=True
-        )
-
-        self.relu = nn.ReLU(inplace=True)
-
-    def forward(self, x):
-        """Forward pass.
-
-        Args:
-            x (tensor): input
-
-        Returns:
-            tensor: output
-        """
-        out = self.relu(x)
-        out = self.conv1(out)
-        out = self.relu(out)
-        out = self.conv2(out)
-
-        return out + x
-
-
-class FeatureFusionBlock(nn.Module):
-    """Feature fusion block.
-    """
-
-    def __init__(self, features):
-        """Init.
-
-        Args:
-            features (int): number of features
-        """
-        super(FeatureFusionBlock, self).__init__()
-
-        self.resConfUnit1 = ResidualConvUnit(features)
-        self.resConfUnit2 = ResidualConvUnit(features)
-
-    def forward(self, *xs):
-        """Forward pass.
-
-        Returns:
-            tensor: output
-        """
-        output = xs[0]
-
-        if len(xs) == 2:
-            output += self.resConfUnit1(xs[1])
-
-        output = self.resConfUnit2(output)
-
-        output = nn.functional.interpolate(
-            output, scale_factor=2, mode="bilinear", align_corners=True
-        )
-
-        return output
-
-
-
-
-class ResidualConvUnit_custom(nn.Module):
-    """Residual convolution module.
-    """
-
-    def __init__(self, features, activation, bn):
-        """Init.
-
-        Args:
-            features (int): number of features
-        """
-        super().__init__()
-
-        self.bn = bn
-
-        self.groups=1
-
-        self.conv1 = nn.Conv2d(
-            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
-        )
-
-        self.conv2 = nn.Conv2d(
-            features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
-        )
-
-        if self.bn==True:
-            self.bn1 = nn.BatchNorm2d(features)
-            self.bn2 = nn.BatchNorm2d(features)
-
-        self.activation = activation
-
-        self.skip_add = nn.quantized.FloatFunctional()
-
-    def forward(self, x):
-        """Forward pass.
-
-        Args:
-            x (tensor): input
-
-        Returns:
-            tensor: output
-        """
-
-        out = self.activation(x)
-        out = self.conv1(out)
-        if self.bn==True:
-            out = self.bn1(out)
-
-        out = self.activation(out)
-        out = self.conv2(out)
-        if self.bn==True:
-            out = self.bn2(out)
-
-        if self.groups > 1:
-            out = self.conv_merge(out)
-
-        return self.skip_add.add(out, x)
-
-        # return out + x
-
-
-class FeatureFusionBlock_custom(nn.Module):
-    """Feature fusion block.
-    """
-
-    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
-        """Init.
-
-        Args:
-            features (int): number of features
-        """
-        super(FeatureFusionBlock_custom, self).__init__()
-
-        self.deconv = deconv
-        self.align_corners = align_corners
-
-        self.groups=1
-
-        self.expand = expand
-        out_features = features
-        if self.expand==True:
-            out_features = features//2
-
-        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
-
-        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
-        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
-
-        self.skip_add = nn.quantized.FloatFunctional()
-
-    def forward(self, *xs):
-        """Forward pass.
-
-        Returns:
-            tensor: output
-        """
-        output = xs[0]
-
-        if len(xs) == 2:
-            res = self.resConfUnit1(xs[1])
-            output = self.skip_add.add(output, res)
-            # output += res
-
-        output = self.resConfUnit2(output)
-
-        output = nn.functional.interpolate(
-            output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
-        )
-
-        output = self.out_conv(output)
-
-        return output
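
The fusion blocks above take the coarser decoder feature (and optionally an encoder skip feature), refine both through residual conv units, add them, and upsample 2x. A shape sketch with illustrative values, assuming the pre-migration import path:

# Sketch only, not part of this commit.
import torch
import torch.nn as nn
from ldm.modules.midas.midas.blocks import FeatureFusionBlock_custom

block = FeatureFusionBlock_custom(256, nn.ReLU(False), bn=False, align_corners=True)

path = torch.randn(1, 256, 24, 24)   # feature from the coarser decoder stage
skip = torch.randn(1, 256, 24, 24)   # skip feature from the encoder

out = block(path, skip)              # refine, add, then 2x bilinear upsample + 1x1 conv
print(out.shape)                     # torch.Size([1, 256, 48, 48])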

ldm/modules/midas/midas/dpt_depth.py
DELETED
@@ -1,109 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .base_model import BaseModel
-from .blocks import (
-    FeatureFusionBlock,
-    FeatureFusionBlock_custom,
-    Interpolate,
-    _make_encoder,
-    forward_vit,
-)
-
-
-def _make_fusion_block(features, use_bn):
-    return FeatureFusionBlock_custom(
-        features,
-        nn.ReLU(False),
-        deconv=False,
-        bn=use_bn,
-        expand=False,
-        align_corners=True,
-    )
-
-
-class DPT(BaseModel):
-    def __init__(
-        self,
-        head,
-        features=256,
-        backbone="vitb_rn50_384",
-        readout="project",
-        channels_last=False,
-        use_bn=False,
-    ):
-
-        super(DPT, self).__init__()
-
-        self.channels_last = channels_last
-
-        hooks = {
-            "vitb_rn50_384": [0, 1, 8, 11],
-            "vitb16_384": [2, 5, 8, 11],
-            "vitl16_384": [5, 11, 17, 23],
-        }
-
-        # Instantiate backbone and reassemble blocks
-        self.pretrained, self.scratch = _make_encoder(
-            backbone,
-            features,
-            False, # Set to true of you want to train from scratch, uses ImageNet weights
-            groups=1,
-            expand=False,
-            exportable=False,
-            hooks=hooks[backbone],
-            use_readout=readout,
-        )
-
-        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
-        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
-        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
-        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
-
-        self.scratch.output_conv = head
-
-
-    def forward(self, x):
-        if self.channels_last == True:
-            x.contiguous(memory_format=torch.channels_last)
-
-        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
-
-        layer_1_rn = self.scratch.layer1_rn(layer_1)
-        layer_2_rn = self.scratch.layer2_rn(layer_2)
-        layer_3_rn = self.scratch.layer3_rn(layer_3)
-        layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-        path_4 = self.scratch.refinenet4(layer_4_rn)
-        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-        out = self.scratch.output_conv(path_1)
-
-        return out
-
-
-class DPTDepthModel(DPT):
-    def __init__(self, path=None, non_negative=True, **kwargs):
-        features = kwargs["features"] if "features" in kwargs else 256
-
-        head = nn.Sequential(
-            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
-            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
-            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
-            nn.ReLU(True),
-            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-            nn.ReLU(True) if non_negative else nn.Identity(),
-            nn.Identity(),
-        )
-
-        super().__init__(head, **kwargs)
-
-        if path is not None:
-            self.load(path)
-
-    def forward(self, x):
-        return super().forward(x).squeeze(dim=1)
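
With `path=None`, `DPTDepthModel` can be instantiated without a checkpoint: `_make_encoder` is called with `use_pretrained=False`, so no backbone weights are downloaded, and the refinement pyramid plus head upsample the ViT features back to the input resolution. A sketch, assuming a timm version that still provides `vit_large_patch16_384` and the pre-migration import path:

# Sketch only, not part of this commit.
import torch
from ldm.modules.midas.midas.dpt_depth import DPTDepthModel

model = DPTDepthModel(path=None, backbone="vitl16_384", non_negative=True).eval()

x = torch.randn(1, 3, 384, 384)      # ViT-L/16 backbone built for 384x384 inputs
with torch.no_grad():
    depth = model(x)                 # squeeze(dim=1) drops the channel axis
print(depth.shape)                   # torch.Size([1, 384, 384])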

ldm/modules/midas/midas/midas_net.py
DELETED
@@ -1,76 +0,0 @@
-"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
-This file contains code that is adapted from
-https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
-"""
-import torch
-import torch.nn as nn
-
-from .base_model import BaseModel
-from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
-
-
-class MidasNet(BaseModel):
-    """Network for monocular depth estimation.
-    """
-
-    def __init__(self, path=None, features=256, non_negative=True):
-        """Init.
-
-        Args:
-            path (str, optional): Path to saved model. Defaults to None.
-            features (int, optional): Number of features. Defaults to 256.
-            backbone (str, optional): Backbone network for encoder. Defaults to resnet50
-        """
-        print("Loading weights: ", path)
-
-        super(MidasNet, self).__init__()
-
-        use_pretrained = False if path is None else True
-
-        self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
-
-        self.scratch.refinenet4 = FeatureFusionBlock(features)
-        self.scratch.refinenet3 = FeatureFusionBlock(features)
-        self.scratch.refinenet2 = FeatureFusionBlock(features)
-        self.scratch.refinenet1 = FeatureFusionBlock(features)
-
-        self.scratch.output_conv = nn.Sequential(
-            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
-            Interpolate(scale_factor=2, mode="bilinear"),
-            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
-            nn.ReLU(True),
-            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-            nn.ReLU(True) if non_negative else nn.Identity(),
-        )
-
-        if path:
-            self.load(path)
-
-    def forward(self, x):
-        """Forward pass.
-
-        Args:
-            x (tensor): input data (image)
-
-        Returns:
-            tensor: depth
-        """
-
-        layer_1 = self.pretrained.layer1(x)
-        layer_2 = self.pretrained.layer2(layer_1)
-        layer_3 = self.pretrained.layer3(layer_2)
-        layer_4 = self.pretrained.layer4(layer_3)
-
-        layer_1_rn = self.scratch.layer1_rn(layer_1)
-        layer_2_rn = self.scratch.layer2_rn(layer_2)
-        layer_3_rn = self.scratch.layer3_rn(layer_3)
-        layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-        path_4 = self.scratch.refinenet4(layer_4_rn)
-        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-        out = self.scratch.output_conv(path_1)
-
-        return torch.squeeze(out, dim=1)

ldm/modules/midas/midas/midas_net_custom.py
DELETED
@@ -1,128 +0,0 @@
-"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
-This file contains code that is adapted from
-https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
-"""
-import torch
-import torch.nn as nn
-
-from .base_model import BaseModel
-from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
-
-
-class MidasNet_small(BaseModel):
-    """Network for monocular depth estimation.
-    """
-
-    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
-                 blocks={'expand': True}):
-        """Init.
-
-        Args:
-            path (str, optional): Path to saved model. Defaults to None.
-            features (int, optional): Number of features. Defaults to 256.
-            backbone (str, optional): Backbone network for encoder. Defaults to resnet50
-        """
-        print("Loading weights: ", path)
-
-        super(MidasNet_small, self).__init__()
-
-        use_pretrained = False if path else True
-
-        self.channels_last = channels_last
-        self.blocks = blocks
-        self.backbone = backbone
-
-        self.groups = 1
-
-        features1=features
-        features2=features
-        features3=features
-        features4=features
-        self.expand = False
-        if "expand" in self.blocks and self.blocks['expand'] == True:
-            self.expand = True
-            features1=features
-            features2=features*2
-            features3=features*4
-            features4=features*8
-
-        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
-
-        self.scratch.activation = nn.ReLU(False)
-
-        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
-        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
-
-
-        self.scratch.output_conv = nn.Sequential(
-            nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
-            Interpolate(scale_factor=2, mode="bilinear"),
-            nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
-            self.scratch.activation,
-            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-            nn.ReLU(True) if non_negative else nn.Identity(),
-            nn.Identity(),
-        )
-
-        if path:
-            self.load(path)
-
-
-    def forward(self, x):
-        """Forward pass.
-
-        Args:
-            x (tensor): input data (image)
-
-        Returns:
-            tensor: depth
-        """
-        if self.channels_last==True:
-            print("self.channels_last = ", self.channels_last)
-            x.contiguous(memory_format=torch.channels_last)
-
-
-        layer_1 = self.pretrained.layer1(x)
-        layer_2 = self.pretrained.layer2(layer_1)
-        layer_3 = self.pretrained.layer3(layer_2)
-        layer_4 = self.pretrained.layer4(layer_3)
-
-        layer_1_rn = self.scratch.layer1_rn(layer_1)
-        layer_2_rn = self.scratch.layer2_rn(layer_2)
-        layer_3_rn = self.scratch.layer3_rn(layer_3)
-        layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-
-        path_4 = self.scratch.refinenet4(layer_4_rn)
-        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-        out = self.scratch.output_conv(path_1)
-
-        return torch.squeeze(out, dim=1)
-
-
-
-def fuse_model(m):
-    prev_previous_type = nn.Identity()
-    prev_previous_name = ''
-    previous_type = nn.Identity()
-    previous_name = ''
-    for name, module in m.named_modules():
-        if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
-            # print("FUSED ", prev_previous_name, previous_name, name)
-            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
-        elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
-            # print("FUSED ", prev_previous_name, previous_name)
-            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
-        # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
-        #    print("FUSED ", previous_name, name)
-        #    torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
-
-        prev_previous_type = previous_type
-        prev_previous_name = previous_name
-        previous_type = type(module)
-        previous_name = name
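
The `fuse_model()` helper above walks the module tree and folds conv-bn(-relu) triples via `torch.quantization.fuse_modules`, the standard preparation step for quantization. A minimal sketch of the same fusion pattern on a toy module (fusion requires eval mode):

# Sketch only, not part of this commit.
import torch
import torch.nn as nn

toy = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1),
                    nn.BatchNorm2d(8),
                    nn.ReLU()).eval()

fused = torch.quantization.fuse_modules(toy, [["0", "1", "2"]])
print(fused)   # BN folded into the conv; the fused slots become Identity placeholders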

ldm/modules/midas/midas/transforms.py
DELETED
@@ -1,234 +0,0 @@
-import numpy as np
-import cv2
-import math
-
-
-def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
-    """Rezise the sample to ensure the given size. Keeps aspect ratio.
-
-    Args:
-        sample (dict): sample
-        size (tuple): image size
-
-    Returns:
-        tuple: new size
-    """
-    shape = list(sample["disparity"].shape)
-
-    if shape[0] >= size[0] and shape[1] >= size[1]:
-        return sample
-
-    scale = [0, 0]
-    scale[0] = size[0] / shape[0]
-    scale[1] = size[1] / shape[1]
-
-    scale = max(scale)
-
-    shape[0] = math.ceil(scale * shape[0])
-    shape[1] = math.ceil(scale * shape[1])
-
-    # resize
-    sample["image"] = cv2.resize(
-        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
-    )
-
-    sample["disparity"] = cv2.resize(
-        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
-    )
-    sample["mask"] = cv2.resize(
-        sample["mask"].astype(np.float32),
-        tuple(shape[::-1]),
-        interpolation=cv2.INTER_NEAREST,
-    )
-    sample["mask"] = sample["mask"].astype(bool)
-
-    return tuple(shape)
-
-
-class Resize(object):
-    """Resize sample to given size (width, height).
-    """
-
-    def __init__(
-        self,
-        width,
-        height,
-        resize_target=True,
-        keep_aspect_ratio=False,
-        ensure_multiple_of=1,
-        resize_method="lower_bound",
-        image_interpolation_method=cv2.INTER_AREA,
-    ):
-        """Init.
-
-        Args:
-            width (int): desired output width
-            height (int): desired output height
-            resize_target (bool, optional):
-                True: Resize the full sample (image, mask, target).
-                False: Resize image only.
-                Defaults to True.
-            keep_aspect_ratio (bool, optional):
-                True: Keep the aspect ratio of the input sample.
-                Output sample might not have the given width and height, and
-                resize behaviour depends on the parameter 'resize_method'.
-                Defaults to False.
-            ensure_multiple_of (int, optional):
-                Output width and height is constrained to be multiple of this parameter.
-                Defaults to 1.
-            resize_method (str, optional):
-                "lower_bound": Output will be at least as large as the given size.
-                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
-                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
-                Defaults to "lower_bound".
-        """
-        self.__width = width
-        self.__height = height
-
-        self.__resize_target = resize_target
-        self.__keep_aspect_ratio = keep_aspect_ratio
-        self.__multiple_of = ensure_multiple_of
-        self.__resize_method = resize_method
-        self.__image_interpolation_method = image_interpolation_method
-
-    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
-        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-        if max_val is not None and y > max_val:
-            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-        if y < min_val:
-            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
-
-        return y
-
-    def get_size(self, width, height):
-        # determine new height and width
-        scale_height = self.__height / height
-        scale_width = self.__width / width
-
-        if self.__keep_aspect_ratio:
-            if self.__resize_method == "lower_bound":
-                # scale such that output size is lower bound
-                if scale_width > scale_height:
-                    # fit width
-                    scale_height = scale_width
-                else:
-                    # fit height
-                    scale_width = scale_height
-            elif self.__resize_method == "upper_bound":
-                # scale such that output size is upper bound
-                if scale_width < scale_height:
-                    # fit width
-                    scale_height = scale_width
-                else:
-                    # fit height
-                    scale_width = scale_height
-            elif self.__resize_method == "minimal":
-                # scale as least as possbile
-                if abs(1 - scale_width) < abs(1 - scale_height):
-                    # fit width
-                    scale_height = scale_width
-                else:
-                    # fit height
-                    scale_width = scale_height
-            else:
-                raise ValueError(
-                    f"resize_method {self.__resize_method} not implemented"
-                )
-
-        if self.__resize_method == "lower_bound":
-            new_height = self.constrain_to_multiple_of(
-                scale_height * height, min_val=self.__height
-            )
-            new_width = self.constrain_to_multiple_of(
-                scale_width * width, min_val=self.__width
-            )
-        elif self.__resize_method == "upper_bound":
-            new_height = self.constrain_to_multiple_of(
-                scale_height * height, max_val=self.__height
-            )
-            new_width = self.constrain_to_multiple_of(
-                scale_width * width, max_val=self.__width
-            )
-        elif self.__resize_method == "minimal":
-            new_height = self.constrain_to_multiple_of(scale_height * height)
-            new_width = self.constrain_to_multiple_of(scale_width * width)
-        else:
-            raise ValueError(f"resize_method {self.__resize_method} not implemented")
-
-        return (new_width, new_height)
-
-    def __call__(self, sample):
-        width, height = self.get_size(
-            sample["image"].shape[1], sample["image"].shape[0]
-        )
-
-        # resize sample
-        sample["image"] = cv2.resize(
-            sample["image"],
-            (width, height),
-            interpolation=self.__image_interpolation_method,
-        )
-
-        if self.__resize_target:
-            if "disparity" in sample:
-                sample["disparity"] = cv2.resize(
-                    sample["disparity"],
-                    (width, height),
-                    interpolation=cv2.INTER_NEAREST,
-                )
-
-            if "depth" in sample:
-                sample["depth"] = cv2.resize(
-                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
-                )
-
-            sample["mask"] = cv2.resize(
-                sample["mask"].astype(np.float32),
-                (width, height),
-                interpolation=cv2.INTER_NEAREST,
-            )
-            sample["mask"] = sample["mask"].astype(bool)
-
-        return sample
-
-
-class NormalizeImage(object):
-    """Normlize image by given mean and std.
-    """
-
-    def __init__(self, mean, std):
-        self.__mean = mean
-        self.__std = std
-
-    def __call__(self, sample):
-        sample["image"] = (sample["image"] - self.__mean) / self.__std
-
-        return sample
-
-
-class PrepareForNet(object):
-    """Prepare sample for usage as network input.
-    """
-
-    def __init__(self):
-        pass
-
-    def __call__(self, sample):
-        image = np.transpose(sample["image"], (2, 0, 1))
-        sample["image"] = np.ascontiguousarray(image).astype(np.float32)
-
-        if "mask" in sample:
-            sample["mask"] = sample["mask"].astype(np.float32)
-            sample["mask"] = np.ascontiguousarray(sample["mask"])
-
-        if "disparity" in sample:
-            disparity = sample["disparity"].astype(np.float32)
-            sample["disparity"] = np.ascontiguousarray(disparity)
-
-        if "depth" in sample:
-            depth = sample["depth"].astype(np.float32)
-            sample["depth"] = np.ascontiguousarray(depth)
-
-        return sample
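
These transforms all operate on a sample dict, which lets the same pipeline resize image, mask, and target consistently. A sketch of the pipeline on an image-only sample, using the same arguments `load_midas_transform` passes for the DPT models:

# Sketch only, not part of this commit.
import cv2
import numpy as np
from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet

pipeline = [
    Resize(384, 384, resize_target=None, keep_aspect_ratio=True,
           ensure_multiple_of=32, resize_method="minimal",
           image_interpolation_method=cv2.INTER_CUBIC),
    NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    PrepareForNet(),
]

sample = {"image": np.random.rand(480, 640, 3)}   # HWC float in 0..1
for t in pipeline:
    sample = t(sample)

print(sample["image"].shape, sample["image"].dtype)  # (3, H', W') float32, sides multiples of 32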
ldm/modules/midas/midas/vit.py
DELETED
@@ -1,491 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import timm
|
4 |
-
import types
|
5 |
-
import math
|
6 |
-
import torch.nn.functional as F
|
7 |
-
|
8 |
-
|
9 |
-
class Slice(nn.Module):
|
10 |
-
def __init__(self, start_index=1):
|
11 |
-
super(Slice, self).__init__()
|
12 |
-
self.start_index = start_index
|
13 |
-
|
14 |
-
def forward(self, x):
|
15 |
-
return x[:, self.start_index :]
|
16 |
-
|
17 |
-
|
18 |
-
class AddReadout(nn.Module):
|
19 |
-
def __init__(self, start_index=1):
|
20 |
-
super(AddReadout, self).__init__()
|
21 |
-
self.start_index = start_index
|
22 |
-
|
23 |
-
def forward(self, x):
|
24 |
-
if self.start_index == 2:
|
25 |
-
readout = (x[:, 0] + x[:, 1]) / 2
|
26 |
-
else:
|
27 |
-
readout = x[:, 0]
|
28 |
-
return x[:, self.start_index :] + readout.unsqueeze(1)
|
29 |
-
|
30 |
-
|
31 |
-
class ProjectReadout(nn.Module):
|
32 |
-
def __init__(self, in_features, start_index=1):
|
33 |
-
super(ProjectReadout, self).__init__()
|
34 |
-
self.start_index = start_index
|
35 |
-
|
36 |
-
self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
|
37 |
-
|
38 |
-
def forward(self, x):
|
39 |
-
readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
|
40 |
-
features = torch.cat((x[:, self.start_index :], readout), -1)
|
41 |
-
|
42 |
-
return self.project(features)
|
43 |
-
|
44 |
-
|
45 |
-
class Transpose(nn.Module):
|
46 |
-
def __init__(self, dim0, dim1):
|
47 |
-
super(Transpose, self).__init__()
|
48 |
-
self.dim0 = dim0
|
49 |
-
self.dim1 = dim1
|
50 |
-
|
51 |
-
def forward(self, x):
|
52 |
-
x = x.transpose(self.dim0, self.dim1)
|
53 |
-
return x
|
54 |
-
|
55 |
-
|
56 |
-
def forward_vit(pretrained, x):
|
57 |
-
b, c, h, w = x.shape
|
58 |
-
|
59 |
-
glob = pretrained.model.forward_flex(x)
|
60 |
-
|
61 |
-
layer_1 = pretrained.activations["1"]
|
62 |
-
layer_2 = pretrained.activations["2"]
|
63 |
-
layer_3 = pretrained.activations["3"]
|
64 |
-
layer_4 = pretrained.activations["4"]
|
65 |
-
|
66 |
-
layer_1 = pretrained.act_postprocess1[0:2](layer_1)
|
67 |
-
layer_2 = pretrained.act_postprocess2[0:2](layer_2)
|
68 |
-
layer_3 = pretrained.act_postprocess3[0:2](layer_3)
|
69 |
-
layer_4 = pretrained.act_postprocess4[0:2](layer_4)
|
70 |
-
|
71 |
-
unflatten = nn.Sequential(
|
72 |
-
nn.Unflatten(
|
73 |
-
2,
|
74 |
-
torch.Size(
|
75 |
-
[
|
76 |
-
h // pretrained.model.patch_size[1],
|
77 |
-
w // pretrained.model.patch_size[0],
|
78 |
-
]
|
79 |
-
),
|
80 |
-
)
|
81 |
-
)
|
82 |
-
|
83 |
-
if layer_1.ndim == 3:
|
84 |
-
layer_1 = unflatten(layer_1)
|
85 |
-
if layer_2.ndim == 3:
|
86 |
-
layer_2 = unflatten(layer_2)
|
87 |
-
if layer_3.ndim == 3:
|
88 |
-
layer_3 = unflatten(layer_3)
|
89 |
-
if layer_4.ndim == 3:
|
90 |
-
layer_4 = unflatten(layer_4)
|
91 |
-
|
92 |
-
layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
|
93 |
-
layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
|
94 |
-
layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
|
95 |
-
layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
|
96 |
-
|
97 |
-
return layer_1, layer_2, layer_3, layer_4
|
98 |
-
|
99 |
-
|
100 |
-
def _resize_pos_embed(self, posemb, gs_h, gs_w):
|
101 |
-
posemb_tok, posemb_grid = (
|
102 |
-
posemb[:, : self.start_index],
|
103 |
-
posemb[0, self.start_index :],
|
104 |
-
)
|
105 |
-
|
106 |
-
gs_old = int(math.sqrt(len(posemb_grid)))
|
107 |
-
|
108 |
-
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
|
109 |
-
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
|
110 |
-
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
|
111 |
-
|
112 |
-
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
|
113 |
-
|
114 |
-
return posemb
|
115 |
-
|
116 |
-
|
117 |
-
def forward_flex(self, x):
|
118 |
-
b, c, h, w = x.shape
|
119 |
-
|
120 |
-
pos_embed = self._resize_pos_embed(
|
121 |
-
self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
|
122 |
-
)
|
123 |
-
|
124 |
-
B = x.shape[0]
|
125 |
-
|
126 |
-
if hasattr(self.patch_embed, "backbone"):
|
127 |
-
x = self.patch_embed.backbone(x)
|
128 |
-
if isinstance(x, (list, tuple)):
|
129 |
-
x = x[-1] # last feature if backbone outputs list/tuple of features
|
130 |
-
|
131 |
-
x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
|
132 |
-
|
133 |
-
if getattr(self, "dist_token", None) is not None:
|
134 |
-
cls_tokens = self.cls_token.expand(
|
135 |
-
B, -1, -1
|
136 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
137 |
-
dist_token = self.dist_token.expand(B, -1, -1)
|
138 |
-
x = torch.cat((cls_tokens, dist_token, x), dim=1)
|
139 |
-
else:
|
140 |
-
cls_tokens = self.cls_token.expand(
|
141 |
-
B, -1, -1
|
142 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
143 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
144 |
-
|
145 |
-
x = x + pos_embed
|
146 |
-
x = self.pos_drop(x)
|
147 |
-
|
148 |
-
for blk in self.blocks:
|
149 |
-
x = blk(x)
|
150 |
-
|
151 |
-
x = self.norm(x)
|
152 |
-
|
153 |
-
return x
|
154 |
-
|
155 |
-
|
156 |
-
activations = {}
|
157 |
-
|
158 |
-
|
159 |
-
def get_activation(name):
|
160 |
-
def hook(model, input, output):
|
161 |
-
activations[name] = output
|
162 |
-
|
163 |
-
return hook
|
164 |
-
|
165 |
-
|
166 |
-
def get_readout_oper(vit_features, features, use_readout, start_index=1):
|
167 |
-
if use_readout == "ignore":
|
168 |
-
readout_oper = [Slice(start_index)] * len(features)
|
169 |
-
elif use_readout == "add":
|
170 |
-
readout_oper = [AddReadout(start_index)] * len(features)
|
171 |
-
elif use_readout == "project":
|
172 |
-
readout_oper = [
|
173 |
-
ProjectReadout(vit_features, start_index) for out_feat in features
|
174 |
-
]
|
175 |
-
else:
|
176 |
-
assert (
|
177 |
-
False
|
178 |
-
), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
|
179 |
-
|
180 |
-
return readout_oper
|
181 |
-
|
182 |
-
|
183 |
-
def _make_vit_b16_backbone(
|
184 |
-
model,
|
185 |
-
features=[96, 192, 384, 768],
|
186 |
-
size=[384, 384],
|
187 |
-
hooks=[2, 5, 8, 11],
|
188 |
-
vit_features=768,
|
189 |
-
use_readout="ignore",
|
190 |
-
start_index=1,
|
191 |
-
):
|
192 |
-
pretrained = nn.Module()
|
193 |
-
|
194 |
-
pretrained.model = model
|
195 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
196 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
197 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
198 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
199 |
-
|
200 |
-
pretrained.activations = activations
|
201 |
-
|
202 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
203 |
-
|
204 |
-
# 32, 48, 136, 384
|
205 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
206 |
-
readout_oper[0],
|
207 |
-
Transpose(1, 2),
|
208 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
209 |
-
nn.Conv2d(
|
210 |
-
in_channels=vit_features,
|
211 |
-
out_channels=features[0],
|
212 |
-
kernel_size=1,
|
213 |
-
stride=1,
|
214 |
-
padding=0,
|
215 |
-
),
|
216 |
-
nn.ConvTranspose2d(
|
217 |
-
in_channels=features[0],
|
218 |
-
out_channels=features[0],
|
219 |
-
kernel_size=4,
|
220 |
-
stride=4,
|
221 |
-
padding=0,
|
222 |
-
bias=True,
|
223 |
-
dilation=1,
|
224 |
-
groups=1,
|
225 |
-
),
|
226 |
-
)
|
227 |
-
|
228 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
229 |
-
readout_oper[1],
|
230 |
-
Transpose(1, 2),
|
231 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
232 |
-
nn.Conv2d(
|
233 |
-
in_channels=vit_features,
|
234 |
-
out_channels=features[1],
|
235 |
-
kernel_size=1,
|
236 |
-
stride=1,
|
237 |
-
padding=0,
|
238 |
-
),
|
239 |
-
nn.ConvTranspose2d(
|
240 |
-
in_channels=features[1],
|
241 |
-
out_channels=features[1],
|
242 |
-
kernel_size=2,
|
243 |
-
stride=2,
|
244 |
-
padding=0,
|
245 |
-
bias=True,
|
246 |
-
dilation=1,
|
247 |
-
groups=1,
|
248 |
-
),
|
249 |
-
)
|
250 |
-
|
251 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
252 |
-
readout_oper[2],
|
253 |
-
Transpose(1, 2),
|
254 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
255 |
-
nn.Conv2d(
|
256 |
-
in_channels=vit_features,
|
257 |
-
out_channels=features[2],
|
258 |
-
kernel_size=1,
|
259 |
-
stride=1,
|
260 |
-
padding=0,
|
261 |
-
),
|
262 |
-
)
|
263 |
-
|
264 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
265 |
-
readout_oper[3],
|
266 |
-
Transpose(1, 2),
|
267 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
268 |
-
nn.Conv2d(
|
269 |
-
in_channels=vit_features,
|
270 |
-
out_channels=features[3],
|
271 |
-
kernel_size=1,
|
272 |
-
stride=1,
|
273 |
-
padding=0,
|
274 |
-
),
|
275 |
-
nn.Conv2d(
|
276 |
-
in_channels=features[3],
|
277 |
-
out_channels=features[3],
|
278 |
-
kernel_size=3,
|
279 |
-
stride=2,
|
280 |
-
padding=1,
|
281 |
-
),
|
282 |
-
)
|
283 |
-
|
284 |
-
pretrained.model.start_index = start_index
|
285 |
-
pretrained.model.patch_size = [16, 16]
|
286 |
-
|
287 |
-
# We inject this function into the VisionTransformer instances so that
|
288 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
289 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
290 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
291 |
-
_resize_pos_embed, pretrained.model
|
292 |
-
)
|
293 |
-
|
294 |
-
return pretrained
|
295 |
-
|
296 |
-
|
297 |
-
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
|
298 |
-
model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
|
299 |
-
|
300 |
-
hooks = [5, 11, 17, 23] if hooks == None else hooks
|
301 |
-
return _make_vit_b16_backbone(
|
302 |
-
model,
|
303 |
-
features=[256, 512, 1024, 1024],
|
304 |
-
hooks=hooks,
|
305 |
-
vit_features=1024,
|
306 |
-
use_readout=use_readout,
|
307 |
-
)
|
308 |
-
|
309 |
-
|
310 |
-
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
|
311 |
-
model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
|
312 |
-
|
313 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
314 |
-
return _make_vit_b16_backbone(
|
315 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
316 |
-
)
|
317 |
-
|
318 |
-
|
319 |
-
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
|
320 |
-
model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
|
321 |
-
|
322 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
323 |
-
return _make_vit_b16_backbone(
|
324 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
325 |
-
)
|
326 |
-
|
327 |
-
|
328 |
-
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
|
329 |
-
model = timm.create_model(
|
330 |
-
"vit_deit_base_distilled_patch16_384", pretrained=pretrained
|
331 |
-
)
|
332 |
-
|
333 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
334 |
-
return _make_vit_b16_backbone(
|
335 |
-
model,
|
336 |
-
features=[96, 192, 384, 768],
|
337 |
-
hooks=hooks,
|
338 |
-
use_readout=use_readout,
|
339 |
-
start_index=2,
|
340 |
-
)
|
341 |
-
|
342 |
-
|
343 |
-
def _make_vit_b_rn50_backbone(
|
344 |
-
model,
|
345 |
-
features=[256, 512, 768, 768],
|
346 |
-
size=[384, 384],
|
347 |
-
hooks=[0, 1, 8, 11],
|
348 |
-
vit_features=768,
|
349 |
-
use_vit_only=False,
|
350 |
-
use_readout="ignore",
|
351 |
-
start_index=1,
|
352 |
-
):
|
353 |
-
pretrained = nn.Module()
|
354 |
-
|
355 |
-
pretrained.model = model
|
356 |
-
|
357 |
-
if use_vit_only == True:
|
358 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
359 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
360 |
-
else:
|
361 |
-
pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
|
362 |
-
get_activation("1")
|
363 |
-
)
|
364 |
-
pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
|
-        get_activation("2")
-    )
-
-    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
-    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
-
-    pretrained.activations = activations
-
-    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
-
-    if use_vit_only == True:
-        pretrained.act_postprocess1 = nn.Sequential(
-            readout_oper[0],
-            Transpose(1, 2),
-            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-            nn.Conv2d(
-                in_channels=vit_features,
-                out_channels=features[0],
-                kernel_size=1,
-                stride=1,
-                padding=0,
-            ),
-            nn.ConvTranspose2d(
-                in_channels=features[0],
-                out_channels=features[0],
-                kernel_size=4,
-                stride=4,
-                padding=0,
-                bias=True,
-                dilation=1,
-                groups=1,
-            ),
-        )
-
-        pretrained.act_postprocess2 = nn.Sequential(
-            readout_oper[1],
-            Transpose(1, 2),
-            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-            nn.Conv2d(
-                in_channels=vit_features,
-                out_channels=features[1],
-                kernel_size=1,
-                stride=1,
-                padding=0,
-            ),
-            nn.ConvTranspose2d(
-                in_channels=features[1],
-                out_channels=features[1],
-                kernel_size=2,
-                stride=2,
-                padding=0,
-                bias=True,
-                dilation=1,
-                groups=1,
-            ),
-        )
-    else:
-        pretrained.act_postprocess1 = nn.Sequential(
-            nn.Identity(), nn.Identity(), nn.Identity()
-        )
-        pretrained.act_postprocess2 = nn.Sequential(
-            nn.Identity(), nn.Identity(), nn.Identity()
-        )
-
-    pretrained.act_postprocess3 = nn.Sequential(
-        readout_oper[2],
-        Transpose(1, 2),
-        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-        nn.Conv2d(
-            in_channels=vit_features,
-            out_channels=features[2],
-            kernel_size=1,
-            stride=1,
-            padding=0,
-        ),
-    )
-
-    pretrained.act_postprocess4 = nn.Sequential(
-        readout_oper[3],
-        Transpose(1, 2),
-        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
-        nn.Conv2d(
-            in_channels=vit_features,
-            out_channels=features[3],
-            kernel_size=1,
-            stride=1,
-            padding=0,
-        ),
-        nn.Conv2d(
-            in_channels=features[3],
-            out_channels=features[3],
-            kernel_size=3,
-            stride=2,
-            padding=1,
-        ),
-    )
-
-    pretrained.model.start_index = start_index
-    pretrained.model.patch_size = [16, 16]
-
-    # We inject this function into the VisionTransformer instances so that
-    # we can use it with interpolated position embeddings without modifying the library source.
-    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
-
-    # We inject this function into the VisionTransformer instances so that
-    # we can use it with interpolated position embeddings without modifying the library source.
-    pretrained.model._resize_pos_embed = types.MethodType(
-        _resize_pos_embed, pretrained.model
-    )
-
-    return pretrained
-
-
-def _make_pretrained_vitb_rn50_384(
-    pretrained, use_readout="ignore", hooks=None, use_vit_only=False
-):
-    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
-
-    hooks = [0, 1, 8, 11] if hooks == None else hooks
-    return _make_vit_b_rn50_backbone(
-        model,
-        features=[256, 512, 768, 768],
-        size=[384, 384],
-        hooks=hooks,
-        use_vit_only=use_vit_only,
-        use_readout=use_readout,
-    )
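
For context on the deleted backbone code above: it harvests intermediate ViT activations via register_forward_hook and then reshapes them with the act_postprocess heads. A minimal, self-contained sketch of that hook pattern (the toy model below is illustrative, not from the deleted file):

import torch
import torch.nn as nn

# Capture the output of chosen sub-modules into a dict during one forward
# pass -- exactly the role get_activation plays in the deleted MiDaS code.
activations = {}

def get_activation(name):
    def hook(module, inputs, output):
        activations[name] = output
    return hook

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
model[0].register_forward_hook(get_activation("1"))
model[2].register_forward_hook(get_activation("2"))

_ = model(torch.randn(2, 8))
print({k: tuple(v.shape) for k, v in activations.items()})  # {'1': (2, 16), '2': (2, 4)}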
ldm/modules/midas/utils.py DELETED
@@ -1,189 +0,0 @@
-"""Utils for monoDepth."""
-import sys
-import re
-import numpy as np
-import cv2
-import torch
-
-
-def read_pfm(path):
-    """Read pfm file.
-
-    Args:
-        path (str): path to file
-
-    Returns:
-        tuple: (data, scale)
-    """
-    with open(path, "rb") as file:
-
-        color = None
-        width = None
-        height = None
-        scale = None
-        endian = None
-
-        header = file.readline().rstrip()
-        if header.decode("ascii") == "PF":
-            color = True
-        elif header.decode("ascii") == "Pf":
-            color = False
-        else:
-            raise Exception("Not a PFM file: " + path)
-
-        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
-        if dim_match:
-            width, height = list(map(int, dim_match.groups()))
-        else:
-            raise Exception("Malformed PFM header.")
-
-        scale = float(file.readline().decode("ascii").rstrip())
-        if scale < 0:
-            # little-endian
-            endian = "<"
-            scale = -scale
-        else:
-            # big-endian
-            endian = ">"
-
-        data = np.fromfile(file, endian + "f")
-        shape = (height, width, 3) if color else (height, width)
-
-        data = np.reshape(data, shape)
-        data = np.flipud(data)
-
-        return data, scale
-
-
-def write_pfm(path, image, scale=1):
-    """Write pfm file.
-
-    Args:
-        path (str): pathto file
-        image (array): data
-        scale (int, optional): Scale. Defaults to 1.
-    """
-
-    with open(path, "wb") as file:
-        color = None
-
-        if image.dtype.name != "float32":
-            raise Exception("Image dtype must be float32.")
-
-        image = np.flipud(image)
-
-        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
-            color = True
-        elif (
-            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
-        ):  # greyscale
-            color = False
-        else:
-            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
-
-        file.write("PF\n" if color else "Pf\n".encode())
-        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
-
-        endian = image.dtype.byteorder
-
-        if endian == "<" or endian == "=" and sys.byteorder == "little":
-            scale = -scale
-
-        file.write("%f\n".encode() % scale)
-
-        image.tofile(file)
-
-
-def read_image(path):
-    """Read image and output RGB image (0-1).
-
-    Args:
-        path (str): path to file
-
-    Returns:
-        array: RGB image (0-1)
-    """
-    img = cv2.imread(path)
-
-    if img.ndim == 2:
-        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-
-    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
-
-    return img
-
-
-def resize_image(img):
-    """Resize image and make it fit for network.
-
-    Args:
-        img (array): image
-
-    Returns:
-        tensor: data ready for network
-    """
-    height_orig = img.shape[0]
-    width_orig = img.shape[1]
-
-    if width_orig > height_orig:
-        scale = width_orig / 384
-    else:
-        scale = height_orig / 384
-
-    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
-    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
-
-    img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
-
-    img_resized = (
-        torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
-    )
-    img_resized = img_resized.unsqueeze(0)
-
-    return img_resized
-
-
-def resize_depth(depth, width, height):
-    """Resize depth map and bring to CPU (numpy).
-
-    Args:
-        depth (tensor): depth
-        width (int): image width
-        height (int): image height
-
-    Returns:
-        array: processed depth
-    """
-    depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
-
-    depth_resized = cv2.resize(
-        depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
-    )
-
-    return depth_resized
-
-def write_depth(path, depth, bits=1):
-    """Write depth map to pfm and png file.
-
-    Args:
-        path (str): filepath without extension
-        depth (array): depth
-    """
-    write_pfm(path + ".pfm", depth.astype(np.float32))
-
-    depth_min = depth.min()
-    depth_max = depth.max()
-
-    max_val = (2**(8*bits))-1
-
-    if depth_max - depth_min > np.finfo("float").eps:
-        out = max_val * (depth - depth_min) / (depth_max - depth_min)
-    else:
-        out = np.zeros(depth.shape, dtype=depth.type)
-
-    if bits == 1:
-        cv2.imwrite(path + ".png", out.astype("uint8"))
-    elif bits == 2:
-        cv2.imwrite(path + ".png", out.astype("uint16"))
-
-    return
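
write_depth above linearly rescales a raw depth map into the full 8- or 16-bit range before writing a PNG. A standalone sketch of that normalization (NumPy only; the function name is ours, and the degenerate branch sidesteps the dtype=depth.type typo in the deleted code):

import numpy as np

def normalize_depth(depth: np.ndarray, bits: int = 2) -> np.ndarray:
    # Map [depth_min, depth_max] linearly onto [0, 2**(8*bits) - 1].
    depth_min, depth_max = depth.min(), depth.max()
    max_val = (2 ** (8 * bits)) - 1
    if depth_max - depth_min > np.finfo("float").eps:
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        out = np.zeros(depth.shape)  # constant depth map: write all zeros
    return out.astype("uint16" if bits == 2 else "uint8")

print(normalize_depth(np.random.rand(4, 4)).dtype)  # uint16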
ldm/util.py DELETED
@@ -1,197 +0,0 @@
-import importlib
-
-import torch
-from torch import optim
-import numpy as np
-
-from inspect import isfunction
-from PIL import Image, ImageDraw, ImageFont
-
-
-def log_txt_as_img(wh, xc, size=10):
-    # wh a tuple of (width, height)
-    # xc a list of captions to plot
-    b = len(xc)
-    txts = list()
-    for bi in range(b):
-        txt = Image.new("RGB", wh, color="white")
-        draw = ImageDraw.Draw(txt)
-        font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
-        nc = int(40 * (wh[0] / 256))
-        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
-
-        try:
-            draw.text((0, 0), lines, fill="black", font=font)
-        except UnicodeEncodeError:
-            print("Cant encode string for logging. Skipping.")
-
-        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
-        txts.append(txt)
-    txts = np.stack(txts)
-    txts = torch.tensor(txts)
-    return txts
-
-
-def ismap(x):
-    if not isinstance(x, torch.Tensor):
-        return False
-    return (len(x.shape) == 4) and (x.shape[1] > 3)
-
-
-def isimage(x):
-    if not isinstance(x,torch.Tensor):
-        return False
-    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
-
-
-def exists(x):
-    return x is not None
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-def mean_flat(tensor):
-    """
-    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
-    Take the mean over all non-batch dimensions.
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def count_params(model, verbose=False):
-    total_params = sum(p.numel() for p in model.parameters())
-    if verbose:
-        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
-    return total_params
-
-
-def instantiate_from_config(config):
-    if not "target" in config:
-        if config == '__is_first_stage__':
-            return None
-        elif config == "__is_unconditional__":
-            return None
-        raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()))
-
-
-def get_obj_from_str(string, reload=False):
-    module, cls = string.rsplit(".", 1)
-    if reload:
-        module_imp = importlib.import_module(module)
-        importlib.reload(module_imp)
-    return getattr(importlib.import_module(module, package=None), cls)
-
-
-class AdamWwithEMAandWings(optim.Optimizer):
-    # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298
-    def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8,  # TODO: check hyperparameters before using
-                 weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999,  # ema decay to match previous code
-                 ema_power=1., param_names=()):
-        """AdamW that saves EMA versions of the parameters."""
-        if not 0.0 <= lr:
-            raise ValueError("Invalid learning rate: {}".format(lr))
-        if not 0.0 <= eps:
-            raise ValueError("Invalid epsilon value: {}".format(eps))
-        if not 0.0 <= betas[0] < 1.0:
-            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
-        if not 0.0 <= betas[1] < 1.0:
-            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
-        if not 0.0 <= weight_decay:
-            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
-        if not 0.0 <= ema_decay <= 1.0:
-            raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
-        defaults = dict(lr=lr, betas=betas, eps=eps,
-                        weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
-                        ema_power=ema_power, param_names=param_names)
-        super().__init__(params, defaults)
-
-    def __setstate__(self, state):
-        super().__setstate__(state)
-        for group in self.param_groups:
-            group.setdefault('amsgrad', False)
-
-    @torch.no_grad()
-    def step(self, closure=None):
-        """Performs a single optimization step.
-        Args:
-            closure (callable, optional): A closure that reevaluates the model
-                and returns the loss.
-        """
-        loss = None
-        if closure is not None:
-            with torch.enable_grad():
-                loss = closure()
-
-        for group in self.param_groups:
-            params_with_grad = []
-            grads = []
-            exp_avgs = []
-            exp_avg_sqs = []
-            ema_params_with_grad = []
-            state_sums = []
-            max_exp_avg_sqs = []
-            state_steps = []
-            amsgrad = group['amsgrad']
-            beta1, beta2 = group['betas']
-            ema_decay = group['ema_decay']
-            ema_power = group['ema_power']
-
-            for p in group['params']:
-                if p.grad is None:
-                    continue
-                params_with_grad.append(p)
-                if p.grad.is_sparse:
-                    raise RuntimeError('AdamW does not support sparse gradients')
-                grads.append(p.grad)
-
-                state = self.state[p]
-
-                # State initialization
-                if len(state) == 0:
-                    state['step'] = 0
-                    # Exponential moving average of gradient values
-                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-                    # Exponential moving average of squared gradient values
-                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-                    if amsgrad:
-                        # Maintains max of all exp. moving avg. of sq. grad. values
-                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
-                    # Exponential moving average of parameter values
-                    state['param_exp_avg'] = p.detach().float().clone()
-
-                exp_avgs.append(state['exp_avg'])
-                exp_avg_sqs.append(state['exp_avg_sq'])
-                ema_params_with_grad.append(state['param_exp_avg'])
-
-                if amsgrad:
-                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])
-
-                # update the steps for each param group update
-                state['step'] += 1
-                # record the step after step update
-                state_steps.append(state['step'])
-
-            optim._functional.adamw(params_with_grad,
-                    grads,
-                    exp_avgs,
-                    exp_avg_sqs,
-                    max_exp_avg_sqs,
-                    state_steps,
-                    amsgrad=amsgrad,
-                    beta1=beta1,
-                    beta2=beta2,
-                    lr=group['lr'],
-                    weight_decay=group['weight_decay'],
-                    eps=group['eps'],
-                    maximize=False)
-
-            cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power)
-            for param, ema_param in zip(params_with_grad, ema_params_with_grad):
-                ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay)
-
-        return loss
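
instantiate_from_config above is the pattern the whole ldm config system hangs on: a `target` dotted path plus a `params` dict. A minimal standard-library sketch of the same idea, stripped of the ldm-specific sentinel strings:

import importlib

def get_obj_from_str(path: str):
    # Resolve "package.module.Name" to the attribute Name.
    module, name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module), name)

def instantiate_from_config(config: dict):
    # Import config["target"] and call it with config["params"] as kwargs.
    return get_obj_from_str(config["target"])(**config.get("params", {}))

delta = instantiate_from_config({"target": "datetime.timedelta", "params": {"days": 1}})
print(delta)  # 1 day, 0:00:00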
requirements.txt CHANGED
@@ -1,19 +1,10 @@
 --extra-index-url https://download.pytorch.org/whl/cu113
 torch==1.13.0
 torchvision
-
-
-
-
-imageio-ffmpeg==0.4.2
-pytorch-lightning==1.4.2
-torchmetrics==0.6
-omegaconf==2.1.1
-test-tube>=0.7.5
-einops==0.3.0
-transformers==4.19.2
-webdataset==0.2.5
-open_clip_torch==2.7.0
+git+https://github.com/huggingface/diffusers.git@30f6f44
+transformers
+accelerate
+ftfy
 python-dotenv
 invisible-watermark
 https://github.com/apolinario/xformers/releases/download/0.0.3/xformers-0.0.14.dev0-cp38-cp38-linux_x86_64.whl
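
The dependency change above is the heart of this commit: the bespoke ldm stack (pytorch-lightning, omegaconf, open_clip_torch, ...) is replaced by diffusers pinned to commit 30f6f44, plus transformers, accelerate, and ftfy. For reference, a minimal sketch of the text-to-image path these requirements enable; the exact wiring in app.py may differ, and the scheduler choice here is illustrative rather than taken from this commit:

import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler

# Load Stable Diffusion 2 through the diffusers backend.
model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a professional photograph of an astronaut riding a horse",
    guidance_scale=9.0,
    num_inference_steps=25,
).images[0]
image.save("sample.png")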
scripts/img2img.py DELETED
@@ -1,279 +0,0 @@
-"""make variations of input image"""
-
-import argparse, os
-import PIL
-import torch
-import numpy as np
-from omegaconf import OmegaConf
-from PIL import Image
-from tqdm import tqdm, trange
-from itertools import islice
-from einops import rearrange, repeat
-from torchvision.utils import make_grid
-from torch import autocast
-from contextlib import nullcontext
-from pytorch_lightning import seed_everything
-from imwatermark import WatermarkEncoder
-
-
-from scripts.txt2img import put_watermark
-from ldm.util import instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-
-
-def chunk(it, size):
-    it = iter(it)
-    return iter(lambda: tuple(islice(it, size)), ())
-
-
-def load_model_from_config(config, ckpt, verbose=False):
-    print(f"Loading model from {ckpt}")
-    pl_sd = torch.load(ckpt, map_location="cpu")
-    if "global_step" in pl_sd:
-        print(f"Global Step: {pl_sd['global_step']}")
-    sd = pl_sd["state_dict"]
-    model = instantiate_from_config(config.model)
-    m, u = model.load_state_dict(sd, strict=False)
-    if len(m) > 0 and verbose:
-        print("missing keys:")
-        print(m)
-    if len(u) > 0 and verbose:
-        print("unexpected keys:")
-        print(u)
-
-    model.cuda()
-    model.eval()
-    return model
-
-
-def load_img(path):
-    image = Image.open(path).convert("RGB")
-    w, h = image.size
-    print(f"loaded input image of size ({w}, {h}) from {path}")
-    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
-    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
-    image = np.array(image).astype(np.float32) / 255.0
-    image = image[None].transpose(0, 3, 1, 2)
-    image = torch.from_numpy(image)
-    return 2. * image - 1.
-
-
-def main():
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        "--prompt",
-        type=str,
-        nargs="?",
-        default="a painting of a virus monster playing guitar",
-        help="the prompt to render"
-    )
-
-    parser.add_argument(
-        "--init-img",
-        type=str,
-        nargs="?",
-        help="path to the input image"
-    )
-
-    parser.add_argument(
-        "--outdir",
-        type=str,
-        nargs="?",
-        help="dir to write results to",
-        default="outputs/img2img-samples"
-    )
-
-    parser.add_argument(
-        "--ddim_steps",
-        type=int,
-        default=50,
-        help="number of ddim sampling steps",
-    )
-
-    parser.add_argument(
-        "--fixed_code",
-        action='store_true',
-        help="if enabled, uses the same starting code across all samples ",
-    )
-
-    parser.add_argument(
-        "--ddim_eta",
-        type=float,
-        default=0.0,
-        help="ddim eta (eta=0.0 corresponds to deterministic sampling",
-    )
-    parser.add_argument(
-        "--n_iter",
-        type=int,
-        default=1,
-        help="sample this often",
-    )
-
-    parser.add_argument(
-        "--C",
-        type=int,
-        default=4,
-        help="latent channels",
-    )
-    parser.add_argument(
-        "--f",
-        type=int,
-        default=8,
-        help="downsampling factor, most often 8 or 16",
-    )
-
-    parser.add_argument(
-        "--n_samples",
-        type=int,
-        default=2,
-        help="how many samples to produce for each given prompt. A.k.a batch size",
-    )
-
-    parser.add_argument(
-        "--n_rows",
-        type=int,
-        default=0,
-        help="rows in the grid (default: n_samples)",
-    )
-
-    parser.add_argument(
-        "--scale",
-        type=float,
-        default=9.0,
-        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
-    )
-
-    parser.add_argument(
-        "--strength",
-        type=float,
-        default=0.8,
-        help="strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image",
-    )
-
-    parser.add_argument(
-        "--from-file",
-        type=str,
-        help="if specified, load prompts from this file",
-    )
-    parser.add_argument(
-        "--config",
-        type=str,
-        default="configs/stable-diffusion/v2-inference.yaml",
-        help="path to config which constructs model",
-    )
-    parser.add_argument(
-        "--ckpt",
-        type=str,
-        help="path to checkpoint of model",
-    )
-    parser.add_argument(
-        "--seed",
-        type=int,
-        default=42,
-        help="the seed (for reproducible sampling)",
-    )
-    parser.add_argument(
-        "--precision",
-        type=str,
-        help="evaluate at this precision",
-        choices=["full", "autocast"],
-        default="autocast"
-    )
-
-    opt = parser.parse_args()
-    seed_everything(opt.seed)
-
-    config = OmegaConf.load(f"{opt.config}")
-    model = load_model_from_config(config, f"{opt.ckpt}")
-
-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    model = model.to(device)
-
-    sampler = DDIMSampler(model)
-
-    os.makedirs(opt.outdir, exist_ok=True)
-    outpath = opt.outdir
-
-    print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
-    wm = "SDV2"
-    wm_encoder = WatermarkEncoder()
-    wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
-
-    batch_size = opt.n_samples
-    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
-    if not opt.from_file:
-        prompt = opt.prompt
-        assert prompt is not None
-        data = [batch_size * [prompt]]
-
-    else:
-        print(f"reading prompts from {opt.from_file}")
-        with open(opt.from_file, "r") as f:
-            data = f.read().splitlines()
-            data = list(chunk(data, batch_size))
-
-    sample_path = os.path.join(outpath, "samples")
-    os.makedirs(sample_path, exist_ok=True)
-    base_count = len(os.listdir(sample_path))
-    grid_count = len(os.listdir(outpath)) - 1
-
-    assert os.path.isfile(opt.init_img)
-    init_image = load_img(opt.init_img).to(device)
-    init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
-    init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space
-
-    sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)
-
-    assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]'
-    t_enc = int(opt.strength * opt.ddim_steps)
-    print(f"target t_enc is {t_enc} steps")
-
-    precision_scope = autocast if opt.precision == "autocast" else nullcontext
-    with torch.no_grad():
-        with precision_scope("cuda"):
-            with model.ema_scope():
-                all_samples = list()
-                for n in trange(opt.n_iter, desc="Sampling"):
-                    for prompts in tqdm(data, desc="data"):
-                        uc = None
-                        if opt.scale != 1.0:
-                            uc = model.get_learned_conditioning(batch_size * [""])
-                        if isinstance(prompts, tuple):
-                            prompts = list(prompts)
-                        c = model.get_learned_conditioning(prompts)
-
-                        # encode (scaled latent)
-                        z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * batch_size).to(device))
-                        # decode it
-                        samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale,
-                                                 unconditional_conditioning=uc, )
-
-                        x_samples = model.decode_first_stage(samples)
-                        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
-
-                        for x_sample in x_samples:
-                            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
-                            img = Image.fromarray(x_sample.astype(np.uint8))
-                            img = put_watermark(img, wm_encoder)
-                            img.save(os.path.join(sample_path, f"{base_count:05}.png"))
-                            base_count += 1
-                        all_samples.append(x_samples)
-
-                # additionally, save as grid
-                grid = torch.stack(all_samples, 0)
-                grid = rearrange(grid, 'n b c h w -> (n b) c h w')
-                grid = make_grid(grid, nrow=n_rows)
-
-                # to image
-                grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
-                grid = Image.fromarray(grid.astype(np.uint8))
-                grid = put_watermark(grid, wm_encoder)
-                grid.save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
-                grid_count += 1
-
-    print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")
-
-
-if __name__ == "__main__":
-    main()
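
The deletion above is the CLI counterpart of what diffusers now provides out of the box. A minimal sketch of the equivalent StableDiffusionImg2ImgPipeline call, assuming a local init.png and the SD2 base weights (argument names follow recent diffusers releases; the pinned commit may differ slightly):

import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("init.png").convert("RGB").resize((768, 512))

# strength plays the same role as --strength in the deleted script:
# how far toward pure noise the init image is pushed before denoising.
result = pipe(
    prompt="a painting of a virus monster playing guitar",
    image=init_image,
    strength=0.8,
    guidance_scale=9.0,
).images[0]
result.save("variation.png")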
scripts/streamlit/depth2img.py DELETED
@@ -1,158 +0,0 @@
-import sys
-import torch
-import numpy as np
-import streamlit as st
-from PIL import Image
-from omegaconf import OmegaConf
-from einops import repeat, rearrange
-from pytorch_lightning import seed_everything
-from imwatermark import WatermarkEncoder
-
-from scripts.txt2img import put_watermark
-from ldm.util import instantiate_from_config
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.data.util import AddMiDaS
-
-torch.set_grad_enabled(False)
-
-
-@st.cache(allow_output_mutation=True)
-def initialize_model(config, ckpt):
-    config = OmegaConf.load(config)
-    model = instantiate_from_config(config.model)
-    model.load_state_dict(torch.load(ckpt)["state_dict"], strict=False)
-
-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    model = model.to(device)
-    sampler = DDIMSampler(model)
-    return sampler
-
-
-def make_batch_sd(
-        image,
-        txt,
-        device,
-        num_samples=1,
-        model_type="dpt_hybrid"
-):
-    image = np.array(image.convert("RGB"))
-    image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-    # sample['jpg'] is tensor hwc in [-1, 1] at this point
-    midas_trafo = AddMiDaS(model_type=model_type)
-    batch = {
-        "jpg": image,
-        "txt": num_samples * [txt],
-    }
-    batch = midas_trafo(batch)
-    batch["jpg"] = rearrange(batch["jpg"], 'h w c -> 1 c h w')
-    batch["jpg"] = repeat(batch["jpg"].to(device=device), "1 ... -> n ...", n=num_samples)
-    batch["midas_in"] = repeat(torch.from_numpy(batch["midas_in"][None, ...]).to(device=device), "1 ... -> n ...", n=num_samples)
-    return batch
-
-
-def paint(sampler, image, prompt, t_enc, seed, scale, num_samples=1, callback=None,
-          do_full_sample=False):
-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    model = sampler.model
-    seed_everything(seed)
-
-    print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
-    wm = "SDV2"
-    wm_encoder = WatermarkEncoder()
-    wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
-
-    with torch.no_grad(),\
-            torch.autocast("cuda"):
-        batch = make_batch_sd(image, txt=prompt, device=device, num_samples=num_samples)
-        z = model.get_first_stage_encoding(model.encode_first_stage(batch[model.first_stage_key]))  # move to latent space
-        c = model.cond_stage_model.encode(batch["txt"])
-        c_cat = list()
-        for ck in model.concat_keys:
-            cc = batch[ck]
-            cc = model.depth_model(cc)
-            depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
-                                                                                           keepdim=True)
-            display_depth = (cc - depth_min) / (depth_max - depth_min)
-            st.image(Image.fromarray((display_depth[0, 0, ...].cpu().numpy() * 255.).astype(np.uint8)))
-            cc = torch.nn.functional.interpolate(
-                cc,
-                size=z.shape[2:],
-                mode="bicubic",
-                align_corners=False,
-            )
-            depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3],
-                                                                                           keepdim=True)
-            cc = 2. * (cc - depth_min) / (depth_max - depth_min) - 1.
-            c_cat.append(cc)
-        c_cat = torch.cat(c_cat, dim=1)
-        # cond
-        cond = {"c_concat": [c_cat], "c_crossattn": [c]}
-
-        # uncond cond
-        uc_cross = model.get_unconditional_conditioning(num_samples, "")
-        uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]}
-        if not do_full_sample:
-            # encode (scaled latent)
-            z_enc = sampler.stochastic_encode(z, torch.tensor([t_enc] * num_samples).to(model.device))
-        else:
-            z_enc = torch.randn_like(z)
-        # decode it
-        samples = sampler.decode(z_enc, cond, t_enc, unconditional_guidance_scale=scale,
-                                 unconditional_conditioning=uc_full, callback=callback)
-        x_samples_ddim = model.decode_first_stage(samples)
-        result = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
-        result = result.cpu().numpy().transpose(0, 2, 3, 1) * 255
-    return [put_watermark(Image.fromarray(img.astype(np.uint8)), wm_encoder) for img in result]
-
-
-def run():
-    st.title("Stable Diffusion Depth2Img")
-    # run via streamlit run scripts/demo/depth2img.py <path-tp-config> <path-to-ckpt>
-    sampler = initialize_model(sys.argv[1], sys.argv[2])
-
-    image = st.file_uploader("Image", ["jpg", "png"])
-    if image:
-        image = Image.open(image)
-        w, h = image.size
-        st.text(f"loaded input image of size ({w}, {h})")
-        width, height = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
-        image = image.resize((width, height))
-        st.text(f"resized input image to size ({width}, {height} (w, h))")
-        st.image(image)
-
-        prompt = st.text_input("Prompt")
-
-        seed = st.number_input("Seed", min_value=0, max_value=1000000, value=0)
-        num_samples = st.number_input("Number of Samples", min_value=1, max_value=64, value=1)
-        scale = st.slider("Scale", min_value=0.1, max_value=30.0, value=9.0, step=0.1)
-        steps = st.slider("DDIM Steps", min_value=0, max_value=50, value=50, step=1)
-        strength = st.slider("Strength", min_value=0., max_value=1., value=0.9)
-        eta = st.sidebar.number_input("eta (DDIM)", value=0., min_value=0., max_value=1.)
-
-        t_progress = st.progress(0)
-        def t_callback(t):
-            t_progress.progress(min((t + 1) / t_enc, 1.))
-
-        assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
-        do_full_sample = strength == 1.
-        t_enc = min(int(strength * steps), steps-1)
-        sampler.make_schedule(steps, ddim_eta=eta, verbose=True)
-        if st.button("Sample"):
-            result = paint(
                sampler=sampler,
-                image=image,
-                prompt=prompt,
-                t_enc=t_enc,
-                seed=seed,
-                scale=scale,
-                num_samples=num_samples,
-                callback=t_callback,
-                do_full_sample=do_full_sample
-            )
-            st.write("Result")
-            for image in result:
-                st.image(image, output_format='PNG')
-
-
-if __name__ == "__main__":
-    run()
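
Diffusers ships the same depth-conditioned workflow as StableDiffusionDepth2ImgPipeline, which runs the MiDaS depth estimator internally, so the manual AddMiDaS and depth-normalization plumbing above becomes unnecessary. A minimal sketch, assuming a local room.png and the stabilityai/stable-diffusion-2-depth weights:

import torch
from PIL import Image
from diffusers import StableDiffusionDepth2ImgPipeline

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("room.png").convert("RGB")

result = pipe(
    prompt="a cozy living room, warm lighting",
    image=init_image,
    strength=0.9,       # same role as the Strength slider in the deleted demo
    guidance_scale=9.0,
).images[0]
result.save("depth2img.png")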
scripts/streamlit/inpainting.py DELETED
@@ -1,194 +0,0 @@
-import sys
-import cv2
-import torch
-import numpy as np
-import streamlit as st
-from PIL import Image
-from omegaconf import OmegaConf
-from einops import repeat
-from streamlit_drawable_canvas import st_canvas
-from imwatermark import WatermarkEncoder
-
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.util import instantiate_from_config
-
-
-torch.set_grad_enabled(False)
-
-
-def put_watermark(img, wm_encoder=None):
-    if wm_encoder is not None:
-        img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
-        img = wm_encoder.encode(img, 'dwtDct')
-        img = Image.fromarray(img[:, :, ::-1])
-    return img
-
-
-@st.cache(allow_output_mutation=True)
-def initialize_model(config, ckpt):
-    config = OmegaConf.load(config)
-    model = instantiate_from_config(config.model)
-
-    model.load_state_dict(torch.load(ckpt)["state_dict"], strict=False)
-
-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    model = model.to(device)
-    sampler = DDIMSampler(model)
-
-    return sampler
-
-
-def make_batch_sd(
-        image,
-        mask,
-        txt,
-        device,
-        num_samples=1):
-    image = np.array(image.convert("RGB"))
-    image = image[None].transpose(0, 3, 1, 2)
-    image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
-
-    mask = np.array(mask.convert("L"))
-    mask = mask.astype(np.float32) / 255.0
-    mask = mask[None, None]
-    mask[mask < 0.5] = 0
-    mask[mask >= 0.5] = 1
-    mask = torch.from_numpy(mask)
-
-    masked_image = image * (mask < 0.5)
-
-    batch = {
-        "image": repeat(image.to(device=device), "1 ... -> n ...", n=num_samples),
-        "txt": num_samples * [txt],
-        "mask": repeat(mask.to(device=device), "1 ... -> n ...", n=num_samples),
-        "masked_image": repeat(masked_image.to(device=device), "1 ... -> n ...", n=num_samples),
-    }
-    return batch
-
-
-def inpaint(sampler, image, mask, prompt, seed, scale, ddim_steps, num_samples=1, w=512, h=512):
-    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-    model = sampler.model
-
-    print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
-    wm = "SDV2"
-    wm_encoder = WatermarkEncoder()
-    wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
-
-    prng = np.random.RandomState(seed)
-    start_code = prng.randn(num_samples, 4, h // 8, w // 8)
-    start_code = torch.from_numpy(start_code).to(device=device, dtype=torch.float32)
-
-    with torch.no_grad(), \
-            torch.autocast("cuda"):
-        batch = make_batch_sd(image, mask, txt=prompt, device=device, num_samples=num_samples)
-
-        c = model.cond_stage_model.encode(batch["txt"])
-
-        c_cat = list()
-        for ck in model.concat_keys:
-            cc = batch[ck].float()
-            if ck != model.masked_image_key:
-                bchw = [num_samples, 4, h // 8, w // 8]
-                cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
-            else:
-                cc = model.get_first_stage_encoding(model.encode_first_stage(cc))
-            c_cat.append(cc)
-        c_cat = torch.cat(c_cat, dim=1)
-
-        # cond
-        cond = {"c_concat": [c_cat], "c_crossattn": [c]}
-
-        # uncond cond
-        uc_cross = model.get_unconditional_conditioning(num_samples, "")
-        uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]}
-
-        shape = [model.channels, h // 8, w // 8]
-        samples_cfg, intermediates = sampler.sample(
-            ddim_steps,
-            num_samples,
-            shape,
-            cond,
-            verbose=False,
-            eta=1.0,
-            unconditional_guidance_scale=scale,
-            unconditional_conditioning=uc_full,
-            x_T=start_code,
-        )
-        x_samples_ddim = model.decode_first_stage(samples_cfg)
-
-        result = torch.clamp((x_samples_ddim + 1.0) / 2.0,
-                             min=0.0, max=1.0)
-
-        result = result.cpu().numpy().transpose(0, 2, 3, 1) * 255
-    return [put_watermark(Image.fromarray(img.astype(np.uint8)), wm_encoder) for img in result]
-
-
-def run():
-    st.title("Stable Diffusion Inpainting")
-
-    sampler = initialize_model(sys.argv[1], sys.argv[2])
-
-    image = st.file_uploader("Image", ["jpg", "png"])
-    if image:
-        image = Image.open(image)
-        w, h = image.size
-        print(f"loaded input image of size ({w}, {h})")
-        width, height = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 32
-        image = image.resize((width, height))
-
-        prompt = st.text_input("Prompt")
-
-        seed = st.number_input("Seed", min_value=0, max_value=1000000, value=0)
-        num_samples = st.number_input("Number of Samples", min_value=1, max_value=64, value=1)
-        scale = st.slider("Scale", min_value=0.1, max_value=30.0, value=10., step=0.1)
-        ddim_steps = st.slider("DDIM Steps", min_value=0, max_value=50, value=50, step=1)
-
-        fill_color = "rgba(255, 255, 255, 0.0)"
-        stroke_width = st.number_input("Brush Size",
-                                       value=64,
-                                       min_value=1,
-                                       max_value=100)
-        stroke_color = "rgba(255, 255, 255, 1.0)"
-        bg_color = "rgba(0, 0, 0, 1.0)"
-        drawing_mode = "freedraw"
-
-        st.write("Canvas")
-        st.caption(
-            "Draw a mask to inpaint, then click the 'Send to Streamlit' button (bottom left, with an arrow on it).")
-        canvas_result = st_canvas(
-            fill_color=fill_color,
-            stroke_width=stroke_width,
-            stroke_color=stroke_color,
-            background_color=bg_color,
-            background_image=image,
-            update_streamlit=False,
-            height=height,
-            width=width,
-            drawing_mode=drawing_mode,
-            key="canvas",
-        )
-        if canvas_result:
-            mask = canvas_result.image_data
-            mask = mask[:, :, -1] > 0
-            if mask.sum() > 0:
-                mask = Image.fromarray(mask)
-
-                result = inpaint(
-                    sampler=sampler,
-                    image=image,
-                    mask=mask,
-                    prompt=prompt,
-                    seed=seed,
-                    scale=scale,
-                    ddim_steps=ddim_steps,
-                    num_samples=num_samples,
-                    h=height, w=width
-                )
-                st.write("Inpainted")
-                for image in result:
-                    st.image(image, output_format='PNG')
-
-
-if __name__ == "__main__":
-    run()
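
The diffusers counterpart of this Streamlit demo is StableDiffusionInpaintPipeline, which takes the image and a white-on-black mask directly instead of the c_concat plumbing above. A minimal sketch, assuming photo.png and mask.png on disk and the SD2 inpainting weights:

import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")

image = Image.open("photo.png").convert("RGB").resize((512, 512))
mask = Image.open("mask.png").convert("L").resize((512, 512))  # white = repaint

result = pipe(
    prompt="a red fox sitting on a bench",
    image=image,
    mask_image=mask,
    guidance_scale=10.0,
    num_inference_steps=50,
).images[0]
result.save("inpainted.png")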