Vasudevakrishna committed on
Commit
f140a8c
1 Parent(s): 3588d1c

Create utils.py

Files changed (1)
  1. utils.py +153 -0
utils.py ADDED
@@ -0,0 +1,153 @@
+ import gc
+ import os
+
+ import numpy as np
+ import torch
+ from diffusers import AutoencoderKL, DiffusionPipeline, LMSDiscreteScheduler, StableDiffusionPipeline, UNet2DConditionModel
+ from matplotlib import pyplot as plt
+ from pathlib import Path
+ from PIL import Image
+ from torch import autocast
+ from torchvision import transforms as tfms
+ from tqdm.auto import tqdm
+ from transformers import CLIPTextModel, CLIPTokenizer, logging
+
+ # configurations
+ height, width = 512, 512
+ guidance_scale = 8
+ custom_loss_scale = 200
+ batch_size = 1
+ torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+
+
+ pretrained_model_name_or_path = "CompVis/stable-diffusion-v1-4"
+ pipe = DiffusionPipeline.from_pretrained(
+     pretrained_model_name_or_path,
+     torch_dtype=torch.float32,
+ ).to(torch_device)
+
+ # SD concept tokens, matched one-to-one with the textual inversions loaded below
+ sdconcepts = ['<morino-hon>', '<space-style>', '<tesla-bot>', '<midjourney-style>', '<hanfu-anime-style>']
+
+ pipe.load_textual_inversion("sd-concepts-library/morino-hon-style")
+ pipe.load_textual_inversion("sd-concepts-library/space-style")
+ pipe.load_textual_inversion("sd-concepts-library/tesla-bot")
+ pipe.load_textual_inversion("sd-concepts-library/midjourney-style")
+ pipe.load_textual_inversion("sd-concepts-library/hanfu-anime-style")
+
+ # define seeds, one per concept
+ seed_list = [1, 2, 3, 4, 5]
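+ # Note: generate_gradio_images below pairs seed_list with sdconcepts via zip,
+ # so the two lists are expected to stay the same length.
+
+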
+ def custom_loss(images):
+     # Gradient (smoothness) loss: mean absolute difference between
+     # horizontally and vertically adjacent pixels.
+     gradient_x = torch.abs(images[:, :, :, :-1] - images[:, :, :, 1:]).mean()
+     gradient_y = torch.abs(images[:, :, :-1, :] - images[:, :, 1:, :]).mean()
+     error = gradient_x + gradient_y
+     # Variational loss (equivalent formulation, kept for reference):
+     # diff_x = torch.abs(images[:, :, :, :-1] - images[:, :, :, 1:])
+     # diff_y = torch.abs(images[:, :, :-1, :] - images[:, :, 1:, :])
+     # error = diff_x.mean() + diff_y.mean()
+     return error
+
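+ # For intuition: custom_loss(torch.ones(1, 3, 64, 64)) is exactly 0 (perfectly
+ # smooth), while custom_loss(torch.rand(1, 3, 64, 64)) is positive; the
+ # guidance step in generate_latents scales this loss by custom_loss_scale and
+ # follows its gradient, steering generation towards smoother images.
+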
+ def latents_to_pil(latents):
+     # batch of latents -> list of PIL images
+     latents = (1 / 0.18215) * latents
+     with torch.no_grad():
+         image = pipe.vae.decode(latents).sample
+     image = (image / 2 + 0.5).clamp(0, 1)  # map from [-1, 1] to [0, 1]
+     image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
+     images = (image * 255).round().astype("uint8")
+     pil_images = [Image.fromarray(image) for image in images]
+     return pil_images
+
+ def generate_latents(prompts, num_inference_steps, seed_nums, loss_apply=False):
+     generator = torch.manual_seed(seed_nums)
+
+     # scheduler
+     scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
+     scheduler.set_timesteps(num_inference_steps)
+     scheduler.timesteps = scheduler.timesteps.to(torch.float32)
+
+     # text embeddings of the prompt
+     text_input = pipe.tokenizer(prompts, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt")
+     input_ids = text_input.input_ids.to(torch_device)
+     with torch.no_grad():
+         text_embeddings = pipe.text_encoder(input_ids)[0]
+
+     # unconditional (empty prompt) embeddings for classifier-free guidance
+     max_length = text_input.input_ids.shape[-1]
+     uncond_input = pipe.tokenizer(
+         [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
+     )
+     with torch.no_grad():
+         uncond_embeddings = pipe.text_encoder(uncond_input.input_ids.to(torch_device))[0]
+
+     text_embeddings = torch.cat([uncond_embeddings, text_embeddings])  # (2, 77, 768)
+
+     # random starting latents, scaled to the scheduler's initial noise level
+     latents = torch.randn(
+         (batch_size, pipe.unet.config.in_channels, height // 8, width // 8),
+         generator=generator,
+     ).to(torch.float32)
+     latents = latents.to(torch_device)
+     latents = latents * scheduler.init_noise_sigma
+
+     for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
+         # duplicate latents for classifier-free guidance (unconditional + conditional)
+         latent_model_input = torch.cat([latents] * 2)
+         sigma = scheduler.sigmas[i]
+         latent_model_input = scheduler.scale_model_input(latent_model_input, t)
+
+         with torch.no_grad():
+             noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]
+
+         noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+         # every 5th step, nudge the latents along the gradient of the custom loss
+         if loss_apply and i % 5 == 0:
+             latents = latents.detach().requires_grad_()
+             # predicted clean latents (scheduler.step(...).pred_original_sample did not work here)
+             latents_x0 = latents - sigma * noise_pred
+
+             # decode with the VAE so the loss is computed in pixel space, range (0, 1)
+             denoised_images = pipe.vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5
+
+             loss = custom_loss(denoised_images) * custom_loss_scale
+             print(f"Custom gradient loss {loss}")
+
+             cond_grad = torch.autograd.grad(loss, latents)[0]
+             latents = latents.detach() - cond_grad * sigma**2
+
+         latents = scheduler.step(noise_pred, t, latents).prev_sample
+
+     return latents
+
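+ # Illustrative use (the prompt text and step count are arbitrary examples):
+ #   latents = generate_latents(["a watercolor landscape <space-style>"], 30, seed_nums=2)
+ #   image = latents_to_pil(latents)[0]
+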
+ # Convert a PIL image to a NumPy array
+ def pil_to_np(image):
+     return np.array(image)
+
+
+ def generate_gradio_images(prompt, num_inference_steps, loss_flag=False):
+     # generate one image per SD concept, optionally applying the custom loss
+     latents_list = []
+     for seed_no, sd in zip(seed_list, sdconcepts):
+         prompts = [f"{prompt} {sd}"]
+         latents = generate_latents(prompts, num_inference_steps, seed_no, loss_apply=loss_flag)
+         latents_list.append(latents)
+     # decode all latents in one batch
+     latents_list = torch.vstack(latents_list)
+     images = latents_to_pil(latents_list)
+     return images
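A minimal usage sketch, assuming utils.py is importable as-is; the prompt string, step count, and output filenames are illustrative:

if __name__ == "__main__":
    from utils import generate_gradio_images

    # One image per loaded SD concept, with the smoothness guidance enabled.
    images = generate_gradio_images("a cozy cabin in the woods", num_inference_steps=30, loss_flag=True)
    for idx, img in enumerate(images):
        img.save(f"concept_{idx}.png")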