import gc
import os
from pathlib import Path

import torch
from diffusers import AutoencoderKL, LMSDiscreteScheduler, UNet2DConditionModel
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, logging

torch.manual_seed(1)
# Suppress some unnecessary warnings when loading the CLIPTextModel
logging.set_verbosity_error()
# Set device
torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
if torch_device == "mps":
    os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = "1"
# Free any memory left over from previous runs
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()
# Load the autoencoder model which will be used to decode the latents into image space.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
# Load the tokenizer and text encoder to tokenize and encode the text.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
# The UNet model for generating the latents.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")
# The noise scheduler
scheduler = LMSDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
# To the GPU we go!
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)
def load_learned_embeds():
    # Load every textual-inversion embedding saved as *_learned_embeds.bin and
    # stack the 768-dim concept vectors into a single (N, 768) tensor.
    pathlist = Path('learned_embeds/').glob('*_learned_embeds.bin')
    learned_embeds = []
    for path in pathlist:
        learned_embeds.append(torch.load(str(path)))
    concept_embeds_list = []
    for obj in learned_embeds:
        for k, v in obj.items():
            if v.shape[0] == 768:
                concept_embeds_list.append(v)
    return torch.stack(concept_embeds_list)
def pil_to_latent(input_im):
    # Single image -> single latent in a batch (so shape 1, 4, 64, 64)
    with torch.no_grad():
        # Scale the image from [0, 1] to [-1, 1] before encoding
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)
    return 0.18215 * latent.latent_dist.sample()
def latents_to_pil(latents):
    # Batch of latents -> list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images
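# Illustrative round-trip helper (not part of the original file): encode a
# PIL image with pil_to_latent and decode it back with latents_to_pil, as a
# sanity check that the VAE scaling factor (0.18215) is applied consistently.
def _demo_vae_roundtrip(image_path):
    im = Image.open(image_path).convert("RGB").resize((512, 512))
    lat = pil_to_latent(im)        # shape (1, 4, 64, 64)
    return latents_to_pil(lat)[0]  # lossy VAE reconstruction of the input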
# Prep Scheduler
def set_timesteps(scheduler, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    # Minor fix to ensure MPS compatibility; fixed upstream in diffusers PR 3925
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)
def get_output_embeds(input_embeddings):
    # CLIP's text model uses a causal mask, so we prepare it here.
    # Note: _build_causal_attention_mask is a private method that has been
    # removed in newer transformers releases, so this relies on the pinned version.
    bsz, seq_len = input_embeddings.shape[:2]
    causal_attention_mask = text_encoder.text_model._build_causal_attention_mask(
        bsz, seq_len, dtype=input_embeddings.dtype)
    # Getting the output embeddings involves calling the model with
    # output_hidden_states=True so that it doesn't just return the pooled final predictions:
    encoder_outputs = text_encoder.text_model.encoder(
        inputs_embeds=input_embeddings,
        attention_mask=None,  # We aren't using an attention mask, so that can be None
        causal_attention_mask=causal_attention_mask.to(torch_device),
        output_attentions=None,
        output_hidden_states=True,  # We want the per-token output embeddings, not just the final pooled output
        return_dict=None,
    )
    # We're interested in the output hidden state only
    output = encoder_outputs[0]
    # There is a final layer norm we need to pass these through
    output = text_encoder.text_model.final_layer_norm(output)
    # And now they're ready!
    return output
def blue_loss(images):
    # Despite the name, this is a contrast loss: how far the pixels are from a
    # high-contrast version of themselves. `contrast` ranges from -255 to +255.
    contrast = 230
    contrast_scale_factor = (259 * (contrast + 255)) / (255 * (259 - contrast))
    cimgs = (contrast_scale_factor * (images - 0.5) + 0.5).clamp(0.0, 1.0)
    error = torch.abs(images - cimgs).mean()
    # The original blue loss (hence the name) pushed the blue channel towards 0.9:
    # error = torch.abs(images[:, 2] - 0.9).mean()
    return error
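# Worked example of the scale factor above (illustrative): for contrast = 230
# it is (259 * 485) / (255 * 29) ≈ 16.99, so pixels are pushed hard away from
# 0.5 towards 0 or 1 before clamping; the loss then measures how far each
# image is from that high-contrast version of itself.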
# Generating an image with these modified embeddings
def generate_with_embs(text_input, text_embeddings, output=None, generator=None, additional_guidance=False):
    height = 512              # default height of Stable Diffusion
    width = 512               # default width of Stable Diffusion
    num_inference_steps = 30  # Number of denoising steps
    guidance_scale = 7.5      # Scale for classifier-free guidance
    if generator is None:
        generator = torch.manual_seed(32)  # Seed generator to create the initial latent noise
    batch_size = 1

    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep Scheduler
    set_timesteps(scheduler, num_inference_steps)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Expand the latents so classifier-free guidance needs only one forward pass
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        # Predict the noise residual
        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Perform classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        #### ADDITIONAL GUIDANCE ###
        if additional_guidance and i % 5 == 0:
            blue_loss_scale = 80
            # Requires grad on the latents
            latents = latents.detach().requires_grad_()
            # Get the predicted x0:
            latents_x0 = latents - sigma * noise_pred
            # latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
            # Decode to image space
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5  # range (0, 1)
            # Calculate loss
            loss = blue_loss(denoised_images) * blue_loss_scale
            # Occasionally print it out
            if i % 10 == 0:
                print(i, 'loss:', loss.item())
            # Get gradient
            cond_grad = torch.autograd.grad(loss, latents)[0]
            # Nudge the latents down the loss gradient
            latents = latents.detach() - cond_grad * sigma ** 2

        # Compute the previous noisy sample x_t -> x_t-1
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    # `output` is accepted for API compatibility but currently unused
    return latents_to_pil(latents)[0]
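# Illustrative usage sketch (the prompt string is an assumption, not from the
# original file): tokenize a prompt, embed it with the CLIP text encoder, and
# hand both to generate_with_embs.
def _demo_generate(prompt="A mouse"):
    text_input = tokenizer(prompt, padding="max_length",
                           max_length=tokenizer.model_max_length,
                           truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
    return generate_with_embs(text_input, text_embeddings)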
concept_embeds = load_learned_embeds()
token_emb_layer = text_encoder.text_model.embeddings.token_embedding  # vocab size 49408, emb_dim 768
pos_emb_layer = text_encoder.text_model.embeddings.position_embedding  # 77 positions, emb_dim 768
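# Illustrative sketch of how the pieces above fit together (the prompt, the
# token position being overwritten, and concept_idx are all assumptions):
# swap one token's embedding for a learned concept vector, rebuild the full
# CLIP output embeddings, and generate from them.
def _demo_concept_swap(prompt="A mouse", concept_idx=0):
    text_input = tokenizer(prompt, padding="max_length",
                           max_length=tokenizer.model_max_length,
                           truncation=True, return_tensors="pt")
    input_ids = text_input.input_ids.to(torch_device)
    token_embeddings = token_emb_layer(input_ids)
    # Overwrite the first real token (index 1, after the start-of-text token)
    # with a learned concept embedding
    token_embeddings[0, 1] = concept_embeds[concept_idx].to(torch_device)
    position_ids = torch.arange(input_ids.shape[-1], device=torch_device).unsqueeze(0)
    position_embeddings = pos_emb_layer(position_ids)
    input_embeddings = token_embeddings + position_embeddings
    modified_output_embeddings = get_output_embeds(input_embeddings)
    return generate_with_embs(text_input, modified_output_embeddings)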