import os

import gradio as gr
import torch
from diffusers import LMSDiscreteScheduler, StableDiffusionPipeline
from PIL import Image
from torchvision import transforms as tfms
from tqdm.auto import tqdm
from transformers import logging

torch.manual_seed(1)
logging.set_verbosity_error()

# Pick the best available device: CUDA GPU, Apple Silicon (MPS), or CPU
torch_device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
if torch_device == "mps":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

model_nm = "CompVis/stable-diffusion-v1-4"
output_dir = "sd-concept-output"

# Load the full Stable Diffusion pipeline and move it to the chosen device
pipe = StableDiffusionPipeline.from_pretrained(model_nm).to(torch_device)
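
# The loaded pipeline can already generate images end to end, e.g. (a sketch,
# not executed in this app):
#   pipe("a photograph of an astronaut riding a horse").images[0]
# Below we instead pull its components apart so we can run a custom sampling
# loop with an extra guidance loss.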

# Reuse the pipeline's components directly for the custom sampling loop
vae = pipe.vae
tokenizer = pipe.tokenizer
text_encoder = pipe.text_encoder
unet = pipe.unet

# LMS scheduler with the standard Stable Diffusion v1 noise schedule
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)

vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

# Load textual-inversion embeddings; each adds a placeholder token
# (e.g. <cat-toy>) to the tokenizer and text encoder
pipe.load_textual_inversion("sd-concepts-library/madhubani-art")
pipe.load_textual_inversion("sd-concepts-library/line-art")
pipe.load_textual_inversion("sd-concepts-library/cat-toy")
pipe.load_textual_inversion("sd-concepts-library/concept-art")
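
# After loading, the placeholder tokens can be used directly in prompts.
# For example (a sketch, not executed here):
#   pipe("a puppy in <cat-toy> style").images[0]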

def pil_to_latent(input_im):
    # Encode a single PIL image into scaled VAE latents
    with torch.no_grad():
        # ToTensor gives [0, 1]; the VAE expects inputs in [-1, 1]
        latent = vae.encode(tfms.ToTensor()(input_im).unsqueeze(0).to(torch_device) * 2 - 1)
    return 0.18215 * latent.latent_dist.sample()
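
# Example round trip (hypothetical usage, assuming `img` is a 512x512 RGB PIL image):
#   latents = pil_to_latent(img)            # shape (1, 4, 64, 64)
#   recovered = latents_to_pil(latents)[0]  # back to a PIL image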

def latents_to_pil(latents):
    # Decode scaled latents back into a list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(img) for img in images]
    return pil_images

def set_timesteps(scheduler, num_inference_steps):
    scheduler.set_timesteps(num_inference_steps)
    # Cast to float32 to avoid a precision issue on MPS
    scheduler.timesteps = scheduler.timesteps.to(torch.float32)

def saturation_loss(images):
    # Per-pixel saturation: (max - min) / max over the colour channels
    max_vals, _ = torch.max(images, dim=1, keepdim=True)
    min_vals, _ = torch.min(images, dim=1, keepdim=True)
    saturation = (max_vals - min_vals) / max_vals.clamp(min=1e-7)

    # Average over the spatial dimensions; the 1/10000 factor keeps the
    # guidance gradients small relative to the denoising updates
    mean_saturation = torch.mean(saturation, dim=(2, 3))
    return mean_saturation / 10000
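
# Quick sanity check (hypothetical values): a uniform grey image has zero
# saturation, while a pure red image has saturation 1 before the 1/10000 scaling:
#   grey = torch.full((1, 3, 8, 8), 0.5)
#   red = torch.zeros((1, 3, 8, 8)); red[:, 0] = 1.0
#   saturation_loss(grey)  # ~0.0
#   saturation_loss(red)   # ~1e-4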

def generateImage(prompt, lossScale):
    # Sampling settings
    height = 512
    width = 512
    num_inference_steps = 200
    guidance_scale = 8
    generator = torch.manual_seed(32)
    batch_size = 1
    saturation_loss_Scale = lossScale

    # Encode the prompt with the CLIP text encoder
    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]

    # Encode an empty prompt for classifier-free guidance
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    set_timesteps(scheduler, num_inference_steps)

    # Start from random latents (1/8 of the image resolution) scaled to the
    # scheduler's initial noise level
    latents = torch.randn(
        (batch_size, unet.config.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.init_noise_sigma

    # Denoising loop
    for i, t in tqdm(enumerate(scheduler.timesteps), total=len(scheduler.timesteps)):
        # Duplicate the latents so one U-Net pass serves both the
        # unconditional and the text-conditioned prediction
        latent_model_input = torch.cat([latents] * 2)
        sigma = scheduler.sigmas[i]
        latent_model_input = scheduler.scale_model_input(latent_model_input, t)

        with torch.no_grad():
            noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

        # Classifier-free guidance
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

        # Every 5th step, nudge the latents in the direction that reduces the
        # saturation loss on the predicted clean image
        if i % 5 == 0:
            latents = latents.detach().requires_grad_()

            # Predicted fully-denoised latents for the current step
            latents_x0 = scheduler.step(noise_pred, t, latents).pred_original_sample
            # scheduler.step advances the internal step index; rewind it so the
            # real update below stays aligned with the current timestep
            scheduler._step_index = scheduler._step_index - 1

            # Decode to image space (roughly in [0, 1]) while keeping the graph
            denoised_images = vae.decode((1 / 0.18215) * latents_x0).sample / 2 + 0.5

            loss = saturation_loss(denoised_images) * saturation_loss_Scale

            if i % 10 == 0:
                print(i, 'loss:', loss.item())

            # Gradient of the loss w.r.t. the latents, used as guidance
            cond_grad = torch.autograd.grad(loss, latents)[0]
            latents = latents.detach() - cond_grad * sigma**2

        # Standard scheduler update to the next (less noisy) latents
        latents = scheduler.step(noise_pred, t, latents).prev_sample

    custom_loss_image = latents_to_pil(latents)[0]
    return custom_loss_image
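
# Example (a sketch, not executed here): one styled image with saturation guidance
#   img = generateImage("a puppy in <cat-toy> style", lossScale=2)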

def inference(imgText, style, customLoss="no"):
    # Build the prompt around the chosen textual-inversion style token
    prompt = f'a {imgText} in <{style}> style'
    if customLoss == "yes":
        outImage = generateImage(prompt, 2)
        return outImage
    else:
        outImage = generateImage(prompt, 0)
        return outImage
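
# Example (hypothetical): inference("puppy", "cat-toy", customLoss="yes") returns a PIL image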

title = "TSAI S20 Assignment: Use a pretrained Stable Diffusion model and give a demo of how it works"
description = "A simple Gradio interface that accepts a text prompt and a style, and generates an image using the Stable Diffusion pipeline"

examples = [["puppy", "cat-toy", "yes"]]

demo = gr.Interface(
    inference,
    inputs=[gr.Textbox(label="Enter an image you want to generate"),
            gr.Dropdown(["madhubani-art", "line-art", "cat-toy", "concept-art"], label="Choose your style"),
            gr.Radio(["yes", "no"], label="Add custom saturation loss?")
            ],
    outputs=[gr.Image(shape=(512, 512), label="Generated Image")],
    title=title,
    description=description,
    examples=examples,
)
demo.launch()