import spaces
import gradio as gr
from gradio_imageslider import ImageSlider
import torch
from diffusers import DiffusionPipeline, AutoencoderKL, ControlNetModel
from compel import Compel, ReturnedEmbeddingsType
from PIL import Image
from torchvision import transforms
import tempfile
import os
import time
import uuid
import cv2
import numpy as np
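# Runtime configuration: prefer CUDA when available, use fp16 weights, and let
# LOW_MEMORY=1 switch the pipeline into its low-VRAM path (passed as `lowvram` below).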
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16
LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"

print(f"device: {device}")
print(f"dtype: {dtype}")
print(f"low memory: {LOW_MEMORY}")
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_demofusion_sdxl_controlnet.py",
    controlnet=controlnet,
    custom_revision="main",
    torch_dtype=dtype,
    variant="fp16",
    use_safetensors=True,
    vae=vae,
)
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)
pipe = pipe.to(device)
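# Preprocess the PIL input into the pipeline's low-resolution tensor:
# 1024x1024, normalized to [-1, 1], fp16, with a leading batch dimension.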
def load_and_process_image(pil_image):
    transform = transforms.Compose(
        [
            transforms.Resize((1024, 1024)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ]
    )
    image = transform(pil_image)
    image = image.unsqueeze(0).half()
    return image
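# Pad a non-square image to a centered square on a black canvas so the later
# 1024x1024 resize keeps the original aspect ratio.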
def pad_image(image):
    w, h = image.size
    if w == h:
        return image
    elif w > h:
        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
        new_image.paste(image, (0, (w - h) // 2))
        return new_image
    else:
        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
        new_image.paste(image, ((h - w) // 2, 0))
        return new_image
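# On a ZeroGPU Space this decorator requests a GPU for the duration of each call.
# It is an assumption here: `spaces` is imported above but not otherwise used, and the
# decorator is a no-op outside ZeroGPU hardware.
@spaces.GPU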
def predict(
    input_image,
    prompt,
    negative_prompt,
    seed,
    controlnet_conditioning_scale,
    guidance_scale=8.5,
    cosine_scale_1=3,
    cosine_scale_2=1,
    cosine_scale_3=1,
    sigma=0.8,
    scale=2,
    progress=gr.Progress(track_tqdm=True),
):
    if input_image is None:
        raise gr.Error("Please upload an image.")
    padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
    image_lr = load_and_process_image(padded_image).to(device)
    conditioning, pooled = compel([prompt, negative_prompt])
    generator = torch.manual_seed(seed)
    last_time = time.time()
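    # Build a 3-channel Canny edge map from the padded input; this is the ControlNet condition image.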
    canny_image = np.array(padded_image)
    canny_image = cv2.Canny(canny_image, 100, 200)
    canny_image = canny_image[:, :, None]
    canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
    canny_image = Image.fromarray(canny_image)
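    # DemoFusion generation at 1024 * scale pixels per side; multi_decoder is enabled
    # once the target size exceeds 2048 px.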
    images = pipe(
        prompt_embeds=conditioning[0:1],
        pooled_prompt_embeds=pooled[0:1],
        negative_prompt_embeds=conditioning[1:2],
        negative_pooled_prompt_embeds=pooled[1:2],
        image_lr=image_lr,
        width=1024 * scale,
        height=1024 * scale,
        view_batch_size=16,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        condition_image=canny_image,
        stride=64,
        generator=generator,
        num_inference_steps=40,
        guidance_scale=guidance_scale,
        cosine_scale_1=cosine_scale_1,
        cosine_scale_2=cosine_scale_2,
        cosine_scale_3=cosine_scale_3,
        sigma=sigma,
        multi_decoder=1024 * scale > 2048,
        show_image=False,
        lowvram=LOW_MEMORY,
    )
    print(f"Time taken: {time.time() - last_time}")
    images_path = tempfile.mkdtemp()
    paths = []
    uuid_name = uuid.uuid4()
    for img in images:
        img_path = os.path.join(images_path, f"img_{uuid_name}_{img.size[0]}.jpg")
        img.save(img_path)
        paths.append(img_path)
    return (images[0], images[-1]), paths
css = """ | |
#intro{ | |
max-width: 32rem; | |
text-align: center; | |
margin: 0 auto; | |
} | |
""" | |
with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
# Enhance This
### DemoFusion SDXL

[DemoFusion](https://ruoyidu.github.io/demofusion/demofusion.html) enables higher-resolution image generation.
You can upload an initial image and a prompt to generate an enhanced version.
[Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL?duplicate=true) to avoid the queue.

GPU time comparison: T4: ~276s - A10G: ~113.6s - A100: ~43.5s - RTX 4090: ~48.1s

<small>
<b>Notes</b>: the author advises against the term "super resolution" because it's more like image-to-image generation than enhancement, but it's still a lot of fun!
</small>
""",
        elem_id="intro",
    )
    with gr.Row():
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="Input Image")
            prompt = gr.Textbox(
                label="Prompt",
                info="The prompt strongly influences the result; describe the image as precisely as you can. Accepts Compel syntax.",
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
            )
            seed = gr.Slider(
                minimum=0,
                maximum=2**64 - 1,
                value=1415926535897932,
                step=1,
                label="Seed",
                randomize=True,
            )
with gr.Accordion(label="DemoFusion Params", open=False): | |
guidance_scale = gr.Slider( | |
minimum=0, | |
maximum=50, | |
value=8.5, | |
step=0.001, | |
label="Guidance Scale", | |
) | |
scale = gr.Slider( | |
minimum=1, | |
maximum=5, | |
value=2, | |
step=1, | |
label="Magnification Scale", | |
interactive=False, | |
) | |
cosine_scale_1 = gr.Slider( | |
minimum=0, | |
maximum=5, | |
value=3, | |
step=0.01, | |
label="Cosine Scale 1", | |
) | |
cosine_scale_2 = gr.Slider( | |
minimum=0, | |
maximum=5, | |
value=1, | |
step=0.01, | |
label="Cosine Scale 2", | |
) | |
cosine_scale_3 = gr.Slider( | |
minimum=0, | |
maximum=5, | |
value=1, | |
step=0.01, | |
label="Cosine Scale 3", | |
) | |
sigma = gr.Slider( | |
minimum=0, | |
maximum=1, | |
value=0.8, | |
step=0.01, | |
label="Sigma", | |
) | |
with gr.Accordion(label="ControlNet Params", open=False): | |
controlnet_conditioning_scale = gr.Slider( | |
minimum=0, | |
maximum=1, | |
step=0.001, | |
value=0.5, | |
label="ControlNet Conditioning Scale", | |
) | |
controlnet_start = gr.Slider( | |
minimum=0, | |
maximum=1, | |
step=0.001, | |
value=0.0, | |
label="ControlNet Start", | |
) | |
controlnet_end = gr.Slider( | |
minimum=0.0, | |
maximum=1.0, | |
step=0.001, | |
value=1.0, | |
label="ControlNet End", | |
) | |
            btn = gr.Button()
        with gr.Column(scale=2):
            image_slider = ImageSlider(position=0.5)
            files = gr.Files()
    inputs = [
        image_input,
        prompt,
        negative_prompt,
        seed,
        controlnet_conditioning_scale,
        guidance_scale,
        cosine_scale_1,
        cosine_scale_2,
        cosine_scale_3,
        sigma,
        scale,
    ]
    outputs = [image_slider, files]
    btn.click(predict, inputs=inputs, outputs=outputs, concurrency_limit=1)
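    # Each example row supplies one value per component in `inputs`, in order;
    # the final value maps to the magnification scale.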
    gr.Examples(
        fn=predict,
        examples=[
            [
                "./examples/lara.jpeg",
                "photography of lara croft 8k high definition award winning",
                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                5436236241,
                0.5,
                8.5,
                3,
                1,
                1,
                0.8,
                2,
            ],
            [
                "./examples/cybetruck.jpeg",
                "photo of tesla cybertruck futuristic car 8k high definition on a sand dune in mars, future",
                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                383472451451,
                0.5,
                8.5,
                3,
                1,
                1,
                0.8,
                2,
            ],
            [
                "./examples/jesus.png",
                "a photorealistic painting of Jesus Christ, 4k high definition",
                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                13317204146129588000,
                0.5,
                8.5,
                3,
                1,
                1,
                0.8,
                2,
            ],
            [
                "./examples/anna-sullivan-DioLM8ViiO8-unsplash.jpg",
                "A crowded stadium with enthusiastic fans watching a daytime sporting event, the stands filled with colorful attire and the sun casting a warm glow",
                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                5623124123512,
                0.5,
                8.5,
                3,
                1,
                1,
                0.8,
                2,
            ],
            [
                "./examples/img_aef651cb-2919-499d-aa49-6d4e2e21a56e_1024.jpg",
                "a large red flower on a black background 4k high definition",
                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
                23123412341234,
                0.5,
                8.5,
                3,
                1,
                1,
                0.8,
                2,
            ],
            [
                "./examples/huggingface.jpg",
                "photo realistic huggingface human+++ emoji costume, round, yellow, skin+++ texture+++",
                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic, emoji cartoon, drawing, pixelated",
                5532144938416372000,
                0.101,
                25.206,
                4.64,
                1,
                1,
                0.49,
                3,
            ],
        ],
        inputs=inputs,
        outputs=outputs,
        cache_examples=True,
    )
demo.queue(api_open=False)
demo.launch(show_api=False)