import gradio as gr
import torch
from PIL import Image
import numpy as np
from diffusers import StableDiffusionDepth2ImgPipeline
from pathlib import Path

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Half precision is only usable on GPU; most fp16 kernels are not implemented on CPU,
# so fall back to fp32 there instead of crashing at inference time.
depth2img = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to(device)
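
# Note: stable-diffusion-2-depth conditions image-to-image generation on a depth map.
# When no depth_map is passed to the pipeline call, diffusers estimates one internally
# with the model's bundled DPT depth estimator, so the depth image input below is optional.
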
def pad_image(input_image):
    """Edge-pad an RGB image so each side is a multiple of 64 (at least 128 px),
    then letterbox it onto a black square canvas."""
    pad_w, pad_h = np.max(((2, 2), np.ceil(
        np.array(input_image.size) / 64).astype(int)), axis=0) * 64 - input_image.size
    im_padded = Image.fromarray(
        np.pad(np.array(input_image), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
    w, h = im_padded.size
    if w == h:
        return im_padded
    elif w > h:
        new_image = Image.new(im_padded.mode, (w, w), (0, 0, 0))
        new_image.paste(im_padded, (0, (w - h) // 2))
        return new_image
    else:
        new_image = Image.new(im_padded.mode, (h, h), (0, 0, 0))
        new_image.paste(im_padded, ((h - w) // 2, 0))
        return new_image
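
# For example, a 600x400 RGB input is edge-padded to 640x448 (the next multiples of 64),
# then pasted onto a black 640x640 canvas, vertically centered.
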
def predict(input_image, prompt, negative_prompt, steps, num_samples, scale, seed, strength, depth_image=None):
    depth = None
    if depth_image is not None:
        # Force 3 channels so pad_image's channel-axis padding also works for grayscale uploads
        depth_image = pad_image(depth_image.convert("RGB"))
        depth_image = depth_image.resize((512, 512))
        depth = np.array(depth_image.convert("L"))
        depth = np.expand_dims(depth, 0)
        depth = depth.astype(np.float32) / 255.0
        depth = torch.from_numpy(depth)
    init_image = input_image.convert("RGB")
    image = pad_image(init_image)  # pad to an integer multiple of 64
    image = image.resize((512, 512))
    generator = None
    if seed is not None:
        generator = torch.Generator(device=device).manual_seed(seed)
    result = depth2img(
        image=image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        generator=generator,
        depth_map=depth,
        strength=strength,
        num_inference_steps=steps,
        guidance_scale=scale,
        num_images_per_prompt=num_samples,
    )
    return result['images']
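
# A quick way to smoke-test the model without the UI (hedged sketch: the example image
# paths are the ones shipped with this Space, the output filename is arbitrary):
# if __name__ == "__main__":
#     images = predict(Image.open("./examples/bag.jpg"),
#                      "a photo of a bag of cookies in the bathroom",
#                      "low light, dark, blurry", steps=50, num_samples=1, scale=9.0,
#                      seed=1734133747, strength=0.9,
#                      depth_image=Image.open("./examples/depth.jpg"))
#     images[0].save("out.png")
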
block = gr.Blocks().queue()
with block:
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Stable Diffusion 2 Depth2Img")
            gr.HTML("<p><a href='https://huggingface.co/spaces/radames/stable-diffusion-depth2img?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil")
            depth_image = gr.Image(type="pil", label="Depth Image (Optional)")
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt")
            run_button = gr.Button("Run")
            with gr.Accordion("Advanced Options", open=False):
                num_samples = gr.Slider(
                    label="Images", minimum=1, maximum=4, value=1, step=1)
                steps = gr.Slider(label="Steps", minimum=1,
                                  maximum=50, value=50, step=1)
                scale = gr.Slider(
                    label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1
                )
                strength = gr.Slider(
                    label="Strength", minimum=0.0, maximum=1.0, value=0.9, step=0.01
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
        with gr.Column():
            gallery = gr.Gallery(label="Generated Images", show_label=False).style(
                grid=[2], height="auto")
    gr.Examples(
        examples=[
            ["./examples/baby.jpg", "high definition photo of a baby astronaut space walking at the international space station with earth seeing from above in the background",
             "", 50, 4, 9.0, 123123123, 0.8, None],
            ["./examples/gol.jpg", "professional photo of a Elmo jumping between two high rises, beautiful colorful city landscape in the background",
             "", 50, 4, 9.0, 1734133747, 0.9, None],
            ["./examples/bag.jpg", "a photo of a bag of cookies in the bathroom", "low light, dark, blurry", 50, 4, 9.0, 1734133747, 0.9, "./examples/depth.jpg"],
            ["./examples/smile_face.jpg", "a hand holding a very spherical orange", "low light, dark, blurry", 50, 4, 6.0, 961736534, 0.5, "./examples/smile_depth.jpg"]
        ],
        inputs=[input_image, prompt, negative_prompt, steps,
                num_samples, scale, seed, strength, depth_image],
        outputs=[gallery],
        fn=predict,
        cache_examples=True,
    )
    run_button.click(fn=predict,
                     inputs=[input_image, prompt, negative_prompt, steps,
                             num_samples, scale, seed, strength, depth_image],
                     outputs=[gallery])

block.launch(show_api=False)
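
# Running this app (as a Hugging Face Space or locally) also needs the packages imported
# above to be installed. A minimal requirements.txt sketch, inferred from the imports
# (package names only; exact pins and whether accelerate is needed are assumptions,
# not taken from this repo):
#   gradio
#   torch
#   diffusers
#   transformers   # DPT depth estimator used by the depth2img pipeline
#   accelerate
#   Pillow
#   numpy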