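"""Illusion Diffusion: a Gradio Space that generates illusion artwork with
Stable Diffusion and Monster Labs' QR Code Monster ControlNet."""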
import torch
import gradio as gr
from PIL import Image
from diffusers import (
    DiffusionPipeline,
    StableDiffusionControlNetPipeline,
    ControlNetModel,
    StableDiffusionLatentUpscalePipeline,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
)
# Initialize the pipelines. Note: init_pipe and upscaler are loaded but
# currently unused (the initial txt2img pass and the latent-upscale step
# are disabled in inference() below).
init_pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V2.0", torch_dtype=torch.float16).to("cuda")
controlnet = ControlNetModel.from_pretrained("monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16)
main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V2.0",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
model_id = "stabilityai/sd-x2-latent-upscaler"
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
upscaler.to("cuda")
# Map UI sampler names to scheduler factories built from an existing scheduler config.
SAMPLER_MAP = {
    "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++"),
    "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
}
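# Example: main_pipe.scheduler = SAMPLER_MAP["Euler"](main_pipe.scheduler.config)
# swaps the scheduler in place, as done at the top of inference() below.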
def center_crop_resize(img, output_size=(512, 512)):
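    """Center-crop the image to its largest square, then resize to output_size."""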
    width, height = img.size
    # Calculate the largest centered square.
    new_dimension = min(width, height)
    left = (width - new_dimension) // 2
    top = (height - new_dimension) // 2
    right = (width + new_dimension) // 2
    bottom = (height + new_dimension) // 2
    # Crop and resize.
    img = img.crop((left, top, right, bottom))
    img = img.resize(output_size)
    return img
# Inference function
def inference(
    control_image: Image.Image,
    prompt: str,
    negative_prompt: str,
    guidance_scale: float = 8.0,
    controlnet_conditioning_scale: float = 1.0,
    seed: int = -1,
    sampler: str = "DPM++ Karras SDE",
    progress=gr.Progress(track_tqdm=True),
):
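    """Generate an illusion image by running the ControlNet pipeline on the pattern."""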
    if not prompt:
        raise gr.Error("Prompt is required")
    # Optional initial txt2img pass, currently disabled:
    # init_image = init_pipe(prompt).images[0]
    control_image = center_crop_resize(control_image)
    main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
    # A seed of -1 means "random": fall back to an unseeded generator.
    generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
    out = main_pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=control_image,
        guidance_scale=float(guidance_scale),
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
        generator=generator,
        # strength=strength,       # disabled along with the strength slider
        num_inference_steps=30,
        # output_type="latent",    # would be needed to feed the latent upscaler
    ).images[0]
    return out
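
# Build the Gradio UI: inputs on the left, output on the right.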
with gr.Blocks() as app:
    gr.Markdown(
        '''
<center><h1>Illusion Diffusion 🌀</h1>
<span style="font-size:16px;">Generate stunning illusion artwork with Stable Diffusion</span><br>
<span style="font-size:10px;">A space by AP [Follow me on Twitter](https://twitter.com/angrypenguinPNG)</span>
</center>

This project works by using the QR Code Monster ControlNet by Monster Labs: [Monster Labs QR Control Net](https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster).
Given a prompt and your pattern, a QR-code-conditioned ControlNet creates a stunning illusion! Credit to [MrUgleh](https://twitter.com/MrUgleh) for discovering the workflow :)
        '''
    )
    with gr.Row():
        with gr.Column():
            control_image = gr.Image(label="Input Illusion", type="pil")
            controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", info="ControlNet conditioning scale")
            gr.Examples(examples=["checkers.png", "pattern.png", "spiral.jpeg"], inputs=control_image)
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative Prompt", value="ugly, disfigured, low quality, blurry, nsfw")
            with gr.Accordion(label="Advanced Options", open=False):
                # strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.9, label="Strength")
                guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
                sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler", label="Sampler")
                seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=2313123, label="Seed", randomize=True)
            run_btn = gr.Button("Run")
        with gr.Column():
            result_image = gr.Image(label="Illusion Diffusion Output")
    run_btn.click(
        inference,
        inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, seed, sampler],
        outputs=[result_image],
    )
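
# Queue requests (at most 20 waiting) so jobs are processed sequentially on the GPU.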
app.queue(max_size=20)

if __name__ == "__main__":
    app.launch()