"""Gradio demo for SDXL-Turbo: text-to-image and image-to-image generation in two tabs."""

import gradio as gr
import torch
from diffusers import AutoPipelineForImage2Image, AutoPipelineForText2Image
from diffusers.utils import load_image

# Load SDXL-Turbo once and build the image-to-image pipeline from the same
# components via from_pipe, so the weights are not loaded twice.
# The fp16 variant roughly halves GPU memory; drop torch_dtype/variant for full precision.
pipeline_text2image = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")

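# SDXL-Turbo is a distilled model: guidance is meant to be disabled (guidance_scale=0.0)
# and only 1-4 denoising steps are needed, which the defaults below follow.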
def text2img(prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.",
             guidance_scale=0.0, num_inference_steps=1):
    image = pipeline_text2image(
        prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps
    ).images[0]
    return image


def img2img(image, prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.",
            guidance_scale=0.0, num_inference_steps=2, strength=0.5):
    # The img2img pipeline runs int(num_inference_steps * strength) steps, so
    # num_inference_steps * strength must be >= 1; the previous default of 1 step
    # with strength=0.5 rounded down to zero steps.
    init_image = load_image(image)
    init_image = init_image.resize((512, 512))
    image = pipeline_image2image(
        prompt, image=init_image, strength=strength,
        guidance_scale=guidance_scale, num_inference_steps=num_inference_steps
    ).images[0]
    return image


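# gr.Interface maps each component in `inputs` to the wrapped function's parameters
# by position, so the component order below must mirror the function signatures above.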
gradio_app_text2img = gr.Interface(
    fn=text2img,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance scale"),
        # The steps slider previously defaulted to 1 while its minimum was 2, an out-of-range value.
        gr.Slider(1, 20, value=1, step=1, label="Inference steps"),
    ],
    outputs="image",
)

gradio_app_img2img = gr.Interface(
    fn=img2img,
    inputs=[
        gr.Image(type="filepath"),
        gr.Text(label="Prompt"),
        gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance scale"),
        # Keep steps * strength >= 1 (see img2img above): default to 2 steps at strength 0.5.
        gr.Slider(1, 20, value=2, step=1, label="Inference steps"),
        gr.Slider(0.0, 1.0, value=0.5, step=0.05, label="Strength"),
    ],
    outputs="image",
)

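# TabbedInterface puts each Interface in its own tab, labelled by the second list.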
demo = gr.TabbedInterface([gradio_app_text2img, gradio_app_img2img], ["text2img", "img2img"])

if __name__ == "__main__":
    demo.launch()