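"""Minimal Gradio demo for SDXL-Turbo: a text2img tab and an img2img tab
built on the diffusers Auto pipelines."""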
import torch
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
from diffusers.utils import load_image

# Load SDXL-Turbo once in half precision (the model card's recommended setting)
# and share its weights with the img2img pipeline via from_pipe, so the model
# is only held in GPU memory a single time.
pipeline_text2image = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
pipeline_image2image = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")

def text2img(prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.",
             guidance_scale=0.0, num_inference_steps=1):
    # SDXL-Turbo is distilled for guidance-free sampling: guidance_scale=0.0 and a single step suffice.
    image = pipeline_text2image(prompt=prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
    return image

def img2img(image, prompt="A cinematic shot of a baby racoon wearing an intricate italian priest robe.",
            guidance_scale=0.0, num_inference_steps=2, strength=0.5):
    # For img2img, num_inference_steps * strength must be at least 1, so with
    # strength=0.5 the pipeline needs a minimum of 2 steps (effectively 1 denoising step).
    init_image = load_image(image).resize((512, 512))
    image = pipeline_image2image(prompt, image=init_image, strength=strength, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]
    return image

gradio_app_text2img = gr.Interface(
    fn=text2img,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance scale"),
        gr.Slider(1, 20, value=1, step=1, label="Inference steps"),
    ],
    outputs="image",
)

gradio_app_img2img = gr.Interface(
    fn=img2img,
    inputs=[
        gr.Image(type="filepath", label="Input image"),
        gr.Text(label="Prompt"),
        gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance scale"),
        gr.Slider(1, 20, value=2, step=1, label="Inference steps"),
        gr.Slider(0.0, 1.0, value=0.5, step=0.05, label="Strength"),
    ],
    outputs="image",
)


demo = gr.TabbedInterface([gradio_app_text2img, gradio_app_img2img], ["text2img", "img2img"])

if __name__ == "__main__":
    demo.launch()