import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL, KDPM2AncestralDiscreteScheduler, UNet2DConditionModel
from huggingface_hub import hf_hub_download
import spaces
from PIL import Image
import requests
from translatepy import Translator
import numpy as np
import random
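# translatepy translator used to convert non-English prompts to English before generation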
translator = Translator()
# Constants
model = "Corcelio/mobius"
vae_model = "madebyollin/sdxl-vae-fp16-fix"
MAX_SEED = np.iinfo(np.int32).max
CSS = """
.gradio-container {
max-width: 690px !important;
}
footer {
visibility: hidden;
}
"""
JS = """function () {
gradioURL = window.location.href
if (!gradioURL.endsWith('?__theme=dark')) {
window.location.replace(gradioURL + '?__theme=dark');
}
}"""
# Load VAE component
vae = AutoencoderKL.from_pretrained(
vae_model,
torch_dtype=torch.float16
)
# Load the UNet, pipeline and scheduler up front; skipped when no CUDA device is available
if torch.cuda.is_available():
unet = UNet2DConditionModel.from_pretrained(model, subfolder="unet").to("cuda", torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(model, vae=vae, unet=unet, torch_dtype=torch.float16).to("cuda")
pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config)
# Generation function (runs on GPU via the @spaces.GPU decorator)
@spaces.GPU()
def generate_image(
prompt,
negative="low quality",
width=1024,
height=1024,
seed=-1,
nums=1,
scale=1.5,
steps=30,
clip=3):
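    """Generate one or more images from the prompt and return them together with the seed used."""
    # A seed of -1 means "random": draw a fresh seed so the result can still be reproduced later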
if seed == -1:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed)
    # Translate the prompt to English so the SDXL text encoders receive English text
    prompt = str(translator.translate(prompt, 'English'))
    print(f'prompt: {prompt}')
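    # Run the SDXL pipeline; .images is a list containing `nums` generated images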
image = pipe(
prompt,
negative_prompt=negative,
width=width,
height=height,
guidance_scale=scale,
        generator=generator,
num_inference_steps=steps,
num_images_per_prompt=nums,
clip_skip=clip,
).images
return image, seed
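# Example prompts shown in the interface; clicking one fills the prompt box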
examples = [
"a cat eating a piece of cheese",
"a ROBOT riding a BLUE horse on Mars, photorealistic",
"Ironman VS Hulk, ultrarealistic",
"a CUTE robot artist painting on an easel",
"Astronaut in a jungle, cold color palette, oil pastel, detailed, 8k",
"An alien holding sign board contain word 'Flash', futuristic, neonpunk",
"Kids going to school, Anime style"
]
# Gradio Interface
with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
gr.HTML("<h1><center>Mobius💠</center></h1>")
gr.HTML("<p><center><a href='https://huggingface.co/Corcelio/mobius'>mobius</a> text-to-image generation</center><br><center>Adding default prompts to enhance.</center></p>")
with gr.Group():
with gr.Row():
            prompt = gr.Textbox(label='Enter Your Prompt (Multi-Language)', value="best quality, HD, aesthetic", scale=6)
submit = gr.Button(scale=1, variant='primary')
        img = gr.Gallery(label='Mobius Generated Image', columns=1, preview=True)
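        # Advanced generation controls, collapsed by default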
with gr.Accordion("Advanced Options", open=False):
with gr.Row():
negative = gr.Textbox(label="Negative prompt", value="low quality, ugly, blurry, poor face, bad anatomy")
with gr.Row():
width = gr.Slider(
label="Width",
minimum=512,
maximum=1280,
step=8,
value=1024,
)
height = gr.Slider(
label="Height",
minimum=512,
maximum=1280,
step=8,
value=1024,
)
with gr.Row():
seed = gr.Slider(
label="Seed (-1 Get Random)",
minimum=-1,
maximum=MAX_SEED,
step=1,
value=-1,
scale=2,
)
nums = gr.Slider(
label="Image Numbers",
minimum=1,
maximum=4,
step=1,
value=1,
scale=1,
)
with gr.Row():
scale = gr.Slider(
label="Guidance",
minimum=3.5,
maximum=7,
step=0.1,
value=7,
)
steps = gr.Slider(
label="Steps",
minimum=1,
maximum=50,
step=1,
value=50,
)
clip = gr.Slider(
label="Clip Skip",
minimum=1,
maximum=10,
step=1,
value=3,
)
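    # Clicking an example runs generation and caches its output lazily (on first click)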
gr.Examples(
examples=examples,
inputs=prompt,
        outputs=[img, seed],
fn=generate_image,
cache_examples="lazy",
)
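    # Both pressing Enter in the prompt box and clicking the button trigger generation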
prompt.submit(fn=generate_image,
inputs=[prompt, negative, width, height, seed, nums, scale, steps, clip],
                  outputs=[img, seed],
)
submit.click(fn=generate_image,
inputs=[prompt, negative, width, height, seed, nums, scale, steps, clip],
                 outputs=[img, seed],
)
demo.queue().launch()