#!/usr/bin/env python
import os
import random
import gradio as gr
import numpy as np
import PIL.Image
import torch
from diffusers import DiffusionPipeline, UNet2DConditionModel, LCMScheduler
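
# Seed upper bound and maximum image dimension (MAX_IMAGE_SIZE can be overridden via env var).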
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
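
# Load the LCM-distilled UNet for SSD-1B and pair it with the LCM scheduler.
# The pipeline is only built when a GPU is available; fp16 weights keep memory low.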
if torch.cuda.is_available():
    unet = UNet2DConditionModel.from_pretrained(
        "latent-consistency/lcm-ssd-1b",
        torch_dtype=torch.float16,
        variant="fp16"
    )
    pipe = DiffusionPipeline.from_pretrained(
        "segmind/SSD-1B",
        unet=unet,
        torch_dtype=torch.float16,
        variant="fp16"
    )
    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
    pipe.to(device)
else:
    pipe = None
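

# Return a fresh random seed when the user asked for one; otherwise keep theirs.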
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
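

# Run one text-to-image generation; LCM needs only a handful of inference steps.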
def generate(prompt: str,
             negative_prompt: str = '',
             use_negative_prompt: bool = False,
             seed: int = 0,
             width: int = 1024,
             height: int = 1024,
             guidance_scale: float = 1.0,
             num_inference_steps: int = 6) -> PIL.Image.Image:
    if pipe is None:
        # No CUDA device was found at startup, so no pipeline was loaded.
        raise gr.Error('This demo requires a GPU to run.')
    generator = torch.Generator().manual_seed(seed)
    if not use_negative_prompt:
        negative_prompt = None  # type: ignore
    return pipe(prompt=prompt,
                negative_prompt=negative_prompt,
                width=width,
                height=height,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                generator=generator,
                output_type='pil').images[0]
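

# Build the Gradio UI: a prompt box and Run button, the result image, and an
# accordion of advanced options (negative prompt, seed, size, guidance, steps).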
with gr.Blocks() as demo:
    with gr.Box():
        with gr.Row():
            prompt = gr.Text(
                label='Prompt',
                show_label=False,
                max_lines=1,
                placeholder='Enter your prompt',
                container=False,
            )
            run_button = gr.Button('Run', scale=0)
        result = gr.Image(label='Result', show_label=False)
        with gr.Accordion('Advanced options', open=False):
            with gr.Row():
                use_negative_prompt = gr.Checkbox(label='Use negative prompt',
                                                  value=False)
                negative_prompt = gr.Text(
                    label='Negative prompt',
                    max_lines=1,
                    placeholder='Enter a negative prompt',
                    visible=False,
                )
            seed = gr.Slider(label='Seed',
                             minimum=0,
                             maximum=MAX_SEED,
                             step=1,
                             value=0)
            randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
            with gr.Row():
                width = gr.Slider(
                    label='Width',
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
                height = gr.Slider(
                    label='Height',
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label='Guidance scale',
                    minimum=1,
                    maximum=20,
                    step=0.1,
                    value=5.0)
                num_inference_steps = gr.Slider(
                    label='Number of inference steps',
                    minimum=2,
                    maximum=50,
                    step=1,
                    value=6)
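
    # Show the negative-prompt textbox only while its checkbox is ticked.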
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        queue=False,
        api_name=False,
    )

    inputs = [
        prompt,
        negative_prompt,
        use_negative_prompt,
        seed,
        width,
        height,
        guidance_scale,
        num_inference_steps,
    ]
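
    # Each trigger (Enter in either textbox, or the Run button) first resolves
    # the seed, then runs generate(); only the prompt route is exposed in the API.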
    prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name='run',
    )
    negative_prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
    run_button.click(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
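
# Cap the request queue so pending generations do not pile up beyond six.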
demo.queue(max_size=6).launch()