# flux-lightning / app.py
import gradio as gr
import numpy as np
import random
import spaces
import torch
from diffusers import DiffusionPipeline
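
# Load the FLUX.1 [schnell] text-to-image pipeline in bfloat16 and place it on the GPU when one is available.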
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
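
# Upper bounds for the seed and image-size sliders defined in the UI below.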
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
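
# Generate a single image; FLUX.1 [schnell] is a timestep-distilled model, so it defaults to 4 steps with guidance disabled.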
@spaces.GPU()
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
        guidance_scale=0.0,  # schnell is distilled to run without classifier-free guidance
    ).images[0]
    return image, seed
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# FLUX.1 [schnell] Image Generator")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button("Generate")
        with gr.Column():
            result = gr.Image(label="Generated Image")
    with gr.Accordion("Advanced Settings", open=False):
        seed = gr.Slider(minimum=0, maximum=MAX_SEED, step=1, label="Seed", randomize=True)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        width = gr.Slider(minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, label="Width")
        height = gr.Slider(minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, label="Height")
        num_inference_steps = gr.Slider(minimum=1, maximum=50, step=1, value=4, label="Number of inference steps")

    run_button.click(
        infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed],
    )

demo.launch()