import gradio as gr
import numpy as np
import random
import spaces
import torch
import time
from diffusers import DiffusionPipeline, AutoencoderTiny
from custom_pipeline import FluxWithCFGPipeline

# Constants
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 768
DEFAULT_INFERENCE_STEPS = 4

# Device and model setup.
# fp16 keeps VRAM low; the tiny TAEF1 VAE trades a little decode quality for
# speed, which matters for the realtime preview path below.
dtype = torch.float16
pipe = FluxWithCFGPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
# Optional speed-ups, deliberately left disabled:
# pipe.load_lora_weights("ostris/OpenFLUX.1", weight_name="openflux1-v0.1.0-fast-lora.safetensors", adapter_name="fast")
# pipe.set_adapters("fast")
# pipe.fuse_lora(adapter_names=["fast"], lora_scale=1.0)
pipe.to("cuda")
# pipe.transformer.to(memory_format=torch.channels_last)
# pipe.transformer = torch.compile(
#     pipe.transformer, mode="max-autotune", fullgraph=True
# )
torch.cuda.empty_cache()


# Inference function
@spaces.GPU(duration=25)
def generate_image(prompt, seed=24, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT,
                   randomize_seed=False, num_inference_steps=2,
                   progress=gr.Progress(track_tqdm=True)):
    """Generate a single image from *prompt*.

    Returns an (image, seed, latency_message) tuple — one value per Gradio
    output component wired to every event handler below.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # int(float(seed)) tolerates seeds arriving from gr.Number as float/str.
    generator = torch.Generator().manual_seed(int(float(seed)))

    start_time = time.time()

    # Only generate the last image in the sequence
    img = pipe.generate_images(
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        generator=generator,
    )
    latency = f"Latency: {(time.time()-start_time):.2f} seconds"
    return img, seed, latency


# Example prompts (runtime data — kept verbatim, including embedded newlines
# and trailing spaces).
examples = [
    "sexy woman & man , under wear, full body, sunday",
    "A glamorous young woman with long, wavy blonde hair and smokey eye makeup, posing in a luxury hotel room. She’s wearing a sparkly gold cocktail dress and holding up a white card with 'Invite' written on it in elegant calligraphy. Soft, warm lighting creates a luxurious atmosphere. \n",
    "A fit male fitness influencer with short dark hair and stubble, standing shirtless in a modern gym. He has defined abs and arm muscles, and is holding a protein shake in one hand and a card that says 'Invite' in the other. Bright, clean lighting highlights his physique.",
    "A bohemian-style female travel blogger with sun-kissed skin and messy beach waves, sitting on a tropical beach at sunset. She’s wearing a flowy white sundress and holding up a weathered postcard with 'Invite scrawled on it. Golden hour lighting bathes the scene in warm tones. ",
    "A trendy male fashion influencer with perfectly styled hair and designer stubble, posing on a city street. He’s wearing a tailored suit and holding up a sleek black business card with 'Invite' printed in minimalist white font. The background shows blurred city lights, creating a chic urban atmosphere.",
    "A fresh-faced young female beauty guru with freckles and natural makeup, sitting at a vanity covered in cosmetics. She’s wearing a pastel pink robe and holding up a makeup palette with 'Invite' written on it in lipstick. Soft, flattering lighting enhances her radiant complexion. ",
    "A stylish young woman with long, wavy ombre hair and winged eyeliner, posing in front of a neon-lit city skyline at night. She’s wearing a sleek black leather jacket over a sparkly crop top and holding up a holographic business card that says 'Invite' in futuristic font. \nThe card reflects the colorful neon lights, creating a cyberpunk aesthetic.",
]

css = """ footer { visibility: hidden; } """

# --- Gradio UI ---
with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
    with gr.Column(elem_id="app-container"):
        gr.Markdown("# 🎨 FLUX 1.1 Pro")
        with gr.Row():
            with gr.Column(scale=2.5):
                result = gr.Image(label="Generated Image", show_label=False, interactive=False)
            with gr.Column(scale=1):
                prompt = gr.Text(
                    label="Prompt",
                    placeholder="sexy woman & man , under wear, full body, sunday",
                    lines=3,
                    show_label=False,
                    container=False,
                )
                generateBtn = gr.Button("🖼️ Generate Image")
                enhanceBtn = gr.Button("🚀 Enhance Image")
                # FIX: was `gr.Column("Advanced Options")` — gr.Column's first
                # positional parameter is `scale` (numeric), so the label was
                # silently misused; a labeled Accordion is the intended widget.
                with gr.Accordion("Advanced Options"):
                    with gr.Row():
                        realtime = gr.Checkbox(label="Realtime Toggler", info="If TRUE then uses more GPU but create image in realtime.", value=False)
                        latency = gr.Text(label="Latency")
                    with gr.Row():
                        seed = gr.Number(label="Seed", value=42)
                        randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                    with gr.Row():
                        width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
                        height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
                        num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)

        with gr.Row():
            gr.Markdown("### 🌟 Inspiration Gallery")
        with gr.Row():
            gr.Examples(
                examples=examples,
                fn=generate_image,
                inputs=[prompt],
                outputs=[result, seed, latency],
                cache_examples="lazy",
            )

    # NOTE(review): enhance deliberately omits randomize_seed/steps inputs, so
    # the function defaults (randomize_seed=False, num_inference_steps=2)
    # apply — confirm this is the intended "enhance" behavior.
    enhanceBtn.click(
        fn=generate_image,
        inputs=[prompt, seed, width, height],
        outputs=[result, seed, latency],
        show_progress="full",
        queue=False,
        concurrency_limit=None,
    )

    generateBtn.click(
        fn=generate_image,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
        api_name="RealtimeFlux",
        queue=False,
    )

    def update_ui(realtime_enabled):
        """Hide the manual Generate button while realtime mode is active."""
        return {
            prompt: gr.update(interactive=True),
            generateBtn: gr.update(visible=not realtime_enabled),
        }

    realtime.change(
        fn=update_ui,
        inputs=[realtime],
        outputs=[prompt, generateBtn],
        queue=False,
        concurrency_limit=None,
    )

    def realtime_generation(*args):
        """Regenerate on every input change, but only when realtime is on.

        args[0] is the realtime checkbox value; the rest are forwarded to
        generate_image unchanged.
        """
        if args[0]:  # If realtime is enabled
            # FIX: generate_image is a plain function returning a tuple, not a
            # generator — the old `next(generate_image(...))` raised TypeError.
            return generate_image(*args[1:])
        # FIX: previously fell through returning None, which Gradio tried to
        # unpack into three outputs; no-op updates leave them untouched.
        return gr.update(), gr.update(), gr.update()

    prompt.submit(
        fn=generate_image,
        inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
        outputs=[result, seed, latency],
        show_progress="full",
        queue=False,
        concurrency_limit=None,
    )

    # Live-regeneration triggers: any edit to these components re-renders
    # (gated inside realtime_generation by the checkbox).
    for component in [prompt, width, height, num_inference_steps]:
        component.input(
            fn=realtime_generation,
            inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
            outputs=[result, seed, latency],
            show_progress="hidden",
            trigger_mode="always_last",
            queue=False,
            concurrency_limit=None,
        )

# Launch the app
demo.launch()