import os
import random

import gradio as gr
import numpy as np
import spaces  # needed for the @spaces.GPU decorator below
import torch
from diffusers import AutoPipelineForText2Image
from PIL import Image

hf_token = os.environ.get("HF_TOKEN")

device = "cuda"  # if torch.cuda.is_available() else "cpu"

pipe = AutoPipelineForText2Image.from_pretrained(
    "briaai/BRIA-2.3",
    torch_dtype=torch.float16,
    force_zeros_for_empty_prompt=False,
).to(device)
pipe.load_ip_adapter("briaai/Image-Prompt", subfolder="models", weight_name="ip_adapter_bria.bin")
pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max


@spaces.GPU(enable_queue=True)
def predict(
    prompt,
    ip_adapter_images,
    ip_adapter_scale=0.5,
    negative_prompt="",
    seed=100,
    randomize_seed=False,
    center_crop=False,
    width=1024,
    height=1024,
    guidance_scale=5.0,
    num_inference_steps=50,
    progress=gr.Progress(track_tqdm=True),
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Optionally resize images if center crop is not selected
    if not center_crop:
        ip_adapter_images = [image.resize((224, 224)) for image in ip_adapter_images]

    # Create a generator for a reproducible seed
    generator = torch.Generator(device="cuda").manual_seed(seed)

    pipe.set_ip_adapter_scale([ip_adapter_scale])

    # Pass all images at once to the pipeline
    result_images = pipe(
        prompt=prompt,
        ip_adapter_image=ip_adapter_images,  # pass the list of reference images
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    ).images

    return result_images, seed


examples = [
    ["high quality", ["example1.png", "example2.png"], 1.0, "", 1000, False, False, 1152, 896],
]

css = """
#col-container {
    display: flex;
    flex-direction: column;
    align-items: center;
    padding: 10px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column():
        with gr.Row():
            prompt = gr.Textbox(label="Prompt", lines=1)
            # .style(grid=[2], preview=True) is a Gradio 3 API; use the equivalent constructor kwargs instead
            ip_adapter_images = gr.Gallery(label="Input Images", elem_id="image-gallery", columns=2, preview=True)
        ip_adapter_scale = gr.Slider(label="IP Adapter Scale", minimum=0.0, maximum=1.0, step=0.1, value=0.5)
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Optional", lines=1)
        with gr.Row():
            seed = gr.Number(label="Seed", value=100)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            center_crop = gr.Checkbox(label="Center Crop Image", value=False, info="If not checked, the images will be resized.")
        with gr.Row():
            # Width/height were referenced in the event inputs but never defined; assumed slider ranges matching the predict() defaults
            width = gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024)
            height = gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024)
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale", minimum=0.0, maximum=10.0, step=0.1, value=7.0
            )
            num_inference_steps = gr.Slider(
                label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=25
            )
        result = gr.Gallery(label="Generated Images", columns=2, preview=True)
        run_button = gr.Button("Run")

        run_button.click(
            predict,
            inputs=[prompt, ip_adapter_images, ip_adapter_scale, negative_prompt, seed, randomize_seed, center_crop, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed],
        )

        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt, ip_adapter_images, ip_adapter_scale, negative_prompt, seed, randomize_seed, center_crop, width, height],
            outputs=[result, seed],
            cache_examples="lazy",
        )

demo.queue(max_size=25, api_open=False).launch(show_api=False)