import spaces
import numpy as np
import torch
import gradio as gr
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, EulerAncestralDiscreteScheduler

# Load the models
controlnet = ControlNetModel.from_pretrained(
    "briaai/BRIA-2.2-ControlNet-Recoloring",
    torch_dtype=torch.float16
).to('cuda')

# Note: device_map='auto' is not supported for diffusers pipelines, so the
# pipeline is moved to CUDA explicitly instead.
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "briaai/BRIA-2.2",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    offload_state_dict=True,
).to('cuda')

pipe.scheduler = EulerAncestralDiscreteScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    steps_offset=1
)
pipe.force_zeros_for_empty_prompt = False


def resize_image(image):
    """Center-crop to a square, then resize to 1024x1024 (SDXL's native resolution).

    Replaces the original implementation, which tried to resize by calling
    gr.Image(...).postprocess() -- that is not a resizing API and fails on
    current Gradio versions.
    """
    image = image.convert('RGB')
    width, height = image.size
    side = min(width, height)
    left = (width - side) // 2
    top = (height - side) // 2
    image = image.crop((left, top, left + side, top + side))
    return image.resize((1024, 1024), Image.LANCZOS)


@spaces.GPU(enable_queue=True)
def generate_image(input_image, prompt, controlnet_conditioning_scale):
    # Always use a random seed for diversity in outputs
    seed = np.random.randint(2147483647)
    generator = torch.Generator("cuda").manual_seed(seed)

    # Resize the input and convert it to grayscale; the ControlNet recolors it
    # according to the prompt
    input_image = resize_image(input_image)
    grayscale_image = input_image.convert('L').convert('RGB')

    # Generate the image with a fixed 30 inference steps
    images = pipe(
        prompt=prompt,
        image=grayscale_image,
        num_inference_steps=30,
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
        generator=generator,
    ).images
    return images[0]


# Gradio Interface
description = "Anything to Anything. Transform anything into anything, with an adjustable ControlNet conditioning scale."

with gr.Blocks() as demo:
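    # The original file is truncated after `gr.Markdown("`, so the layout below
    # is a minimal sketch rather than the original UI. Component names and
    # arrangement are assumptions; only generate_image() and its three
    # parameters come from the code above.
    gr.Markdown(description)
    with gr.Row():
        input_image = gr.Image(type="pil", label="Input image")
        output_image = gr.Image(type="pil", label="Recolored output")
    prompt = gr.Textbox(label="Prompt")
    conditioning_scale = gr.Slider(
        minimum=0.0, maximum=1.0, value=1.0, step=0.05,
        label="ControlNet conditioning scale"
    )
    run_button = gr.Button("Generate")
    run_button.click(
        fn=generate_image,
        inputs=[input_image, prompt, conditioning_scale],
        outputs=output_image,
    )

demo.queue().launch()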