import gradio as gr
import torch
from diffusers import DiffusionPipeline
from PIL import Image

# Load the SDXL base model
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # use torch.float32 when running on CPU
    use_safetensors=True
)
pipe.to("cuda")


def generate_image(prompt, negative_prompt, size):
    # Fall back to sensible defaults when the fields are left empty
    if not prompt:
        prompt = "a beautiful landscape"
    if not negative_prompt:
        negative_prompt = ""

    # Parse the "WIDTHxHEIGHT" dropdown value into integers
    width, height = map(int, size.split('x'))

    # Fixed seed so repeated runs produce the same image
    generator = torch.Generator("cuda").manual_seed(42)

    try:
        result = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            generator=generator
        )
        if result and hasattr(result, 'images') and len(result.images) > 0:
            return result.images[0]
        else:
            print("Error: no images returned by the pipeline")
            return None
    except Exception as e:
        print(f"Error occurred: {e}")
        return None


with gr.Blocks() as demo:
    gr.Markdown("## Text to Image SDXL")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="Enter the prompt here...")
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter the negative prompt here...")
            size = gr.Dropdown(choices=["512x512", "768x768", "1024x1024"], value="1024x1024", label="Size")
            submit = gr.Button("Submit")
        with gr.Column():
            output = gr.Image(label="Output")

    submit.click(generate_image, inputs=[prompt, negative_prompt, size], outputs=output)

demo.launch()
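
# Note: a minimal sketch (not part of the demo above) of device-aware loading, assuming the
# same model_id. The script above hard-codes "cuda" and float16; on a machine without a CUDA
# GPU one would typically pick the device and dtype at runtime instead, e.g.:
#
#     device = "cuda" if torch.cuda.is_available() else "cpu"
#     dtype = torch.float16 if device == "cuda" else torch.float32
#     pipe = DiffusionPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-xl-base-1.0",
#         torch_dtype=dtype,
#         use_safetensors=True,
#     ).to(device)
#     generator = torch.Generator(device).manual_seed(42)
#
# Swapping these lines in for the hard-coded versions keeps the demo runnable (though slow)
# on CPU-only hosts.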