# app.py — Gradio text-to-image demo (author: kukiagrawal, commit 6d2d1a4)
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
# --- Model setup -----------------------------------------------------------
# Hugging Face repo to pull the pipeline from, and the inference device.
model_name = "ZB-Tech/Text-to-Image"  # Specified model
device = "cpu"  # Ensure we are using CPU
print(f"Using device: {device}")

# Load the Stable Diffusion pipeline once at startup; fail fast (re-raise)
# if the download/initialization does not succeed.
try:
    print("Loading model...")
    pipe = StableDiffusionPipeline.from_pretrained(model_name).to(device)
    print("Model loaded successfully.")
except Exception as e:
    print(f"Error loading model: {e}")
    raise
def generate_image(prompt):
    """Run the loaded pipeline on *prompt* and return the first generated image.

    Returns a PIL image on success, or None if generation fails
    (Gradio renders None as an empty image output).
    """
    try:
        print(f"Generating image for prompt: '{prompt}'")
        # Inference only — disable autograd bookkeeping.
        with torch.no_grad():
            result = pipe(prompt)  # Adjust based on your model's output structure
            image = result.images[0]
        return image
    except Exception as e:
        print(f"Error generating image: {e}")
        return None
# --- Web UI ---------------------------------------------------------------
# One textbox in, one PIL image out; launching starts the Gradio server.
demo = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter your prompt (e.g., 'a man eating dumplings in sea')"),
    outputs=gr.Image(type="pil"),  # Ensure output type is PIL Image
    title="Text-to-Image Generator",
    description="Generate images from text prompts.",
)
demo.launch()