Update app.py
app.py CHANGED
@@ -3,13 +3,13 @@ import torch
 from diffusers import StableDiffusionPipeline
 
 # Load the model (replace with your actual model path)
-model_name = "
+model_name = "ZB-Tech/Text-to-Image"  # Specified model
 device = "cpu"  # Ensure we are using CPU
 
 print(f"Using device: {device}")
 
-# Load the Stable Diffusion model
 try:
+    # Load the Stable Diffusion model
     print("Loading model...")
     pipe = StableDiffusionPipeline.from_pretrained(model_name).to(device)
     print("Model loaded successfully.")
@@ -20,14 +20,12 @@ except Exception as e:
 def generate_image(prompt):
     try:
         print(f"Generating image for prompt: '{prompt}'")
-        # Generate image based on the prompt
         with torch.no_grad():
-            #
-
-        return image  # Return the generated image
+            image = pipe(prompt).images[0]  # Adjust this line based on your model's output structure
+            return image
     except Exception as e:
         print(f"Error generating image: {e}")
-        return None
+        return None
 
 # Gradio interface setup
 gr.Interface(