Update app.py
Browse files
app.py
CHANGED
# Generate a 3D asset with OpenAI's Shap-E text-to-3D pipeline and export it
# as an animated GIF. Runs on CPU only (no CUDA assumed).
# NOTE(review): `torch`, `ShapEPipeline`, and `export_to_gif` are imported in
# the file header above this hunk (`from diffusers.utils import export_to_gif`
# is visible in the diff context) — confirm the other two imports are present.

ckpt_id = "openai/shap-e"
device = torch.device("cpu")

# Attempt to load the pipeline without auth for testing.
# Broad catch is deliberate: this is the script's top-level boundary —
# report the failure and exit with a non-zero status instead of a traceback.
try:
    pipe = ShapEPipeline.from_pretrained(ckpt_id).to(device)
except Exception as e:
    print(f"Error loading model: {e}")
    # `exit()` is the interactive `site` helper and can be unavailable in
    # frozen/embedded interpreters; SystemExit(1) is the equivalent, robust form.
    raise SystemExit(1)

guidance_scale = 15.0
prompt = "A gentle AI voice assistant constructed from a circle ring and 3 lines that fly alongside the circle"

# Run the model with minimal parameters to troubleshoot
# (inference-step count, frame size, etc. are left at pipeline defaults).
try:
    images = pipe(prompt, guidance_scale=guidance_scale).images
    gif_path = export_to_gif(images, "assistant_3d.gif")
    print(f"GIF created at: {gif_path}")
except Exception as e:
    print(f"Error generating images: {e}")