Masrkai committed on
Commit
97677c5
1 Parent(s): 41e6120

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -19
app.py CHANGED
@@ -1,24 +1,49 @@
1
  import torch
2
  from diffusers import ShapEPipeline
3
  from diffusers.utils import export_to_gif
 
4
 
5
# Legacy flat-script version: load Shap-E on CPU and render one prompt to a GIF.
ckpt_id = "openai/shap-e"
device = torch.device("cpu")

# Attempt to load the pipeline without auth for testing
try:
    pipe = ShapEPipeline.from_pretrained(ckpt_id).to(device)
except Exception as e:
    print(f"Error loading model: {e}")
    # exit() is an interactive-session helper and may be missing in frozen
    # apps; raising SystemExit is the reliable script equivalent.
    raise SystemExit(1)

guidance_scale = 15.0
prompt = "A gentle AI voice assistant constructed from a circle ring and 3 lines that fly alongside the circle"

# Run the model with minimal parameters to troubleshoot
try:
    # pipe(...) returns an output object whose .images holds the rendered frames.
    images = pipe(prompt, guidance_scale=guidance_scale).images
    gif_path = export_to_gif(images, "assistant_3d.gif")
    print(f"GIF created at: {gif_path}")
except Exception as e:
    # Best-effort: report the failure rather than crash with a traceback.
    print(f"Error generating images: {e}")
 
1
  import torch
2
  from diffusers import ShapEPipeline
3
  from diffusers.utils import export_to_gif
4
+ import PIL.Image
5
 
6
def generate_3d_model(prompt, output_path="assistant_3d.gif"):
    """
    Generate an animated 3D-model GIF from a text prompt using Shap-E on CPU.

    Parameters
    ----------
    prompt : str
        Text description of the object to generate.
    output_path : str
        Destination path for the GIF (default "assistant_3d.gif").

    Returns
    -------
    The path returned by ``export_to_gif`` for the written GIF.

    Raises
    ------
    Exception
        Any error from model loading or inference is logged and re-raised.
    """
    try:
        # Run entirely on CPU. float32 is the CPU-safe dtype (half precision
        # is poorly supported there); low_cpu_mem_usage streams weights in
        # to keep peak RAM down while loading.
        pipe = ShapEPipeline.from_pretrained(
            "openai/shap-e",
            torch_dtype=torch.float32,
            low_cpu_mem_usage=True,
        ).to("cpu")

        # Minimal generation settings to reduce memory usage and runtime.
        outputs = pipe(
            prompt,
            num_inference_steps=32,  # reduced from the default
            frame_size=32,           # smaller render resolution per frame
            guidance_scale=10.0,     # lower guidance than the earlier 15.0
            num_frames=30,           # fewer animation frames
        )

        # export_to_gif expects PIL images; convert raw arrays if needed.
        if not isinstance(outputs.images[0], PIL.Image.Image):
            images = [PIL.Image.fromarray(img) for img in outputs.images]
        else:
            images = outputs.images

        # Save as GIF
        gif_path = export_to_gif(images, output_path)
        print(f"Successfully created GIF at: {gif_path}")
        return gif_path

    except Exception as e:
        # Log once with the concrete exception type (the previous three
        # prints emitted the same message text twice), then propagate so
        # the caller can decide how to handle the failure.
        print(f"Error during generation: {e}")
        print(f"Error type: {type(e)}")
        raise
43
 
44
if __name__ == "__main__":
    # A deliberately simple prompt keeps CPU-only generation tractable.
    demo_prompt = (
        "A gentle AI voice assistant constructed from a circle ring "
        "and 3 lines that fly alongside the circle"
    )
    try:
        generate_3d_model(demo_prompt)
    except Exception as err:
        print(f"Generation failed: {err}")