mrcuddle committed
Commit 35bb005 · verified · Parent: 2ca1d47

Update app.py

Files changed (1): app.py (+11, -16)
app.py CHANGED

```diff
@@ -1,36 +1,31 @@
 import torch
 import gradio as gr
 from diffusers import StableVideoDiffusionPipeline
-from PIL import Image
-import numpy as np
-from moviepy import ImageSequenceClip
-import spaces
+from diffusers.utils import load_image, export_to_video
+
+# Check if GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the pipeline
 pipeline = StableVideoDiffusionPipeline.from_pretrained(
     "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
 )
-pipeline.enable_model_cpu_offload()
+pipeline.to(device)
 
-@spaces.GPU
-def generate_video(image, seed):
-    # Preprocess the image
-    image = Image.open(image)
+def generate_video(image_path, seed):
+    # Load and preprocess the image
+    image = load_image(image_path)
     image = image.resize((1024, 576))
 
     # Set the generator seed
-    generator = torch.manual_seed(seed)
+    generator = torch.Generator(device=device).manual_seed(seed)
 
     # Generate the video frames
     frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
 
-    # Convert frames to a format suitable for video export
-    frames = [(frame * 255).astype(np.uint8) for frame in frames]
-
     # Export the frames to a video file
-    clip = ImageSequenceClip(frames, fps=7)
     output_video_path = "generated.mp4"
-    clip.write_videofile(output_video_path, codec="libx264")
+    export_to_video(frames, output_video_path, fps=7)
 
     return output_video_path
 
@@ -38,7 +33,7 @@ def generate_video(image, seed):
 iface = gr.Interface(
     fn=generate_video,
     inputs=[
-        gr.Image(type="file", label="Upload Image"),
+        gr.Image(type="filepath", label="Upload Image"),
         gr.Number(label="Seed", value=42)
     ],
     outputs=gr.Video(label="Generated Video"),
```
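In short, the commit drops the PIL, NumPy, moviepy, and `spaces` dependencies in favor of diffusers' own `load_image` and `export_to_video` helpers, moves the pipeline onto whatever device is available instead of relying on CPU offload, seeds a device-bound `torch.Generator` rather than the global RNG, and switches the Gradio image input from `type="file"` to `type="filepath"` so the callback receives a plain path.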
For reference, here is the new side of the diff assembled into a single listing (lines outside the two hunks are omitted):

```python
import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipeline.to(device)

def generate_video(image_path, seed):
    # Load and preprocess the image
    image = load_image(image_path)
    image = image.resize((1024, 576))

    # Set the generator seed
    generator = torch.Generator(device=device).manual_seed(seed)

    # Generate the video frames
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Export the frames to a video file
    output_video_path = "generated.mp4"
    export_to_video(frames, output_video_path, fps=7)

    return output_video_path

# ... (unchanged line between the two hunks not shown in the diff) ...

iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Image(type="filepath", label="Upload Image"),
        gr.Number(label="Seed", value=42)
    ],
    outputs=gr.Video(label="Generated Video"),
    # ... remainder of the Interface call and launch code not shown in the diff
```
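A note on the seeding change: `torch.manual_seed(seed)` mutates PyTorch's global CPU RNG, while `torch.Generator(device=device).manual_seed(seed)` gives the pipeline its own RNG on the device it actually runs on. A minimal sketch of the difference (standalone illustration, not part of the commit):

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# Two generators seeded identically on the same device produce identical
# draws, regardless of any other RNG activity in the process.
g1 = torch.Generator(device=device).manual_seed(42)
g2 = torch.Generator(device=device).manual_seed(42)

torch.randn(8)  # unrelated global-RNG use; does not disturb g1 or g2

a = torch.randn(4, generator=g1, device=device)
b = torch.randn(4, generator=g2, device=device)
assert torch.equal(a, b)  # reproducible draws from the scoped generators
```

Binding the generator to `device` also keeps the seeded RNG on the same device as the pipeline, so the same seed reproduces the same video.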
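The input-type change matters too: with `type="filepath"`, Gradio passes the upload to the callback as a string path, which is exactly what `load_image(image_path)` expects, and recent Gradio releases no longer accept `type="file"` for `gr.Image` (the supported values are "numpy", "pil", and "filepath"). A minimal sketch of the behavior (hypothetical demo, not from the repo):

```python
import gradio as gr

def describe_upload(image_path):
    # With type="filepath", image_path arrives as a plain string path
    # (a temp file written by Gradio), ready to hand to any image loader.
    return f"Got a file path: {image_path}"

demo = gr.Interface(
    fn=describe_upload,
    inputs=gr.Image(type="filepath", label="Upload Image"),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()
```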