import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
import spaces

# Use the GPU when available; fp16 is poorly supported on CPU, so fall back to fp32 there
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the pipeline (the fp16 variant halves the checkpoint download)
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=dtype, variant="fp16"
)
pipeline.to(device)
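# Optional (assumption: a memory-constrained GPU): diffusers can offload submodules to
# the CPU between forward passes instead of keeping the whole pipeline resident, at some
# speed cost. To try it, replace pipeline.to(device) above with:
# pipeline.enable_model_cpu_offload()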

# On Hugging Face ZeroGPU Spaces, @spaces.GPU requests a GPU per call; duration caps it at 120 s
@spaces.GPU(duration=120)
def generate_video(image_path, seed):
    # Load the image and resize it to SVD-XT's native 1024x576 resolution
    image = load_image(image_path)
    image = image.resize((1024, 576))

    # Seed the generator for reproducibility (gr.Number yields a float, so cast to int)
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Generate the video frames; decode_chunk_size bounds how many frames the VAE
    # decodes at once, lowering peak VRAM at a small speed cost
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Export the 25 generated frames to an MP4 (a one-second clip at 25 fps)
    output_video_path = "generated.mp4"
    export_to_video(frames, output_video_path, fps=25)

    return output_video_path

# Create the Gradio interface
iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Image(type="filepath", label="Upload Image"),
        gr.Number(label="Seed", value=666666, precision=0)
    ],
    outputs=gr.Video(label="Generated Video"),
    title="Stable Video Diffusion",
    # Each example row must supply one value per input component (image path, seed);
    # the previous second example pointed a video file at the Image input
    examples=[
        ["image.jpeg", 666666]
    ],
    description="Generate a video from an uploaded image using Stable Video Diffusion.",
)
# Launch the interface
iface.launch()
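# Run with `python app.py`; by default Gradio serves the UI at http://127.0.0.1:7860.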