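"""Gradio demo: generate a short video from a single uploaded image using
Stable Video Diffusion (stabilityai/stable-video-diffusion-img2vid-xt)."""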
import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
import numpy as np
from moviepy.editor import ImageSequenceClip
# Load the pipeline in fp16 to reduce GPU memory usage
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
# Offload idle model components to CPU so the pipeline fits on smaller GPUs
pipeline.enable_model_cpu_offload()
def generate_video(image, seed):
    # Load and resize the input image to the resolution SVD expects
    image = Image.open(image)
    image = image.resize((1024, 576))

    # Seed the generator for reproducible results
    # (gr.Number yields a float, so cast to int)
    generator = torch.manual_seed(int(seed))

    # Generate the video frames; .frames[0] is a list of PIL images
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Convert the PIL frames (already uint8, 0-255) to numpy arrays for moviepy
    frames = [np.array(frame) for frame in frames]

    # Export the frames to an H.264 video file
    clip = ImageSequenceClip(frames, fps=7)
    output_video_path = "generated.mp4"
    clip.write_videofile(output_video_path, codec="libx264")

    return output_video_path
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_video,
    inputs=[
        # type="filepath" passes a path that Image.open can read
        gr.Image(type="filepath", label="Upload Image"),
        gr.Number(label="Seed", value=42),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="Stable Video Diffusion",
    description="Generate a video from an uploaded image using Stable Video Diffusion.",
)
# Launch the interface
iface.launch()
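# Run locally with `python app.py`; Gradio serves the UI on port 7860 by default.
# Note: the `moviepy.editor` import above assumes moviepy < 2.0, where that
# module still exists.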