# app.py for the "Running on Zero" Space: generate a short video from an
# uploaded image with Stable Video Diffusion.

import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
import spaces
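
# `spaces` is the Hugging Face ZeroGPU helper: on a ZeroGPU Space, a function
# decorated with @spaces.GPU is attached to a GPU only while it runs.
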
# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the image-to-video pipeline in half precision (the "fp16" variant
# downloads fp16 weights, which need a CUDA device to run at full speed)
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipeline.to(device)
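
# ZeroGPU grants the GPU per call: `duration` caps each invocation of the
# decorated function at 120 seconds of GPU time.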
@spaces.GPU(duration=120)
def generate_video(image_path, seed):
    # Load the image and resize it to the resolution SVD-XT was trained on
    image = load_image(image_path)
    image = image.resize((1024, 576))

    # Seed the generator for reproducible output; Gradio's Number component
    # delivers a float by default, so cast to int before seeding
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Generate the video frames; decoding 8 frames at a time bounds VRAM use
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Export the frames to an MP4 file
    output_video_path = "generated.mp4"
    export_to_video(frames, output_video_path, fps=25)
    return output_video_path

# Create the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Stable Video Diffusion")
    gr.Markdown("Generate a video from an uploaded image using Stable Video Diffusion.")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="Upload Image")
            seed_input = gr.Number(label="Seed", value=666666)
            generate_button = gr.Button("Generate Video")
        with gr.Column():
            video_output = gr.Video(label="Generated Video")

    # Display the example image
    example_image = gr.Image("example.jpeg", label="Example Image")
    with gr.Row():
        example_video = gr.Video("generated.mp4", label="Example Video")

    generate_button.click(
        fn=generate_video,
        inputs=[image_input, seed_input],
        outputs=video_output
    )

# Launch the interface
if __name__ == "__main__":
    demo.launch()
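
# To try this outside Spaces (a sketch; assumes this dependency list covers the
# pipeline's needs, and that an example.jpeg sits next to this file):
#   pip install torch diffusers transformers accelerate gradio spaces
#   python app.py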