# Hugging Face Space: Stable Video Diffusion (img2vid-xt), running on ZeroGPU.
import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
import spaces
# Pick the compute device up front: prefer CUDA when a GPU is present,
# otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Instantiate the Stable Video Diffusion img2vid pipeline in half precision
# (fp16 weights variant) and move it onto the selected device.
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipeline.to(device)
@spaces.GPU(duration=120)
def generate_video(image_path, seed, fps, duration, use_duration):
    """Generate a short video from a single image with Stable Video Diffusion.

    Args:
        image_path: Filesystem path to the source image.
        seed: RNG seed. gr.Number delivers this as a float, so it is
            coerced to int before seeding.
        fps: Frames per second for the output (used when ``use_duration``
            is False).
        duration: Target clip length in seconds (used when
            ``use_duration`` is True).
        use_duration: When True, derive fps as ``len(frames) / duration``
            instead of using ``fps`` directly.

    Returns:
        Path to the exported MP4 file ("generated.mp4").
    """
    # Load and resize to the resolution SVD-XT expects (width 1024, height 576).
    image = load_image(image_path)
    image = image.resize((1024, 576))

    # torch.Generator.manual_seed requires an int; gr.Number hands us a float.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Generate the video frames; decode in chunks of 8 to bound peak VRAM.
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Derive fps from the requested duration when asked.
    if use_duration:
        fps = len(frames) / duration
    # Video encoders expect a positive integer frame rate; round and clamp.
    fps = max(1, int(round(fps)))

    # Export the frames to a video file.
    output_video_path = "generated.mp4"
    export_to_video(frames, output_video_path, fps=fps)
    return output_video_path
# Build the Gradio interface: image + seed + fps/duration controls on the
# left, the generated video on the right, with an example row below.
with gr.Blocks() as demo:
    gr.Markdown("# Stable Video Diffusion")
    gr.Markdown("Generate 25 video frames at 576x1024 ")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="filepath", label="Upload Image")
            seed_input = gr.Number(label="Seed", value=666666)
            use_duration_toggle = gr.Checkbox(label="Use Duration")
            fps_input = gr.Number(label="FPS", value=25, minimum=1, maximum=60, visible=True)
            duration_input = gr.Number(label="Duration (seconds)", value=1, minimum=1, maximum=60, visible=False)
            generate_button = gr.Button("Generate Video")
        with gr.Column():
            video_output = gr.Video(label="Generated Video")
    with gr.Row():
        # Use "/resolve/main/" (raw file bytes) instead of "/blob/main/"
        # (HTML viewer page) so the Image component receives an actual image.
        example_image = gr.Image("https://huggingface.co/spaces/mrcuddle/SDXT-Image-To-Video/resolve/main/image.jpeg", label="Example Image")
        example_video = gr.Video(value="generated.mp4", label="Example Video")

    def toggle_visibility(use_duration):
        """Show exactly one of the FPS / Duration inputs based on the checkbox."""
        return {
            fps_input: gr.update(visible=not use_duration),
            duration_input: gr.update(visible=use_duration)
        }

    # Swap FPS/Duration visibility whenever the checkbox changes.
    use_duration_toggle.change(
        fn=toggle_visibility,
        inputs=use_duration_toggle,
        outputs=[fps_input, duration_input]
    )
    # Wire the button to the generation function.
    generate_button.click(
        fn=generate_video,
        inputs=[image_input, seed_input, fps_input, duration_input, use_duration_toggle],
        outputs=video_output
    )

# Launch the interface only when run as a script.
if __name__ == "__main__":
    demo.launch()