# --- Residue from the Hugging Face Spaces web view (not Python source) ---
# Spaces: Paused
# File size: 4,861 Bytes
# (per-line blame-hash gutter and line-number gutter from the web page
#  removed; consult the Space's git history for commit hashes)
import subprocess
import sys

# Pin numpy at startup (HF Spaces workaround for a runtime/image mismatch).
# Use the current interpreter with an argument list instead of a shell
# string: `shell=True` + string is a quoting/injection hazard, and
# `check=True` makes a failed install raise instead of passing silently.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "numpy==1.26.4"],
    check=True,
)
import os
import gradio as gr
import torch
import spaces
import random
from PIL import Image
import numpy as np
from glob import glob
from pathlib import Path
from typing import Optional
from diffsynth import save_video, ModelManager, SVDVideoPipeline, HunyuanDiTImagePipeline
import uuid
# Optional Hugging Face auth token (read from the environment; not used
# directly below — presumably for gated model downloads. TODO confirm).
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
MAX_SEED = np.iinfo(np.int32).max  # upper bound for the seed slider / random seeds
# Hide the default Gradio footer in the rendered page.
CSS = """
footer {
visibility: hidden;
}
"""
# On page load, force the dark theme by redirecting to ?__theme=dark
# unless the URL already ends with it.
JS = """function () {
gradioURL = window.location.href
if (!gradioURL.endsWith('?__theme=dark')) {
window.location.replace(gradioURL + '?__theme=dark');
}
}"""
# Ensure model and scheduler are initialized in GPU-enabled function
# Load the SVD base weights plus the ExVideo 128-frame extension once at
# import time; `pipe` is then reused by every generate() call.
# NOTE(review): if CUDA is unavailable, `model_manager` and `pipe` are never
# defined and generate() will raise NameError — confirm this Space is
# GPU-only before relying on that.
if torch.cuda.is_available():
    model_manager = ModelManager(
        torch_dtype=torch.float16,  # half precision to fit the models on GPU
        device="cuda",
        # Base image-to-video model + ExVideo extension checkpoint.
        model_id_list=["stable-video-diffusion-img2vid-xt", "ExVideo-SVD-128f-v1"],
        downloading_priority=["HuggingFace"])
    pipe = SVDVideoPipeline.from_model_manager(model_manager)
@spaces.GPU(duration=120)
def generate(
        image,
        seed: Optional[int] = -1,
        motion_bucket_id: int = 127,
        fps_id: int = 25,
        num_inference_steps: int = 10,
        num_frames: int = 50,
        output_folder: str = "outputs",
        progress=gr.Progress(track_tqdm=True)):
    """Generate a short video from an input image with ExVideo/SVD.

    Args:
        image: Filepath of the uploaded image (gr.Image type="filepath").
        seed: RNG seed; ``-1`` (or ``None``) picks a random seed in
            ``[0, MAX_SEED]``.
        motion_bucket_id: How much motion to add to the image (1-255).
        fps_id: Frames per second of the saved video.
        num_inference_steps: Number of diffusion denoising steps.
        num_frames: Number of frames to synthesize.
        output_folder: Directory where numbered ``.mp4`` files are written.
        progress: Gradio progress tracker (linked to tqdm).

    Returns:
        Tuple of (path to the saved mp4, seed actually used).

    Raises:
        gr.Error: If no image was uploaded.
    """
    if image is None:
        # Fail fast with a user-visible message instead of a PIL traceback.
        raise gr.Error("Please upload an image first.")
    # The annotation allows None, but torch.manual_seed(None) would crash;
    # treat None the same as the -1 "random" sentinel.
    if seed is None or seed == -1:
        seed = random.randint(0, MAX_SEED)
    torch.manual_seed(seed)

    os.makedirs(output_folder, exist_ok=True)
    # Next free sequential filename, e.g. outputs/000003.mp4.
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

    # Open inside a context manager so the file descriptor is released;
    # the original bare Image.open() leaked it. resize() forces the pixel
    # data to load before the file is closed.
    with Image.open(image) as img:
        input_image = img.resize((512, 512))

    video = pipe(
        input_image=input_image,
        num_frames=num_frames,
        fps=fps_id,
        height=512,
        width=512,
        motion_bucket_id=motion_bucket_id,
        num_inference_steps=num_inference_steps,
        min_cfg_scale=2,
        max_cfg_scale=2,
        contrast_enhance_scale=1.2
    )
    # Move model weights off the GPU before encoding the video on CPU.
    model_manager.to("cpu")
    save_video(video, video_path, fps=fps_id)
    return video_path, seed
# Sample inputs shown in the gr.Examples gallery (paths relative to the
# app's working directory).
examples = [
    "./train.jpg",
    "./girl.webp",
    "./robo.jpg",
]
# Gradio Interface
with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
    gr.HTML("<h1><center>Exvideo📽️</center></h1>")
    gr.HTML("<p><center><a href='https://huggingface.co/ECNU-CILab/ExVideo-SVD-128f-v1'>ExVideo</a> image-to-video generation<br><b>Update</b>: first version</center></p>")
    with gr.Row():
        # type="filepath": generate() receives a path string, not an array.
        image = gr.Image(label='Upload Image', height=600, scale=2, image_mode="RGB", type="filepath")
        video = gr.Video(label="Generated Video", height=600, scale=2)
    with gr.Accordion("Advanced Options", open=True):
        with gr.Column(scale=1):
            # -1 is the "pick a random seed" sentinel handled in generate().
            seed = gr.Slider(
                label="Seed (-1 Random)",
                minimum=-1,
                maximum=MAX_SEED,
                step=1,
                value=-1,
            )
            motion_bucket_id = gr.Slider(
                label="Motion bucket id",
                info="Controls how much motion to add/remove from the image",
                value=127,
                step=1,
                minimum=1,
                maximum=255
            )
            fps_id = gr.Slider(
                label="Frames per second",
                info="The length of your video in seconds will be 25/fps",
                value=6,
                step=1,
                minimum=5,
                maximum=30
            )
            num_inference_steps = gr.Slider(
                label="Inference steps",
                info="Inference steps",
                step=1,
                value=10,
                minimum=1,
                maximum=50
            )
            # Capped at 128: the maximum ExVideo-SVD-128f supports.
            num_frames = gr.Slider(
                label="Frames num",
                info="Frames num",
                step=1,
                value=50,
                minimum=1,
                maximum=128
            )
    with gr.Row():
        submit_btn = gr.Button(value="Generate")
        #stop_btn = gr.Button(value="Stop", variant="stop")
        clear_btn = gr.ClearButton([image, seed, video])
    # "lazy" caching: example outputs are rendered on first click, not at build.
    gr.Examples(
        examples=examples,
        inputs=image,
        outputs=[video, seed],
        fn=generate,
        cache_examples="lazy",
        examples_per_page=4,
    )
    # Remaining generate() parameters fall back to their defaults.
    submit_event = submit_btn.click(fn=generate, inputs=[image, seed, motion_bucket_id, fps_id, num_inference_steps, num_frames], outputs=[video, seed], api_name="video")
    #stop_btn.click(fn=None, inputs=None, outputs=None, cancels=[submit_event])
demo.queue().launch() |