#!/usr/bin/env python
import os
import pathlib
import tempfile
import gradio as gr
import torch
from huggingface_hub import snapshot_download
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
# Page header shown at the top of the demo.
DESCRIPTION = "# ModelScope-Image2Video"

if torch.cuda.is_available():
    # Download the model weights once (cached under MODEL_CACHE_DIR) and
    # build the image-to-video pipeline on the first GPU.
    cache_root = os.getenv("MODEL_CACHE_DIR", "./models")
    weights_dir = pathlib.Path(cache_root) / "MS-Image2Video"
    snapshot_download(repo_id="damo-vilab/MS-Image2Video", repo_type="model", local_dir=weights_dir)
    pipe = pipeline(task="image-to-video", model=weights_dir.as_posix(), model_revision="v1.1.0", device="cuda:0")
else:
    # No GPU: warn in the page header and leave the pipeline unset.
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
    pipe = None
def image_to_video(image_path: str) -> str:
    """Generate a video from a single input image.

    Args:
        image_path: Filesystem path to the input image.

    Returns:
        Path to the generated ``.mp4`` video file (caller owns the file).

    Raises:
        RuntimeError: If the pipeline was not initialized (no GPU available).
    """
    if pipe is None:
        # On CPU the pipeline is never built; fail with a clear message instead
        # of an opaque "'NoneType' object is not callable".
        raise RuntimeError("The image-to-video pipeline requires a GPU and is unavailable.")
    # mkstemp + immediate close avoids holding an open handle on the file the
    # pipeline writes to (the previous NamedTemporaryFile was never closed).
    fd, output_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)
    pipe(image_path, output_video=output_path)
    return output_path
# Assemble the Gradio UI: image input + run button feeding a video output.
with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        # Hidden unless the Space explicitly opts in via env var.
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )
    with gr.Group():
        image_input = gr.Image(label="Input image", type="filepath")
        generate_button = gr.Button()
    video_output = gr.Video(label="Output video")
    generate_button.click(
        fn=image_to_video,
        inputs=image_input,
        outputs=video_output,
        api_name="run",
    )

if __name__ == "__main__":
    # Queue limits concurrent requests — needed for long-running GPU jobs.
    demo.queue(max_size=10).launch()