# Copyright 2023 ByteDance and/or its affiliates.
#
# Copyright (2023) MagicAnimate Authors
#
# ByteDance, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from ByteDance or
# its affiliates is strictly prohibited.
import os
import subprocess

import imageio
import numpy as np
import gradio as gr
from PIL import Image
from huggingface_hub import snapshot_download

from demo.animate import MagicAnimate

# Free disk space on the Space before downloading the model weights.
subprocess.run('pip cache purge', shell=True)

# Fetch the pretrained weights the pipeline expects to find on local disk.
snapshot_download(repo_id="stable-diffusion-v1-5/stable-diffusion-v1-5",
                  local_dir="./stable-diffusion-v1-5",
                  ignore_patterns=["*.safetensors"])
snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")

is_spaces = "SPACE_ID" in os.environ
# True only when running in the original shared Space (used below to decide
# whether Gradio should cache the example outputs).
true_for_shared_ui = False
if is_spaces:
    true_for_shared_ui = "zcxu-eric/magicanimate" in os.environ["SPACE_ID"]

animator = MagicAnimate()


def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
    # Textbox components deliver their values as strings, so cast them
    # before handing off to the pipeline.
    return animator(reference_image, motion_sequence_state,
                    int(seed), int(steps), float(guidance_scale))


with gr.Blocks() as demo:
    gr.HTML(
        """

        <h2 style="text-align: center;">
            MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model
        </h2>
        <p style="text-align: center;">
            If you like our project, please give us a star ✨ on GitHub for the latest updates.
        </p>
        <p style="text-align: center;">Project Page</p>
""") animation = gr.Video(format="mp4", label="Animation Results", autoplay=True) with gr.Row(): reference_image = gr.Image(label="Reference Image") motion_sequence = gr.Video(format="mp4", label="Motion Sequence",max_length=5) with gr.Column(): random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1") sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25") guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5") submit = gr.Button("Animate") def read_video(video): reader = imageio.get_reader(video) fps = reader.get_meta_data()['fps'] return video def read_image(image, size=512): return np.array(Image.fromarray(image).resize((size, size))) # when user uploads a new video motion_sequence.upload( read_video, motion_sequence, motion_sequence, queue=False ) # when `first_frame` is updated reference_image.upload( read_image, reference_image, reference_image, queue=False ) # when the `submit` button is clicked submit.click( animate, [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale], animation ) # Examples gr.Markdown("## Examples") gr.Examples( fn=animate, examples=[ ["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"], ["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"], ["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"], ["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"], ["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"], ], inputs=[reference_image, motion_sequence], outputs=animation, cache_examples=true_for_shared_ui ) # demo.queue(max_size=15, api_open=False) demo.launch(share=True, show_api=False)