# MimicMotion / app.py
import gradio as gr
import os
import yaml
import tempfile
import huggingface_hub
import subprocess
import threading
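
# Stream a subprocess pipe to stdout line by line so the inference logs appear as they are produced.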
def stream_output(pipe):
    for line in iter(pipe.readline, ''):
        print(line, end='')
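
# Authenticate with the Hugging Face Hub; GATED_HF_TOKEN is expected to be set in the environment (e.g. as a Space secret).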
HF_TKN = os.environ.get("GATED_HF_TOKEN")
huggingface_hub.login(token=HF_TKN)
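
# Fetch the DWPose detector weights and the MimicMotion checkpoint into ./models.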
huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='yolox_l.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)
huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='dw-ll_ucoco_384.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)
huggingface_hub.hf_hub_download(
    repo_id='ixaac/MimicMotion',
    filename='MimicMotion_1.pth',
    local_dir='./models',
    local_dir_use_symlinks=False,
)
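
# Helper that logs a directory tree, used to verify the downloaded checkpoints and the generated outputs.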
def print_directory_contents(path):
    for root, dirs, files in os.walk(path):
        level = root.replace(path, '').count(os.sep)
        indent = ' ' * 4 * level
        print(f"{indent}{os.path.basename(root)}/")
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{subindent}{f}")
# Path to the directory you want to print
directory_path = './models'
# Print the directory contents
print_directory_contents(directory_path)
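
# Run MimicMotion: write a temporary inference config, launch inference.py as a subprocess, and stream its output.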
def infer(ref_video_in, ref_image_in):
    # Create a temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:
        print("Temporary directory created:", temp_dir)

        # Define the values for the variables
        ref_video_path = ref_video_in
        ref_image_path = ref_image_in
        num_frames = 16
        resolution = 576
        frames_overlap = 6
        num_inference_steps = 25
        noise_aug_strength = 0
        guidance_scale = 2.0
        sample_stride = 2
        fps = 12
        seed = 42

        # Create the data structure
        data = {
            'base_model_path': 'stabilityai/stable-video-diffusion-img2vid-xt-1-1',
            'ckpt_path': 'models/MimicMotion_1.pth',
            'test_case': [
                {
                    'ref_video_path': ref_video_path,
                    'ref_image_path': ref_image_path,
                    'num_frames': num_frames,
                    'resolution': resolution,
                    'frames_overlap': frames_overlap,
                    'num_inference_steps': num_inference_steps,
                    'noise_aug_strength': noise_aug_strength,
                    'guidance_scale': guidance_scale,
                    'sample_stride': sample_stride,
                    'fps': fps,
                    'seed': seed
                }
            ]
        }

        # Define the file path
        file_path = os.path.join(temp_dir, 'config.yaml')

        # Write the data to a YAML file
        with open(file_path, 'w') as file:
            yaml.dump(data, file, default_flow_style=False)
        print("YAML file 'config.yaml' created successfully in", file_path)

        # Execute the inference command
        command = ['python', 'inference.py', '--inference_config', file_path]
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1)

        # Create threads to handle stdout and stderr
        stdout_thread = threading.Thread(target=stream_output, args=(process.stdout,))
        stderr_thread = threading.Thread(target=stream_output, args=(process.stderr,))

        # Start the threads
        stdout_thread.start()
        stderr_thread.start()

        # Wait for the process to complete and the threads to finish
        process.wait()
        stdout_thread.join()
        stderr_thread.join()

        print("Inference script finished with return code:", process.returncode)

        # Print the directory contents
        print_directory_contents('./outputs')

        return "done"
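
# Minimal Gradio UI: a reference video and a reference image in, a status string out.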
demo = gr.Interface(
    fn=infer,
    inputs=[gr.Video(), gr.Image(type="filepath")],
    outputs=[gr.Textbox()]
)
demo.launch()