import os, gc, math, random
# Set the CUDA allocator and ZeroGPU env knobs before torch/spaces initialize CUDA.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["ZERO_GPU_PATCH_TORCH_DEVICE"] = "True"
import spaces, torch
import numpy as np
import gradio as gr
from PIL import Image, ImageOps
from moviepy import VideoFileClip
from huggingface_hub import hf_hub_download, snapshot_download, login

HF_TOKEN = os.environ.get('HF_TOKEN')
if HF_TOKEN:  # login(token=None) would fall back to an interactive prompt
    login(token=HF_TOKEN)
import insightface
from insightface.app import FaceAnalysis
from facexlib.parsing import init_parsing_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from diffusers import CogVideoXDPMScheduler
from diffusers.image_processor import VaeImageProcessor
from diffusers.training_utils import free_memory
from util.utils import *
from util.rife_model import load_rife_model, rife_inference_with_latents
from models.utils import process_face_embeddings
from models.transformer_consisid import ConsisIDTransformer3DModel
from models.pipeline_consisid import ConsisIDPipeline
from models.eva_clip import create_model_and_transforms
from models.eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from models.eva_clip.utils_qformer import resize_numpy_image_long
device = "cuda" if torch.cuda.is_available() else "cpu"
from accelerate import Accelerator
accelerator = Accelerator()
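# Fetch the pretrained weights: Real-ESRGAN x4 upscaler, RIFE frame interpolator, and the ConsisID checkpoint.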
hf_hub_download(repo_id="ai-forever/Real-ESRGAN", filename="RealESRGAN_x4.pth", local_dir="model_real_esrgan")
snapshot_download(repo_id="AlexWortega/RIFE", local_dir="model_rife")
snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview")

model_path = "BestWishYsh/ConsisID-preview"
lora_path = None
lora_rank = 128
dtype = torch.bfloat16

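# Prefer the EMA transformer weights when the checkpoint provides them.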
if os.path.exists(os.path.join(model_path, "transformer_ema")):
    subfolder = "transformer_ema"
else:
    subfolder = "transformer"
        
transformer = ConsisIDTransformer3DModel.from_pretrained_cus(model_path, subfolder=subfolder)
scheduler = CogVideoXDPMScheduler.from_pretrained(model_path, subfolder="scheduler")

# Older checkpoints may not expose `is_kps`; default to keypoint-free conditioning.
is_kps = getattr(transformer.config, "is_kps", False)
    
# 1. load face helper models
face_helper = FaceRestoreHelper(
    upscale_factor=1,
    face_size=512,
    crop_ratio=(1, 1),
    det_model='retinaface_resnet50',
    save_ext='png',
    device=device,
    model_rootpath=os.path.join(model_path, "face_encoder")
)
face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device, model_rootpath=os.path.join(model_path, "face_encoder"))
face_helper.face_det.eval()
face_helper.face_parse.eval()

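# 2. load the EVA-CLIP vision tower used as the semantic face encoder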
model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', os.path.join(model_path, "face_encoder", "EVA02_CLIP_L_336_psz14_s6B.pt"), force_custom_clip=True)
face_clip_model = model.visual
face_clip_model.eval()

eva_transform_mean = getattr(face_clip_model, 'image_mean', OPENAI_DATASET_MEAN)
eva_transform_std = getattr(face_clip_model, 'image_std', OPENAI_DATASET_STD)
if not isinstance(eva_transform_mean, (list, tuple)):
    eva_transform_mean = (eva_transform_mean,) * 3
if not isinstance(eva_transform_std, (list, tuple)):
    eva_transform_std = (eva_transform_std,) * 3

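# 3. load the antelopev2 face analysis models and the ArcFace (glintr100) identity embedder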
face_main_model = FaceAnalysis(name='antelopev2', root=os.path.join(model_path, "face_encoder"), providers=['CUDAExecutionProvider'])
handler_ante = insightface.model_zoo.get_model(f'{model_path}/face_encoder/models/antelopev2/glintr100.onnx', providers=['CUDAExecutionProvider'])
face_main_model.prepare(ctx_id=0, det_size=(640, 640))
handler_ante.prepare(ctx_id=0)
    
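# 4. move the face models and the transformer onto the target device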
face_clip_model.to(device, dtype=dtype)
face_helper.face_det.to(device)
face_helper.face_parse.to(device)
transformer.to(device, dtype=dtype)

pipe = accelerator.prepare(
    ConsisIDPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype)
)
# If a LoRA checkpoint is provided, load it and fuse it into the transformer.
if lora_path:
    pipe.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors", adapter_name="test_1")
    pipe.fuse_lora(lora_scale=1 / lora_rank)

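# Coerce learned-variance scheduler configs to fixed_small before rebuilding the scheduler.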
scheduler_args = {}
if "variance_type" in pipe.scheduler.config:
    variance_type = pipe.scheduler.config.variance_type
    if variance_type in ["learned", "learned_range"]:
        variance_type = "fixed_small"
    scheduler_args["variance_type"] = variance_type

pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
pipe.to(device)

os.makedirs("./output", exist_ok=True)
os.makedirs("./gradio_tmp", exist_ok=True)

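# Post-processing models: Real-ESRGAN for upscaling, RIFE for frame interpolation.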
upscale_model = load_sd_upscale("model_real_esrgan/RealESRGAN_x4.pth", device)
frame_interpolation_model = load_rife_model("model_rife")

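# Render a GIF preview alongside the MP4 so both can be offered for download.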
def convert_to_gif(video_path):
    clip = VideoFileClip(video_path)
    gif_path = video_path.replace(".mp4", ".gif")
    clip.write_gif(gif_path, fps=8)
    return gif_path

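# Main generation entry point; ZeroGPU allocates a GPU for up to 180 seconds per call.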
@spaces.GPU(duration=180)
def plex(prompt, image_input, stips, gscale, seed_value, scale_status, rife_status, progress=gr.Progress(track_tqdm=True)):
    seed = seed_value
    if seed == -1:
        seed = random.randint(0, 2**8 - 1)
    id_image = np.array(ImageOps.exif_transpose(Image.fromarray(image_input)).convert("RGB"))
    id_image = resize_numpy_image_long(id_image, 1024)
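    # Extract the ArcFace identity embedding, EVA-CLIP hidden states, the aligned face crop, and optional facial keypoints.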
    id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(
        face_helper, face_clip_model, handler_ante, eva_transform_mean, eva_transform_std,
        face_main_model, device, dtype, id_image,
        original_id_image=id_image, is_align_face=True, cal_uncond=False,
    )
    kps_cond = face_kps if is_kps else None
    # Convert the aligned face crop (C, H, W, values in [0, 1]) into a uint8 PIL image.
    tensor = align_crop_face_image.cpu().detach().squeeze().permute(1, 2, 0)
    tensor = (tensor.numpy() * 255).astype(np.uint8)
    image = ImageOps.exif_transpose(Image.fromarray(tensor))
    prompt = prompt.strip('"')
    generator = torch.Generator(device).manual_seed(seed)  # seed may legitimately be 0, so always seed explicitly
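    # 49 frames at 8 fps ≈ 6 seconds; output_type="pt" returns tensors for post-processing.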
    video_pt = pipe(
        prompt=prompt, image=image, num_videos_per_prompt=1, num_inference_steps=stips,
        num_frames=49, use_dynamic_cfg=False, guidance_scale=gscale, generator=generator,
        id_vit_hidden=id_vit_hidden, id_cond=id_cond, kps_cond=kps_cond, output_type="pt",
    )
    latents = video_pt.frames
    # free_memory()  # optionally reclaim accelerator memory before post-processing
    if scale_status:
        latents = upscale_batch_and_concatenate(upscale_model, latents, device)
    if rife_status:
        latents = rife_inference_with_latents(frame_interpolation_model, latents)

    batch_size = latents.shape[0]
    batch_video_frames = []
    for batch_idx in range(batch_size):
        pt_image = latents[batch_idx]
        pt_image = torch.stack([pt_image[i] for i in range(pt_image.shape[0])])

        image_np = VaeImageProcessor.pt_to_numpy(pt_image)
        image_pil = VaeImageProcessor.numpy_to_pil(image_np)
        batch_video_frames.append(image_pil)

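    # fps is derived so the clip spans ~6 s: 8 fps for 49 frames, 16 fps after RIFE doubling.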
    video_path = save_video(batch_video_frames[0], fps=math.ceil((len(batch_video_frames[0]) - 1) / 6))
    video_update = gr.update(visible=True, value=video_path)
    gif_path = convert_to_gif(video_path)
    gif_update = gr.update(visible=True, value=gif_path)
    seed_update = gr.update(visible=True, value=seed)
    gc.collect()
    return video_path, video_update, gif_update, seed_update

examples_images = [
    ["asserts/example_images/1.png", "A woman adorned with a delicate flower crown, is standing amidst a field of gently swaying wildflowers. Her eyes sparkle with a serene gaze, and a faint smile graces her lips, suggesting a moment of peaceful contentment. The shot is framed from the waist up, highlighting the gentle breeze lightly tousling her hair. The background reveals an expansive meadow under a bright blue sky, capturing the tranquility of a sunny afternoon."],
    ["asserts/example_images/2.png", "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel."],
    ["asserts/example_images/3.png", "The video depicts a man sitting at an office desk, engaged in his work. He is dressed in a formal suit and appears to be focused on his computer screen. The office environment is well-organized, with shelves filled with binders and other office supplies neatly arranged. The man is holding a red cup, possibly containing a beverage, which he drinks from before setting it down on the desk. He then proceeds to type on the keyboard, indicating that he is working on something on his computer. The overall atmosphere of the video suggests a professional setting where the man is diligently working on his tasks."]
]

with gr.Blocks() as demo:
    gr.Markdown("""
           <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
               🤗ConsisID Space🤗
           </div>
           <div style="text-align: center;">
               <a href="https://huggingface.co/BestWishYsh/ConsisID">🤗 Model Hub</a> |
               <a href="https://huggingface.co/datasets/BestWishYsh/ConsisID-preview-Data">📚 Dataset</a> |
               <a href="https://github.com/PKU-YuanGroup/ConsisID">🌐 Github</a> |
               <a href="https://pku-yuangroup.github.io/ConsisID">📝 Page</a> |
               <a href="https://arxiv.org/abs/2411.17440">📜 arXiv</a>
           </div>
           <div style="text-align: center;display: flex;justify-content: center;align-items: center;margin-top: 1em;margin-bottom: .5em;">
              <span>If the Space is too busy, duplicate it to use privately</span>
              <a href="https://huggingface.co/spaces/BestWishYsh/ConsisID-Space?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg.svg" width="160" style="
                margin-left: .75em;
            "></a>
           </div>
           <div style="text-align: center; font-size: 15px; font-weight: bold; color: red; margin-bottom: 20px;">
            ⚠️ This demo is for academic research and experimental use only.
            </div>
           """)
    with gr.Row():
        with gr.Column():
            with gr.Accordion("IPT2V: Face Input", open=True):
                image_input = gr.Image(label="Input Image (should contain a clear face)")
                prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=3)

            with gr.Group():
                with gr.Column():
                    with gr.Row():
                        stips = gr.Slider(label="Steps", minimum=6, step=1, maximum=10, value=10)
                        gscale = gr.Slider(label="Guidance scale", minimum=1, step=0.1, maximum=20, value=7.0)
                        seed_param = gr.Slider(label="Inference Seed (Leave -1 for random)", minimum=-1, step=1, maximum=2**8 - 1, value=-1)
                    with gr.Row():
                        enable_scale = gr.Checkbox(label="Super-Resolution (720 × 480 -> 2880 × 1920) [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN)", value=False)
                        enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps) [RIFE](https://github.com/hzwer/ECCV2022-RIFE)", value=True)

            generate_button = gr.Button("🎬 Generate Video")

        with gr.Column():
            video_output = gr.Video(label="ConsisID Generated Video")
            with gr.Row():
                download_video_button = gr.File(label="📥 Download Video", visible=False)
                download_gif_button = gr.File(label="📥 Download GIF", visible=False)
                seed_text = gr.Number(label="Seed Used for Video Generation", visible=False)
            with gr.Accordion("Examples", open=False):
                examples_component_images = gr.Examples(
                    examples_images,
                    inputs=[image_input, prompt],
                    cache_examples=False,
                )
    generate_button.click(
        fn=plex,
        inputs=[prompt, image_input, stips, gscale, seed_param, enable_scale, enable_rife],
        outputs=[video_output, download_video_button, download_gif_button, seed_text],
    )

demo.queue(max_size=15)
demo.launch(debug=True, inline=False, show_api=False, share=False)