gartajackhats1985 committed · Commit c37b2dd · 1 Parent(s): 028694a
Upload 171 files

This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +4 -0
- ComfyUI-AdvancedLivePortrait/.github/workflows/publish.yml +24 -0
- ComfyUI-AdvancedLivePortrait/.gitignore +2 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/__pycache__/live_portrait_wrapper.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/config/__pycache__/inference_config.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/config/inference_config.py +8 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/config/models.yaml +43 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/live_portrait_wrapper.py +150 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__init__.py +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/__init__.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/appearance_feature_extractor.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/convnextv2.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/dense_motion.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/motion_extractor.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/spade_generator.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/stitching_retargeting_network.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/util.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/warping_network.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/appearance_feature_extractor.py +48 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/convnextv2.py +149 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/dense_motion.py +104 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/motion_extractor.py +35 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/spade_generator.py +59 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/stitching_retargeting_network.py +38 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/util.py +441 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/modules/warping_network.py +77 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__init__.py +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/__init__.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/camera.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/helper.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/rprint.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/camera.py +75 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/face_analysis_diy.py +78 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/helper.py +124 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/io.py +97 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/resources/mask_template.png +0 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/rprint.py +16 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/timer.py +29 -0
- ComfyUI-AdvancedLivePortrait/LivePortrait/utils/video.py +142 -0
- ComfyUI-AdvancedLivePortrait/README.md +64 -0
- ComfyUI-AdvancedLivePortrait/__init__.py +4 -0
- ComfyUI-AdvancedLivePortrait/__pycache__/__init__.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/__pycache__/nodes.cpython-312.pyc +0 -0
- ComfyUI-AdvancedLivePortrait/install.bat +20 -0
- ComfyUI-AdvancedLivePortrait/nodes.py +980 -0
- ComfyUI-AdvancedLivePortrait/pyproject.toml +15 -0
- ComfyUI-AdvancedLivePortrait/requirements.txt +10 -0
- ComfyUI-AdvancedLivePortrait/sample/driving_video.mp4 +0 -0
- ComfyUI-AdvancedLivePortrait/sample/exp_image.png +0 -0
- ComfyUI-AdvancedLivePortrait/sample/original_sample_asset/driving/d0.mp4 +3 -0
.gitattributes
CHANGED
@@ -38,3 +38,7 @@ comfyui_controlnet_aux/examples/ExecuteAll.png filter=lfs diff=lfs merge=lfs -te
 comfyui_controlnet_aux/examples/ExecuteAll1.jpg filter=lfs diff=lfs merge=lfs -text
 comfyui_controlnet_aux/examples/ExecuteAll2.jpg filter=lfs diff=lfs merge=lfs -text
 comfyui_controlnet_aux/src/custom_controlnet_aux/mesh_graphormer/hand_landmarker.task filter=lfs diff=lfs merge=lfs -text
+ComfyUI-AdvancedLivePortrait/sample/original_sample_asset/driving/d0.mp4 filter=lfs diff=lfs merge=lfs -text
+ComfyUI-AdvancedLivePortrait/sample/original_sample_asset/driving/d3.mp4 filter=lfs diff=lfs merge=lfs -text
+ComfyUI-AdvancedLivePortrait/sample/original_sample_asset/driving/d6.mp4 filter=lfs diff=lfs merge=lfs -text
+ComfyUI-AdvancedLivePortrait/sample/original_sample_asset/driving/d9.mp4 filter=lfs diff=lfs merge=lfs -text
ComfyUI-AdvancedLivePortrait/.github/workflows/publish.yml
ADDED
@@ -0,0 +1,24 @@
name: Publish to Comfy registry
on:
  workflow_dispatch:
  push:
    branches:
      - main
      - master
    paths:
      - "pyproject.toml"

jobs:
  publish-node:
    name: Publish Custom Node to registry
    runs-on: ubuntu-latest
    # if this is a forked repository. Skipping the workflow.
    if: github.event.repository.fork == false
    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Publish Custom Node
        uses: Comfy-Org/publish-node-action@main
        with:
          ## Add your own personal access token to your Github Repository secrets and reference it here.
          personal_access_token: ${{ secrets.REGISTRY_ACCESS_TOKEN }}
ComfyUI-AdvancedLivePortrait/.gitignore
ADDED
@@ -0,0 +1,2 @@
__pycache__
.idea
ComfyUI-AdvancedLivePortrait/LivePortrait/__pycache__/live_portrait_wrapper.cpython-312.pyc
ADDED
Binary file (7.99 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/config/__pycache__/inference_config.cpython-312.pyc
ADDED
Binary file (818 Bytes).
ComfyUI-AdvancedLivePortrait/LivePortrait/config/inference_config.py
ADDED
@@ -0,0 +1,8 @@
import os

current_file_path = os.path.abspath(__file__)
current_directory = os.path.dirname(current_file_path)


class InferenceConfig:
    def __init__(self):
        self.flag_use_half_precision: bool = False  # whether to use half precision
ComfyUI-AdvancedLivePortrait/LivePortrait/config/models.yaml
ADDED
@@ -0,0 +1,43 @@
model_params:
  appearance_feature_extractor_params: # the F in the paper
    image_channel: 3
    block_expansion: 64
    num_down_blocks: 2
    max_features: 512
    reshape_channel: 32
    reshape_depth: 16
    num_resblocks: 6
  motion_extractor_params: # the M in the paper
    num_kp: 21
    backbone: convnextv2_tiny
  warping_module_params: # the W in the paper
    num_kp: 21
    block_expansion: 64
    max_features: 512
    num_down_blocks: 2
    reshape_channel: 32
    estimate_occlusion_map: True
    dense_motion_params:
      block_expansion: 32
      max_features: 1024
      num_blocks: 5
      reshape_depth: 16
      compress: 4
  spade_generator_params: # the G in the paper
    upscale: 2 # represents upsample factor 256x256 -> 512x512
    block_expansion: 64
    max_features: 512
    num_down_blocks: 2
  stitching_retargeting_module_params: # the S in the paper
    stitching:
      input_size: 126 # (21*3)*2
      hidden_sizes: [128, 128, 64]
      output_size: 65 # (21*3)+2(tx,ty)
    lip:
      input_size: 65 # (21*3)+2
      hidden_sizes: [128, 128, 64]
      output_size: 63 # (21*3)
    eye:
      input_size: 66 # (21*3)+3
      hidden_sizes: [256, 256, 128, 128, 64]
      output_size: 63 # (21*3)
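The YAML above mirrors the constructor arguments of the modules added later in this commit. A minimal loader sketch, assuming the ComfyUI-AdvancedLivePortrait directory is on sys.path and PyYAML is installed (the build_modules helper itself is hypothetical, not part of the commit):

# Hypothetical loader: reads models.yaml and instantiates the LivePortrait sub-modules
# defined in the files added below. Illustrative only.
import yaml

from LivePortrait.modules.appearance_feature_extractor import AppearanceFeatureExtractor
from LivePortrait.modules.motion_extractor import MotionExtractor
from LivePortrait.modules.warping_network import WarpingNetwork
from LivePortrait.modules.spade_generator import SPADEDecoder


def build_modules(yaml_path='LivePortrait/config/models.yaml'):
    with open(yaml_path) as f:
        params = yaml.safe_load(f)['model_params']
    appearance = AppearanceFeatureExtractor(**params['appearance_feature_extractor_params'])
    motion = MotionExtractor(**params['motion_extractor_params'])
    warping = WarpingNetwork(**params['warping_module_params'])
    generator = SPADEDecoder(**params['spade_generator_params'])
    return appearance, motion, warping, generator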
ComfyUI-AdvancedLivePortrait/LivePortrait/live_portrait_wrapper.py
ADDED
@@ -0,0 +1,150 @@
import numpy as np
import torch

from .utils.helper import concat_feat
from .utils.camera import headpose_pred_to_degree, get_rotation_matrix
from .config.inference_config import InferenceConfig


class LivePortraitWrapper(object):

    def __init__(self, cfg: InferenceConfig, appearance_feature_extractor, motion_extractor,
                 warping_module, spade_generator, stitching_retargeting_module):

        self.appearance_feature_extractor = appearance_feature_extractor
        self.motion_extractor = motion_extractor
        self.warping_module = warping_module
        self.spade_generator = spade_generator
        self.stitching_retargeting_module = stitching_retargeting_module

        self.cfg = cfg

    def extract_feature_3d(self, x: torch.Tensor) -> torch.Tensor:
        """ get the appearance feature of the image by F
        x: Bx3xHxW, normalized to 0~1
        """
        with torch.no_grad():
            feature_3d = self.appearance_feature_extractor(x)

        return feature_3d.float()

    def get_kp_info(self, x: torch.Tensor, **kwargs) -> dict:
        """ get the implicit keypoint information
        x: Bx3xHxW, normalized to 0~1
        flag_refine_info: whether to transform the pose to degrees and reshape the dimensions
        return: A dict contains keys: 'pitch', 'yaw', 'roll', 't', 'exp', 'scale', 'kp'
        """
        with torch.no_grad():
            kp_info = self.motion_extractor(x)

            if self.cfg.flag_use_half_precision:
                # float the dict
                for k, v in kp_info.items():
                    if isinstance(v, torch.Tensor):
                        kp_info[k] = v.float()

        flag_refine_info: bool = kwargs.get('flag_refine_info', True)
        if flag_refine_info:
            bs = kp_info['kp'].shape[0]
            kp_info['pitch'] = headpose_pred_to_degree(kp_info['pitch'])[:, None]  # Bx1
            kp_info['yaw'] = headpose_pred_to_degree(kp_info['yaw'])[:, None]  # Bx1
            kp_info['roll'] = headpose_pred_to_degree(kp_info['roll'])[:, None]  # Bx1
            kp_info['kp'] = kp_info['kp'].reshape(bs, -1, 3)  # BxNx3
            kp_info['exp'] = kp_info['exp'].reshape(bs, -1, 3)  # BxNx3

        return kp_info

    def transform_keypoint(self, kp_info: dict):
        """
        transform the implicit keypoints with the pose, shift, and expression deformation
        kp: BxNx3
        """
        kp = kp_info['kp']  # (bs, k, 3)
        pitch, yaw, roll = kp_info['pitch'], kp_info['yaw'], kp_info['roll']

        t, exp = kp_info['t'], kp_info['exp']
        scale = kp_info['scale']

        pitch = headpose_pred_to_degree(pitch)
        yaw = headpose_pred_to_degree(yaw)
        roll = headpose_pred_to_degree(roll)

        bs = kp.shape[0]
        if kp.ndim == 2:
            num_kp = kp.shape[1] // 3  # Bx(num_kpx3)
        else:
            num_kp = kp.shape[1]  # Bxnum_kpx3

        rot_mat = get_rotation_matrix(pitch, yaw, roll)  # (bs, 3, 3)

        # Eqn.2: s * (R * x_c,s + exp) + t
        kp_transformed = kp.view(bs, num_kp, 3) @ rot_mat + exp.view(bs, num_kp, 3)
        kp_transformed *= scale[..., None]  # (bs, k, 3) * (bs, 1, 1) = (bs, k, 3)
        kp_transformed[:, :, 0:2] += t[:, None, 0:2]  # remove z, only apply tx ty

        return kp_transformed

    def stitch(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
        """
        kp_source: BxNx3
        kp_driving: BxNx3
        Return: Bx(3*num_kp+2)
        """
        feat_stiching = concat_feat(kp_source, kp_driving)

        with torch.no_grad():
            delta = self.stitching_retargeting_module['stitching'](feat_stiching)

        return delta

    def stitching(self, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
        """ conduct the stitching
        kp_source: Bxnum_kpx3
        kp_driving: Bxnum_kpx3
        """

        if self.stitching_retargeting_module is not None:

            bs, num_kp = kp_source.shape[:2]

            kp_driving_new = kp_driving.clone()
            delta = self.stitch(kp_source, kp_driving_new)

            delta_exp = delta[..., :3*num_kp].reshape(bs, num_kp, 3)  # 1x20x3
            delta_tx_ty = delta[..., 3*num_kp:3*num_kp+2].reshape(bs, 1, 2)  # 1x1x2

            kp_driving_new += delta_exp
            kp_driving_new[..., :2] += delta_tx_ty

            return kp_driving_new

        return kp_driving

    def warp_decode(self, feature_3d: torch.Tensor, kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
        """ get the image after the warping of the implicit keypoints
        feature_3d: Bx32x16x64x64, feature volume
        kp_source: BxNx3
        kp_driving: BxNx3
        """
        # The line 18 in Algorithm 1: D(W(f_s; x_s, x′_d,i))
        with torch.no_grad():
            # get decoder input
            ret_dct = self.warping_module(feature_3d, kp_source=kp_source, kp_driving=kp_driving)
            # decode
            ret_dct['out'] = self.spade_generator(feature=ret_dct['out'])

            # float the dict
            if self.cfg.flag_use_half_precision:
                for k, v in ret_dct.items():
                    if isinstance(v, torch.Tensor):
                        ret_dct[k] = v.float()

        return ret_dct

    def parse_output(self, out: torch.Tensor) -> np.ndarray:
        """ construct the output as standard
        return: 1xHxWx3, uint8
        """
        out = np.transpose(out.data.cpu().numpy(), [0, 2, 3, 1])  # 1x3xHxW -> 1xHxWx3
        out = np.clip(out, 0, 1)  # clip to 0~1
        out = np.clip(out * 255, 0, 255).astype(np.uint8)  # 0~1 -> 0~255

        return out
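Taken together, the wrapper's methods form a single reenactment pass: extract the appearance volume, predict and refine keypoints for source and driving frames, optionally stitch, then warp and decode. A hedged sketch of that call order (the helper function and the 256x256 preprocessing are assumptions; the import assumes the ComfyUI-AdvancedLivePortrait directory is on sys.path):

# Sketch of one reenactment step; only the call order mirrors the committed code.
import torch

from LivePortrait.live_portrait_wrapper import LivePortraitWrapper


def reenact_one_frame(wrapper: LivePortraitWrapper, source: torch.Tensor, driving: torch.Tensor):
    # source, driving: Bx3x256x256 tensors normalized to 0~1
    f_s = wrapper.extract_feature_3d(source)      # Bx32x16x64x64 appearance volume
    x_s_info = wrapper.get_kp_info(source)        # pitch/yaw/roll/t/exp/scale/kp
    x_d_info = wrapper.get_kp_info(driving)
    x_s = wrapper.transform_keypoint(x_s_info)    # BxNx3 source keypoints
    x_d = wrapper.transform_keypoint(x_d_info)    # BxNx3 driving keypoints
    x_d = wrapper.stitching(x_s, x_d)             # stitching correction (if module present)
    ret = wrapper.warp_decode(f_s, x_s, x_d)      # warp feature volume, SPADE decode
    return wrapper.parse_output(ret['out'])       # 1xHxWx3 uint8 image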
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__init__.py
ADDED
File without changes
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (228 Bytes).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/appearance_feature_extractor.cpython-312.pyc
ADDED
Binary file (3.03 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/convnextv2.cpython-312.pyc
ADDED
Binary file (7.9 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/dense_motion.cpython-312.pyc
ADDED
Binary file (6.6 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/motion_extractor.cpython-312.pyc
ADDED
Binary file (2.17 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/spade_generator.cpython-312.pyc
ADDED
Binary file (3.61 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/stitching_retargeting_network.cpython-312.pyc
ADDED
Binary file (2.89 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/util.cpython-312.pyc
ADDED
Binary file (24.7 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/__pycache__/warping_network.cpython-312.pyc
ADDED
Binary file (3.13 kB).
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/appearance_feature_extractor.py
ADDED
@@ -0,0 +1,48 @@
# coding: utf-8

"""
Appearance extractor(F) defined in paper, which maps the source image s to a 3D appearance feature volume.
"""

import torch
from torch import nn
from .util import SameBlock2d, DownBlock2d, ResBlock3d


class AppearanceFeatureExtractor(nn.Module):

    def __init__(self, image_channel, block_expansion, num_down_blocks, max_features, reshape_channel, reshape_depth, num_resblocks):
        super(AppearanceFeatureExtractor, self).__init__()
        self.image_channel = image_channel
        self.block_expansion = block_expansion
        self.num_down_blocks = num_down_blocks
        self.max_features = max_features
        self.reshape_channel = reshape_channel
        self.reshape_depth = reshape_depth

        self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(3, 3), padding=(1, 1))

        down_blocks = []
        for i in range(num_down_blocks):
            in_features = min(max_features, block_expansion * (2 ** i))
            out_features = min(max_features, block_expansion * (2 ** (i + 1)))
            down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
        self.down_blocks = nn.ModuleList(down_blocks)

        self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)

        self.resblocks_3d = torch.nn.Sequential()
        for i in range(num_resblocks):
            self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))

    def forward(self, source_image):
        out = self.first(source_image)  # Bx3x256x256 -> Bx64x256x256

        for i in range(len(self.down_blocks)):
            out = self.down_blocks[i](out)
        out = self.second(out)
        bs, c, h, w = out.shape  # ->Bx512x64x64

        f_s = out.view(bs, self.reshape_channel, self.reshape_depth, h, w)  # ->Bx32x16x64x64
        f_s = self.resblocks_3d(f_s)  # ->Bx32x16x64x64
        return f_s
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/convnextv2.py
ADDED
@@ -0,0 +1,149 @@
# coding: utf-8

"""
This module is adapted to the ConvNeXtV2 version for the extraction of implicit keypoints, poses, and expression deformation.
"""

import torch
import torch.nn as nn
# from timm.models.layers import trunc_normal_, DropPath
from .util import LayerNorm, DropPath, trunc_normal_, GRN

__all__ = ['convnextv2_tiny']


class Block(nn.Module):
    """ ConvNeXtV2 Block.

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
    """

    def __init__(self, dim, drop_path=0.):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)  # depthwise conv
        self.norm = LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, 4 * dim)  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.grn = GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        x = input + self.drop_path(x)
        return x


class ConvNeXtV2(nn.Module):
    """ ConvNeXt V2

    Args:
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
    """

    def __init__(
        self,
        in_chans=3,
        depths=[3, 3, 9, 3],
        dims=[96, 192, 384, 768],
        drop_path_rate=0.,
        **kwargs
    ):
        super().__init__()
        self.depths = depths
        self.downsample_layers = nn.ModuleList()  # stem and 3 intermediate downsampling conv layers
        stem = nn.Sequential(
            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
            LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
        )
        self.downsample_layers.append(stem)
        for i in range(3):
            downsample_layer = nn.Sequential(
                LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
            )
            self.downsample_layers.append(downsample_layer)

        self.stages = nn.ModuleList()  # 4 feature resolution stages, each consisting of multiple residual blocks
        dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            stage = nn.Sequential(
                *[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
            )
            self.stages.append(stage)
            cur += depths[i]

        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)  # final norm layer

        # NOTE: the output semantic items
        num_bins = kwargs.get('num_bins', 66)
        num_kp = kwargs.get('num_kp', 24)  # the number of implicit keypoints
        self.fc_kp = nn.Linear(dims[-1], 3 * num_kp)  # implicit keypoints

        # print('dims[-1]: ', dims[-1])
        self.fc_scale = nn.Linear(dims[-1], 1)  # scale
        self.fc_pitch = nn.Linear(dims[-1], num_bins)  # pitch bins
        self.fc_yaw = nn.Linear(dims[-1], num_bins)  # yaw bins
        self.fc_roll = nn.Linear(dims[-1], num_bins)  # roll bins
        self.fc_t = nn.Linear(dims[-1], 3)  # translation
        self.fc_exp = nn.Linear(dims[-1], 3 * num_kp)  # expression / delta

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        for i in range(4):
            x = self.downsample_layers[i](x)
            x = self.stages[i](x)
        return self.norm(x.mean([-2, -1]))  # global average pooling, (N, C, H, W) -> (N, C)

    def forward(self, x):
        x = self.forward_features(x)

        # implicit keypoints
        kp = self.fc_kp(x)

        # pose and expression deformation
        pitch = self.fc_pitch(x)
        yaw = self.fc_yaw(x)
        roll = self.fc_roll(x)
        t = self.fc_t(x)
        exp = self.fc_exp(x)
        scale = self.fc_scale(x)

        ret_dct = {
            'pitch': pitch,
            'yaw': yaw,
            'roll': roll,
            't': t,
            'exp': exp,
            'scale': scale,

            'kp': kp,  # canonical keypoint
        }

        return ret_dct


def convnextv2_tiny(**kwargs):
    model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
    return model
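With the num_kp: 21 setting from models.yaml and the default num_bins=66, the backbone's forward pass returns flat vectors that the wrapper later reshapes into Bx21x3 keypoints. A quick, hedged shape check (illustrative only; the import path is an assumption):

# Shape check for the ConvNeXtV2 motion backbone with the settings used in models.yaml.
import torch

from LivePortrait.modules.convnextv2 import convnextv2_tiny

backbone = convnextv2_tiny(num_kp=21, num_bins=66)
x = torch.randn(2, 3, 256, 256)            # Bx3x256x256 input crop
out = backbone(x)
print(out['kp'].shape)                     # torch.Size([2, 63]) -> reshaped to Bx21x3 downstream
print(out['exp'].shape)                    # torch.Size([2, 63])
print(out['pitch'].shape)                  # torch.Size([2, 66]) pose bins before degree conversion
print(out['t'].shape, out['scale'].shape)  # torch.Size([2, 3]) torch.Size([2, 1])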
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/dense_motion.py
ADDED
@@ -0,0 +1,104 @@
# coding: utf-8

"""
The module that predicts dense motion from the sparse motion representation given by kp_source and kp_driving
"""

from torch import nn
import torch.nn.functional as F
import torch
from .util import Hourglass, make_coordinate_grid, kp2gaussian


class DenseMotionNetwork(nn.Module):
    def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress, estimate_occlusion_map=True):
        super(DenseMotionNetwork, self).__init__()
        self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)  # ~60+G

        self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)  # 65G! NOTE: computation cost is large
        self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)  # 0.8G
        self.norm = nn.BatchNorm3d(compress, affine=True)
        self.num_kp = num_kp
        self.flag_estimate_occlusion_map = estimate_occlusion_map

        if self.flag_estimate_occlusion_map:
            self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
        else:
            self.occlusion = None

    def create_sparse_motions(self, feature, kp_driving, kp_source):
        bs, _, d, h, w = feature.shape  # (bs, 4, 16, 64, 64)
        identity_grid = make_coordinate_grid((d, h, w), ref=kp_source)  # (16, 64, 64, 3)
        identity_grid = identity_grid.view(1, 1, d, h, w, 3)  # (1, 1, d=16, h=64, w=64, 3)
        coordinate_grid = identity_grid - kp_driving.view(bs, self.num_kp, 1, 1, 1, 3)

        k = coordinate_grid.shape[1]

        # NOTE: there lacks an one-order flow
        driving_to_source = coordinate_grid + kp_source.view(bs, self.num_kp, 1, 1, 1, 3)  # (bs, num_kp, d, h, w, 3)

        # adding background feature
        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)  # (bs, 1+num_kp, d, h, w, 3)
        return sparse_motions

    def create_deformed_feature(self, feature, sparse_motions):
        bs, _, d, h, w = feature.shape
        feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1)  # (bs, num_kp+1, 1, c, d, h, w)
        feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w)  # (bs*(num_kp+1), c, d, h, w)
        sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1))  # (bs*(num_kp+1), d, h, w, 3)
        sparse_deformed = F.grid_sample(feature_repeat, sparse_motions, align_corners=False)
        sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w))  # (bs, num_kp+1, c, d, h, w)

        return sparse_deformed

    def create_heatmap_representations(self, feature, kp_driving, kp_source):
        spatial_size = feature.shape[3:]  # (d=16, h=64, w=64)
        gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
        gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)  # (bs, num_kp, d, h, w)
        heatmap = gaussian_driving - gaussian_source  # (bs, num_kp, d, h, w)

        # adding background feature
        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.dtype).to(heatmap.device)
        heatmap = torch.cat([zeros, heatmap], dim=1)
        heatmap = heatmap.unsqueeze(2)  # (bs, 1+num_kp, 1, d, h, w)
        return heatmap

    def forward(self, feature, kp_driving, kp_source):
        bs, _, d, h, w = feature.shape  # (bs, 32, 16, 64, 64)

        feature = self.compress(feature)  # (bs, 4, 16, 64, 64)
        feature = self.norm(feature)  # (bs, 4, 16, 64, 64)
        feature = F.relu(feature)  # (bs, 4, 16, 64, 64)

        out_dict = dict()

        # 1. deform 3d feature
        sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)  # (bs, 1+num_kp, d, h, w, 3)
        deformed_feature = self.create_deformed_feature(feature, sparse_motion)  # (bs, 1+num_kp, c=4, d=16, h=64, w=64)

        # 2. (bs, 1+num_kp, d, h, w)
        heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)  # (bs, 1+num_kp, 1, d, h, w)

        input = torch.cat([heatmap, deformed_feature], dim=2)  # (bs, 1+num_kp, c=5, d=16, h=64, w=64)
        input = input.view(bs, -1, d, h, w)  # (bs, (1+num_kp)*c=105, d=16, h=64, w=64)

        prediction = self.hourglass(input)

        mask = self.mask(prediction)
        mask = F.softmax(mask, dim=1)  # (bs, 1+num_kp, d=16, h=64, w=64)
        out_dict['mask'] = mask
        mask = mask.unsqueeze(2)  # (bs, num_kp+1, 1, d, h, w)
        sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4)  # (bs, num_kp+1, 3, d, h, w)
        deformation = (sparse_motion * mask).sum(dim=1)  # (bs, 3, d, h, w)  mask take effect in this place
        deformation = deformation.permute(0, 2, 3, 4, 1)  # (bs, d, h, w, 3)

        out_dict['deformation'] = deformation

        if self.flag_estimate_occlusion_map:
            bs, _, d, h, w = prediction.shape
            prediction_reshape = prediction.view(bs, -1, h, w)
            occlusion_map = torch.sigmoid(self.occlusion(prediction_reshape))  # Bx1x64x64
            out_dict['occlusion_map'] = occlusion_map

        return out_dict
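The network above turns per-keypoint sparse motions into one dense deformation grid plus an optional occlusion map, which the warping network consumes. A hedged smoke test using the dense_motion_params values from models.yaml (illustrative only; the import path is an assumption):

# Illustrative smoke test for DenseMotionNetwork; parameter values come from models.yaml.
import torch

from LivePortrait.modules.dense_motion import DenseMotionNetwork

net = DenseMotionNetwork(block_expansion=32, num_blocks=5, max_features=1024,
                         num_kp=21, feature_channel=32, reshape_depth=16,
                         compress=4, estimate_occlusion_map=True)
feature = torch.randn(1, 32, 16, 64, 64)   # appearance feature volume f_s
kp_src = torch.rand(1, 21, 3) * 2 - 1      # keypoints normalized to [-1, 1]
kp_drv = torch.rand(1, 21, 3) * 2 - 1
out = net(feature, kp_driving=kp_drv, kp_source=kp_src)
print(out['deformation'].shape)            # torch.Size([1, 16, 64, 64, 3])
print(out['occlusion_map'].shape)          # torch.Size([1, 1, 64, 64])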
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/motion_extractor.py
ADDED
@@ -0,0 +1,35 @@
# coding: utf-8

"""
Motion extractor(M), which directly predicts the canonical keypoints, head pose and expression deformation of the input image
"""

from torch import nn
import torch

from .convnextv2 import convnextv2_tiny
from .util import filter_state_dict

model_dict = {
    'convnextv2_tiny': convnextv2_tiny,
}


class MotionExtractor(nn.Module):
    def __init__(self, **kwargs):
        super(MotionExtractor, self).__init__()

        # default is convnextv2_base
        backbone = kwargs.get('backbone', 'convnextv2_tiny')
        self.detector = model_dict.get(backbone)(**kwargs)

    def load_pretrained(self, init_path: str):
        if init_path not in (None, ''):
            state_dict = torch.load(init_path, map_location=lambda storage, loc: storage)['model']
            state_dict = filter_state_dict(state_dict, remove_name='head')
            ret = self.detector.load_state_dict(state_dict, strict=False)
            print(f'Load pretrained model from {init_path}, ret: {ret}')

    def forward(self, x):
        out = self.detector(x)
        return out
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/spade_generator.py
ADDED
@@ -0,0 +1,59 @@
# coding: utf-8

"""
Spade decoder(G) defined in the paper, which input the warped feature to generate the animated image.
"""

import torch
from torch import nn
import torch.nn.functional as F
from .util import SPADEResnetBlock


class SPADEDecoder(nn.Module):
    def __init__(self, upscale=1, max_features=256, block_expansion=64, out_channels=64, num_down_blocks=2):
        for i in range(num_down_blocks):
            input_channels = min(max_features, block_expansion * (2 ** (i + 1)))
        self.upscale = upscale
        super().__init__()
        norm_G = 'spadespectralinstance'
        label_num_channels = input_channels  # 256

        self.fc = nn.Conv2d(input_channels, 2 * input_channels, 3, padding=1)
        self.G_middle_0 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_1 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_2 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_3 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_4 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.G_middle_5 = SPADEResnetBlock(2 * input_channels, 2 * input_channels, norm_G, label_num_channels)
        self.up_0 = SPADEResnetBlock(2 * input_channels, input_channels, norm_G, label_num_channels)
        self.up_1 = SPADEResnetBlock(input_channels, out_channels, norm_G, label_num_channels)
        self.up = nn.Upsample(scale_factor=2)

        if self.upscale is None or self.upscale <= 1:
            self.conv_img = nn.Conv2d(out_channels, 3, 3, padding=1)
        else:
            self.conv_img = nn.Sequential(
                nn.Conv2d(out_channels, 3 * (2 * 2), kernel_size=3, padding=1),
                nn.PixelShuffle(upscale_factor=2)
            )

    def forward(self, feature):
        seg = feature  # Bx256x64x64
        x = self.fc(feature)  # Bx512x64x64
        x = self.G_middle_0(x, seg)
        x = self.G_middle_1(x, seg)
        x = self.G_middle_2(x, seg)
        x = self.G_middle_3(x, seg)
        x = self.G_middle_4(x, seg)
        x = self.G_middle_5(x, seg)

        x = self.up(x)  # Bx512x64x64 -> Bx512x128x128
        x = self.up_0(x, seg)  # Bx512x128x128 -> Bx256x128x128
        x = self.up(x)  # Bx256x128x128 -> Bx256x256x256
        x = self.up_1(x, seg)  # Bx256x256x256 -> Bx64x256x256

        x = self.conv_img(F.leaky_relu(x, 2e-1))  # Bx64x256x256 -> Bx3xHxW
        x = torch.sigmoid(x)  # Bx3xHxW

        return x
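With the upscale: 2 setting from models.yaml, the decoder turns a 256-channel 64x64 warped feature map into a 512x512 image through the PixelShuffle branch. A hedged sketch (illustrative only; the import path is an assumption):

# Feed a dummy warped feature through SPADEDecoder and check the upscaled output size.
import torch

from LivePortrait.modules.spade_generator import SPADEDecoder

decoder = SPADEDecoder(upscale=2, max_features=512, block_expansion=64,
                       out_channels=64, num_down_blocks=2)
warped = torch.randn(1, 256, 64, 64)   # the 'out' tensor produced by the warping module
img = decoder(feature=warped)
print(img.shape)                       # torch.Size([1, 3, 512, 512]), values in 0~1 after sigmoid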
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/stitching_retargeting_network.py
ADDED
@@ -0,0 +1,38 @@
# coding: utf-8

"""
Stitching module(S) and two retargeting modules(R) defined in the paper.

- The stitching module pastes the animated portrait back into the original image space without pixel misalignment, such as in
the stitching region.

- The eyes retargeting module is designed to address the issue of incomplete eye closure during cross-id reenactment, especially
when a person with small eyes drives a person with larger eyes.

- The lip retargeting module is designed similarly to the eye retargeting module, and can also normalize the input by ensuring that
the lips are in a closed state, which facilitates better animation driving.
"""
from torch import nn


class StitchingRetargetingNetwork(nn.Module):
    def __init__(self, input_size, hidden_sizes, output_size):
        super(StitchingRetargetingNetwork, self).__init__()
        layers = []
        for i in range(len(hidden_sizes)):
            if i == 0:
                layers.append(nn.Linear(input_size, hidden_sizes[i]))
            else:
                layers.append(nn.Linear(hidden_sizes[i - 1], hidden_sizes[i]))
            layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Linear(hidden_sizes[-1], output_size))
        self.mlp = nn.Sequential(*layers)

    def initialize_weights_to_zero(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.zeros_(m.weight)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        return self.mlp(x)
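The stitching entry in models.yaml (input_size 126, output_size 65) matches this MLP: two sets of 21 keypoints concatenate to 126 values, and the 65 outputs split into 63 per-keypoint offsets plus a tx/ty shift, exactly what LivePortraitWrapper.stitching slices apart. A hedged sketch (the flatten-and-concatenate step stands in for utils.helper.concat_feat, which is not shown in this 50-file view):

# Illustrative check of the stitching head dimensions.
import torch

from LivePortrait.modules.stitching_retargeting_network import StitchingRetargetingNetwork

stitcher = StitchingRetargetingNetwork(input_size=126, hidden_sizes=[128, 128, 64], output_size=65)
kp_source = torch.rand(1, 21, 3)
kp_driving = torch.rand(1, 21, 3)
feat = torch.cat([kp_source.flatten(1), kp_driving.flatten(1)], dim=1)  # Bx126
delta = stitcher(feat)                                                  # Bx65
delta_exp = delta[..., :63].reshape(1, 21, 3)   # per-keypoint expression offsets
delta_tx_ty = delta[..., 63:65]                 # global x/y shift
print(delta_exp.shape, delta_tx_ty.shape)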
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/util.py
ADDED
@@ -0,0 +1,441 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
This file defines various neural network modules and utility functions, including convolutional and residual blocks,
|
5 |
+
normalizations, and functions for spatial transformation and tensor manipulation.
|
6 |
+
"""
|
7 |
+
|
8 |
+
from torch import nn
|
9 |
+
import torch.nn.functional as F
|
10 |
+
import torch
|
11 |
+
import torch.nn.utils.spectral_norm as spectral_norm
|
12 |
+
import math
|
13 |
+
import warnings
|
14 |
+
|
15 |
+
|
16 |
+
def kp2gaussian(kp, spatial_size, kp_variance):
|
17 |
+
"""
|
18 |
+
Transform a keypoint into gaussian like representation
|
19 |
+
"""
|
20 |
+
mean = kp
|
21 |
+
|
22 |
+
coordinate_grid = make_coordinate_grid(spatial_size, mean)
|
23 |
+
number_of_leading_dimensions = len(mean.shape) - 1
|
24 |
+
shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
|
25 |
+
coordinate_grid = coordinate_grid.view(*shape)
|
26 |
+
repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 1)
|
27 |
+
coordinate_grid = coordinate_grid.repeat(*repeats)
|
28 |
+
|
29 |
+
# Preprocess kp shape
|
30 |
+
shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 1, 3)
|
31 |
+
mean = mean.view(*shape)
|
32 |
+
|
33 |
+
mean_sub = (coordinate_grid - mean)
|
34 |
+
|
35 |
+
out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
|
36 |
+
|
37 |
+
return out
|
38 |
+
|
39 |
+
|
40 |
+
def make_coordinate_grid(spatial_size, ref, **kwargs):
|
41 |
+
d, h, w = spatial_size
|
42 |
+
x = torch.arange(w).type(ref.dtype).to(ref.device)
|
43 |
+
y = torch.arange(h).type(ref.dtype).to(ref.device)
|
44 |
+
z = torch.arange(d).type(ref.dtype).to(ref.device)
|
45 |
+
|
46 |
+
# NOTE: must be right-down-in
|
47 |
+
x = (2 * (x / (w - 1)) - 1) # the x axis faces to the right
|
48 |
+
y = (2 * (y / (h - 1)) - 1) # the y axis faces to the bottom
|
49 |
+
z = (2 * (z / (d - 1)) - 1) # the z axis faces to the inner
|
50 |
+
|
51 |
+
yy = y.view(1, -1, 1).repeat(d, 1, w)
|
52 |
+
xx = x.view(1, 1, -1).repeat(d, h, 1)
|
53 |
+
zz = z.view(-1, 1, 1).repeat(1, h, w)
|
54 |
+
|
55 |
+
meshed = torch.cat([xx.unsqueeze_(3), yy.unsqueeze_(3), zz.unsqueeze_(3)], 3)
|
56 |
+
|
57 |
+
return meshed
|
58 |
+
|
59 |
+
|
60 |
+
class ConvT2d(nn.Module):
|
61 |
+
"""
|
62 |
+
Upsampling block for use in decoder.
|
63 |
+
"""
|
64 |
+
|
65 |
+
def __init__(self, in_features, out_features, kernel_size=3, stride=2, padding=1, output_padding=1):
|
66 |
+
super(ConvT2d, self).__init__()
|
67 |
+
|
68 |
+
self.convT = nn.ConvTranspose2d(in_features, out_features, kernel_size=kernel_size, stride=stride,
|
69 |
+
padding=padding, output_padding=output_padding)
|
70 |
+
self.norm = nn.InstanceNorm2d(out_features)
|
71 |
+
|
72 |
+
def forward(self, x):
|
73 |
+
out = self.convT(x)
|
74 |
+
out = self.norm(out)
|
75 |
+
out = F.leaky_relu(out)
|
76 |
+
return out
|
77 |
+
|
78 |
+
|
79 |
+
class ResBlock3d(nn.Module):
|
80 |
+
"""
|
81 |
+
Res block, preserve spatial resolution.
|
82 |
+
"""
|
83 |
+
|
84 |
+
def __init__(self, in_features, kernel_size, padding):
|
85 |
+
super(ResBlock3d, self).__init__()
|
86 |
+
self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
|
87 |
+
self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size, padding=padding)
|
88 |
+
self.norm1 = nn.BatchNorm3d(in_features, affine=True)
|
89 |
+
self.norm2 = nn.BatchNorm3d(in_features, affine=True)
|
90 |
+
|
91 |
+
def forward(self, x):
|
92 |
+
out = self.norm1(x)
|
93 |
+
out = F.relu(out)
|
94 |
+
out = self.conv1(out)
|
95 |
+
out = self.norm2(out)
|
96 |
+
out = F.relu(out)
|
97 |
+
out = self.conv2(out)
|
98 |
+
out += x
|
99 |
+
return out
|
100 |
+
|
101 |
+
|
102 |
+
class UpBlock3d(nn.Module):
|
103 |
+
"""
|
104 |
+
Upsampling block for use in decoder.
|
105 |
+
"""
|
106 |
+
|
107 |
+
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
|
108 |
+
super(UpBlock3d, self).__init__()
|
109 |
+
|
110 |
+
self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
|
111 |
+
padding=padding, groups=groups)
|
112 |
+
self.norm = nn.BatchNorm3d(out_features, affine=True)
|
113 |
+
|
114 |
+
def forward(self, x):
|
115 |
+
out = F.interpolate(x, scale_factor=(1, 2, 2))
|
116 |
+
out = self.conv(out)
|
117 |
+
out = self.norm(out)
|
118 |
+
out = F.relu(out)
|
119 |
+
return out
|
120 |
+
|
121 |
+
|
122 |
+
class DownBlock2d(nn.Module):
|
123 |
+
"""
|
124 |
+
Downsampling block for use in encoder.
|
125 |
+
"""
|
126 |
+
|
127 |
+
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
|
128 |
+
super(DownBlock2d, self).__init__()
|
129 |
+
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
|
130 |
+
self.norm = nn.BatchNorm2d(out_features, affine=True)
|
131 |
+
self.pool = nn.AvgPool2d(kernel_size=(2, 2))
|
132 |
+
|
133 |
+
def forward(self, x):
|
134 |
+
out = self.conv(x)
|
135 |
+
out = self.norm(out)
|
136 |
+
out = F.relu(out)
|
137 |
+
out = self.pool(out)
|
138 |
+
return out
|
139 |
+
|
140 |
+
|
141 |
+
class DownBlock3d(nn.Module):
|
142 |
+
"""
|
143 |
+
Downsampling block for use in encoder.
|
144 |
+
"""
|
145 |
+
|
146 |
+
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
|
147 |
+
super(DownBlock3d, self).__init__()
|
148 |
+
'''
|
149 |
+
self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
|
150 |
+
padding=padding, groups=groups, stride=(1, 2, 2))
|
151 |
+
'''
|
152 |
+
self.conv = nn.Conv3d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
|
153 |
+
padding=padding, groups=groups)
|
154 |
+
self.norm = nn.BatchNorm3d(out_features, affine=True)
|
155 |
+
self.pool = nn.AvgPool3d(kernel_size=(1, 2, 2))
|
156 |
+
|
157 |
+
def forward(self, x):
|
158 |
+
out = self.conv(x)
|
159 |
+
out = self.norm(out)
|
160 |
+
out = F.relu(out)
|
161 |
+
out = self.pool(out)
|
162 |
+
return out
|
163 |
+
|
164 |
+
|
165 |
+
class SameBlock2d(nn.Module):
|
166 |
+
"""
|
167 |
+
Simple block, preserve spatial resolution.
|
168 |
+
"""
|
169 |
+
|
170 |
+
def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):
|
171 |
+
super(SameBlock2d, self).__init__()
|
172 |
+
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, padding=padding, groups=groups)
|
173 |
+
self.norm = nn.BatchNorm2d(out_features, affine=True)
|
174 |
+
if lrelu:
|
175 |
+
self.ac = nn.LeakyReLU()
|
176 |
+
else:
|
177 |
+
self.ac = nn.ReLU()
|
178 |
+
|
179 |
+
def forward(self, x):
|
180 |
+
out = self.conv(x)
|
181 |
+
out = self.norm(out)
|
182 |
+
out = self.ac(out)
|
183 |
+
return out
|
184 |
+
|
185 |
+
|
186 |
+
class Encoder(nn.Module):
|
187 |
+
"""
|
188 |
+
Hourglass Encoder
|
189 |
+
"""
|
190 |
+
|
191 |
+
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
|
192 |
+
super(Encoder, self).__init__()
|
193 |
+
|
194 |
+
down_blocks = []
|
195 |
+
for i in range(num_blocks):
|
196 |
+
down_blocks.append(DownBlock3d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)), min(max_features, block_expansion * (2 ** (i + 1))), kernel_size=3, padding=1))
|
197 |
+
self.down_blocks = nn.ModuleList(down_blocks)
|
198 |
+
|
199 |
+
def forward(self, x):
|
200 |
+
outs = [x]
|
201 |
+
for down_block in self.down_blocks:
|
202 |
+
outs.append(down_block(outs[-1]))
|
203 |
+
return outs
|
204 |
+
|
205 |
+
|
206 |
+
class Decoder(nn.Module):
|
207 |
+
"""
|
208 |
+
Hourglass Decoder
|
209 |
+
"""
|
210 |
+
|
211 |
+
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
|
212 |
+
super(Decoder, self).__init__()
|
213 |
+
|
214 |
+
up_blocks = []
|
215 |
+
|
216 |
+
for i in range(num_blocks)[::-1]:
|
217 |
+
in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
|
218 |
+
out_filters = min(max_features, block_expansion * (2 ** i))
|
219 |
+
up_blocks.append(UpBlock3d(in_filters, out_filters, kernel_size=3, padding=1))
|
220 |
+
|
221 |
+
self.up_blocks = nn.ModuleList(up_blocks)
|
222 |
+
self.out_filters = block_expansion + in_features
|
223 |
+
|
224 |
+
self.conv = nn.Conv3d(in_channels=self.out_filters, out_channels=self.out_filters, kernel_size=3, padding=1)
|
225 |
+
self.norm = nn.BatchNorm3d(self.out_filters, affine=True)
|
226 |
+
|
227 |
+
def forward(self, x):
|
228 |
+
out = x.pop()
|
229 |
+
for up_block in self.up_blocks:
|
230 |
+
out = up_block(out)
|
231 |
+
skip = x.pop()
|
232 |
+
out = torch.cat([out, skip], dim=1)
|
233 |
+
out = self.conv(out)
|
234 |
+
out = self.norm(out)
|
235 |
+
out = F.relu(out)
|
236 |
+
return out
|
237 |
+
|
238 |
+
|
239 |
+
class Hourglass(nn.Module):
|
240 |
+
"""
|
241 |
+
Hourglass architecture.
|
242 |
+
"""
|
243 |
+
|
244 |
+
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
|
245 |
+
super(Hourglass, self).__init__()
|
246 |
+
self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
|
247 |
+
self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
|
248 |
+
self.out_filters = self.decoder.out_filters
|
249 |
+
|
250 |
+
def forward(self, x):
|
251 |
+
return self.decoder(self.encoder(x))
|
252 |
+
|
253 |
+
|
254 |
+
class SPADE(nn.Module):
|
255 |
+
def __init__(self, norm_nc, label_nc):
|
256 |
+
super().__init__()
|
257 |
+
|
258 |
+
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
|
259 |
+
nhidden = 128
|
260 |
+
|
261 |
+
self.mlp_shared = nn.Sequential(
|
262 |
+
nn.Conv2d(label_nc, nhidden, kernel_size=3, padding=1),
|
263 |
+
nn.ReLU())
|
264 |
+
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
|
265 |
+
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=3, padding=1)
|
266 |
+
|
267 |
+
def forward(self, x, segmap):
|
268 |
+
normalized = self.param_free_norm(x)
|
269 |
+
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
|
270 |
+
actv = self.mlp_shared(segmap)
|
271 |
+
gamma = self.mlp_gamma(actv)
|
272 |
+
beta = self.mlp_beta(actv)
|
273 |
+
out = normalized * (1 + gamma) + beta
|
274 |
+
return out
|
275 |
+
|
276 |
+
|
277 |
+
class SPADEResnetBlock(nn.Module):
|
278 |
+
def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):
|
279 |
+
super().__init__()
|
280 |
+
# Attributes
|
281 |
+
self.learned_shortcut = (fin != fout)
|
282 |
+
fmiddle = min(fin, fout)
|
283 |
+
self.use_se = use_se
|
284 |
+
# create conv layers
|
285 |
+
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)
|
286 |
+
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)
|
287 |
+
if self.learned_shortcut:
|
288 |
+
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
|
289 |
+
# apply spectral norm if specified
|
290 |
+
if 'spectral' in norm_G:
|
291 |
+
self.conv_0 = spectral_norm(self.conv_0)
|
292 |
+
self.conv_1 = spectral_norm(self.conv_1)
|
293 |
+
if self.learned_shortcut:
|
294 |
+
self.conv_s = spectral_norm(self.conv_s)
|
295 |
+
# define normalization layers
|
296 |
+
self.norm_0 = SPADE(fin, label_nc)
|
297 |
+
self.norm_1 = SPADE(fmiddle, label_nc)
|
298 |
+
if self.learned_shortcut:
|
299 |
+
self.norm_s = SPADE(fin, label_nc)
|
300 |
+
|
301 |
+
def forward(self, x, seg1):
|
302 |
+
x_s = self.shortcut(x, seg1)
|
303 |
+
dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))
|
304 |
+
dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))
|
305 |
+
out = x_s + dx
|
306 |
+
return out
|
307 |
+
|
308 |
+
def shortcut(self, x, seg1):
|
309 |
+
if self.learned_shortcut:
|
310 |
+
x_s = self.conv_s(self.norm_s(x, seg1))
|
311 |
+
else:
|
312 |
+
x_s = x
|
313 |
+
return x_s
|
314 |
+
|
315 |
+
def actvn(self, x):
|
316 |
+
return F.leaky_relu(x, 2e-1)
|
317 |
+
|
318 |
+
|
319 |
+
def filter_state_dict(state_dict, remove_name='fc'):
|
320 |
+
new_state_dict = {}
|
321 |
+
for key in state_dict:
|
322 |
+
if remove_name in key:
|
323 |
+
continue
|
324 |
+
new_state_dict[key] = state_dict[key]
|
325 |
+
return new_state_dict
|
326 |
+
|
327 |
+
|
328 |
+
class GRN(nn.Module):
|
329 |
+
""" GRN (Global Response Normalization) layer
|
330 |
+
"""
|
331 |
+
|
332 |
+
def __init__(self, dim):
|
333 |
+
super().__init__()
|
334 |
+
self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
|
335 |
+
self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
|
336 |
+
|
337 |
+
def forward(self, x):
|
338 |
+
Gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
|
339 |
+
Nx = Gx / (Gx.mean(dim=-1, keepdim=True) + 1e-6)
|
340 |
+
return self.gamma * (x * Nx) + self.beta + x
|
341 |
+
|
342 |
+
|
343 |
+
class LayerNorm(nn.Module):
|
344 |
+
r""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
|
345 |
+
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
|
346 |
+
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
|
347 |
+
with shape (batch_size, channels, height, width).
|
348 |
+
"""
|
349 |
+
|
350 |
+
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
|
351 |
+
super().__init__()
|
352 |
+
self.weight = nn.Parameter(torch.ones(normalized_shape))
|
353 |
+
self.bias = nn.Parameter(torch.zeros(normalized_shape))
|
354 |
+
self.eps = eps
|
355 |
+
self.data_format = data_format
|
356 |
+
if self.data_format not in ["channels_last", "channels_first"]:
|
357 |
+
raise NotImplementedError
|
358 |
+
self.normalized_shape = (normalized_shape, )
|
359 |
+
|
360 |
+
def forward(self, x):
|
361 |
+
if self.data_format == "channels_last":
|
362 |
+
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
|
363 |
+
elif self.data_format == "channels_first":
|
364 |
+
u = x.mean(1, keepdim=True)
|
365 |
+
s = (x - u).pow(2).mean(1, keepdim=True)
|
366 |
+
x = (x - u) / torch.sqrt(s + self.eps)
|
367 |
+
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
368 |
+
return x
|
369 |
+
|
370 |
+
|
371 |
+
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
|
372 |
+
# Cut & paste from PyTorch official master until it's in a few official releases - RW
|
373 |
+
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
374 |
+
def norm_cdf(x):
|
375 |
+
# Computes standard normal cumulative distribution function
|
376 |
+
return (1. + math.erf(x / math.sqrt(2.))) / 2.
|
377 |
+
|
378 |
+
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
379 |
+
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
|
380 |
+
"The distribution of values may be incorrect.",
|
381 |
+
stacklevel=2)
|
382 |
+
|
383 |
+
with torch.no_grad():
|
384 |
+
# Values are generated by using a truncated uniform distribution and
|
385 |
+
# then using the inverse CDF for the normal distribution.
|
386 |
+
# Get upper and lower cdf values
|
387 |
+
l = norm_cdf((a - mean) / std)
|
388 |
+
u = norm_cdf((b - mean) / std)
|
389 |
+
|
390 |
+
# Uniformly fill tensor with values from [l, u], then translate to
|
391 |
+
# [2l-1, 2u-1].
|
392 |
+
tensor.uniform_(2 * l - 1, 2 * u - 1)
|
393 |
+
|
394 |
+
# Use inverse cdf transform for normal distribution to get truncated
|
395 |
+
# standard normal
|
396 |
+
tensor.erfinv_()
|
397 |
+
|
398 |
+
# Transform to proper mean, std
|
399 |
+
tensor.mul_(std * math.sqrt(2.))
|
400 |
+
tensor.add_(mean)
|
401 |
+
|
402 |
+
# Clamp to ensure it's in the proper range
|
403 |
+
tensor.clamp_(min=a, max=b)
|
404 |
+
return tensor
|
405 |
+
|
406 |
+
|
407 |
+
def drop_path(x, drop_prob=0., training=False, scale_by_keep=True):
|
408 |
+
""" Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
409 |
+
|
410 |
+
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
|
411 |
+
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
412 |
+
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
|
413 |
+
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
|
414 |
+
'survival rate' as the argument.
|
415 |
+
|
416 |
+
"""
|
417 |
+
if drop_prob == 0. or not training:
|
418 |
+
return x
|
419 |
+
keep_prob = 1 - drop_prob
|
420 |
+
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
|
421 |
+
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
|
422 |
+
if keep_prob > 0.0 and scale_by_keep:
|
423 |
+
random_tensor.div_(keep_prob)
|
424 |
+
return x * random_tensor
|
425 |
+
|
426 |
+
|
427 |
+
class DropPath(nn.Module):
|
428 |
+
""" Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
429 |
+
"""
|
430 |
+
|
431 |
+
def __init__(self, drop_prob=None, scale_by_keep=True):
|
432 |
+
super(DropPath, self).__init__()
|
433 |
+
self.drop_prob = drop_prob
|
434 |
+
self.scale_by_keep = scale_by_keep
|
435 |
+
|
436 |
+
def forward(self, x):
|
437 |
+
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
|
438 |
+
|
439 |
+
|
440 |
+
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
|
441 |
+
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
|
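A minimal usage sketch (not part of the uploaded file) showing how the DropPath and trunc_normal_ helpers above are typically wired into a residual block; the ResidualMLP class below is hypothetical and exists only to illustrate the calls.

import torch
from torch import nn

class ResidualMLP(nn.Module):
    # hypothetical block: residual branch with stochastic depth and truncated-normal init
    def __init__(self, dim, drop_prob=0.1):
        super().__init__()
        self.fc = nn.Linear(dim, dim)
        trunc_normal_(self.fc.weight, std=0.02)   # helper defined above
        self.drop_path = DropPath(drop_prob)      # helper defined above

    def forward(self, x):
        # the branch is randomly skipped per sample while training
        return x + self.drop_path(self.fc(x))

x = torch.randn(4, 64)
print(ResidualMLP(64)(x).shape)  # torch.Size([4, 64])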
ComfyUI-AdvancedLivePortrait/LivePortrait/modules/warping_network.py
ADDED
@@ -0,0 +1,77 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
Warping field estimator(W) defined in the paper, which generates a warping field using the implicit
|
5 |
+
keypoint representations x_s and x_d, and employs this flow field to warp the source feature volume f_s.
|
6 |
+
"""
|
7 |
+
|
8 |
+
from torch import nn
|
9 |
+
import torch.nn.functional as F
|
10 |
+
from .util import SameBlock2d
|
11 |
+
from .dense_motion import DenseMotionNetwork
|
12 |
+
|
13 |
+
|
14 |
+
class WarpingNetwork(nn.Module):
|
15 |
+
def __init__(
|
16 |
+
self,
|
17 |
+
num_kp,
|
18 |
+
block_expansion,
|
19 |
+
max_features,
|
20 |
+
num_down_blocks,
|
21 |
+
reshape_channel,
|
22 |
+
estimate_occlusion_map=False,
|
23 |
+
dense_motion_params=None,
|
24 |
+
**kwargs
|
25 |
+
):
|
26 |
+
super(WarpingNetwork, self).__init__()
|
27 |
+
|
28 |
+
self.upscale = kwargs.get('upscale', 1)
|
29 |
+
self.flag_use_occlusion_map = kwargs.get('flag_use_occlusion_map', True)
|
30 |
+
|
31 |
+
if dense_motion_params is not None:
|
32 |
+
self.dense_motion_network = DenseMotionNetwork(
|
33 |
+
num_kp=num_kp,
|
34 |
+
feature_channel=reshape_channel,
|
35 |
+
estimate_occlusion_map=estimate_occlusion_map,
|
36 |
+
**dense_motion_params
|
37 |
+
)
|
38 |
+
else:
|
39 |
+
self.dense_motion_network = None
|
40 |
+
|
41 |
+
self.third = SameBlock2d(max_features, block_expansion * (2 ** num_down_blocks), kernel_size=(3, 3), padding=(1, 1), lrelu=True)
|
42 |
+
self.fourth = nn.Conv2d(in_channels=block_expansion * (2 ** num_down_blocks), out_channels=block_expansion * (2 ** num_down_blocks), kernel_size=1, stride=1)
|
43 |
+
|
44 |
+
self.estimate_occlusion_map = estimate_occlusion_map
|
45 |
+
|
46 |
+
def deform_input(self, inp, deformation):
|
47 |
+
return F.grid_sample(inp, deformation, align_corners=False)
|
48 |
+
|
49 |
+
def forward(self, feature_3d, kp_driving, kp_source):
|
50 |
+
if self.dense_motion_network is not None:
|
51 |
+
# Feature warper, Transforming feature representation according to deformation and occlusion
|
52 |
+
dense_motion = self.dense_motion_network(
|
53 |
+
feature=feature_3d, kp_driving=kp_driving, kp_source=kp_source
|
54 |
+
)
|
55 |
+
if 'occlusion_map' in dense_motion:
|
56 |
+
occlusion_map = dense_motion['occlusion_map'] # Bx1x64x64
|
57 |
+
else:
|
58 |
+
occlusion_map = None
|
59 |
+
|
60 |
+
deformation = dense_motion['deformation'] # Bx16x64x64x3
|
61 |
+
out = self.deform_input(feature_3d, deformation) # Bx32x16x64x64
|
62 |
+
|
63 |
+
bs, c, d, h, w = out.shape # Bx32x16x64x64
|
64 |
+
out = out.view(bs, c * d, h, w) # -> Bx512x64x64
|
65 |
+
out = self.third(out) # -> Bx256x64x64
|
66 |
+
out = self.fourth(out) # -> Bx256x64x64
|
67 |
+
|
68 |
+
if self.flag_use_occlusion_map and (occlusion_map is not None):
|
69 |
+
out = out * occlusion_map
|
70 |
+
|
71 |
+
ret_dct = {
|
72 |
+
'occlusion_map': occlusion_map,
|
73 |
+
'deformation': deformation,
|
74 |
+
'out': out,
|
75 |
+
}
|
76 |
+
|
77 |
+
return ret_dct
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__init__.py
ADDED
File without changes
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (226 Bytes). View file
|
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/camera.cpython-312.pyc
ADDED
Binary file (3.47 kB). View file
|
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/helper.cpython-312.pyc
ADDED
Binary file (5.77 kB). View file
|
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/__pycache__/rprint.cpython-312.pyc
ADDED
Binary file (539 Bytes). View file
|
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/camera.py
ADDED
@@ -0,0 +1,75 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
functions for processing and transforming 3D facial keypoints
|
5 |
+
"""
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
import torch
|
9 |
+
import torch.nn.functional as F
|
10 |
+
|
11 |
+
PI = np.pi
|
12 |
+
|
13 |
+
|
14 |
+
def headpose_pred_to_degree(pred):
|
15 |
+
"""
|
16 |
+
pred: (bs, 66) or (bs, 1) or others
|
17 |
+
"""
|
18 |
+
if pred.ndim > 1 and pred.shape[1] == 66:
|
19 |
+
# NOTE: the average is modified to 97.5
|
20 |
+
device = pred.device
|
21 |
+
idx_tensor = [idx for idx in range(0, 66)]
|
22 |
+
idx_tensor = torch.FloatTensor(idx_tensor).to(device)
|
23 |
+
pred = F.softmax(pred, dim=1)
|
24 |
+
degree = torch.sum(pred*idx_tensor, axis=1) * 3 - 97.5
|
25 |
+
|
26 |
+
return degree
|
27 |
+
|
28 |
+
return pred
|
29 |
+
|
30 |
+
|
31 |
+
def get_rotation_matrix(pitch_, yaw_, roll_):
|
32 |
+
""" the input is in degree
|
33 |
+
"""
|
34 |
+
# calculate the rotation matrix: vps @ rot
|
35 |
+
|
36 |
+
# transform to radian
|
37 |
+
pitch = pitch_ / 180 * PI
|
38 |
+
yaw = yaw_ / 180 * PI
|
39 |
+
roll = roll_ / 180 * PI
|
40 |
+
|
41 |
+
device = pitch.device
|
42 |
+
|
43 |
+
if pitch.ndim == 1:
|
44 |
+
pitch = pitch.unsqueeze(1)
|
45 |
+
if yaw.ndim == 1:
|
46 |
+
yaw = yaw.unsqueeze(1)
|
47 |
+
if roll.ndim == 1:
|
48 |
+
roll = roll.unsqueeze(1)
|
49 |
+
|
50 |
+
# calculate the euler matrix
|
51 |
+
bs = pitch.shape[0]
|
52 |
+
ones = torch.ones([bs, 1]).to(device)
|
53 |
+
zeros = torch.zeros([bs, 1]).to(device)
|
54 |
+
x, y, z = pitch, yaw, roll
|
55 |
+
|
56 |
+
rot_x = torch.cat([
|
57 |
+
ones, zeros, zeros,
|
58 |
+
zeros, torch.cos(x), -torch.sin(x),
|
59 |
+
zeros, torch.sin(x), torch.cos(x)
|
60 |
+
], dim=1).reshape([bs, 3, 3])
|
61 |
+
|
62 |
+
rot_y = torch.cat([
|
63 |
+
torch.cos(y), zeros, torch.sin(y),
|
64 |
+
zeros, ones, zeros,
|
65 |
+
-torch.sin(y), zeros, torch.cos(y)
|
66 |
+
], dim=1).reshape([bs, 3, 3])
|
67 |
+
|
68 |
+
rot_z = torch.cat([
|
69 |
+
torch.cos(z), -torch.sin(z), zeros,
|
70 |
+
torch.sin(z), torch.cos(z), zeros,
|
71 |
+
zeros, zeros, ones
|
72 |
+
], dim=1).reshape([bs, 3, 3])
|
73 |
+
|
74 |
+
rot = rot_z @ rot_y @ rot_x
|
75 |
+
return rot.permute(0, 2, 1) # transpose
|
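A small sketch (illustrative, not from the upload) of calling get_rotation_matrix above with a batch of head poses given in degrees:

import torch

pitch = torch.tensor([0.0, 10.0])  # degrees
yaw = torch.tensor([5.0, -5.0])
roll = torch.tensor([0.0, 0.0])
R = get_rotation_matrix(pitch, yaw, roll)  # defined above
print(R.shape)  # torch.Size([2, 3, 3]); each matrix is the transposed rot_z @ rot_y @ rot_x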
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/face_analysis_diy.py
ADDED
@@ -0,0 +1,78 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
face detection and alignment using InsightFace
|
5 |
+
"""
|
6 |
+
|
7 |
+
import numpy as np
|
8 |
+
from .rprint import rlog as log
|
9 |
+
from insightface.app import FaceAnalysis
|
10 |
+
from insightface.app.common import Face
|
11 |
+
from .timer import Timer
|
12 |
+
|
13 |
+
|
14 |
+
def sort_by_direction(faces, direction: str = 'large-small', face_center=None):
|
15 |
+
if len(faces) <= 0:
|
16 |
+
return faces
|
17 |
+
if direction == 'left-right':
|
18 |
+
return sorted(faces, key=lambda face: face['bbox'][0])
|
19 |
+
if direction == 'right-left':
|
20 |
+
return sorted(faces, key=lambda face: face['bbox'][0], reverse=True)
|
21 |
+
if direction == 'top-bottom':
|
22 |
+
return sorted(faces, key=lambda face: face['bbox'][1])
|
23 |
+
if direction == 'bottom-top':
|
24 |
+
return sorted(faces, key=lambda face: face['bbox'][1], reverse=True)
|
25 |
+
if direction == 'small-large':
|
26 |
+
return sorted(faces, key=lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]))
|
27 |
+
if direction == 'large-small':
|
28 |
+
return sorted(faces, key=lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse=True)
|
29 |
+
if direction == 'distance-from-retarget-face':
|
30 |
+
return sorted(faces, key=lambda face: (((face['bbox'][2]+face['bbox'][0])/2-face_center[0])**2+((face['bbox'][3]+face['bbox'][1])/2-face_center[1])**2)**0.5)
|
31 |
+
return faces
|
32 |
+
|
33 |
+
|
34 |
+
class FaceAnalysisDIY(FaceAnalysis):
|
35 |
+
def __init__(self, name='buffalo_l', root='~/.insightface', allowed_modules=None, **kwargs):
|
36 |
+
super().__init__(name=name, root=root, allowed_modules=allowed_modules, **kwargs)
|
37 |
+
|
38 |
+
self.timer = Timer()
|
39 |
+
|
40 |
+
def get(self, img_bgr, **kwargs):
|
41 |
+
max_num = kwargs.get('max_num', 0) # the number of the detected faces, 0 means no limit
|
42 |
+
flag_do_landmark_2d_106 = kwargs.get('flag_do_landmark_2d_106', True) # whether to do 106-point detection
|
43 |
+
direction = kwargs.get('direction', 'large-small') # sorting direction
|
44 |
+
face_center = None
|
45 |
+
|
46 |
+
bboxes, kpss = self.det_model.detect(img_bgr, max_num=max_num, metric='default')
|
47 |
+
if bboxes.shape[0] == 0:
|
48 |
+
return []
|
49 |
+
ret = []
|
50 |
+
for i in range(bboxes.shape[0]):
|
51 |
+
bbox = bboxes[i, 0:4]
|
52 |
+
det_score = bboxes[i, 4]
|
53 |
+
kps = None
|
54 |
+
if kpss is not None:
|
55 |
+
kps = kpss[i]
|
56 |
+
face = Face(bbox=bbox, kps=kps, det_score=det_score)
|
57 |
+
for taskname, model in self.models.items():
|
58 |
+
if taskname == 'detection':
|
59 |
+
continue
|
60 |
+
|
61 |
+
if (not flag_do_landmark_2d_106) and taskname == 'landmark_2d_106':
|
62 |
+
continue
|
63 |
+
|
64 |
+
# print(f'taskname: {taskname}')
|
65 |
+
model.get(img_bgr, face)
|
66 |
+
ret.append(face)
|
67 |
+
|
68 |
+
ret = sort_by_direction(ret, direction, face_center)
|
69 |
+
return ret
|
70 |
+
|
71 |
+
def warmup(self):
|
72 |
+
self.timer.tic()
|
73 |
+
|
74 |
+
img_bgr = np.zeros((512, 512, 3), dtype=np.uint8)
|
75 |
+
self.get(img_bgr)
|
76 |
+
|
77 |
+
elapse = self.timer.toc()
|
78 |
+
log(f'FaceAnalysisDIY warmup time: {elapse:.3f}s')
|
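An illustrative sketch (not part of the upload) of sort_by_direction above, using plain dicts in place of insightface Face objects:

faces = [{'bbox': [0, 0, 50, 50]}, {'bbox': [10, 10, 130, 130]}]
ordered = sort_by_direction(faces, 'large-small')  # defined above
print(ordered[0]['bbox'])  # [10, 10, 130, 130], the larger box comes first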
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/helper.py
ADDED
@@ -0,0 +1,124 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
utility functions and classes to handle feature extraction and model loading
|
5 |
+
"""
|
6 |
+
|
7 |
+
import os
|
8 |
+
import os.path as osp
|
9 |
+
import cv2
|
10 |
+
import torch
|
11 |
+
from rich.console import Console
|
12 |
+
from collections import OrderedDict
|
13 |
+
|
14 |
+
from ..modules.spade_generator import SPADEDecoder
|
15 |
+
from ..modules.warping_network import WarpingNetwork
|
16 |
+
from ..modules.motion_extractor import MotionExtractor
|
17 |
+
from ..modules.appearance_feature_extractor import AppearanceFeatureExtractor
|
18 |
+
from ..modules.stitching_retargeting_network import StitchingRetargetingNetwork
|
19 |
+
from .rprint import rlog as log
|
20 |
+
|
21 |
+
|
22 |
+
def suffix(filename):
|
23 |
+
"""a.jpg -> jpg"""
|
24 |
+
pos = filename.rfind(".")
|
25 |
+
if pos == -1:
|
26 |
+
return ""
|
27 |
+
return filename[pos + 1:]
|
28 |
+
|
29 |
+
|
30 |
+
def prefix(filename):
|
31 |
+
"""a.jpg -> a"""
|
32 |
+
pos = filename.rfind(".")
|
33 |
+
if pos == -1:
|
34 |
+
return filename
|
35 |
+
return filename[:pos]
|
36 |
+
|
37 |
+
|
38 |
+
def basename(filename):
|
39 |
+
"""a/b/c.jpg -> c"""
|
40 |
+
return prefix(osp.basename(filename))
|
41 |
+
|
42 |
+
|
43 |
+
def is_video(file_path):
|
44 |
+
if file_path.lower().endswith((".mp4", ".mov", ".avi", ".webm")) or osp.isdir(file_path):
|
45 |
+
return True
|
46 |
+
return False
|
47 |
+
|
48 |
+
def is_template(file_path):
|
49 |
+
if file_path.endswith(".pkl"):
|
50 |
+
return True
|
51 |
+
return False
|
52 |
+
|
53 |
+
|
54 |
+
def mkdir(d, log=False):
|
55 |
+
# return self-assigned `d`, for one-line code
|
56 |
+
if not osp.exists(d):
|
57 |
+
os.makedirs(d, exist_ok=True)
|
58 |
+
if log:
|
59 |
+
print(f"Make dir: {d}")
|
60 |
+
return d
|
61 |
+
|
62 |
+
|
63 |
+
def squeeze_tensor_to_numpy(tensor):
|
64 |
+
out = tensor.data.squeeze(0).cpu().numpy()
|
65 |
+
return out
|
66 |
+
|
67 |
+
|
68 |
+
def dct2cuda(dct: dict, device_id: int):
|
69 |
+
for key in dct:
|
70 |
+
dct[key] = torch.tensor(dct[key]).cuda(device_id)
|
71 |
+
return dct
|
72 |
+
|
73 |
+
|
74 |
+
def concat_feat(kp_source: torch.Tensor, kp_driving: torch.Tensor) -> torch.Tensor:
|
75 |
+
"""
|
76 |
+
kp_source: (bs, k, 3)
|
77 |
+
kp_driving: (bs, k, 3)
|
78 |
+
Return: (bs, 2k*3)
|
79 |
+
"""
|
80 |
+
bs_src = kp_source.shape[0]
|
81 |
+
bs_dri = kp_driving.shape[0]
|
82 |
+
assert bs_src == bs_dri, 'batch size must be equal'
|
83 |
+
|
84 |
+
feat = torch.cat([kp_source.view(bs_src, -1), kp_driving.view(bs_dri, -1)], dim=1)
|
85 |
+
return feat
|
86 |
+
|
87 |
+
|
88 |
+
# get coefficients of Eqn. 7
|
89 |
+
def calculate_transformation(config, s_kp_info, t_0_kp_info, t_i_kp_info, R_s, R_t_0, R_t_i):
|
90 |
+
if config.relative:
|
91 |
+
new_rotation = (R_t_i @ R_t_0.permute(0, 2, 1)) @ R_s
|
92 |
+
new_expression = s_kp_info['exp'] + (t_i_kp_info['exp'] - t_0_kp_info['exp'])
|
93 |
+
else:
|
94 |
+
new_rotation = R_t_i
|
95 |
+
new_expression = t_i_kp_info['exp']
|
96 |
+
new_translation = s_kp_info['t'] + (t_i_kp_info['t'] - t_0_kp_info['t'])
|
97 |
+
new_translation[..., 2].fill_(0) # Keep the z-axis unchanged
|
98 |
+
new_scale = s_kp_info['scale'] * (t_i_kp_info['scale'] / t_0_kp_info['scale'])
|
99 |
+
return new_rotation, new_expression, new_translation, new_scale
|
100 |
+
|
101 |
+
def load_description(fp):
|
102 |
+
with open(fp, 'r', encoding='utf-8') as f:
|
103 |
+
content = f.read()
|
104 |
+
return content
|
105 |
+
|
106 |
+
|
107 |
+
def resize_to_limit(img, max_dim=1280, n=2):
|
108 |
+
h, w = img.shape[:2]
|
109 |
+
if max_dim > 0 and max(h, w) > max_dim:
|
110 |
+
if h > w:
|
111 |
+
new_h = max_dim
|
112 |
+
new_w = int(w * (max_dim / h))
|
113 |
+
else:
|
114 |
+
new_w = max_dim
|
115 |
+
new_h = int(h * (max_dim / w))
|
116 |
+
img = cv2.resize(img, (new_w, new_h))
|
117 |
+
n = max(n, 1)
|
118 |
+
new_h = img.shape[0] - (img.shape[0] % n)
|
119 |
+
new_w = img.shape[1] - (img.shape[1] % n)
|
120 |
+
if new_h == 0 or new_w == 0:
|
121 |
+
return img
|
122 |
+
if new_h != img.shape[0] or new_w != img.shape[1]:
|
123 |
+
img = img[:new_h, :new_w]
|
124 |
+
return img
|
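A quick sketch (illustrative only) of concat_feat above, which flattens and concatenates the source and driving keypoints into a single feature vector:

import torch

kp_source = torch.randn(1, 21, 3)
kp_driving = torch.randn(1, 21, 3)
feat = concat_feat(kp_source, kp_driving)  # defined above
print(feat.shape)  # torch.Size([1, 126]) == (bs, 2*k*3)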
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/io.py
ADDED
@@ -0,0 +1,97 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
import os
|
4 |
+
from glob import glob
|
5 |
+
import os.path as osp
|
6 |
+
import imageio
|
7 |
+
import numpy as np
|
8 |
+
import cv2; cv2.setNumThreads(0); cv2.ocl.setUseOpenCL(False)
|
9 |
+
|
10 |
+
|
11 |
+
def load_image_rgb(image_path: str):
|
12 |
+
if not osp.exists(image_path):
|
13 |
+
raise FileNotFoundError(f"Image not found: {image_path}")
|
14 |
+
img = cv2.imread(image_path, cv2.IMREAD_COLOR)
|
15 |
+
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
16 |
+
|
17 |
+
|
18 |
+
def load_driving_info(driving_info):
|
19 |
+
driving_video_ori = []
|
20 |
+
|
21 |
+
def load_images_from_directory(directory):
|
22 |
+
image_paths = sorted(glob(osp.join(directory, '*.png')) + glob(osp.join(directory, '*.jpg')))
|
23 |
+
return [load_image_rgb(im_path) for im_path in image_paths]
|
24 |
+
|
25 |
+
def load_images_from_video(file_path):
|
26 |
+
reader = imageio.get_reader(file_path)
|
27 |
+
return [image for idx, image in enumerate(reader)]
|
28 |
+
|
29 |
+
if osp.isdir(driving_info):
|
30 |
+
driving_video_ori = load_images_from_directory(driving_info)
|
31 |
+
elif osp.isfile(driving_info):
|
32 |
+
driving_video_ori = load_images_from_video(driving_info)
|
33 |
+
|
34 |
+
return driving_video_ori
|
35 |
+
|
36 |
+
|
37 |
+
def contiguous(obj):
|
38 |
+
if not obj.flags.c_contiguous:
|
39 |
+
obj = obj.copy(order="C")
|
40 |
+
return obj
|
41 |
+
|
42 |
+
|
43 |
+
def _resize_to_limit(img: np.ndarray, max_dim=1920, n=2):
|
44 |
+
"""
|
45 |
+
adjust the size of the image so that the maximum dimension does not exceed max_dim, and the width and the height of the image are multiples of n.
|
46 |
+
:param img: the image to be processed.
|
47 |
+
:param max_dim: the maximum dimension constraint.
|
48 |
+
:param n: the number that needs to be multiples of.
|
49 |
+
:return: the adjusted image.
|
50 |
+
"""
|
51 |
+
h, w = img.shape[:2]
|
52 |
+
|
53 |
+
# adjust the size of the image according to the maximum dimension
|
54 |
+
if max_dim > 0 and max(h, w) > max_dim:
|
55 |
+
if h > w:
|
56 |
+
new_h = max_dim
|
57 |
+
new_w = int(w * (max_dim / h))
|
58 |
+
else:
|
59 |
+
new_w = max_dim
|
60 |
+
new_h = int(h * (max_dim / w))
|
61 |
+
img = cv2.resize(img, (new_w, new_h))
|
62 |
+
|
63 |
+
# ensure that the image dimensions are multiples of n
|
64 |
+
n = max(n, 1)
|
65 |
+
new_h = img.shape[0] - (img.shape[0] % n)
|
66 |
+
new_w = img.shape[1] - (img.shape[1] % n)
|
67 |
+
|
68 |
+
if new_h == 0 or new_w == 0:
|
69 |
+
# when the width or height is less than n, no need to process
|
70 |
+
return img
|
71 |
+
|
72 |
+
if new_h != img.shape[0] or new_w != img.shape[1]:
|
73 |
+
img = img[:new_h, :new_w]
|
74 |
+
|
75 |
+
return img
|
76 |
+
|
77 |
+
|
78 |
+
def load_img_online(obj, mode="bgr", **kwargs):
|
79 |
+
max_dim = kwargs.get("max_dim", 1920)
|
80 |
+
n = kwargs.get("n", 2)
|
81 |
+
if isinstance(obj, str):
|
82 |
+
if mode.lower() == "gray":
|
83 |
+
img = cv2.imread(obj, cv2.IMREAD_GRAYSCALE)
|
84 |
+
else:
|
85 |
+
img = cv2.imread(obj, cv2.IMREAD_COLOR)
|
86 |
+
else:
|
87 |
+
img = obj
|
88 |
+
|
89 |
+
# Resize image to satisfy constraints
|
90 |
+
img = _resize_to_limit(img, max_dim=max_dim, n=n)
|
91 |
+
|
92 |
+
if mode.lower() == "bgr":
|
93 |
+
return contiguous(img)
|
94 |
+
elif mode.lower() == "rgb":
|
95 |
+
return contiguous(img[..., ::-1])
|
96 |
+
else:
|
97 |
+
raise Exception(f"Unknown mode {mode}")
|
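An illustrative sketch (assuming an in-memory BGR frame; not from the upload) of load_img_online above, showing the max-dimension cap and the multiple-of-n trim:

import numpy as np

frame = np.zeros((1081, 1923, 3), dtype=np.uint8)  # H x W x 3, BGR
out = load_img_online(frame, mode="rgb", max_dim=1920, n=2)  # defined above
print(out.shape)  # (1078, 1920, 3): capped at 1920, then trimmed to multiples of 2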
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/resources/mask_template.png
ADDED
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/rprint.py
ADDED
@@ -0,0 +1,16 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
custom print and log functions
|
5 |
+
"""
|
6 |
+
|
7 |
+
__all__ = ['rprint', 'rlog']
|
8 |
+
|
9 |
+
try:
|
10 |
+
from rich.console import Console
|
11 |
+
console = Console()
|
12 |
+
rprint = console.print
|
13 |
+
rlog = console.log
|
14 |
+
except:
|
15 |
+
rprint = print
|
16 |
+
rlog = print
|
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/timer.py
ADDED
@@ -0,0 +1,29 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
tools to measure elapsed time
|
5 |
+
"""
|
6 |
+
|
7 |
+
import time
|
8 |
+
|
9 |
+
class Timer(object):
|
10 |
+
"""A simple timer."""
|
11 |
+
|
12 |
+
def __init__(self):
|
13 |
+
self.total_time = 0.
|
14 |
+
self.calls = 0
|
15 |
+
self.start_time = 0.
|
16 |
+
self.diff = 0.
|
17 |
+
|
18 |
+
def tic(self):
|
19 |
+
# using time.time instead of time.clock because time.clock
|
20 |
+
# does not normalize for multithreading
|
21 |
+
self.start_time = time.time()
|
22 |
+
|
23 |
+
def toc(self, average=True):
|
24 |
+
self.diff = time.time() - self.start_time
|
25 |
+
return self.diff
|
26 |
+
|
27 |
+
def clear(self):
|
28 |
+
self.start_time = 0.
|
29 |
+
self.diff = 0.
|
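A minimal usage sketch (not from the upload) for the Timer above:

t = Timer()
t.tic()
# ... do some work ...
print(f"elapsed: {t.toc():.3f}s")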
ComfyUI-AdvancedLivePortrait/LivePortrait/utils/video.py
ADDED
@@ -0,0 +1,142 @@
1 |
+
# coding: utf-8
|
2 |
+
|
3 |
+
"""
|
4 |
+
functions for processing video
|
5 |
+
"""
|
6 |
+
|
7 |
+
import os.path as osp
|
8 |
+
import numpy as np
|
9 |
+
import subprocess
|
10 |
+
import imageio
|
11 |
+
import cv2
|
12 |
+
|
13 |
+
# from rich.progress import track
|
14 |
+
from .helper import prefix
|
15 |
+
from .rprint import rprint as print
|
16 |
+
|
17 |
+
|
18 |
+
def exec_cmd(cmd):
|
19 |
+
subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
20 |
+
|
21 |
+
|
22 |
+
def images2video(images, wfp, **kwargs):
|
23 |
+
fps = kwargs.get('fps', 30)
|
24 |
+
video_format = kwargs.get('format', 'mp4') # default is mp4 format
|
25 |
+
codec = kwargs.get('codec', 'libx264') # default is libx264 encoding
|
26 |
+
quality = kwargs.get('quality') # video quality
|
27 |
+
pixelformat = kwargs.get('pixelformat', 'yuv420p') # video pixel format
|
28 |
+
image_mode = kwargs.get('image_mode', 'rgb')
|
29 |
+
macro_block_size = kwargs.get('macro_block_size', 2)
|
30 |
+
ffmpeg_params = ['-crf', str(kwargs.get('crf', 18))]
|
31 |
+
|
32 |
+
writer = imageio.get_writer(
|
33 |
+
wfp, fps=fps, format=video_format,
|
34 |
+
codec=codec, quality=quality, ffmpeg_params=ffmpeg_params, pixelformat=pixelformat, macro_block_size=macro_block_size
|
35 |
+
)
|
36 |
+
|
37 |
+
n = len(images)
|
38 |
+
print('writing',n)
|
39 |
+
for i in range(n):
|
40 |
+
if image_mode.lower() == 'bgr':
|
41 |
+
writer.append_data(images[i][..., ::-1])
|
42 |
+
else:
|
43 |
+
writer.append_data(images[i])
|
44 |
+
|
45 |
+
writer.close()
|
46 |
+
|
47 |
+
# print(f':smiley: Dump to {wfp}\n', style="bold green")
|
48 |
+
print(f'Dump to {wfp}\n')
|
49 |
+
|
50 |
+
|
51 |
+
def video2gif(video_fp, fps=30, size=256):
|
52 |
+
if osp.exists(video_fp):
|
53 |
+
d = osp.split(video_fp)[0]
|
54 |
+
fn = prefix(osp.basename(video_fp))
|
55 |
+
palette_wfp = osp.join(d, 'palette.png')
|
56 |
+
gif_wfp = osp.join(d, f'{fn}.gif')
|
57 |
+
# generate the palette
|
58 |
+
cmd = f'ffmpeg -i {video_fp} -vf "fps={fps},scale={size}:-1:flags=lanczos,palettegen" {palette_wfp} -y'
|
59 |
+
exec_cmd(cmd)
|
60 |
+
# use the palette to generate the gif
|
61 |
+
cmd = f'ffmpeg -i {video_fp} -i {palette_wfp} -filter_complex "fps={fps},scale={size}:-1:flags=lanczos[x];[x][1:v]paletteuse" {gif_wfp} -y'
|
62 |
+
exec_cmd(cmd)
|
63 |
+
else:
|
64 |
+
print(f'video_fp: {video_fp} not exists!')
|
65 |
+
|
66 |
+
|
67 |
+
def merge_audio_video(video_fp, audio_fp, wfp):
|
68 |
+
if osp.exists(video_fp) and osp.exists(audio_fp):
|
69 |
+
cmd = f'ffmpeg -i {video_fp} -i {audio_fp} -c:v copy -c:a aac {wfp} -y'
|
70 |
+
exec_cmd(cmd)
|
71 |
+
print(f'merge {video_fp} and {audio_fp} to {wfp}')
|
72 |
+
else:
|
73 |
+
print(f'video_fp: {video_fp} or audio_fp: {audio_fp} not exists!')
|
74 |
+
|
75 |
+
|
76 |
+
def blend(img: np.ndarray, mask: np.ndarray, background_color=(255, 255, 255)):
|
77 |
+
mask_float = mask.astype(np.float32) / 255.
|
78 |
+
background_color = np.array(background_color).reshape([1, 1, 3])
|
79 |
+
bg = np.ones_like(img) * background_color
|
80 |
+
img = np.clip(mask_float * img + (1 - mask_float) * bg, 0, 255).astype(np.uint8)
|
81 |
+
return img
|
82 |
+
|
83 |
+
|
84 |
+
def concat_frames(I_p_lst, driving_rgb_lst, img_rgb):
|
85 |
+
# TODO: add more concat style, e.g., left-down corner driving
|
86 |
+
out_lst = []
|
87 |
+
print('Concatenating result...',len(I_p_lst))
|
88 |
+
for idx, _ in enumerate(I_p_lst):
|
89 |
+
# track(enumerate(I_p_lst), total=len(I_p_lst), description='Concatenating result...'):
|
90 |
+
source_image_drived = I_p_lst[idx]
|
91 |
+
image_drive = driving_rgb_lst[idx]
|
92 |
+
|
93 |
+
# resize images to match source_image_drived shape
|
94 |
+
h, w, _ = source_image_drived.shape
|
95 |
+
image_drive_resized = cv2.resize(image_drive, (w, h))
|
96 |
+
img_rgb_resized = cv2.resize(img_rgb, (w, h))
|
97 |
+
|
98 |
+
# concatenate images horizontally
|
99 |
+
frame = np.concatenate((image_drive_resized, img_rgb_resized, source_image_drived), axis=1)
|
100 |
+
out_lst.append(frame)
|
101 |
+
return out_lst
|
102 |
+
|
103 |
+
|
104 |
+
class VideoWriter:
|
105 |
+
def __init__(self, **kwargs):
|
106 |
+
self.fps = kwargs.get('fps', 30)
|
107 |
+
self.wfp = kwargs.get('wfp', 'video.mp4')
|
108 |
+
self.video_format = kwargs.get('format', 'mp4')
|
109 |
+
self.codec = kwargs.get('codec', 'libx264')
|
110 |
+
self.quality = kwargs.get('quality')
|
111 |
+
self.pixelformat = kwargs.get('pixelformat', 'yuv420p')
|
112 |
+
self.image_mode = kwargs.get('image_mode', 'rgb')
|
113 |
+
self.ffmpeg_params = kwargs.get('ffmpeg_params')
|
114 |
+
|
115 |
+
self.writer = imageio.get_writer(
|
116 |
+
self.wfp, fps=self.fps, format=self.video_format,
|
117 |
+
codec=self.codec, quality=self.quality,
|
118 |
+
ffmpeg_params=self.ffmpeg_params, pixelformat=self.pixelformat
|
119 |
+
)
|
120 |
+
|
121 |
+
def write(self, image):
|
122 |
+
if self.image_mode.lower() == 'bgr':
|
123 |
+
self.writer.append_data(image[..., ::-1])
|
124 |
+
else:
|
125 |
+
self.writer.append_data(image)
|
126 |
+
|
127 |
+
def close(self):
|
128 |
+
if self.writer is not None:
|
129 |
+
self.writer.close()
|
130 |
+
|
131 |
+
|
132 |
+
def change_video_fps(input_file, output_file, fps=20, codec='libx264', crf=5):
|
133 |
+
cmd = f"ffmpeg -i {input_file} -c:v {codec} -crf {crf} -r {fps} {output_file} -y"
|
134 |
+
exec_cmd(cmd)
|
135 |
+
|
136 |
+
|
137 |
+
def get_fps(filepath):
|
138 |
+
import ffmpeg
|
139 |
+
probe = ffmpeg.probe(filepath)
|
140 |
+
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
|
141 |
+
fps = eval(video_stream['avg_frame_rate'])
|
142 |
+
return fps
|
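An illustrative sketch (assuming imageio's ffmpeg backend is installed; not part of the upload) of writing frames with images2video above:

import numpy as np

frames = [np.full((256, 256, 3), 128, dtype=np.uint8) for _ in range(10)]  # ten gray RGB frames
images2video(frames, 'preview.mp4', fps=10, image_mode='rgb')  # defined above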
ComfyUI-AdvancedLivePortrait/README.md
ADDED
@@ -0,0 +1,64 @@
1 |
+
# ComfyUI-AdvancedLivePortrait
|
2 |
+
|
3 |
+
## Update
|
4 |
+
|
5 |
+
8/21/2024
|
6 |
+
|
7 |
+
You can create a video without a video.
|
8 |
+
|
9 |
+
Track the face of the source video.
|
10 |
+
|
11 |
+
The workflow has been updated.
|
12 |
+
|
13 |
+
## Introduction
|
14 |
+
|
15 |
+
AdvancedLivePortrait is faster and has a real-time preview.
|
16 |
+
|
17 |
+
https://github.com/user-attachments/assets/90b78639-6477-48af-ba49-7945488df581
|
18 |
+
|
19 |
+
Edit facial expressions in photos.
|
20 |
+
|
21 |
+
Insert facial expressions into videos.
|
22 |
+
|
23 |
+
Create animations using multiple facial expressions.
|
24 |
+
|
25 |
+
Extract facial expressions from sample photos.
|
26 |
+
|
27 |
+
## Installation
|
28 |
+
|
29 |
+
This project has been registered with ComfyUI-Manager. Now you can install it automatically using the manager.
|
30 |
+
|
31 |
+
## Usage
|
32 |
+
|
33 |
+
The workflows and sample data are placed in '\custom_nodes\ComfyUI-AdvancedLivePortrait\sample\'.
|
34 |
+
|
35 |
+
-----
|
36 |
+
|
37 |
+
You can add expressions to the video. See 'workflow2_advanced.json'.
|
38 |
+
|
39 |
+
The 'command' format in 'workflow2_advanced.json' is described below.
|
40 |
+
|
41 |
+
![readme](https://github.com/user-attachments/assets/339568b2-ad52-4aaf-a6ab-fcd877449c56)
|
42 |
+
|
43 |
+
|
44 |
+
[Motion index] = [Changing frame length] : [Length of frames waiting for next motion]
|
45 |
+
|
46 |
+
Motion index 0 is the original source image.
|
47 |
+
|
48 |
+
Motions are numbered in the order they are connected to the motion_link.
|
49 |
+
|
50 |
+
Linking the driving video to 'src_images' will add facial expressions to the driving video.
|
51 |
+
|
52 |
+
-----
|
53 |
+
|
54 |
+
You can save and load expressions with the 'Load Exp Data' and 'Save Exp Data' nodes.
|
55 |
+
|
56 |
+
\ComfyUI\output\exp_data\ is the folder where expression data is saved.
|
57 |
+
|
58 |
+
-----
|
59 |
+
|
60 |
+
## Thanks
|
61 |
+
|
62 |
+
Original author's link : https://liveportrait.github.io/
|
63 |
+
|
64 |
+
This project uses a model converted by kijai. link : https://github.com/kijai/ComfyUI-LivePortraitKJ
|
ComfyUI-AdvancedLivePortrait/__init__.py
ADDED
@@ -0,0 +1,4 @@
1 |
+
from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
|
2 |
+
__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
|
3 |
+
|
4 |
+
|
ComfyUI-AdvancedLivePortrait/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (334 Bytes). View file
|
|
ComfyUI-AdvancedLivePortrait/__pycache__/nodes.cpython-312.pyc
ADDED
Binary file (49.2 kB). View file
|
|
ComfyUI-AdvancedLivePortrait/install.bat
ADDED
@@ -0,0 +1,20 @@
1 |
+
@echo off
|
2 |
+
|
3 |
+
set "requirements_txt=%~dp0\requirements.txt"
|
4 |
+
set "python_exec=..\..\..\python_embeded\python.exe"
|
5 |
+
|
6 |
+
echo Installing ComfyUI-AdvancedLivePortrait..
|
7 |
+
|
8 |
+
if exist "%python_exec%" (
|
9 |
+
echo Installing with ComfyUI Portable
|
10 |
+
for /f "delims=" %%i in (%requirements_txt%) do (
|
11 |
+
%python_exec% -s -m pip install "%%i"
|
12 |
+
)
|
13 |
+
) else (
|
14 |
+
echo Installing with system Python
|
15 |
+
for /f "delims=" %%i in (%requirements_txt%) do (
|
16 |
+
pip install "%%i"
|
17 |
+
)
|
18 |
+
)
|
19 |
+
|
20 |
+
pause
|
ComfyUI-AdvancedLivePortrait/nodes.py
ADDED
@@ -0,0 +1,980 @@
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
import numpy as np
|
4 |
+
import torch
|
5 |
+
import cv2
|
6 |
+
from PIL import Image
|
7 |
+
import folder_paths
|
8 |
+
import comfy.utils
|
9 |
+
import time
|
10 |
+
import copy
|
11 |
+
import dill
|
12 |
+
import yaml
|
13 |
+
from ultralytics import YOLO
|
14 |
+
|
15 |
+
current_file_path = os.path.abspath(__file__)
|
16 |
+
current_directory = os.path.dirname(current_file_path)
|
17 |
+
|
18 |
+
from .LivePortrait.live_portrait_wrapper import LivePortraitWrapper
|
19 |
+
from .LivePortrait.utils.camera import get_rotation_matrix
|
20 |
+
from .LivePortrait.config.inference_config import InferenceConfig
|
21 |
+
|
22 |
+
from .LivePortrait.modules.spade_generator import SPADEDecoder
|
23 |
+
from .LivePortrait.modules.warping_network import WarpingNetwork
|
24 |
+
from .LivePortrait.modules.motion_extractor import MotionExtractor
|
25 |
+
from .LivePortrait.modules.appearance_feature_extractor import AppearanceFeatureExtractor
|
26 |
+
from .LivePortrait.modules.stitching_retargeting_network import StitchingRetargetingNetwork
|
27 |
+
from collections import OrderedDict
|
28 |
+
|
29 |
+
cur_device = None
|
30 |
+
def get_device():
|
31 |
+
global cur_device
|
32 |
+
if cur_device == None:
|
33 |
+
if torch.cuda.is_available():
|
34 |
+
cur_device = torch.device('cuda')
|
35 |
+
print("Uses CUDA device.")
|
36 |
+
elif torch.backends.mps.is_available():
|
37 |
+
cur_device = torch.device('mps')
|
38 |
+
print("Uses MPS device.")
|
39 |
+
else:
|
40 |
+
cur_device = torch.device('cpu')
|
41 |
+
print("Uses CPU device.")
|
42 |
+
return cur_device
|
43 |
+
|
44 |
+
def tensor2pil(image):
|
45 |
+
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
|
46 |
+
def pil2tensor(image):
|
47 |
+
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
|
48 |
+
def rgb_crop(rgb, region):
|
49 |
+
return rgb[region[1]:region[3], region[0]:region[2]]
|
50 |
+
|
51 |
+
def rgb_crop_batch(rgbs, region):
|
52 |
+
return rgbs[:, region[1]:region[3], region[0]:region[2]]
|
53 |
+
def get_rgb_size(rgb):
|
54 |
+
return rgb.shape[1], rgb.shape[0]
|
55 |
+
def create_transform_matrix(x, y, s_x, s_y):
|
56 |
+
return np.float32([[s_x, 0, x], [0, s_y, y]])
|
57 |
+
|
58 |
+
def get_model_dir(m):
|
59 |
+
try:
|
60 |
+
return folder_paths.get_folder_paths(m)[0]
|
61 |
+
except:
|
62 |
+
return os.path.join(folder_paths.models_dir, m)
|
63 |
+
|
64 |
+
def calc_crop_limit(center, img_size, crop_size):
|
65 |
+
pos = center - crop_size / 2
|
66 |
+
if pos < 0:
|
67 |
+
crop_size += pos * 2
|
68 |
+
pos = 0
|
69 |
+
|
70 |
+
pos2 = pos + crop_size
|
71 |
+
|
72 |
+
if img_size < pos2:
|
73 |
+
crop_size -= (pos2 - img_size) * 2
|
74 |
+
pos2 = img_size
|
75 |
+
pos = pos2 - crop_size
|
76 |
+
|
77 |
+
return pos, pos2, crop_size
|
78 |
+
|
79 |
+
def retargeting(delta_out, driving_exp, factor, idxes):
|
80 |
+
for idx in idxes:
|
81 |
+
#delta_out[0, idx] -= src_exp[0, idx] * factor
|
82 |
+
delta_out[0, idx] += driving_exp[0, idx] * factor
|
83 |
+
|
84 |
+
class PreparedSrcImg:
|
85 |
+
def __init__(self, src_rgb, crop_trans_m, x_s_info, f_s_user, x_s_user, mask_ori):
|
86 |
+
self.src_rgb = src_rgb
|
87 |
+
self.crop_trans_m = crop_trans_m
|
88 |
+
self.x_s_info = x_s_info
|
89 |
+
self.f_s_user = f_s_user
|
90 |
+
self.x_s_user = x_s_user
|
91 |
+
self.mask_ori = mask_ori
|
92 |
+
|
93 |
+
import requests
|
94 |
+
from tqdm import tqdm
|
95 |
+
|
96 |
+
class LP_Engine:
|
97 |
+
pipeline = None
|
98 |
+
detect_model = None
|
99 |
+
mask_img = None
|
100 |
+
temp_img_idx = 0
|
101 |
+
|
102 |
+
def get_temp_img_name(self):
|
103 |
+
self.temp_img_idx += 1
|
104 |
+
return "expression_edit_preview" + str(self.temp_img_idx) + ".png"
|
105 |
+
|
106 |
+
def download_model(_, file_path, model_url):
|
107 |
+
print('AdvancedLivePortrait: Downloading model...')
|
108 |
+
response = requests.get(model_url, stream=True)
|
109 |
+
try:
|
110 |
+
if response.status_code == 200:
|
111 |
+
total_size = int(response.headers.get('content-length', 0))
|
112 |
+
block_size = 1024 # 1 Kibibyte
|
113 |
+
|
114 |
+
# tqdm will display a progress bar
|
115 |
+
with open(file_path, 'wb') as file, tqdm(
|
116 |
+
desc='Downloading',
|
117 |
+
total=total_size,
|
118 |
+
unit='iB',
|
119 |
+
unit_scale=True,
|
120 |
+
unit_divisor=1024,
|
121 |
+
) as bar:
|
122 |
+
for data in response.iter_content(block_size):
|
123 |
+
bar.update(len(data))
|
124 |
+
file.write(data)
|
125 |
+
|
126 |
+
except requests.exceptions.RequestException as err:
|
127 |
+
print(f'AdvancedLivePortrait: Model download failed: {err}')
|
128 |
+
print(f'AdvancedLivePortrait: Download it manually from: {model_url}')
|
129 |
+
print(f'AdvancedLivePortrait: And put it in {file_path}')
|
130 |
+
except Exception as e:
|
131 |
+
print(f'AdvancedLivePortrait: An unexpected error occurred: {e}')
|
132 |
+
|
133 |
+
def remove_ddp_dumplicate_key(_, state_dict):
|
134 |
+
state_dict_new = OrderedDict()
|
135 |
+
for key in state_dict.keys():
|
136 |
+
state_dict_new[key.replace('module.', '')] = state_dict[key]
|
137 |
+
return state_dict_new
|
138 |
+
|
139 |
+
def filter_for_model(_, checkpoint, prefix):
|
140 |
+
filtered_checkpoint = {key.replace(prefix + "_module.", ""): value for key, value in checkpoint.items() if
|
141 |
+
key.startswith(prefix)}
|
142 |
+
return filtered_checkpoint
|
143 |
+
|
144 |
+
def load_model(self, model_config, model_type):
|
145 |
+
|
146 |
+
device = get_device()
|
147 |
+
|
148 |
+
if model_type == 'stitching_retargeting_module':
|
149 |
+
ckpt_path = os.path.join(get_model_dir("liveportrait"), "retargeting_models", model_type + ".pth")
|
150 |
+
else:
|
151 |
+
ckpt_path = os.path.join(get_model_dir("liveportrait"), "base_models", model_type + ".pth")
|
152 |
+
|
153 |
+
is_safetensors = None
|
154 |
+
if os.path.isfile(ckpt_path) == False:
|
155 |
+
is_safetensors = True
|
156 |
+
ckpt_path = os.path.join(get_model_dir("liveportrait"), model_type + ".safetensors")
|
157 |
+
if os.path.isfile(ckpt_path) == False:
|
158 |
+
self.download_model(ckpt_path,
|
159 |
+
"https://huggingface.co/Kijai/LivePortrait_safetensors/resolve/main/" + model_type + ".safetensors")
|
160 |
+
model_params = model_config['model_params'][f'{model_type}_params']
|
161 |
+
if model_type == 'appearance_feature_extractor':
|
162 |
+
model = AppearanceFeatureExtractor(**model_params).to(device)
|
163 |
+
elif model_type == 'motion_extractor':
|
164 |
+
model = MotionExtractor(**model_params).to(device)
|
165 |
+
elif model_type == 'warping_module':
|
166 |
+
model = WarpingNetwork(**model_params).to(device)
|
167 |
+
elif model_type == 'spade_generator':
|
168 |
+
model = SPADEDecoder(**model_params).to(device)
|
169 |
+
elif model_type == 'stitching_retargeting_module':
|
170 |
+
# Special handling for stitching and retargeting module
|
171 |
+
config = model_config['model_params']['stitching_retargeting_module_params']
|
172 |
+
checkpoint = comfy.utils.load_torch_file(ckpt_path)
|
173 |
+
|
174 |
+
stitcher = StitchingRetargetingNetwork(**config.get('stitching'))
|
175 |
+
if is_safetensors:
|
176 |
+
stitcher.load_state_dict(self.filter_for_model(checkpoint, 'retarget_shoulder'))
|
177 |
+
else:
|
178 |
+
stitcher.load_state_dict(self.remove_ddp_dumplicate_key(checkpoint['retarget_shoulder']))
|
179 |
+
stitcher = stitcher.to(device)
|
180 |
+
stitcher.eval()
|
181 |
+
|
182 |
+
return {
|
183 |
+
'stitching': stitcher,
|
184 |
+
}
|
185 |
+
else:
|
186 |
+
raise ValueError(f"Unknown model type: {model_type}")
|
187 |
+
|
188 |
+
|
189 |
+
model.load_state_dict(comfy.utils.load_torch_file(ckpt_path))
|
190 |
+
model.eval()
|
191 |
+
return model
|
192 |
+
|
193 |
+
def load_models(self):
|
194 |
+
model_path = get_model_dir("liveportrait")
|
195 |
+
if not os.path.exists(model_path):
|
196 |
+
os.mkdir(model_path)
|
197 |
+
|
198 |
+
model_config_path = os.path.join(current_directory, 'LivePortrait', 'config', 'models.yaml')
|
199 |
+
model_config = yaml.safe_load(open(model_config_path, 'r'))
|
200 |
+
|
201 |
+
appearance_feature_extractor = self.load_model(model_config, 'appearance_feature_extractor')
|
202 |
+
motion_extractor = self.load_model(model_config, 'motion_extractor')
|
203 |
+
warping_module = self.load_model(model_config, 'warping_module')
|
204 |
+
spade_generator = self.load_model(model_config, 'spade_generator')
|
205 |
+
stitching_retargeting_module = self.load_model(model_config, 'stitching_retargeting_module')
|
206 |
+
|
207 |
+
self.pipeline = LivePortraitWrapper(InferenceConfig(), appearance_feature_extractor, motion_extractor, warping_module, spade_generator, stitching_retargeting_module)
|
208 |
+
|
209 |
+
def get_detect_model(self):
|
210 |
+
if self.detect_model == None:
|
211 |
+
model_dir = get_model_dir("ultralytics")
|
212 |
+
if not os.path.exists(model_dir): os.mkdir(model_dir)
|
213 |
+
model_path = os.path.join(model_dir, "face_yolov8n.pt")
|
214 |
+
if not os.path.exists(model_path):
|
215 |
+
self.download_model(model_path, "https://huggingface.co/Bingsu/adetailer/resolve/main/face_yolov8n.pt")
|
216 |
+
self.detect_model = YOLO(model_path)
|
217 |
+
|
218 |
+
return self.detect_model
|
219 |
+
|
220 |
+
def get_face_bboxes(self, image_rgb):
|
221 |
+
detect_model = self.get_detect_model()
|
222 |
+
pred = detect_model(image_rgb, conf=0.7, device="")
|
223 |
+
return pred[0].boxes.xyxy.cpu().numpy()
|
224 |
+
|
225 |
+
def detect_face(self, image_rgb, crop_factor, sort = True):
|
226 |
+
bboxes = self.get_face_bboxes(image_rgb)
|
227 |
+
w, h = get_rgb_size(image_rgb)
|
228 |
+
|
229 |
+
print(f"w, h:{w, h}")
|
230 |
+
|
231 |
+
cx = w / 2
|
232 |
+
min_diff = w
|
233 |
+
best_box = None
|
234 |
+
for x1, y1, x2, y2 in bboxes:
|
235 |
+
bbox_w = x2 - x1
|
236 |
+
if bbox_w < 30: continue
|
237 |
+
diff = abs(cx - (x1 + bbox_w / 2))
|
238 |
+
if diff < min_diff:
|
239 |
+
best_box = [x1, y1, x2, y2]
|
240 |
+
print(f"diff, min_diff, best_box:{diff, min_diff, best_box}")
|
241 |
+
min_diff = diff
|
242 |
+
|
243 |
+
if best_box == None:
|
244 |
+
print("Failed to detect face!!")
|
245 |
+
return [0, 0, w, h]
|
246 |
+
|
247 |
+
x1, y1, x2, y2 = best_box
|
248 |
+
|
249 |
+
#for x1, y1, x2, y2 in bboxes:
|
250 |
+
bbox_w = x2 - x1
|
251 |
+
bbox_h = y2 - y1
|
252 |
+
|
253 |
+
crop_w = bbox_w * crop_factor
|
254 |
+
crop_h = bbox_h * crop_factor
|
255 |
+
|
256 |
+
crop_w = max(crop_h, crop_w)
|
257 |
+
crop_h = crop_w
|
258 |
+
|
259 |
+
kernel_x = int(x1 + bbox_w / 2)
|
260 |
+
kernel_y = int(y1 + bbox_h / 2)
|
261 |
+
|
262 |
+
new_x1 = int(kernel_x - crop_w / 2)
|
263 |
+
new_x2 = int(kernel_x + crop_w / 2)
|
264 |
+
new_y1 = int(kernel_y - crop_h / 2)
|
265 |
+
new_y2 = int(kernel_y + crop_h / 2)
|
266 |
+
|
267 |
+
if not sort:
|
268 |
+
return [int(new_x1), int(new_y1), int(new_x2), int(new_y2)]
|
269 |
+
|
270 |
+
if new_x1 < 0:
|
271 |
+
new_x2 -= new_x1
|
272 |
+
new_x1 = 0
|
273 |
+
elif w < new_x2:
|
274 |
+
new_x1 -= (new_x2 - w)
|
275 |
+
new_x2 = w
|
276 |
+
if new_x1 < 0:
|
277 |
+
new_x2 -= new_x1
|
278 |
+
new_x1 = 0
|
279 |
+
|
280 |
+
if new_y1 < 0:
|
281 |
+
new_y2 -= new_y1
|
282 |
+
new_y1 = 0
|
283 |
+
elif h < new_y2:
|
284 |
+
new_y1 -= (new_y2 - h)
|
285 |
+
new_y2 = h
|
286 |
+
if new_y1 < 0:
|
287 |
+
new_y2 -= new_y1
|
288 |
+
new_y1 = 0
|
289 |
+
|
290 |
+
if w < new_x2 and h < new_y2:
|
291 |
+
over_x = new_x2 - w
|
292 |
+
over_y = new_y2 - h
|
293 |
+
over_min = min(over_x, over_y)
|
294 |
+
new_x2 -= over_min
|
295 |
+
new_y2 -= over_min
|
296 |
+
|
297 |
+
return [int(new_x1), int(new_y1), int(new_x2), int(new_y2)]
|
298 |
+
|
299 |
+
|
300 |
+
def calc_face_region(self, square, dsize):
|
301 |
+
region = copy.deepcopy(square)
|
302 |
+
is_changed = False
|
303 |
+
if dsize[0] < region[2]:
|
304 |
+
region[2] = dsize[0]
|
305 |
+
is_changed = True
|
306 |
+
if dsize[1] < region[3]:
|
307 |
+
region[3] = dsize[1]
|
308 |
+
is_changed = True
|
309 |
+
|
310 |
+
return region, is_changed
|
311 |
+
|
312 |
+
def expand_img(self, rgb_img, square):
|
313 |
+
#new_img = rgb_crop(rgb_img, face_region)
|
314 |
+
crop_trans_m = create_transform_matrix(max(-square[0], 0), max(-square[1], 0), 1, 1)
|
315 |
+
new_img = cv2.warpAffine(rgb_img, crop_trans_m, (square[2] - square[0], square[3] - square[1]),
|
316 |
+
cv2.INTER_LINEAR)
|
317 |
+
return new_img
|
318 |
+
|
319 |
+
def get_pipeline(self):
|
320 |
+
if self.pipeline == None:
|
321 |
+
print("Load pipeline...")
|
322 |
+
self.load_models()
|
323 |
+
|
324 |
+
return self.pipeline
|
325 |
+
|
326 |
+
def prepare_src_image(self, img):
|
327 |
+
h, w = img.shape[:2]
|
328 |
+
input_shape = [256,256]
|
329 |
+
if h != input_shape[0] or w != input_shape[1]:
|
330 |
+
if 256 < h: interpolation = cv2.INTER_AREA
|
331 |
+
else: interpolation = cv2.INTER_LINEAR
|
332 |
+
x = cv2.resize(img, (input_shape[0], input_shape[1]), interpolation = interpolation)
|
333 |
+
else:
|
334 |
+
x = img.copy()
|
335 |
+
|
336 |
+
if x.ndim == 3:
|
337 |
+
x = x[np.newaxis].astype(np.float32) / 255. # HxWx3 -> 1xHxWx3, normalized to 0~1
|
338 |
+
elif x.ndim == 4:
|
339 |
+
x = x.astype(np.float32) / 255. # BxHxWx3, normalized to 0~1
|
340 |
+
else:
|
341 |
+
raise ValueError(f'img ndim should be 3 or 4: {x.ndim}')
|
342 |
+
x = np.clip(x, 0, 1) # clip to 0~1
|
343 |
+
x = torch.from_numpy(x).permute(0, 3, 1, 2) # 1xHxWx3 -> 1x3xHxW
|
344 |
+
x = x.to(get_device())
|
345 |
+
return x
|
346 |
+
|
347 |
+
def GetMaskImg(self):
|
348 |
+
if self.mask_img is None:
|
349 |
+
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "./LivePortrait/utils/resources/mask_template.png")
|
350 |
+
self.mask_img = cv2.imread(path, cv2.IMREAD_COLOR)
|
351 |
+
return self.mask_img
|
352 |
+
|
353 |
+
def crop_face(self, img_rgb, crop_factor):
|
354 |
+
crop_region = self.detect_face(img_rgb, crop_factor)
|
355 |
+
face_region, is_changed = self.calc_face_region(crop_region, get_rgb_size(img_rgb))
|
356 |
+
face_img = rgb_crop(img_rgb, face_region)
|
357 |
+
if is_changed: face_img = self.expand_img(face_img, crop_region)
|
358 |
+
return face_img
|
359 |
+
|
360 |
+
def prepare_source(self, source_image, crop_factor, is_video = False, tracking = False):
|
361 |
+
print("Prepare source...")
|
362 |
+
engine = self.get_pipeline()
|
363 |
+
source_image_np = (source_image * 255).byte().numpy()
|
364 |
+
img_rgb = source_image_np[0]
|
365 |
+
|
366 |
+
psi_list = []
|
367 |
+
for img_rgb in source_image_np:
|
368 |
+
if tracking or len(psi_list) == 0:
|
369 |
+
crop_region = self.detect_face(img_rgb, crop_factor)
|
370 |
+
face_region, is_changed = self.calc_face_region(crop_region, get_rgb_size(img_rgb))
|
371 |
+
|
372 |
+
s_x = (face_region[2] - face_region[0]) / 512.
|
373 |
+
s_y = (face_region[3] - face_region[1]) / 512.
|
374 |
+
crop_trans_m = create_transform_matrix(crop_region[0], crop_region[1], s_x, s_y)
|
375 |
+
mask_ori = cv2.warpAffine(self.GetMaskImg(), crop_trans_m, get_rgb_size(img_rgb), cv2.INTER_LINEAR)
|
376 |
+
mask_ori = mask_ori.astype(np.float32) / 255.
|
377 |
+
|
378 |
+
if is_changed:
|
379 |
+
s = (crop_region[2] - crop_region[0]) / 512.
|
380 |
+
crop_trans_m = create_transform_matrix(crop_region[0], crop_region[1], s, s)
|
381 |
+
|
382 |
+
face_img = rgb_crop(img_rgb, face_region)
|
383 |
+
if is_changed: face_img = self.expand_img(face_img, crop_region)
|
384 |
+
i_s = self.prepare_src_image(face_img)
|
385 |
+
x_s_info = engine.get_kp_info(i_s)
|
386 |
+
f_s_user = engine.extract_feature_3d(i_s)
|
387 |
+
x_s_user = engine.transform_keypoint(x_s_info)
|
388 |
+
psi = PreparedSrcImg(img_rgb, crop_trans_m, x_s_info, f_s_user, x_s_user, mask_ori)
|
389 |
+
if is_video == False:
|
390 |
+
return psi
|
391 |
+
psi_list.append(psi)
|
392 |
+
|
393 |
+
return psi_list
|
394 |
+
|
395 |
+
def prepare_driving_video(self, face_images):
|
396 |
+
print("Prepare driving video...")
|
397 |
+
pipeline = self.get_pipeline()
|
398 |
+
f_img_np = (face_images * 255).byte().numpy()
|
399 |
+
|
400 |
+
out_list = []
|
401 |
+
for f_img in f_img_np:
|
402 |
+
i_d = self.prepare_src_image(f_img)
|
403 |
+
d_info = pipeline.get_kp_info(i_d)
|
404 |
+
out_list.append(d_info)
|
405 |
+
|
406 |
+
return out_list
|
407 |
+
|
408 |
+
def calc_fe(_, x_d_new, eyes, eyebrow, wink, pupil_x, pupil_y, mouth, eee, woo, smile,
|
409 |
+
rotate_pitch, rotate_yaw, rotate_roll):
|
410 |
+
|
411 |
+
x_d_new[0, 20, 1] += smile * -0.01
|
412 |
+
x_d_new[0, 14, 1] += smile * -0.02
|
413 |
+
x_d_new[0, 17, 1] += smile * 0.0065
|
414 |
+
x_d_new[0, 17, 2] += smile * 0.003
|
415 |
+
x_d_new[0, 13, 1] += smile * -0.00275
|
416 |
+
x_d_new[0, 16, 1] += smile * -0.00275
|
417 |
+
x_d_new[0, 3, 1] += smile * -0.0035
|
418 |
+
x_d_new[0, 7, 1] += smile * -0.0035
|
419 |
+
|
420 |
+
x_d_new[0, 19, 1] += mouth * 0.001
|
421 |
+
x_d_new[0, 19, 2] += mouth * 0.0001
|
422 |
+
x_d_new[0, 17, 1] += mouth * -0.0001
|
423 |
+
rotate_pitch -= mouth * 0.05
|
424 |
+
|
425 |
+
x_d_new[0, 20, 2] += eee * -0.001
|
426 |
+
x_d_new[0, 20, 1] += eee * -0.001
|
427 |
+
#x_d_new[0, 19, 1] += eee * 0.0006
|
428 |
+
x_d_new[0, 14, 1] += eee * -0.001
|
429 |
+
|
430 |
+
x_d_new[0, 14, 1] += woo * 0.001
|
431 |
+
x_d_new[0, 3, 1] += woo * -0.0005
|
432 |
+
x_d_new[0, 7, 1] += woo * -0.0005
|
433 |
+
x_d_new[0, 17, 2] += woo * -0.0005
|
434 |
+
|
435 |
+
x_d_new[0, 11, 1] += wink * 0.001
|
436 |
+
x_d_new[0, 13, 1] += wink * -0.0003
|
437 |
+
x_d_new[0, 17, 0] += wink * 0.0003
|
438 |
+
x_d_new[0, 17, 1] += wink * 0.0003
|
439 |
+
x_d_new[0, 3, 1] += wink * -0.0003
|
440 |
+
rotate_roll -= wink * 0.1
|
441 |
+
rotate_yaw -= wink * 0.1
|
442 |
+
|
443 |
+
if 0 < pupil_x:
|
444 |
+
x_d_new[0, 11, 0] += pupil_x * 0.0007
|
445 |
+
x_d_new[0, 15, 0] += pupil_x * 0.001
|
446 |
+
else:
|
447 |
+
x_d_new[0, 11, 0] += pupil_x * 0.001
|
448 |
+
x_d_new[0, 15, 0] += pupil_x * 0.0007
|
449 |
+
|
450 |
+
x_d_new[0, 11, 1] += pupil_y * -0.001
|
451 |
+
x_d_new[0, 15, 1] += pupil_y * -0.001
|
452 |
+
eyes -= pupil_y / 2.
|
453 |
+
|
454 |
+
x_d_new[0, 11, 1] += eyes * -0.001
|
455 |
+
x_d_new[0, 13, 1] += eyes * 0.0003
|
456 |
+
x_d_new[0, 15, 1] += eyes * -0.001
|
457 |
+
x_d_new[0, 16, 1] += eyes * 0.0003
|
458 |
+
x_d_new[0, 1, 1] += eyes * -0.00025
|
459 |
+
x_d_new[0, 2, 1] += eyes * 0.00025
|
460 |
+
|
461 |
+
|
462 |
+
if 0 < eyebrow:
|
463 |
+
x_d_new[0, 1, 1] += eyebrow * 0.001
|
464 |
+
x_d_new[0, 2, 1] += eyebrow * -0.001
|
465 |
+
else:
|
466 |
+
x_d_new[0, 1, 0] += eyebrow * -0.001
|
467 |
+
x_d_new[0, 2, 0] += eyebrow * 0.001
|
468 |
+
x_d_new[0, 1, 1] += eyebrow * 0.0003
|
469 |
+
x_d_new[0, 2, 1] += eyebrow * -0.0003
|
470 |
+
|
471 |
+
|
472 |
+
return torch.Tensor([rotate_pitch, rotate_yaw, rotate_roll])
|
473 |
+
g_engine = LP_Engine()
|
474 |
+
|
475 |
+
class ExpressionSet:
|
476 |
+
def __init__(self, erst = None, es = None):
|
477 |
+
if es != None:
|
478 |
+
self.e = copy.deepcopy(es.e) # [:, :, :]
|
479 |
+
self.r = copy.deepcopy(es.r) # [:]
|
480 |
+
self.s = copy.deepcopy(es.s)
|
481 |
+
self.t = copy.deepcopy(es.t)
|
482 |
+
elif erst != None:
|
483 |
+
self.e = erst[0]
|
484 |
+
self.r = erst[1]
|
485 |
+
self.s = erst[2]
|
486 |
+
self.t = erst[3]
|
487 |
+
else:
|
488 |
+
self.e = torch.from_numpy(np.zeros((1, 21, 3))).float().to(get_device())
|
489 |
+
self.r = torch.Tensor([0, 0, 0])
|
490 |
+
self.s = 0
|
491 |
+
self.t = 0
|
492 |
+
def div(self, value):
|
493 |
+
self.e /= value
|
494 |
+
self.r /= value
|
495 |
+
self.s /= value
|
496 |
+
self.t /= value
|
497 |
+
def add(self, other):
|
498 |
+
self.e += other.e
|
499 |
+
self.r += other.r
|
500 |
+
self.s += other.s
|
501 |
+
self.t += other.t
|
502 |
+
def sub(self, other):
|
503 |
+
self.e -= other.e
|
504 |
+
self.r -= other.r
|
505 |
+
self.s -= other.s
|
506 |
+
self.t -= other.t
|
507 |
+
def mul(self, value):
|
508 |
+
self.e *= value
|
509 |
+
self.r *= value
|
510 |
+
self.s *= value
|
511 |
+
self.t *= value
|
512 |
+
|
513 |
+
#def apply_ratio(self, ratio): self.exp *= ratio
|
514 |
+
|
515 |
+
def logging_time(original_fn):
|
516 |
+
def wrapper_fn(*args, **kwargs):
|
517 |
+
start_time = time.time()
|
518 |
+
result = original_fn(*args, **kwargs)
|
519 |
+
end_time = time.time()
|
520 |
+
print("WorkingTime[{}]: {} sec".format(original_fn.__name__, end_time - start_time))
|
521 |
+
return result
|
522 |
+
|
523 |
+
return wrapper_fn
|
524 |
+
|
525 |
+
|
526 |
+
#exp_data_dir = os.path.join(current_directory, "exp_data")
|
527 |
+
exp_data_dir = os.path.join(folder_paths.output_directory, "exp_data")
|
528 |
+
if os.path.isdir(exp_data_dir) == False:
|
529 |
+
os.mkdir(exp_data_dir)
|
530 |
+
class SaveExpData:
|
531 |
+
@classmethod
|
532 |
+
def INPUT_TYPES(s):
|
533 |
+
return {"required": {
|
534 |
+
"file_name": ("STRING", {"multiline": False, "default": ""}),
|
535 |
+
},
|
536 |
+
"optional": {"save_exp": ("EXP_DATA",), }
|
537 |
+
}
|
538 |
+
|
539 |
+
RETURN_TYPES = ("STRING",)
|
540 |
+
RETURN_NAMES = ("file_name",)
|
541 |
+
FUNCTION = "run"
|
542 |
+
CATEGORY = "AdvancedLivePortrait"
|
543 |
+
OUTPUT_NODE = True
|
544 |
+
|
545 |
+
def run(self, file_name, save_exp:ExpressionSet=None):
|
546 |
+
if save_exp == None or file_name == "":
|
547 |
+
return file_name
|
548 |
+
|
549 |
+
with open(os.path.join(exp_data_dir, file_name + ".exp"), "wb") as f:
|
550 |
+
dill.dump(save_exp, f)
|
551 |
+
|
552 |
+
return file_name
|
553 |
+
|
554 |
+
class LoadExpData:
    @classmethod
    def INPUT_TYPES(s):
        file_list = [os.path.splitext(file)[0] for file in os.listdir(exp_data_dir) if file.endswith('.exp')]
        return {"required": {
            "file_name": (sorted(file_list, key=str.lower),),
            "ratio": ("FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.01}),
        },
        }

    RETURN_TYPES = ("EXP_DATA",)
    RETURN_NAMES = ("exp",)
    FUNCTION = "run"
    CATEGORY = "AdvancedLivePortrait"

    def run(self, file_name, ratio):
        # es = ExpressionSet()
        with open(os.path.join(exp_data_dir, file_name + ".exp"), 'rb') as f:
            es = dill.load(f)
        es.mul(ratio)
        return (es,)

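# ExpData edits individual expression keypoints directly. Each code packs a keypoint
# index and an axis as index*10 + axis (0=x, 1=y, 2=z); for example, code 151 targets
# keypoint 15 on the y axis. The paired value is scaled by 0.001 before being added.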
class ExpData:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":{
            #"code": ("STRING", {"multiline": False, "default": ""}),
            "code1": ("INT", {"default": 0}),
            "value1": ("FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}),
            "code2": ("INT", {"default": 0}),
            "value2": ("FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}),
            "code3": ("INT", {"default": 0}),
            "value3": ("FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}),
            "code4": ("INT", {"default": 0}),
            "value4": ("FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}),
            "code5": ("INT", {"default": 0}),
            "value5": ("FLOAT", {"default": 0, "min": -100, "max": 100, "step": 0.1}),
        },
            "optional":{"add_exp": ("EXP_DATA",),}
        }

    RETURN_TYPES = ("EXP_DATA",)
    RETURN_NAMES = ("exp",)
    FUNCTION = "run"
    CATEGORY = "AdvancedLivePortrait"

    def run(self, code1, value1, code2, value2, code3, value3, code4, value4, code5, value5, add_exp=None):
        if add_exp == None:
            es = ExpressionSet()
        else:
            es = ExpressionSet(es = add_exp)

        codes = [code1, code2, code3, code4, code5]
        values = [value1, value2, value3, value4, value5]
        for i in range(5):
            idx = int(codes[i] / 10)
            r = codes[i] % 10
            es.e[0, idx, r] += values[i] * 0.001

        return (es,)

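# PrintExpData is a debugging node: it prints the expression components whose
# magnitude (x1000) exceeds cut_noise as [code, value] pairs, using the same
# index*10 + axis codes that ExpData accepts.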
class PrintExpData:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "cut_noise": ("FLOAT", {"default": 0, "min": 0, "max": 100, "step": 0.1}),
        },
            "optional": {"exp": ("EXP_DATA",), }
        }

    RETURN_TYPES = ("EXP_DATA",)
    RETURN_NAMES = ("exp",)
    FUNCTION = "run"
    CATEGORY = "AdvancedLivePortrait"
    OUTPUT_NODE = True

    def run(self, cut_noise, exp = None):
        if exp == None: return (exp,)

        cuted_list = []
        e = exp.e * 1000  # ExpressionSet stores its expression tensor as .e (there is no .exp attribute)
        for idx in range(21):
            for r in range(3):
                a = abs(e[0, idx, r])
                if(cut_noise < a): cuted_list.append((a, e[0, idx, r], idx*10+r))

        sorted_list = sorted(cuted_list, reverse=True, key=lambda item: item[0])
        print(f"sorted_list: {[[item[2], round(float(item[1]),1)] for item in sorted_list]}")
        return (exp,)

class Command:
    def __init__(self, es, change, keep):
        self.es:ExpressionSet = es
        self.change = change
        self.keep = keep

crop_factor_default = 1.7
crop_factor_min = 1.5
crop_factor_max = 2.5

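# AdvancedLivePortrait animates the source image(s) frame by frame, combining
# expressions scheduled by the `command` string (see parsing_command below) with
# optional driving-video motion, then composites each edited crop back onto the
# original image.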
class AdvancedLivePortrait:
    def __init__(self):
        self.src_images = None
        self.driving_images = None
        self.pbar = comfy.utils.ProgressBar(1)
        self.crop_factor = None

    @classmethod
    def INPUT_TYPES(s):

        return {
            "required": {
                "retargeting_eyes": ("FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.01}),
                "retargeting_mouth": ("FLOAT", {"default": 0, "min": 0, "max": 1, "step": 0.01}),
                "crop_factor": ("FLOAT", {"default": crop_factor_default,
                                          "min": crop_factor_min, "max": crop_factor_max, "step": 0.1}),
                "turn_on": ("BOOLEAN", {"default": True}),
                "tracking_src_vid": ("BOOLEAN", {"default": False}),
                "animate_without_vid": ("BOOLEAN", {"default": False}),
                "command": ("STRING", {"multiline": True, "default": ""}),
            },
            "optional": {
                "src_images": ("IMAGE",),
                "motion_link": ("EDITOR_LINK",),
                "driving_images": ("IMAGE",),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("images",)
    FUNCTION = "run"
    OUTPUT_NODE = True
    CATEGORY = "AdvancedLivePortrait"

    # INPUT_IS_LIST = False
    # OUTPUT_IS_LIST = (False,)

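    # The multiline `command` string schedules expressions over time. Each line is
    #   <source_index>=<transition_frames>:<hold_frames>
    # where source_index 0 means a neutral ExpressionSet and N>0 picks motion_link[N]
    # (the Nth linked Expression Editor). For example (illustrative values):
    #   1=20:10
    #   2=15:5
    # blends to expression 1 over 20 frames, holds 10, then blends to expression 2.
    # Spaces are stripped and blank lines are ignored.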
    def parsing_command(self, command, motion_link):
        command = command.replace(' ', '')  # str.replace returns a new string; assign it so spaces are actually stripped
        # if command == '': return
        lines = command.split('\n')

        cmd_list = []

        total_length = 0

        i = 0
        #old_es = None
        for line in lines:
            i += 1
            if line == '': continue
            try:
                cmds = line.split('=')
                idx = int(cmds[0])
                if idx == 0: es = ExpressionSet()
                else: es = ExpressionSet(es = motion_link[idx])
                cmds = cmds[1].split(':')
                change = int(cmds[0])
                keep = int(cmds[1])
            except:
                assert False, f"(AdvancedLivePortrait) Command Err Line {i}: {line}"

                return None, None

            total_length += change + keep
            es.div(change)
            cmd_list.append(Command(es, change, keep))

        return cmd_list, total_length


    def run(self, retargeting_eyes, retargeting_mouth, turn_on, tracking_src_vid, animate_without_vid, command, crop_factor,
            src_images=None, driving_images=None, motion_link=None):
        if turn_on == False: return (None,None)
        src_length = 1

        if src_images == None:
            if motion_link != None:
                self.psi_list = [motion_link[0]]
            else: return (None,None)

        if src_images != None:
            src_length = len(src_images)
            if id(src_images) != id(self.src_images) or self.crop_factor != crop_factor:
                self.crop_factor = crop_factor
                self.src_images = src_images
                if 1 < src_length:
                    self.psi_list = g_engine.prepare_source(src_images, crop_factor, True, tracking_src_vid)
                else:
                    self.psi_list = [g_engine.prepare_source(src_images, crop_factor)]

        cmd_list, cmd_length = self.parsing_command(command, motion_link)
        if cmd_list == None: return (None,None)
        cmd_idx = 0

        driving_length = 0
        if driving_images is not None:
            if id(driving_images) != id(self.driving_images):
                self.driving_images = driving_images
                self.driving_values = g_engine.prepare_driving_video(driving_images)
            driving_length = len(self.driving_values)

        total_length = max(driving_length, src_length)

        if animate_without_vid:
            total_length = max(total_length, cmd_length)

        c_i_es = ExpressionSet()
        c_o_es = ExpressionSet()
        d_0_es = None
        out_list = []

        psi = None
        pipeline = g_engine.get_pipeline()
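        # Per-frame loop: start from the source keypoints, add the scheduled command
        # expression (interpolated through c_i_es / c_o_es), add driving-video deltas relative
        # to its first frame, then warp, decode and paste the crop back into the full image.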
        for i in range(total_length):

            if i < src_length:
                psi = self.psi_list[i]
                s_info = psi.x_s_info
                s_es = ExpressionSet(erst=(s_info['kp'] + s_info['exp'], torch.Tensor([0, 0, 0]), s_info['scale'], s_info['t']))

            new_es = ExpressionSet(es = s_es)

            if i < cmd_length:
                cmd = cmd_list[cmd_idx]
                if 0 < cmd.change:
                    cmd.change -= 1
                    c_i_es.add(cmd.es)
                    c_i_es.sub(c_o_es)
                elif 0 < cmd.keep:
                    cmd.keep -= 1

                new_es.add(c_i_es)

                if cmd.change == 0 and cmd.keep == 0:
                    cmd_idx += 1
                    if cmd_idx < len(cmd_list):
                        c_o_es = ExpressionSet(es = c_i_es)
                        cmd = cmd_list[cmd_idx]
                        c_o_es.div(cmd.change)
            elif 0 < cmd_length:
                new_es.add(c_i_es)

            if i < driving_length:
                d_i_info = self.driving_values[i]
                d_i_r = torch.Tensor([d_i_info['pitch'], d_i_info['yaw'], d_i_info['roll']])#.float().to(device="cuda:0")

                if d_0_es is None:
                    d_0_es = ExpressionSet(erst = (d_i_info['exp'], d_i_r, d_i_info['scale'], d_i_info['t']))

                    retargeting(s_es.e, d_0_es.e, retargeting_eyes, (11, 13, 15, 16))
                    retargeting(s_es.e, d_0_es.e, retargeting_mouth, (14, 17, 19, 20))

                new_es.e += d_i_info['exp'] - d_0_es.e
                new_es.r += d_i_r - d_0_es.r
                new_es.t += d_i_info['t'] - d_0_es.t

            r_new = get_rotation_matrix(
                s_info['pitch'] + new_es.r[0], s_info['yaw'] + new_es.r[1], s_info['roll'] + new_es.r[2])
            d_new = new_es.s * (new_es.e @ r_new) + new_es.t
            d_new = pipeline.stitching(psi.x_s_user, d_new)
            crop_out = pipeline.warp_decode(psi.f_s_user, psi.x_s_user, d_new)
            crop_out = pipeline.parse_output(crop_out['out'])[0]

            crop_with_fullsize = cv2.warpAffine(crop_out, psi.crop_trans_m, get_rgb_size(psi.src_rgb),
                                                cv2.INTER_LINEAR)
            out = np.clip(psi.mask_ori * crop_with_fullsize + (1 - psi.mask_ori) * psi.src_rgb, 0, 255).astype(
                np.uint8)
            out_list.append(out)

            self.pbar.update_absolute(i+1, total_length, ("PNG", Image.fromarray(crop_out), None))

        if len(out_list) == 0: return (None,)

        out_imgs = torch.cat([pil2tensor(img_rgb) for img_rgb in out_list])
        return (out_imgs,)

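# ExpressionEditor edits a single frame interactively: the sliders (and optionally a
# sample image or an add_exp preset) are converted into an ExpressionSet that can be
# chained to other nodes through motion_link and saved with SaveExpData.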
class ExpressionEditor:
    def __init__(self):
        self.sample_image = None
        self.src_image = None
        self.crop_factor = None

    @classmethod
    def INPUT_TYPES(s):
        display = "number"
        #display = "slider"
        return {
            "required": {

                "rotate_pitch": ("FLOAT", {"default": 0, "min": -20, "max": 20, "step": 0.5, "display": display}),
                "rotate_yaw": ("FLOAT", {"default": 0, "min": -20, "max": 20, "step": 0.5, "display": display}),
                "rotate_roll": ("FLOAT", {"default": 0, "min": -20, "max": 20, "step": 0.5, "display": display}),

                "blink": ("FLOAT", {"default": 0, "min": -20, "max": 5, "step": 0.5, "display": display}),
                "eyebrow": ("FLOAT", {"default": 0, "min": -10, "max": 15, "step": 0.5, "display": display}),
                "wink": ("FLOAT", {"default": 0, "min": 0, "max": 25, "step": 0.5, "display": display}),
                "pupil_x": ("FLOAT", {"default": 0, "min": -15, "max": 15, "step": 0.5, "display": display}),
                "pupil_y": ("FLOAT", {"default": 0, "min": -15, "max": 15, "step": 0.5, "display": display}),
                "aaa": ("FLOAT", {"default": 0, "min": -30, "max": 120, "step": 1, "display": display}),
                "eee": ("FLOAT", {"default": 0, "min": -20, "max": 15, "step": 0.2, "display": display}),
                "woo": ("FLOAT", {"default": 0, "min": -20, "max": 15, "step": 0.2, "display": display}),
                "smile": ("FLOAT", {"default": 0, "min": -0.3, "max": 1.3, "step": 0.01, "display": display}),

                "src_ratio": ("FLOAT", {"default": 1, "min": 0, "max": 1, "step": 0.01, "display": display}),
                "sample_ratio": ("FLOAT", {"default": 1, "min": -0.2, "max": 1.2, "step": 0.01, "display": display}),
                "sample_parts": (["OnlyExpression", "OnlyRotation", "OnlyMouth", "OnlyEyes", "All"],),
                "crop_factor": ("FLOAT", {"default": crop_factor_default,
                                          "min": crop_factor_min, "max": crop_factor_max, "step": 0.1}),
            },

            "optional": {"src_image": ("IMAGE",), "motion_link": ("EDITOR_LINK",),
                         "sample_image": ("IMAGE",), "add_exp": ("EXP_DATA",),
                         },
        }

    RETURN_TYPES = ("IMAGE", "EDITOR_LINK", "EXP_DATA")
    RETURN_NAMES = ("image", "motion_link", "save_exp")

    FUNCTION = "run"

    OUTPUT_NODE = True

    CATEGORY = "AdvancedLivePortrait"

    # INPUT_IS_LIST = False
    # OUTPUT_IS_LIST = (False,)

    def run(self, rotate_pitch, rotate_yaw, rotate_roll, blink, eyebrow, wink, pupil_x, pupil_y, aaa, eee, woo, smile,
            src_ratio, sample_ratio, sample_parts, crop_factor, src_image=None, sample_image=None, motion_link=None, add_exp=None):
        rotate_yaw = -rotate_yaw

        new_editor_link = None
        if motion_link != None:
            self.psi = motion_link[0]
            new_editor_link = motion_link.copy()
        elif src_image != None:
            if id(src_image) != id(self.src_image) or self.crop_factor != crop_factor:
                self.crop_factor = crop_factor
                self.psi = g_engine.prepare_source(src_image, crop_factor)
                self.src_image = src_image
            new_editor_link = []
            new_editor_link.append(self.psi)
        else:
            return (None,None)

        pipeline = g_engine.get_pipeline()

        psi = self.psi
        s_info = psi.x_s_info
        #delta_new = copy.deepcopy()
        s_exp = s_info['exp'] * src_ratio
        s_exp[0, 5] = s_info['exp'][0, 5]
        s_exp += s_info['kp']

        es = ExpressionSet()

        if sample_image != None:
            if id(self.sample_image) != id(sample_image):
                self.sample_image = sample_image
                d_image_np = (sample_image * 255).byte().numpy()
                d_face = g_engine.crop_face(d_image_np[0], 1.7)
                i_d = g_engine.prepare_src_image(d_face)
                self.d_info = pipeline.get_kp_info(i_d)
                self.d_info['exp'][0, 5, 0] = 0
                self.d_info['exp'][0, 5, 1] = 0

            # "OnlyExpression", "OnlyRotation", "OnlyMouth", "OnlyEyes", "All"
            if sample_parts == "OnlyExpression" or sample_parts == "All":
                es.e += self.d_info['exp'] * sample_ratio
            if sample_parts == "OnlyRotation" or sample_parts == "All":
                rotate_pitch += self.d_info['pitch'] * sample_ratio
                rotate_yaw += self.d_info['yaw'] * sample_ratio
                rotate_roll += self.d_info['roll'] * sample_ratio
            elif sample_parts == "OnlyMouth":
                retargeting(es.e, self.d_info['exp'], sample_ratio, (14, 17, 19, 20))
            elif sample_parts == "OnlyEyes":
                retargeting(es.e, self.d_info['exp'], sample_ratio, (1, 2, 11, 13, 15, 16))

        es.r = g_engine.calc_fe(es.e, blink, eyebrow, wink, pupil_x, pupil_y, aaa, eee, woo, smile,
                                rotate_pitch, rotate_yaw, rotate_roll)

        if add_exp != None:
            es.add(add_exp)

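        # Final keypoints: scaled, rotated expression keypoints plus translation
        # (x_d_new = scale * ((kp + exp) @ R) + t), then stitched and decoded into the crop.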
        new_rotate = get_rotation_matrix(s_info['pitch'] + es.r[0], s_info['yaw'] + es.r[1],
                                         s_info['roll'] + es.r[2])
        x_d_new = (s_info['scale'] * (1 + es.s)) * ((s_exp + es.e) @ new_rotate) + s_info['t']

        x_d_new = pipeline.stitching(psi.x_s_user, x_d_new)

        crop_out = pipeline.warp_decode(psi.f_s_user, psi.x_s_user, x_d_new)
        crop_out = pipeline.parse_output(crop_out['out'])[0]

        crop_with_fullsize = cv2.warpAffine(crop_out, psi.crop_trans_m, get_rgb_size(psi.src_rgb), cv2.INTER_LINEAR)
        out = np.clip(psi.mask_ori * crop_with_fullsize + (1 - psi.mask_ori) * psi.src_rgb, 0, 255).astype(np.uint8)

        out_img = pil2tensor(out)

        filename = g_engine.get_temp_img_name() #"fe_edit_preview.png"
        folder_paths.get_save_image_path(filename, folder_paths.get_temp_directory())
        img = Image.fromarray(crop_out)
        img.save(os.path.join(folder_paths.get_temp_directory(), filename), compress_level=1)
        results = list()
        results.append({"filename": filename, "type": "temp"})

        new_editor_link.append(es)

        return {"ui": {"images": results}, "result": (out_img, new_editor_link, es)}

NODE_CLASS_MAPPINGS = {
    "AdvancedLivePortrait": AdvancedLivePortrait,
    "ExpressionEditor": ExpressionEditor,
    "LoadExpData": LoadExpData,
    "SaveExpData": SaveExpData,
    "ExpData": ExpData,
    "PrintExpData:": PrintExpData,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "AdvancedLivePortrait": "Advanced Live Portrait (PHM)",
    "ExpressionEditor": "Expression Editor (PHM)",
    "LoadExpData": "Load Exp Data (PHM)",
    "SaveExpData": "Save Exp Data (PHM)"
}

ComfyUI-AdvancedLivePortrait/pyproject.toml
ADDED
@@ -0,0 +1,15 @@
[project]
name = "comfyui-advancedliveportrait"
description = "AdvancedLivePortrait with Facial expression editor"
version = "1.0.0"
license = {file = "LICENSE"}
dependencies = ["numpy>=1.26.4", "opencv-python-headless", "imageio-ffmpeg>=0.5.1", "lmdb>=1.4.1", "timm>=1.0.7", "rich>=13.7.1", "albumentations>=1.4.10", "ultralytics", "tyro==0.8.5", "dill"]

[project.urls]
Repository = "https://github.com/PowerHouseMan/ComfyUI-AdvancedLivePortrait"
# Used by Comfy Registry https://comfyregistry.org

[tool.comfy]
PublisherId = "starmapking"
DisplayName = "ComfyUI-AdvancedLivePortrait"
Icon = ""
ComfyUI-AdvancedLivePortrait/requirements.txt
ADDED
@@ -0,0 +1,10 @@
numpy>=1.26.4
opencv-python-headless
imageio-ffmpeg>=0.5.1
lmdb>=1.4.1
timm>=1.0.7
rich>=13.7.1
albumentations>=1.4.10
ultralytics
tyro==0.8.5
dill
ComfyUI-AdvancedLivePortrait/sample/driving_video.mp4
ADDED
Binary file (185 kB).
ComfyUI-AdvancedLivePortrait/sample/exp_image.png
ADDED
ComfyUI-AdvancedLivePortrait/sample/original_sample_asset/driving/d0.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63f6f9962e1fdf6e6722172e7a18155204858d5d5ce3b1e0646c150360c33bed
size 2958395