Commit: init space

(This view is limited to 50 files because the commit contains too many changes; the remaining files appear only in the raw diff.)
- LICENSE.md +3 -0
- app.py +6 -0
- facefusion/__init__.py +0 -0
- facefusion/__pycache__/__init__.cpython-311.pyc +0 -0
- facefusion/__pycache__/choices.cpython-311.pyc +0 -0
- facefusion/__pycache__/content_analyser.cpython-311.pyc +0 -0
- facefusion/__pycache__/core.cpython-311.pyc +0 -0
- facefusion/__pycache__/face_analyser.cpython-311.pyc +0 -0
- facefusion/__pycache__/face_cache.cpython-311.pyc +0 -0
- facefusion/__pycache__/face_helper.cpython-311.pyc +0 -0
- facefusion/__pycache__/face_reference.cpython-311.pyc +0 -0
- facefusion/__pycache__/globals.cpython-311.pyc +0 -0
- facefusion/__pycache__/installer.cpython-311.pyc +0 -0
- facefusion/__pycache__/metadata.cpython-311.pyc +0 -0
- facefusion/__pycache__/typing.cpython-311.pyc +0 -0
- facefusion/__pycache__/utilities.cpython-311.pyc +0 -0
- facefusion/__pycache__/vision.cpython-311.pyc +0 -0
- facefusion/__pycache__/wording.cpython-311.pyc +0 -0
- facefusion/choices.py +26 -0
- facefusion/content_analyser.py +102 -0
- facefusion/core.py +274 -0
- facefusion/face_analyser.py +309 -0
- facefusion/face_cache.py +29 -0
- facefusion/face_helper.py +119 -0
- facefusion/face_reference.py +21 -0
- facefusion/globals.py +48 -0
- facefusion/installer.py +63 -0
- facefusion/metadata.py +13 -0
- facefusion/processors/__init__.py +0 -0
- facefusion/processors/__pycache__/__init__.cpython-311.pyc +0 -0
- facefusion/processors/frame/__init__.py +0 -0
- facefusion/processors/frame/__pycache__/__init__.cpython-311.pyc +0 -0
- facefusion/processors/frame/__pycache__/choices.cpython-311.pyc +0 -0
- facefusion/processors/frame/__pycache__/core.cpython-311.pyc +0 -0
- facefusion/processors/frame/__pycache__/globals.cpython-311.pyc +0 -0
- facefusion/processors/frame/__pycache__/typings.cpython-311.pyc +0 -0
- facefusion/processors/frame/choices.py +13 -0
- facefusion/processors/frame/core.py +96 -0
- facefusion/processors/frame/globals.py +10 -0
- facefusion/processors/frame/modules/__init__.py +0 -0
- facefusion/processors/frame/modules/__pycache__/__init__.cpython-311.pyc +0 -0
- facefusion/processors/frame/modules/__pycache__/face_debugger.cpython-311.pyc +0 -0
- facefusion/processors/frame/modules/__pycache__/face_enhancer.cpython-311.pyc +0 -0
- facefusion/processors/frame/modules/__pycache__/face_swapper.cpython-311.pyc +0 -0
- facefusion/processors/frame/modules/__pycache__/frame_enhancer.cpython-311.pyc +0 -0
- facefusion/processors/frame/modules/face_debugger.py +123 -0
- facefusion/processors/frame/modules/face_enhancer.py +221 -0
- facefusion/processors/frame/modules/face_swapper.py +283 -0
- facefusion/processors/frame/modules/frame_enhancer.py +165 -0
- facefusion/processors/frame/typings.py +7 -0

LICENSE.md
ADDED
@@ -0,0 +1,3 @@
+MIT license
+
+Copyright (c) 2023 Henry Ruhs

app.py
ADDED
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+from facefusion import core
+
+if __name__ == '__main__':
+	core.cli()

facefusion/__init__.py
ADDED
File without changes

facefusion/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (173 Bytes)

facefusion/__pycache__/choices.cpython-311.pyc
ADDED
Binary file (3.41 kB)

facefusion/__pycache__/content_analyser.cpython-311.pyc
ADDED
Binary file (6.1 kB)

facefusion/__pycache__/core.cpython-311.pyc
ADDED
Binary file (26.9 kB)

facefusion/__pycache__/face_analyser.cpython-311.pyc
ADDED
Binary file (21.4 kB)

facefusion/__pycache__/face_cache.cpython-311.pyc
ADDED
Binary file (1.71 kB)

facefusion/__pycache__/face_helper.cpython-311.pyc
ADDED
Binary file (9.27 kB)

facefusion/__pycache__/face_reference.cpython-311.pyc
ADDED
Binary file (825 Bytes)

facefusion/__pycache__/globals.cpython-311.pyc
ADDED
Binary file (3.09 kB)

facefusion/__pycache__/installer.cpython-311.pyc
ADDED
Binary file (4.51 kB)

facefusion/__pycache__/metadata.cpython-311.pyc
ADDED
Binary file (578 Bytes)

facefusion/__pycache__/typing.cpython-311.pyc
ADDED
Binary file (2.04 kB)

facefusion/__pycache__/utilities.cpython-311.pyc
ADDED
Binary file (21.4 kB)

facefusion/__pycache__/vision.cpython-311.pyc
ADDED
Binary file (3.96 kB)

facefusion/__pycache__/wording.cpython-311.pyc
ADDED
Binary file (9.73 kB)

facefusion/choices.py
ADDED
@@ -0,0 +1,26 @@
+from typing import List
+
+import numpy
+
+from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
+
+
+face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
+face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
+face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
+face_detector_models : List[str] = [ 'retinaface', 'yunet' ]
+face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
+face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ]
+temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ]
+output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
+
+execution_thread_count_range : List[int] = numpy.arange(1, 129, 1).tolist()
+execution_queue_count_range : List[int] = numpy.arange(1, 33, 1).tolist()
+max_memory_range : List[int] = numpy.arange(0, 129, 1).tolist()
+face_detector_score_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
+face_mask_blur_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
+face_mask_padding_range : List[float] = numpy.arange(0, 101, 1).tolist()
+reference_face_distance_range : List[float] = numpy.arange(0.0, 1.55, 0.05).tolist()
+temp_frame_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
+output_image_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
+output_video_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
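
A note on the ranges above: numpy.arange excludes its stop value, which is why the float ranges stop just past the intended maximum (1.05 for a 0.0 to 1.0 range in 0.05 steps, 1.55 for 0.0 to 1.5). A minimal sanity check, not part of the commit:

import numpy

# the stop value 101 is excluded, so the quality ranges span 0 through 100
print(numpy.arange(0, 101, 1).tolist()[-1]) # 100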

facefusion/content_analyser.py
ADDED
@@ -0,0 +1,102 @@
+from typing import Any, Dict
+from functools import lru_cache
+import threading
+import cv2
+import numpy
+import onnxruntime
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.typing import Frame, ModelValue
+from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_fps
+from facefusion.utilities import resolve_relative_path, conditional_download
+
+CONTENT_ANALYSER = None
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : Dict[str, ModelValue] =\
+{
+	'open_nsfw':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx',
+		'path': resolve_relative_path('../.assets/models/open_nsfw.onnx')
+	}
+}
+MAX_PROBABILITY = 0.80
+MAX_RATE = 5
+STREAM_COUNTER = 0
+
+
+def get_content_analyser() -> Any:
+	global CONTENT_ANALYSER
+
+	with THREAD_LOCK:
+		if CONTENT_ANALYSER is None:
+			model_path = MODELS.get('open_nsfw').get('path')
+			CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+	return CONTENT_ANALYSER
+
+
+def clear_content_analyser() -> None:
+	global CONTENT_ANALYSER
+
+	CONTENT_ANALYSER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = MODELS.get('open_nsfw').get('url')
+		conditional_download(download_directory_path, [ model_url ])
+	return True
+
+
+def analyse_stream(frame : Frame, fps : float) -> bool:
+	global STREAM_COUNTER
+
+	STREAM_COUNTER = STREAM_COUNTER + 1
+	if STREAM_COUNTER % int(fps) == 0:
+		return analyse_frame(frame)
+	return False
+
+
+def prepare_frame(frame : Frame) -> Frame:
+	frame = cv2.resize(frame, (224, 224)).astype(numpy.float32)
+	frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32)
+	frame = numpy.expand_dims(frame, axis = 0)
+	return frame
+
+
+def analyse_frame(frame : Frame) -> bool:
+	content_analyser = get_content_analyser()
+	frame = prepare_frame(frame)
+	probability = content_analyser.run(None,
+	{
+		'input:0': frame
+	})[0][0][1]
+	return probability > MAX_PROBABILITY
+
+
+@lru_cache(maxsize = None)
+def analyse_image(image_path : str) -> bool:
+	frame = read_image(image_path)
+	return analyse_frame(frame)
+
+
+@lru_cache(maxsize = None)
+def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
+	video_frame_total = count_video_frame_total(video_path)
+	fps = detect_fps(video_path)
+	frame_range = range(start_frame or 0, end_frame or video_frame_total)
+	rate = 0.0
+	counter = 0
+	with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =') as progress:
+		for frame_number in frame_range:
+			if frame_number % int(fps) == 0:
+				frame = get_video_frame(video_path, frame_number)
+				if analyse_frame(frame):
+					counter += 1
+			rate = counter * int(fps) / len(frame_range) * 100
+			progress.update()
+			progress.set_postfix(rate = rate)
+	return rate > MAX_RATE
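
Both analyse_stream and analyse_video sample roughly one frame per second (frame_number % int(fps) == 0); a video is flagged when the extrapolated share of positive samples exceeds MAX_RATE percent. A minimal usage sketch, assuming the model has already been downloaded and using a placeholder path:

from facefusion.content_analyser import analyse_video

# scan the first 250 frames; True means the sampled positive rate exceeded MAX_RATE
print(analyse_video('target.mp4', 0, 250)) # 'target.mp4' is a placeholder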

facefusion/core.py
ADDED
@@ -0,0 +1,274 @@
+import os
+
+os.environ['OMP_NUM_THREADS'] = '1'
+
+import signal
+import sys
+import warnings
+import platform
+import shutil
+import onnxruntime
+from argparse import ArgumentParser, HelpFormatter
+
+import facefusion.choices
+import facefusion.globals
+from facefusion.face_analyser import get_one_face
+from facefusion.face_reference import get_face_reference, set_face_reference
+from facefusion.vision import get_video_frame, read_image
+from facefusion import face_analyser, content_analyser, metadata, wording
+from facefusion.content_analyser import analyse_image, analyse_video
+from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
+from facefusion.utilities import is_image, is_video, detect_fps, compress_image, merge_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, list_module_names, encode_execution_providers, decode_execution_providers, normalize_output_path, normalize_padding, create_metavar, update_status
+
+onnxruntime.set_default_logger_severity(3)
+warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
+warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
+
+
+def cli() -> None:
+	signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
+	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
+	# general
+	program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
+	program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
+	program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
+	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
+	# misc
+	group_misc = program.add_argument_group('misc')
+	group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), dest = 'skip_download', action = 'store_true')
+	group_misc.add_argument('--headless', help = wording.get('headless_help'), dest = 'headless', action = 'store_true')
+	# execution
+	group_execution = program.add_argument_group('execution')
+	group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help'), dest = 'execution_providers', default = [ 'cpu' ], choices = encode_execution_providers(onnxruntime.get_available_providers()), nargs = '+')
+	group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 4, choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
+	group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1, choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
+	group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int, choices = facefusion.choices.max_memory_range, metavar = create_metavar(facefusion.choices.max_memory_range))
+	# face analyser
+	group_face_analyser = program.add_argument_group('face analyser')
+	group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), dest = 'face_analyser_order', default = 'left-right', choices = facefusion.choices.face_analyser_orders)
+	group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_ages)
+	group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_genders)
+	group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), dest = 'face_detector_model', default = 'retinaface', choices = facefusion.choices.face_detector_models)
+	group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), dest = 'face_detector_size', default = '640x640', choices = facefusion.choices.face_detector_sizes)
+	group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), dest = 'face_detector_score', type = float, default = 0.5, choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
+	# face selector
+	group_face_selector = program.add_argument_group('face selector')
+	group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), dest = 'face_selector_mode', default = 'reference', choices = facefusion.choices.face_selector_modes)
+	group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
+	group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 0.6, choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
+	group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
+	# face mask
+	group_face_mask = program.add_argument_group('face mask')
+	group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), dest = 'face_mask_blur', type = float, default = 0.3, choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
+	group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), dest = 'face_mask_padding', type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
+	# frame extraction
+	group_frame_extraction = program.add_argument_group('frame extraction')
+	group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
+	group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
+	group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_formats)
+	group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
+	group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action = 'store_true')
+	# output creation
+	group_output_creation = program.add_argument_group('output creation')
+	group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), dest = 'output_image_quality', type = int, default = 80, choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
+	group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoders)
+	group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 80, choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
+	group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action = 'store_true')
+	group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action = 'store_true')
+	# frame processors
+	available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+	program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
+	group_frame_processors = program.add_argument_group('frame processors')
+	group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), dest = 'frame_processors', default = [ 'face_swapper' ], nargs = '+')
+	for frame_processor in available_frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		frame_processor_module.register_args(group_frame_processors)
+	# uis
+	group_uis = program.add_argument_group('uis')
+	group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = [ 'default' ], nargs = '+')
+	run(program)
+
+
+def apply_args(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	# general
+	facefusion.globals.source_path = args.source_path
+	facefusion.globals.target_path = args.target_path
+	facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path)
+	# misc
+	facefusion.globals.skip_download = args.skip_download
+	facefusion.globals.headless = args.headless
+	# execution
+	facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
+	facefusion.globals.execution_thread_count = args.execution_thread_count
+	facefusion.globals.execution_queue_count = args.execution_queue_count
+	facefusion.globals.max_memory = args.max_memory
+	# face analyser
+	facefusion.globals.face_analyser_order = args.face_analyser_order
+	facefusion.globals.face_analyser_age = args.face_analyser_age
+	facefusion.globals.face_analyser_gender = args.face_analyser_gender
+	facefusion.globals.face_detector_model = args.face_detector_model
+	facefusion.globals.face_detector_size = args.face_detector_size
+	facefusion.globals.face_detector_score = args.face_detector_score
+	# face selector
+	facefusion.globals.face_selector_mode = args.face_selector_mode
+	facefusion.globals.reference_face_position = args.reference_face_position
+	facefusion.globals.reference_face_distance = args.reference_face_distance
+	facefusion.globals.reference_frame_number = args.reference_frame_number
+	# face mask
+	facefusion.globals.face_mask_blur = args.face_mask_blur
+	facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
+	# frame extraction
+	facefusion.globals.trim_frame_start = args.trim_frame_start
+	facefusion.globals.trim_frame_end = args.trim_frame_end
+	facefusion.globals.temp_frame_format = args.temp_frame_format
+	facefusion.globals.temp_frame_quality = args.temp_frame_quality
+	facefusion.globals.keep_temp = args.keep_temp
+	# output creation
+	facefusion.globals.output_image_quality = args.output_image_quality
+	facefusion.globals.output_video_encoder = args.output_video_encoder
+	facefusion.globals.output_video_quality = args.output_video_quality
+	facefusion.globals.keep_fps = args.keep_fps
+	facefusion.globals.skip_audio = args.skip_audio
+	# frame processors
+	available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+	facefusion.globals.frame_processors = args.frame_processors
+	for frame_processor in available_frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		frame_processor_module.apply_args(program)
+	# uis
+	facefusion.globals.ui_layouts = args.ui_layouts
+
+
+def run(program : ArgumentParser) -> None:
+	apply_args(program)
+	limit_resources()
+	if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check():
+		return
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		if not frame_processor_module.pre_check():
+			return
+	if facefusion.globals.headless:
+		conditional_process()
+	else:
+		import facefusion.uis.core as ui
+
+		for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts):
+			if not ui_layout.pre_check():
+				return
+		ui.launch()
+
+
+def destroy() -> None:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	sys.exit()
+
+
+def limit_resources() -> None:
+	if facefusion.globals.max_memory:
+		memory = facefusion.globals.max_memory * 1024 ** 3
+		if platform.system().lower() == 'darwin':
+			memory = facefusion.globals.max_memory * 1024 ** 6
+		if platform.system().lower() == 'windows':
+			import ctypes
+			kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
+			kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
+		else:
+			import resource
+			resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+
+
+def pre_check() -> bool:
+	if sys.version_info < (3, 9):
+		update_status(wording.get('python_not_supported').format(version = '3.9'))
+		return False
+	if not shutil.which('ffmpeg'):
+		update_status(wording.get('ffmpeg_not_installed'))
+		return False
+	return True
+
+
+def conditional_process() -> None:
+	conditional_set_face_reference()
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		if not frame_processor_module.pre_process('output'):
+			return
+	if is_image(facefusion.globals.target_path):
+		process_image()
+	if is_video(facefusion.globals.target_path):
+		process_video()
+
+
+def conditional_set_face_reference() -> None:
+	if 'reference' in facefusion.globals.face_selector_mode and not get_face_reference():
+		if is_video(facefusion.globals.target_path):
+			reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		else:
+			reference_frame = read_image(facefusion.globals.target_path)
+		reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+		set_face_reference(reference_face)
+
+
+def process_image() -> None:
+	if analyse_image(facefusion.globals.target_path):
+		return
+	shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+	# process frame
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		update_status(wording.get('processing'), frame_processor_module.NAME)
+		frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
+		frame_processor_module.post_process()
+	# compress image
+	update_status(wording.get('compressing_image'))
+	if not compress_image(facefusion.globals.output_path):
+		update_status(wording.get('compressing_image_failed'))
+	# validate image
+	if is_image(facefusion.globals.output_path):
+		update_status(wording.get('processing_image_succeed'))
+	else:
+		update_status(wording.get('processing_image_failed'))
+
+
+def process_video() -> None:
+	if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end):
+		return
+	fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+	# create temp
+	update_status(wording.get('creating_temp'))
+	create_temp(facefusion.globals.target_path)
+	# extract frames
+	update_status(wording.get('extracting_frames_fps').format(fps = fps))
+	extract_frames(facefusion.globals.target_path, fps)
+	# process frame
+	temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+	if temp_frame_paths:
+		for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+			update_status(wording.get('processing'), frame_processor_module.NAME)
+			frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
+			frame_processor_module.post_process()
+	else:
+		update_status(wording.get('temp_frames_not_found'))
+		return
+	# merge video
+	update_status(wording.get('merging_video_fps').format(fps = fps))
+	if not merge_video(facefusion.globals.target_path, fps):
+		update_status(wording.get('merging_video_failed'))
+		return
+	# handle audio
+	if facefusion.globals.skip_audio:
+		update_status(wording.get('skipping_audio'))
+		move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+	else:
+		update_status(wording.get('restoring_audio'))
+		if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path):
+			update_status(wording.get('restoring_audio_failed'))
+			move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+	# clear temp
+	update_status(wording.get('clearing_temp'))
+	clear_temp(facefusion.globals.target_path)
+	# validate video
+	if is_video(facefusion.globals.output_path):
+		update_status(wording.get('processing_video_succeed'))
+	else:
+		update_status(wording.get('processing_video_failed'))
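
cli() builds the parser in two passes: the first ArgumentParser is created with add_help = False and holds the static groups, then a second parser inherits it via parents so every frame-processor module can register its own flags before parse_args runs inside apply_args. A hypothetical headless invocation (all paths are placeholders):

python app.py --headless -s source.jpg -t target.mp4 -o output.mp4 --frame-processors face_swapper --execution-providers cpu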

facefusion/face_analyser.py
ADDED
@@ -0,0 +1,309 @@
+from typing import Any, Optional, List, Dict, Tuple
+import threading
+import cv2
+import numpy
+import onnxruntime
+
+import facefusion.globals
+from facefusion.face_cache import get_faces_cache, set_faces_cache
+from facefusion.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
+from facefusion.typing import Frame, Face, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelValue, Bbox, Kps, Score, Embedding
+from facefusion.utilities import resolve_relative_path, conditional_download
+from facefusion.vision import resize_frame_dimension
+
+FACE_ANALYSER = None
+THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : Dict[str, ModelValue] =\
+{
+	'face_detector_retinaface':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/retinaface_10g.onnx',
+		'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
+	},
+	'face_detector_yunet':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx',
+		'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
+	},
+	'face_recognizer_arcface_blendface':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
+		'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
+	},
+	'face_recognizer_arcface_inswapper':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
+		'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
+	},
+	'face_recognizer_arcface_simswap':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_simswap.onnx',
+		'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx')
+	},
+	'gender_age':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gender_age.onnx',
+		'path': resolve_relative_path('../.assets/models/gender_age.onnx')
+	}
+}
+
+
+def get_face_analyser() -> Any:
+	global FACE_ANALYSER
+
+	with THREAD_LOCK:
+		if FACE_ANALYSER is None:
+			if facefusion.globals.face_detector_model == 'retinaface':
+				face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = facefusion.globals.execution_providers)
+			if facefusion.globals.face_detector_model == 'yunet':
+				face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
+			if facefusion.globals.face_recognizer_model == 'arcface_blendface':
+				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendface').get('path'), providers = facefusion.globals.execution_providers)
+			if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
+				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = facefusion.globals.execution_providers)
+			if facefusion.globals.face_recognizer_model == 'arcface_simswap':
+				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = facefusion.globals.execution_providers)
+			gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = facefusion.globals.execution_providers)
+			FACE_ANALYSER =\
+			{
+				'face_detector': face_detector,
+				'face_recognizer': face_recognizer,
+				'gender_age': gender_age
+			}
+	return FACE_ANALYSER
+
+
+def clear_face_analyser() -> Any:
+	global FACE_ANALYSER
+
+	FACE_ANALYSER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_urls =\
+		[
+			MODELS.get('face_detector_retinaface').get('url'),
+			MODELS.get('face_detector_yunet').get('url'),
+			MODELS.get('face_recognizer_arcface_inswapper').get('url'),
+			MODELS.get('face_recognizer_arcface_simswap').get('url'),
+			MODELS.get('gender_age').get('url')
+		]
+		conditional_download(download_directory_path, model_urls)
+	return True
+
+
+def extract_faces(frame : Frame) -> List[Face]:
+	face_detector_width, face_detector_height = map(int, facefusion.globals.face_detector_size.split('x'))
+	frame_height, frame_width, _ = frame.shape
+	temp_frame = resize_frame_dimension(frame, face_detector_width, face_detector_height)
+	temp_frame_height, temp_frame_width, _ = temp_frame.shape
+	ratio_height = frame_height / temp_frame_height
+	ratio_width = frame_width / temp_frame_width
+	if facefusion.globals.face_detector_model == 'retinaface':
+		bbox_list, kps_list, score_list = detect_with_retinaface(temp_frame, temp_frame_height, temp_frame_width, face_detector_height, face_detector_width, ratio_height, ratio_width)
+		return create_faces(frame, bbox_list, kps_list, score_list)
+	elif facefusion.globals.face_detector_model == 'yunet':
+		bbox_list, kps_list, score_list = detect_with_yunet(temp_frame, temp_frame_height, temp_frame_width, ratio_height, ratio_width)
+		return create_faces(frame, bbox_list, kps_list, score_list)
+	return []
+
+
+def detect_with_retinaface(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, face_detector_height : int, face_detector_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
+	face_detector = get_face_analyser().get('face_detector')
+	bbox_list = []
+	kps_list = []
+	score_list = []
+	feature_strides = [ 8, 16, 32 ]
+	feature_map_channel = 3
+	anchor_total = 2
+	prepare_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
+	prepare_frame[:temp_frame_height, :temp_frame_width, :] = temp_frame
+	temp_frame = (prepare_frame - 127.5) / 128.0
+	temp_frame = numpy.expand_dims(temp_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
+	with THREAD_SEMAPHORE:
+		detections = face_detector.run(None,
+		{
+			face_detector.get_inputs()[0].name: temp_frame
+		})
+	for index, feature_stride in enumerate(feature_strides):
+		keep_indices = numpy.where(detections[index] >= facefusion.globals.face_detector_score)[0]
+		if keep_indices.any():
+			stride_height = face_detector_height // feature_stride
+			stride_width = face_detector_width // feature_stride
+			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
+			bbox_raw = (detections[index + feature_map_channel] * feature_stride)
+			kps_raw = detections[index + feature_map_channel * 2] * feature_stride
+			for bbox in distance_to_bbox(anchors, bbox_raw)[keep_indices]:
+				bbox_list.append(numpy.array(
+				[
+					bbox[0] * ratio_width,
+					bbox[1] * ratio_height,
+					bbox[2] * ratio_width,
+					bbox[3] * ratio_height
+				]))
+			for kps in distance_to_kps(anchors, kps_raw)[keep_indices]:
+				kps_list.append(kps * [ ratio_width, ratio_height ])
+			for score in detections[index][keep_indices]:
+				score_list.append(score[0])
+	return bbox_list, kps_list, score_list
+
+
+def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
+	face_detector = get_face_analyser().get('face_detector')
+	face_detector.setInputSize((temp_frame_width, temp_frame_height))
+	face_detector.setScoreThreshold(facefusion.globals.face_detector_score)
+	bbox_list = []
+	kps_list = []
+	score_list = []
+	with THREAD_SEMAPHORE:
+		_, detections = face_detector.detect(temp_frame)
+	if detections.any():
+		for detection in detections:
+			bbox_list.append(numpy.array(
+			[
+				detection[0] * ratio_width,
+				detection[1] * ratio_height,
+				(detection[0] + detection[2]) * ratio_width,
+				(detection[1] + detection[3]) * ratio_height
+			]))
+			kps_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height ])
+			score_list.append(detection[14])
+	return bbox_list, kps_list, score_list
+
+
+def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face]:
+	faces : List[Face] = []
+	if facefusion.globals.face_detector_score > 0:
+		keep_indices = apply_nms(bbox_list, 0.4)
+		for index in keep_indices:
+			bbox = bbox_list[index]
+			kps = kps_list[index]
+			score = score_list[index]
+			embedding, normed_embedding = calc_embedding(frame, kps)
+			gender, age = detect_gender_age(frame, kps)
+			faces.append(Face(
+				bbox = bbox,
+				kps = kps,
+				score = score,
+				embedding = embedding,
+				normed_embedding = normed_embedding,
+				gender = gender,
+				age = age
+			))
+	return faces
+
+
+def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
+	face_recognizer = get_face_analyser().get('face_recognizer')
+	crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_v2', (112, 112))
+	crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
+	crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
+	crop_frame = numpy.expand_dims(crop_frame, axis = 0)
+	embedding = face_recognizer.run(None,
+	{
+		face_recognizer.get_inputs()[0].name: crop_frame
+	})[0]
+	embedding = embedding.ravel()
+	normed_embedding = embedding / numpy.linalg.norm(embedding)
+	return embedding, normed_embedding
+
+
+def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]:
+	gender_age = get_face_analyser().get('gender_age')
+	crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_v2', (96, 96))
+	crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32)
+	prediction = gender_age.run(None,
+	{
+		gender_age.get_inputs()[0].name: crop_frame
+	})[0][0]
+	gender = int(numpy.argmax(prediction[:2]))
+	age = int(numpy.round(prediction[2] * 100))
+	return gender, age
+
+
+def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
+	many_faces = get_many_faces(frame)
+	if many_faces:
+		try:
+			return many_faces[position]
+		except IndexError:
+			return many_faces[-1]
+	return None
+
+
+def get_many_faces(frame : Frame) -> List[Face]:
+	try:
+		faces_cache = get_faces_cache(frame)
+		if faces_cache:
+			faces = faces_cache
+		else:
+			faces = extract_faces(frame)
+			set_faces_cache(frame, faces)
+		if facefusion.globals.face_analyser_order:
+			faces = sort_by_order(faces, facefusion.globals.face_analyser_order)
+		if facefusion.globals.face_analyser_age:
+			faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
+		if facefusion.globals.face_analyser_gender:
+			faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
+		return faces
+	except (AttributeError, ValueError):
+		return []
+
+
+def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]:
+	many_faces = get_many_faces(frame)
+	similar_faces = []
+	if many_faces:
+		for face in many_faces:
+			if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
+				current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
+				if current_face_distance < face_distance:
+					similar_faces.append(face)
+	return similar_faces
+
+
+def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
+	if order == 'left-right':
+		return sorted(faces, key = lambda face: face.bbox[0])
+	if order == 'right-left':
+		return sorted(faces, key = lambda face: face.bbox[0], reverse = True)
+	if order == 'top-bottom':
+		return sorted(faces, key = lambda face: face.bbox[1])
+	if order == 'bottom-top':
+		return sorted(faces, key = lambda face: face.bbox[1], reverse = True)
+	if order == 'small-large':
+		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]))
+	if order == 'large-small':
+		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]), reverse = True)
+	if order == 'best-worst':
+		return sorted(faces, key = lambda face: face.score, reverse = True)
+	if order == 'worst-best':
+		return sorted(faces, key = lambda face: face.score)
+	return faces
+
+
+def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
+	filter_faces = []
+	for face in faces:
+		if face.age < 13 and age == 'child':
+			filter_faces.append(face)
+		elif face.age < 19 and age == 'teen':
+			filter_faces.append(face)
+		elif face.age < 60 and age == 'adult':
+			filter_faces.append(face)
+		elif face.age > 59 and age == 'senior':
+			filter_faces.append(face)
+	return filter_faces
+
+
+def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
+	filter_faces = []
+	for face in faces:
+		if face.gender == 0 and gender == 'female':
+			filter_faces.append(face)
+		if face.gender == 1 and gender == 'male':
+			filter_faces.append(face)
+	return filter_faces
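
get_many_faces is the module's main entry point: it consults the frame cache, runs detection if needed, then applies the ordering and filters configured in the globals. A minimal sketch of direct use, assuming the models already sit under .assets/models and using a placeholder image path:

import facefusion.globals
from facefusion.vision import read_image
from facefusion.face_analyser import get_many_faces

# configure the analyser through the module-level globals
facefusion.globals.execution_providers = [ 'CPUExecutionProvider' ]
facefusion.globals.face_detector_model = 'retinaface'
facefusion.globals.face_detector_size = '640x640'
facefusion.globals.face_detector_score = 0.5
facefusion.globals.face_recognizer_model = 'arcface_inswapper'
facefusion.globals.face_analyser_order = 'left-right'

frame = read_image('photo.jpg') # placeholder path
for face in get_many_faces(frame):
	print(face.bbox, face.gender, face.age)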

facefusion/face_cache.py
ADDED
@@ -0,0 +1,29 @@
+from typing import Optional, List, Dict
+import hashlib
+
+from facefusion.typing import Frame, Face
+
+FACES_CACHE : Dict[str, List[Face]] = {}
+
+
+def get_faces_cache(frame : Frame) -> Optional[List[Face]]:
+	frame_hash = create_frame_hash(frame)
+	if frame_hash in FACES_CACHE:
+		return FACES_CACHE[frame_hash]
+	return None
+
+
+def set_faces_cache(frame : Frame, faces : List[Face]) -> None:
+	frame_hash = create_frame_hash(frame)
+	if frame_hash:
+		FACES_CACHE[frame_hash] = faces
+
+
+def clear_faces_cache() -> None:
+	global FACES_CACHE
+
+	FACES_CACHE = {}
+
+
+def create_frame_hash(frame : Frame) -> Optional[str]:
+	return hashlib.sha1(frame.tobytes()).hexdigest() if frame.any() else None
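
The cache key is a SHA-1 digest of the raw frame bytes, so two frames hit the same entry only when they match byte for byte, and an all-zero frame (frame.any() is False) is deliberately never cached. A tiny illustration with a synthetic frame:

import numpy
from facefusion.face_cache import set_faces_cache, get_faces_cache

frame = numpy.full((4, 4, 3), 255, numpy.uint8) # synthetic stand-in for a real frame
set_faces_cache(frame, [])
print(get_faces_cache(frame)) # []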

facefusion/face_helper.py
ADDED
@@ -0,0 +1,119 @@
+from typing import Any, Dict, Tuple, List
+from functools import lru_cache
+from cv2.typing import Size
+import cv2
+import numpy
+
+from facefusion.typing import Bbox, Kps, Frame, Matrix, Template, Padding
+
+TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
+{
+	'arcface_v1': numpy.array(
+	[
+		[ 39.7300, 51.1380 ],
+		[ 72.2700, 51.1380 ],
+		[ 56.0000, 68.4930 ],
+		[ 42.4630, 87.0100 ],
+		[ 69.5370, 87.0100 ]
+	]),
+	'arcface_v2': numpy.array(
+	[
+		[ 38.2946, 51.6963 ],
+		[ 73.5318, 51.5014 ],
+		[ 56.0252, 71.7366 ],
+		[ 41.5493, 92.3655 ],
+		[ 70.7299, 92.2041 ]
+	]),
+	'ffhq': numpy.array(
+	[
+		[ 192.98138, 239.94708 ],
+		[ 318.90277, 240.1936 ],
+		[ 256.63416, 314.01935 ],
+		[ 201.26117, 371.41043 ],
+		[ 313.08905, 371.15118 ]
+	])
+}
+
+
+def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
+	normed_template = TEMPLATES.get(template) * size[1] / size[0]
+	affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.LMEDS)[0]
+	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
+	return crop_frame, affine_matrix
+
+
+def paste_back(temp_frame : Frame, crop_frame : Frame, affine_matrix : Matrix, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
+	inverse_matrix = cv2.invertAffineTransform(affine_matrix)
+	temp_frame_size = temp_frame.shape[:2][::-1]
+	mask_size = tuple(crop_frame.shape[:2])
+	mask_frame = create_static_mask_frame(mask_size, face_mask_blur, face_mask_padding)
+	inverse_mask_frame = cv2.warpAffine(mask_frame, inverse_matrix, temp_frame_size).clip(0, 1)
+	inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
+	paste_frame = temp_frame.copy()
+	paste_frame[:, :, 0] = inverse_mask_frame * inverse_crop_frame[:, :, 0] + (1 - inverse_mask_frame) * temp_frame[:, :, 0]
+	paste_frame[:, :, 1] = inverse_mask_frame * inverse_crop_frame[:, :, 1] + (1 - inverse_mask_frame) * temp_frame[:, :, 1]
+	paste_frame[:, :, 2] = inverse_mask_frame * inverse_crop_frame[:, :, 2] + (1 - inverse_mask_frame) * temp_frame[:, :, 2]
+	return paste_frame
+
+
+@lru_cache(maxsize = None)
+def create_static_mask_frame(mask_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
+	mask_frame = numpy.ones(mask_size, numpy.float32)
+	blur_amount = int(mask_size[0] * 0.5 * face_mask_blur)
+	blur_area = max(blur_amount // 2, 1)
+	mask_frame[:max(blur_area, int(mask_size[1] * face_mask_padding[0] / 100)), :] = 0
+	mask_frame[-max(blur_area, int(mask_size[1] * face_mask_padding[2] / 100)):, :] = 0
+	mask_frame[:, :max(blur_area, int(mask_size[0] * face_mask_padding[3] / 100))] = 0
+	mask_frame[:, -max(blur_area, int(mask_size[0] * face_mask_padding[1] / 100)):] = 0
+	if blur_amount > 0:
+		mask_frame = cv2.GaussianBlur(mask_frame, (0, 0), blur_amount * 0.25)
+	return mask_frame
+
+
+@lru_cache(maxsize = None)
+def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
+	y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
+	anchors = numpy.stack((y, x), axis = -1)
+	anchors = (anchors * feature_stride).reshape((-1, 2))
+	anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2))
+	return anchors
+
+
+def distance_to_bbox(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Bbox:
+	x1 = points[:, 0] - distance[:, 0]
+	y1 = points[:, 1] - distance[:, 1]
+	x2 = points[:, 0] + distance[:, 2]
+	y2 = points[:, 1] + distance[:, 3]
+	bbox = numpy.column_stack([ x1, y1, x2, y2 ])
+	return bbox
+
+
+def distance_to_kps(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Kps:
+	x = points[:, 0::2] + distance[:, 0::2]
+	y = points[:, 1::2] + distance[:, 1::2]
+	kps = numpy.stack((x, y), axis = -1)
+	return kps
+
+
+def apply_nms(bbox_list : List[Bbox], iou_threshold : float) -> List[int]:
+	keep_indices = []
+	dimension_list = numpy.reshape(bbox_list, (-1, 4))
+	x1 = dimension_list[:, 0]
+	y1 = dimension_list[:, 1]
+	x2 = dimension_list[:, 2]
+	y2 = dimension_list[:, 3]
+	areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+	indices = numpy.arange(len(bbox_list))
+	while indices.size > 0:
+		index = indices[0]
+		remain_indices = indices[1:]
+		keep_indices.append(index)
+		xx1 = numpy.maximum(x1[index], x1[remain_indices])
+		yy1 = numpy.maximum(y1[index], y1[remain_indices])
+		xx2 = numpy.minimum(x2[index], x2[remain_indices])
+		yy2 = numpy.minimum(y2[index], y2[remain_indices])
+		width = numpy.maximum(0, xx2 - xx1 + 1)
+		height = numpy.maximum(0, yy2 - yy1 + 1)
+		iou = width * height / (areas[index] + areas[remain_indices] - width * height)
+		indices = indices[numpy.where(iou <= iou_threshold)[0] + 1]
+	return keep_indices
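
apply_nms is a plain greedy IoU suppression in numpy: boxes are visited in list order, and any later box whose overlap with an already kept box exceeds the threshold is discarded. A small worked example with made-up boxes:

import numpy
from facefusion.face_helper import apply_nms

bbox_list =\
[
	numpy.array([ 10, 10, 110, 110 ]), # visited first, kept
	numpy.array([ 12, 12, 112, 112 ]), # IoU ~ 0.92 with the first, dropped
	numpy.array([ 200, 200, 300, 300 ]) # disjoint, kept
]
print([ int(index) for index in apply_nms(bbox_list, 0.4) ]) # [0, 2]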

facefusion/face_reference.py
ADDED
@@ -0,0 +1,21 @@
+from typing import Optional
+
+from facefusion.typing import Face
+
+FACE_REFERENCE = None
+
+
+def get_face_reference() -> Optional[Face]:
+	return FACE_REFERENCE
+
+
+def set_face_reference(face : Face) -> None:
+	global FACE_REFERENCE
+
+	FACE_REFERENCE = face
+
+
+def clear_face_reference() -> None:
+	global FACE_REFERENCE
+
+	FACE_REFERENCE = None
facefusion/globals.py
ADDED
@@ -0,0 +1,48 @@
from typing import List, Optional

from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding

# general
source_path : Optional[str] = None
target_path : Optional[str] = None
output_path : Optional[str] = None
# misc
skip_download : Optional[bool] = None
headless : Optional[bool] = None
# execution
execution_providers : List[str] = []
execution_thread_count : Optional[int] = None
execution_queue_count : Optional[int] = None
max_memory : Optional[int] = None
# face analyser
face_analyser_order : Optional[FaceAnalyserOrder] = None
face_analyser_age : Optional[FaceAnalyserAge] = None
face_analyser_gender : Optional[FaceAnalyserGender] = None
face_detector_model : Optional[FaceDetectorModel] = None
face_detector_size : Optional[str] = None
face_detector_score : Optional[float] = None
face_recognizer_model : Optional[FaceRecognizerModel] = None
# face selector
face_selector_mode : Optional[FaceSelectorMode] = None
reference_face_position : Optional[int] = None
reference_face_distance : Optional[float] = None
reference_frame_number : Optional[int] = None
# face mask
face_mask_blur : Optional[float] = None
face_mask_padding : Optional[Padding] = None
# frame extraction
trim_frame_start : Optional[int] = None
trim_frame_end : Optional[int] = None
temp_frame_format : Optional[TempFrameFormat] = None
temp_frame_quality : Optional[int] = None
keep_temp : Optional[bool] = None
# output creation
output_image_quality : Optional[int] = None
output_video_encoder : Optional[OutputVideoEncoder] = None
output_video_quality : Optional[int] = None
keep_fps : Optional[bool] = None
skip_audio : Optional[bool] = None
# frame processors
frame_processors : List[str] = []
# uis
ui_layouts : List[str] = []
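Note: these module attributes act as a process-wide configuration store; the CLI layer assigns parsed arguments onto this module and every other module reads them back as plain attributes. A minimal sketch (hypothetical values):

import facefusion.globals

facefusion.globals.frame_processors = [ 'face_swapper', 'face_enhancer' ]
facefusion.globals.face_selector_mode = 'reference'
facefusion.globals.reference_face_distance = 1.5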
facefusion/installer.py
ADDED
@@ -0,0 +1,63 @@
from typing import Dict, Tuple
import subprocess
from argparse import ArgumentParser, HelpFormatter

subprocess.call([ 'pip', 'install', 'inquirer', '-q' ])

import inquirer

from facefusion import metadata, wording

TORCH : Dict[str, str] =\
{
    'default': 'default',
    'cpu': 'cpu',
    'cuda': 'cu118',
    'rocm': 'rocm5.6'
}
ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
{
    'default': ('onnxruntime', '1.16.3'),
    'cuda': ('onnxruntime-gpu', '1.16.3'),
    'coreml-legacy': ('onnxruntime-coreml', '1.13.1'),
    'coreml-silicon': ('onnxruntime-silicon', '1.16.0'),
    'directml': ('onnxruntime-directml', '1.16.3'),
    'openvino': ('onnxruntime-openvino', '1.16.0')
}


def cli() -> None:
    program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
    program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), dest = 'torch', choices = TORCH.keys())
    program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), dest = 'onnxruntime', choices = ONNXRUNTIMES.keys())
    program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
    run(program)


def run(program : ArgumentParser) -> None:
    args = program.parse_args()

    if args.torch and args.onnxruntime:
        answers =\
        {
            'torch': args.torch,
            'onnxruntime': args.onnxruntime
        }
    else:
        answers = inquirer.prompt(
        [
            inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
            inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
        ])
    if answers:
        torch = answers['torch']
        torch_wheel = TORCH[torch]
        onnxruntime = answers['onnxruntime']
        onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]
        subprocess.call([ 'pip', 'uninstall', 'torch', '-y' ])
        if torch_wheel == 'default':
            subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
        else:
            subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
        subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y' ])
        subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
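Note: run only falls back to the inquirer prompt when one of the flags is missing, so passing both makes the installer fully non-interactive. A minimal sketch (the entry point script name is an assumption):

import sys
from facefusion import installer

# Equivalent to: python install.py --torch cuda --onnxruntime cuda
sys.argv = [ 'install.py', '--torch', 'cuda', '--onnxruntime', 'cuda' ]
installer.cli()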
facefusion/metadata.py
ADDED
@@ -0,0 +1,13 @@
METADATA =\
{
    'name': 'FaceFusion',
    'description': 'Next generation face swapper and enhancer',
    'version': '2.0.0',
    'license': 'MIT',
    'author': 'Henry Ruhs',
    'url': 'https://facefusion.io'
}


def get(key : str) -> str:
    return METADATA[key]
facefusion/processors/__init__.py
ADDED
File without changes
facefusion/processors/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (184 Bytes). View file
facefusion/processors/frame/__init__.py
ADDED
File without changes
facefusion/processors/frame/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (190 Bytes). View file
facefusion/processors/frame/__pycache__/choices.cpython-311.pyc
ADDED
Binary file (1.51 kB). View file
facefusion/processors/frame/__pycache__/core.cpython-311.pyc
ADDED
Binary file (6.02 kB). View file
facefusion/processors/frame/__pycache__/globals.cpython-311.pyc
ADDED
Binary file (948 Bytes). View file
facefusion/processors/frame/__pycache__/typings.cpython-311.pyc
ADDED
Binary file (709 Bytes). View file
facefusion/processors/frame/choices.py
ADDED
@@ -0,0 +1,13 @@
from typing import List
import numpy

from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem

face_swapper_models : List[FaceSwapperModel] = [ 'blendface_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]

face_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
frame_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()

face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]
facefusion/processors/frame/core.py
ADDED
@@ -0,0 +1,96 @@
import sys
import importlib
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue
from types import ModuleType
from typing import Any, List
from tqdm import tqdm

import facefusion.globals
from facefusion.typing import Process_Frames
from facefusion import wording
from facefusion.utilities import encode_execution_providers

FRAME_PROCESSORS_MODULES : List[ModuleType] = []
FRAME_PROCESSORS_METHODS =\
[
    'get_frame_processor',
    'clear_frame_processor',
    'get_options',
    'set_options',
    'register_args',
    'apply_args',
    'pre_check',
    'pre_process',
    'process_frame',
    'process_frames',
    'process_image',
    'process_video',
    'post_process'
]


def load_frame_processor_module(frame_processor : str) -> Any:
    try:
        frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
        for method_name in FRAME_PROCESSORS_METHODS:
            if not hasattr(frame_processor_module, method_name):
                raise NotImplementedError
    except ModuleNotFoundError:
        sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
    except NotImplementedError:
        sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
    return frame_processor_module


def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
    global FRAME_PROCESSORS_MODULES

    if not FRAME_PROCESSORS_MODULES:
        for frame_processor in frame_processors:
            frame_processor_module = load_frame_processor_module(frame_processor)
            FRAME_PROCESSORS_MODULES.append(frame_processor_module)
    return FRAME_PROCESSORS_MODULES


def clear_frame_processors_modules() -> None:
    global FRAME_PROCESSORS_MODULES

    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
        frame_processor_module.clear_frame_processor()
    FRAME_PROCESSORS_MODULES = []


def multi_process_frames(source_path : str, temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
    with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =') as progress:
        progress.set_postfix(
        {
            'execution_providers': encode_execution_providers(facefusion.globals.execution_providers),
            'execution_thread_count': facefusion.globals.execution_thread_count,
            'execution_queue_count': facefusion.globals.execution_queue_count
        })
        with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
            futures = []
            queue_temp_frame_paths : Queue[str] = create_queue(temp_frame_paths)
            queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
            while not queue_temp_frame_paths.empty():
                payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
                future = executor.submit(process_frames, source_path, payload_temp_frame_paths, progress.update)
                futures.append(future)
            for future_done in as_completed(futures):
                future_done.result()


def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
    queue : Queue[str] = Queue()
    for frame_path in temp_frame_paths:
        queue.put(frame_path)
    return queue


def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
    queues = []
    for _ in range(queue_per_future):
        if not queue.empty():
            queues.append(queue.get())
    return queues
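Note: multi_process_frames splits the frame queue into chunks of len(paths) // thread_count * queue_count before handing each chunk to a pool worker. A minimal sketch of that arithmetic using create_queue and pick_queue (assumed in scope; the real code reads the counts from facefusion.globals):

# 100 frame paths, 4 threads, queue count 1: each future receives 25 paths,
# so the queue drains in exactly four batches.
paths = [ 'frame_%04d.jpg' % index for index in range(100) ]
queue = create_queue(paths)
queue_per_future = max(len(paths) // 4 * 1, 1)
batches = []
while not queue.empty():
    batches.append(pick_queue(queue, queue_per_future))
print(len(batches), len(batches[0])) # 4 25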
facefusion/processors/frame/globals.py
ADDED
@@ -0,0 +1,10 @@
from typing import List, Optional

from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem

face_swapper_model : Optional[FaceSwapperModel] = None
face_enhancer_model : Optional[FaceEnhancerModel] = None
face_enhancer_blend : Optional[int] = None
frame_enhancer_model : Optional[FrameEnhancerModel] = None
frame_enhancer_blend : Optional[int] = None
face_debugger_items : Optional[List[FaceDebuggerItem]] = None
facefusion/processors/frame/modules/__init__.py
ADDED
File without changes
facefusion/processors/frame/modules/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (198 Bytes). View file
facefusion/processors/frame/modules/__pycache__/face_debugger.cpython-311.pyc
ADDED
Binary file (9.04 kB). View file
facefusion/processors/frame/modules/__pycache__/face_enhancer.cpython-311.pyc
ADDED
Binary file (13.9 kB). View file
facefusion/processors/frame/modules/__pycache__/face_swapper.cpython-311.pyc
ADDED
Binary file (17.8 kB). View file
facefusion/processors/frame/modules/__pycache__/frame_enhancer.cpython-311.pyc
ADDED
Binary file (10.6 kB). View file
facefusion/processors/frame/modules/face_debugger.py
ADDED
@@ -0,0 +1,123 @@
from typing import Any, List, Literal
from argparse import ArgumentParser
import cv2
import numpy

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_reference import get_face_reference
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Frame, Update_Process, ProcessMode
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.face_helper import warp_face, create_static_mask_frame
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices

NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_DEBUGGER'


def get_frame_processor() -> None:
    pass


def clear_frame_processor() -> None:
    pass


def get_options(key : Literal['model']) -> None:
    pass


def set_options(key : Literal['model'], value : Any) -> None:
    pass


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help'), dest = 'face_debugger_items', default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+')


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_debugger_items = args.face_debugger_items


def pre_check() -> bool:
    return True


def pre_process(mode : ProcessMode) -> bool:
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_face_analyser()
    clear_content_analyser()


def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    primary_color = (0, 0, 255)
    secondary_color = (0, 255, 0)
    bounding_box = target_face.bbox.astype(numpy.int32)
    if 'bbox' in frame_processors_globals.face_debugger_items:
        cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
    if 'face-mask' in frame_processors_globals.face_debugger_items:
        crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_v2', (128, 128))
        inverse_matrix = cv2.invertAffineTransform(affine_matrix)
        temp_frame_size = temp_frame.shape[:2][::-1]
        mask_frame = create_static_mask_frame(crop_frame.shape[:2], 0, facefusion.globals.face_mask_padding)
        mask_frame[mask_frame > 0] = 255
        inverse_mask_frame = cv2.warpAffine(mask_frame.astype(numpy.uint8), inverse_matrix, temp_frame_size)
        inverse_mask_contours = cv2.findContours(inverse_mask_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
        cv2.drawContours(temp_frame, inverse_mask_contours, 0, primary_color, 2)
    if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
        if 'kps' in frame_processors_globals.face_debugger_items:
            kps = target_face.kps.astype(numpy.int32)
            for index in range(kps.shape[0]):
                cv2.circle(temp_frame, (kps[index][0], kps[index][1]), 3, primary_color, -1)
        if 'score' in frame_processors_globals.face_debugger_items:
            score_text = str(round(target_face.score, 2))
            score_position = (bounding_box[0] + 10, bounding_box[1] + 20)
            cv2.putText(temp_frame, score_text, score_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, secondary_color, 2)
    return temp_frame


def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
    if 'reference' in facefusion.globals.face_selector_mode:
        similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                temp_frame = debug_face(source_face, similar_face, temp_frame)
    if 'one' in facefusion.globals.face_selector_mode:
        target_face = get_one_face(temp_frame)
        if target_face:
            temp_frame = debug_face(source_face, target_face, temp_frame)
    if 'many' in facefusion.globals.face_selector_mode:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = debug_face(source_face, target_face, temp_frame)
    return temp_frame


def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    source_face = get_one_face(read_static_image(source_path))
    reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(source_face, reference_face, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    source_face = get_one_face(read_static_image(source_path))
    target_frame = read_static_image(target_path)
    reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
    result_frame = process_frame(source_face, reference_face, target_frame)
    write_image(output_path, result_frame)


def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
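Note: a minimal sketch of driving this module directly (hypothetical file paths, and assuming the face analyser models are already downloaded). In 'many' mode the debugger draws its overlays on every detected face without needing a reference:

import facefusion.globals
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame.modules import face_debugger

facefusion.globals.face_selector_mode = 'many'
frame_processors_globals.face_debugger_items = [ 'bbox', 'kps', 'score' ]
face_debugger.process_image('source.jpg', 'target.jpg', 'debug.jpg')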
facefusion/processors/frame/modules/face_enhancer.py
ADDED
@@ -0,0 +1,221 @@
from typing import Any, List, Dict, Literal, Optional
from argparse import ArgumentParser
import cv2
import threading
import numpy
import onnxruntime

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import wording
from facefusion.face_analyser import get_many_faces, clear_face_analyser
from facefusion.face_helper import warp_face, paste_back
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER'
MODELS : Dict[str, ModelValue] =\
{
    'codeformer':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
        'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
        'template': 'ffhq',
        'size': (512, 512)
    },
    'gfpgan_1.2':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
        'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
        'template': 'ffhq',
        'size': (512, 512)
    },
    'gfpgan_1.3':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
        'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
        'template': 'ffhq',
        'size': (512, 512)
    },
    'gfpgan_1.4':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
        'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
        'template': 'ffhq',
        'size': (512, 512)
    },
    'gpen_bfr_256':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
        'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
        'template': 'arcface_v2',
        'size': (128, 256)
    },
    'gpen_bfr_512':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
        'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
        'template': 'ffhq',
        'size': (512, 512)
    },
    'restoreformer':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
        'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
        'template': 'ffhq',
        'size': (512, 512)
    }
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
    global FRAME_PROCESSOR

    with THREAD_LOCK:
        if FRAME_PROCESSOR is None:
            model_path = get_options('model').get('path')
            FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
    return FRAME_PROCESSOR


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR

    FRAME_PROCESSOR = None


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.face_enhancer_model]
        }
    return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
    global OPTIONS

    OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
    program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_enhancer_model = args.face_enhancer_model
    frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend


def pre_check() -> bool:
    if not facefusion.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True


def pre_process(mode : ProcessMode) -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
        update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
        update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not facefusion.globals.output_path:
        update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_face_analyser()
    clear_content_analyser()
    read_static_image.cache_clear()


def enhance_face(target_face : Face, temp_frame : Frame) -> Frame:
    frame_processor = get_frame_processor()
    model_template = get_options('model').get('template')
    model_size = get_options('model').get('size')
    crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
    crop_frame = prepare_crop_frame(crop_frame)
    frame_processor_inputs = {}
    for frame_processor_input in frame_processor.get_inputs():
        if frame_processor_input.name == 'input':
            frame_processor_inputs[frame_processor_input.name] = crop_frame
        if frame_processor_input.name == 'weight':
            frame_processor_inputs[frame_processor_input.name] = numpy.array([ 1 ], dtype = numpy.double)
    with THREAD_SEMAPHORE:
        crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
    crop_frame = normalize_crop_frame(crop_frame)
    paste_frame = paste_back(temp_frame, crop_frame, affine_matrix, facefusion.globals.face_mask_blur, (0, 0, 0, 0))
    temp_frame = blend_frame(temp_frame, paste_frame)
    return temp_frame


def prepare_crop_frame(crop_frame : Frame) -> Frame:
    crop_frame = crop_frame[:, :, ::-1] / 255.0
    crop_frame = (crop_frame - 0.5) / 0.5
    crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
    return crop_frame


def normalize_crop_frame(crop_frame : Frame) -> Frame:
    crop_frame = numpy.clip(crop_frame, -1, 1)
    crop_frame = (crop_frame + 1) / 2
    crop_frame = crop_frame.transpose(1, 2, 0)
    crop_frame = (crop_frame * 255.0).round()
    crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1]
    return crop_frame


def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
    face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
    temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
    return temp_frame


def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
    many_faces = get_many_faces(temp_frame)
    if many_faces:
        for target_face in many_faces:
            temp_frame = enhance_face(target_face, temp_frame)
    return temp_frame


def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(None, None, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    target_frame = read_static_image(target_path)
    result_frame = process_frame(None, None, target_frame)
    write_image(output_path, result_frame)


def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
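Note: prepare_crop_frame maps the BGR uint8 crop to an RGB float32 NCHW batch in [-1, 1], and normalize_crop_frame inverts every step, so the two are inverses up to rounding. A quick sketch (assuming both functions are in scope):

import numpy

crop_frame = numpy.random.randint(0, 256, (512, 512, 3), dtype = numpy.uint8)
prepared = prepare_crop_frame(crop_frame)    # (1, 3, 512, 512) float32 in [-1, 1]
restored = normalize_crop_frame(prepared[0]) # (512, 512, 3) uint8 BGR again
assert numpy.array_equal(crop_frame, restored)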
facefusion/processors/frame/modules/face_swapper.py
ADDED
@@ -0,0 +1,283 @@
from typing import Any, List, Dict, Literal, Optional
from argparse import ArgumentParser
import threading
import numpy
import onnx
import onnxruntime
from onnx import numpy_helper

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import wording
from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
from facefusion.face_helper import warp_face, paste_back
from facefusion.face_reference import get_face_reference
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel, Embedding
from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, update_status
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

FRAME_PROCESSOR = None
MODEL_MATRIX = None
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER'
MODELS : Dict[str, ModelValue] =\
{
    'blendface_256':
    {
        'type': 'blendface',
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendface_256.onnx',
        'path': resolve_relative_path('../.assets/models/blendface_256.onnx'),
        'template': 'ffhq',
        'size': (512, 256),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    },
    'inswapper_128':
    {
        'type': 'inswapper',
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
        'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
        'template': 'arcface_v2',
        'size': (128, 128),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    },
    'inswapper_128_fp16':
    {
        'type': 'inswapper',
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
        'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
        'template': 'arcface_v2',
        'size': (128, 128),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    },
    'simswap_256':
    {
        'type': 'simswap',
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
        'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
        'template': 'arcface_v1',
        'size': (112, 256),
        'mean': [ 0.485, 0.456, 0.406 ],
        'standard_deviation': [ 0.229, 0.224, 0.225 ]
    },
    'simswap_512_unofficial':
    {
        'type': 'simswap',
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
        'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
        'template': 'arcface_v1',
        'size': (112, 512),
        'mean': [ 0.0, 0.0, 0.0 ],
        'standard_deviation': [ 1.0, 1.0, 1.0 ]
    }
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
    global FRAME_PROCESSOR

    with THREAD_LOCK:
        if FRAME_PROCESSOR is None:
            model_path = get_options('model').get('path')
            FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
    return FRAME_PROCESSOR


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR

    FRAME_PROCESSOR = None


def get_model_matrix() -> Any:
    global MODEL_MATRIX

    with THREAD_LOCK:
        if MODEL_MATRIX is None:
            model_path = get_options('model').get('path')
            model = onnx.load(model_path)
            MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
    return MODEL_MATRIX


def clear_model_matrix() -> None:
    global MODEL_MATRIX

    MODEL_MATRIX = None


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.face_swapper_model]
        }
    return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
    global OPTIONS

    OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), dest = 'face_swapper_model', default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.face_swapper_model = args.face_swapper_model
    if args.face_swapper_model == 'blendface_256':
        facefusion.globals.face_recognizer_model = 'arcface_blendface'
    if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
        facefusion.globals.face_recognizer_model = 'arcface_inswapper'
    if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
        facefusion.globals.face_recognizer_model = 'arcface_simswap'


def pre_check() -> bool:
    if not facefusion.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True


def pre_process(mode : ProcessMode) -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
        update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    if not is_image(facefusion.globals.source_path):
        update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
        return False
    elif not get_one_face(read_static_image(facefusion.globals.source_path)):
        update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
        return False
    if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
        update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not facefusion.globals.output_path:
        update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_model_matrix()
    clear_face_analyser()
    clear_content_analyser()
    read_static_image.cache_clear()


def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
    frame_processor = get_frame_processor()
    model_template = get_options('model').get('template')
    model_size = get_options('model').get('size')
    model_type = get_options('model').get('type')
    crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
    crop_frame = prepare_crop_frame(crop_frame)
    frame_processor_inputs = {}
    for frame_processor_input in frame_processor.get_inputs():
        if frame_processor_input.name == 'source':
            if model_type == 'blendface':
                frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
            else:
                frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
        if frame_processor_input.name == 'target':
            frame_processor_inputs[frame_processor_input.name] = crop_frame
    crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
    crop_frame = normalize_crop_frame(crop_frame)
    temp_frame = paste_back(temp_frame, crop_frame, affine_matrix, facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding)
    return temp_frame


def prepare_source_frame(source_face : Face) -> numpy.ndarray[Any, Any]:
    source_frame = read_static_image(facefusion.globals.source_path)
    source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_v2', (112, 112))
    source_frame = source_frame[:, :, ::-1] / 255.0
    source_frame = source_frame.transpose(2, 0, 1)
    source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
    return source_frame


def prepare_source_embedding(source_face : Face) -> Embedding:
    model_type = get_options('model').get('type')
    if model_type == 'inswapper':
        model_matrix = get_model_matrix()
        source_embedding = source_face.embedding.reshape((1, -1))
        source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
    else:
        source_embedding = source_face.normed_embedding.reshape(1, -1)
    return source_embedding


def prepare_crop_frame(crop_frame : Frame) -> Frame:
    model_mean = get_options('model').get('mean')
    model_standard_deviation = get_options('model').get('standard_deviation')
    crop_frame = crop_frame[:, :, ::-1] / 255.0
    crop_frame = (crop_frame - model_mean) / model_standard_deviation
    crop_frame = crop_frame.transpose(2, 0, 1)
    crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
    return crop_frame


def normalize_crop_frame(crop_frame : Frame) -> Frame:
    crop_frame = crop_frame.transpose(1, 2, 0)
    crop_frame = (crop_frame * 255.0).round()
    crop_frame = crop_frame[:, :, ::-1].astype(numpy.uint8)
    return crop_frame


def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
    if 'reference' in facefusion.globals.face_selector_mode:
        similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
        if similar_faces:
            for similar_face in similar_faces:
                temp_frame = swap_face(source_face, similar_face, temp_frame)
    if 'one' in facefusion.globals.face_selector_mode:
        target_face = get_one_face(temp_frame)
        if target_face:
            temp_frame = swap_face(source_face, target_face, temp_frame)
    if 'many' in facefusion.globals.face_selector_mode:
        many_faces = get_many_faces(temp_frame)
        if many_faces:
            for target_face in many_faces:
                temp_frame = swap_face(source_face, target_face, temp_frame)
    return temp_frame


def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    source_face = get_one_face(read_static_image(source_path))
    reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(source_face, reference_face, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    source_face = get_one_face(read_static_image(source_path))
    target_frame = read_static_image(target_path)
    reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
    result_frame = process_frame(source_face, reference_face, target_frame)
    write_image(output_path, result_frame)


def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
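Note: for the inswapper models, prepare_source_embedding projects the raw 512-d ArcFace embedding through the last initializer of the ONNX graph and divides by the norm of the raw embedding. A sketch of just that arithmetic (the random matrix is a placeholder standing in for the real initializer):

import numpy

embedding = numpy.random.randn(512).astype(numpy.float32)
model_matrix = numpy.random.randn(512, 512).astype(numpy.float32) # placeholder for the ONNX initializer
latent = numpy.dot(embedding.reshape((1, -1)), model_matrix) / numpy.linalg.norm(embedding)
print(latent.shape) # (1, 512)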
facefusion/processors/frame/modules/frame_enhancer.py
ADDED
@@ -0,0 +1,165 @@
from typing import Any, List, Dict, Literal, Optional
from argparse import ArgumentParser
import threading
import cv2
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

import facefusion.globals
import facefusion.processors.frame.core as frame_processors
from facefusion import wording
from facefusion.face_analyser import clear_face_analyser
from facefusion.content_analyser import clear_content_analyser
from facefusion.typing import Frame, Face, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from facefusion.utilities import conditional_download, resolve_relative_path, is_file, is_download_done, map_device, create_metavar, update_status
from facefusion.vision import read_image, read_static_image, write_image
from facefusion.processors.frame import globals as frame_processors_globals
from facefusion.processors.frame import choices as frame_processors_choices

FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER'
MODELS : Dict[str, ModelValue] =\
{
    'real_esrgan_x2plus':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x2plus.pth',
        'path': resolve_relative_path('../.assets/models/real_esrgan_x2plus.pth'),
        'scale': 2
    },
    'real_esrgan_x4plus':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x4plus.pth',
        'path': resolve_relative_path('../.assets/models/real_esrgan_x4plus.pth'),
        'scale': 4
    },
    'real_esrnet_x4plus':
    {
        'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrnet_x4plus.pth',
        'path': resolve_relative_path('../.assets/models/real_esrnet_x4plus.pth'),
        'scale': 4
    }
}
OPTIONS : Optional[OptionsWithModel] = None


def get_frame_processor() -> Any:
    global FRAME_PROCESSOR

    with THREAD_LOCK:
        if FRAME_PROCESSOR is None:
            model_path = get_options('model').get('path')
            model_scale = get_options('model').get('scale')
            FRAME_PROCESSOR = RealESRGANer(
                model_path = model_path,
                model = RRDBNet(
                    num_in_ch = 3,
                    num_out_ch = 3,
                    scale = model_scale
                ),
                device = map_device(facefusion.globals.execution_providers),
                scale = model_scale
            )
    return FRAME_PROCESSOR


def clear_frame_processor() -> None:
    global FRAME_PROCESSOR

    FRAME_PROCESSOR = None


def get_options(key : Literal['model']) -> Any:
    global OPTIONS

    if OPTIONS is None:
        OPTIONS =\
        {
            'model': MODELS[frame_processors_globals.frame_enhancer_model]
        }
    return OPTIONS.get(key)


def set_options(key : Literal['model'], value : Any) -> None:
    global OPTIONS

    OPTIONS[key] = value


def register_args(program : ArgumentParser) -> None:
    program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'frame_enhancer_model', default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
    program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'frame_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))


def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
    frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend


def pre_check() -> bool:
    if not facefusion.globals.skip_download:
        download_directory_path = resolve_relative_path('../.assets/models')
        model_url = get_options('model').get('url')
        conditional_download(download_directory_path, [ model_url ])
    return True


def pre_process(mode : ProcessMode) -> bool:
    model_url = get_options('model').get('url')
    model_path = get_options('model').get('path')
    if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
        update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
        return False
    elif not is_file(model_path):
        update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
        return False
    if mode == 'output' and not facefusion.globals.output_path:
        update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
        return False
    return True


def post_process() -> None:
    clear_frame_processor()
    clear_face_analyser()
    clear_content_analyser()
    read_static_image.cache_clear()


def enhance_frame(temp_frame : Frame) -> Frame:
    with THREAD_SEMAPHORE:
        paste_frame, _ = get_frame_processor().enhance(temp_frame)
    temp_frame = blend_frame(temp_frame, paste_frame)
    return temp_frame


def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
    frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
    paste_frame_height, paste_frame_width = paste_frame.shape[0:2]
    temp_frame = cv2.resize(temp_frame, (paste_frame_width, paste_frame_height))
    temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0)
    return temp_frame


def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
    return enhance_frame(temp_frame)


def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
    for temp_frame_path in temp_frame_paths:
        temp_frame = read_image(temp_frame_path)
        result_frame = process_frame(None, None, temp_frame)
        write_image(temp_frame_path, result_frame)
        update_progress()


def process_image(source_path : str, target_path : str, output_path : str) -> None:
    target_frame = read_static_image(target_path)
    result_frame = process_frame(None, None, target_frame)
    write_image(output_path, result_frame)


def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
    frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
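Note: blend_frame inverts the user-facing value, so a --frame-enhancer-blend of 80 becomes weight 0.2 on the resized original and 0.8 on the upscaled frame. A quick sketch of the weighting (dummy frames):

import cv2
import numpy

temp_frame = numpy.full((4, 4, 3), 100, numpy.uint8)
paste_frame = numpy.full((8, 8, 3), 200, numpy.uint8)
blend = 1 - (80 / 100) # 0.2 weight for the original frame
temp_frame = cv2.resize(temp_frame, (8, 8))
print(cv2.addWeighted(temp_frame, blend, paste_frame, 1 - blend, 0)[0][0]) # [180 180 180]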
facefusion/processors/frame/typings.py
ADDED
@@ -0,0 +1,7 @@
from typing import Literal

FaceSwapperModel = Literal['blendface_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']

FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score']