diff --git a/README.md b/README.md
index 1a8ad1938dbe6d4ff96f514e3e96a5e35a1baa24..45bb9533f0b6ad7374cc2aaf20c334663396a747 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,101 @@
----
-title: MyDeepFakeAI
-emoji: 🏃
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 4.13.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+FaceFusion
+==========
+
+> Next generation face swapper and enhancer.
+
+[![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci)
+![License](https://img.shields.io/badge/license-MIT-green)
+
+
+Preview
+-------
+
+![Preview](https://raw.githubusercontent.com/facefusion/facefusion/master/.github/preview.png?sanitize=true)
+
+
+Installation
+------------
+
+Be aware, the installation requires technical skills and is not intended for beginners. Please do not open platform- or installation-related issues on GitHub. We have a very helpful [Discord](https://join.facefusion.io) community that will guide you through the installation.
+
+Get started with the [installation](https://docs.facefusion.io/installation) guide.
+
+
+Usage
+-----
+
+Run the command:
+
+```
+python run.py [options]
+
+options:
+  -h, --help                                                                                                         show this help message and exit
+  -s SOURCE_PATHS, --source SOURCE_PATHS                                                                             select a source image
+  -t TARGET_PATH, --target TARGET_PATH                                                                               select a target image or video
+  -o OUTPUT_PATH, --output OUTPUT_PATH                                                                               specify the output file or directory
+  -v, --version                                                                                                      show program's version number and exit
+
+misc:
+  --skip-download                                                                                                    omit automated downloads and lookups
+  --headless                                                                                                         run the program in headless mode
+  --log-level {error,warn,info,debug}                                                                                choose from the available log levels
+
+execution:
+  --execution-providers EXECUTION_PROVIDERS [EXECUTION_PROVIDERS ...]                                                choose from the available execution providers (choices: cpu, ...)
+  --execution-thread-count [1-128]                                                                                   specify the number of execution threads
+  --execution-queue-count [1-32]                                                                                     specify the number of execution queries
+  --max-memory [0-128]                                                                                               specify the maximum amount of ram to be used (in gb)
+
+face analyser:
+  --face-analyser-order {left-right,right-left,top-bottom,bottom-top,small-large,large-small,best-worst,worst-best}  specify the order used for the face analyser
+  --face-analyser-age {child,teen,adult,senior}                                                                      specify the age used for the face analyser
+  --face-analyser-gender {male,female}                                                                               specify the gender used for the face analyser
+  --face-detector-model {retinaface,yunet}                                                                           specify the model used for the face detector
+  --face-detector-size {160x160,320x320,480x480,512x512,640x640,768x768,960x960,1024x1024}                           specify the size threshold used for the face detector
+  --face-detector-score [0.0-1.0]                                                                                    specify the score threshold used for the face detector
+
+face selector:
+  --face-selector-mode {reference,one,many}                                                                          specify the mode for the face selector
+  --reference-face-position REFERENCE_FACE_POSITION                                                                  specify the position of the reference face
+  --reference-face-distance [0.0-1.5]                                                                                specify the distance between the reference face and the target face
+  --reference-frame-number REFERENCE_FRAME_NUMBER                                                                    specify the number of the reference frame
+
+face mask:
+  --face-mask-types FACE_MASK_TYPES [FACE_MASK_TYPES ...]                                                            choose from the available face mask types (choices: box, occlusion, region)
+  --face-mask-blur [0.0-1.0]                                                                                         specify the blur amount for face mask
+  --face-mask-padding FACE_MASK_PADDING [FACE_MASK_PADDING ...]                                                      specify the face mask padding (top, right, bottom, left) in percent
+  --face-mask-regions FACE_MASK_REGIONS [FACE_MASK_REGIONS ...]                                                      choose from the available face mask regions (choices: skin, left-eyebrow, right-eyebrow, left-eye, right-eye, eye-glasses, nose, mouth, upper-lip, lower-lip)
+
+frame extraction:
+  --trim-frame-start TRIM_FRAME_START                                                                                specify the start frame for extraction
+  --trim-frame-end TRIM_FRAME_END                                                                                    specify the end frame for extraction
+  --temp-frame-format {jpg,png}                                                                                      specify the image format used for frame extraction
+  --temp-frame-quality [0-100]                                                                                       specify the image quality used for frame extraction
+  --keep-temp                                                                                                        retain temporary frames after processing
+
+output creation:
+  --output-image-quality [0-100]                                                                                     specify the quality used for the output image
+  --output-video-encoder {libx264,libx265,libvpx-vp9,h264_nvenc,hevc_nvenc}                                          specify the encoder used for the output video
+  --output-video-quality [0-100]                                                                                     specify the quality used for the output video
+  --keep-fps                                                                                                         preserve the frames per second (fps) of the target
+  --skip-audio                                                                                                       omit audio from the target
+
+frame processors:
+  --frame-processors FRAME_PROCESSORS [FRAME_PROCESSORS ...]                                                         choose from the available frame processors (choices: face_debugger, face_enhancer, face_swapper, frame_enhancer, ...)
+  --face-debugger-items FACE_DEBUGGER_ITEMS [FACE_DEBUGGER_ITEMS ...]                                                specify the face debugger items (choices: bbox, kps, face-mask, score)
+  --face-enhancer-model {codeformer,gfpgan_1.2,gfpgan_1.3,gfpgan_1.4,gpen_bfr_256,gpen_bfr_512,restoreformer}        choose the model for the frame processor
+  --face-enhancer-blend [0-100]                                                                                      specify the blend amount for the frame processor
+  --face-swapper-model {blendswap_256,inswapper_128,inswapper_128_fp16,simswap_256,simswap_512_unofficial}           choose the model for the frame processor
+  --frame-enhancer-model {real_esrgan_x2plus,real_esrgan_x4plus,real_esrnet_x4plus}                                  choose the model for the frame processor
+  --frame-enhancer-blend [0-100]                                                                                     specify the blend amount for the frame processor
+
+uis:
+  --ui-layouts UI_LAYOUTS [UI_LAYOUTS ...]                                                                           choose from the available ui layouts (choices: benchmark, webcam, default, ...)
+```
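+
+For example, a headless face swap from a source image into a target video might look like this (the file paths are placeholders):
+
+```
+python run.py --headless -s source.jpg -t target.mp4 -o output.mp4
+```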
+
+
+Documentation
+-------------
+
+Read the [documentation](https://docs.facefusion.io) for a deep dive.
diff --git a/facefusion/__init__.py b/facefusion/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/facefusion/choices.py b/facefusion/choices.py
new file mode 100644
index 0000000000000000000000000000000000000000..9808aa51deac969053727d95d6150f32da012db6
--- /dev/null
+++ b/facefusion/choices.py
@@ -0,0 +1,26 @@
+from typing import List
+
+from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder
+from facefusion.common_helper import create_range
+
+face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
+face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
+face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
+face_detector_models : List[str] = [ 'retinaface', 'yunet' ]
+face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
+face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ]
+face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ]
+face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ]
+temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ]
+output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
+
+execution_thread_count_range : List[float] = create_range(1, 128, 1)
+execution_queue_count_range : List[float] = create_range(1, 32, 1)
+max_memory_range : List[float] = create_range(0, 128, 1)
+face_detector_score_range : List[float] = create_range(0.0, 1.0, 0.05)
+face_mask_blur_range : List[float] = create_range(0.0, 1.0, 0.05)
+face_mask_padding_range : List[float] = create_range(0, 100, 1)
+reference_face_distance_range : List[float] = create_range(0.0, 1.5, 0.05)
+temp_frame_quality_range : List[float] = create_range(0, 100, 1)
+output_image_quality_range : List[float] = create_range(0, 100, 1)
+output_video_quality_range : List[float] = create_range(0, 100, 1)
diff --git a/facefusion/common_helper.py b/facefusion/common_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ddcad8d379909b269033777e1f640ddbde5bc9b
--- /dev/null
+++ b/facefusion/common_helper.py
@@ -0,0 +1,10 @@
+from typing import List, Any
+import numpy
+
+
+def create_metavar(ranges : List[Any]) -> str:
+	return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
+
+
+def create_range(start : float, stop : float, step : float) -> List[float]:
+	return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()
diff --git a/facefusion/content_analyser.py b/facefusion/content_analyser.py
new file mode 100644
index 0000000000000000000000000000000000000000..daa276e986ee7b3ac4ad7b612c59e4c741bee808
--- /dev/null
+++ b/facefusion/content_analyser.py
@@ -0,0 +1,103 @@
+from typing import Any, Dict
+from functools import lru_cache
+import threading
+import cv2
+import numpy
+import onnxruntime
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.typing import Frame, ModelValue
+from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_fps
+from facefusion.filesystem import resolve_relative_path
+from facefusion.download import conditional_download
+
+CONTENT_ANALYSER = None
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : Dict[str, ModelValue] =\
+{
+	'open_nsfw':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/open_nsfw.onnx',
+		'path': resolve_relative_path('../.assets/models/open_nsfw.onnx')
+	}
+}
+MAX_PROBABILITY = 0.80
+MAX_RATE = 5
+STREAM_COUNTER = 0
+
+
+def get_content_analyser() -> Any:
+	global CONTENT_ANALYSER
+
+	with THREAD_LOCK:
+		if CONTENT_ANALYSER is None:
+			model_path = MODELS.get('open_nsfw').get('path')
+			CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+	return CONTENT_ANALYSER
+
+
+def clear_content_analyser() -> None:
+	global CONTENT_ANALYSER
+
+	CONTENT_ANALYSER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = MODELS.get('open_nsfw').get('url')
+		conditional_download(download_directory_path, [ model_url ])
+	return True
+
+
+def analyse_stream(frame : Frame, fps : float) -> bool:
+	global STREAM_COUNTER
+
+	STREAM_COUNTER = STREAM_COUNTER + 1
+	if STREAM_COUNTER % int(fps) == 0:
+		return analyse_frame(frame)
+	return False
+
+
+def prepare_frame(frame : Frame) -> Frame:
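+	# resize to the 224x224 input expected by the model and subtract the per-channel mean before inference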
+	frame = cv2.resize(frame, (224, 224)).astype(numpy.float32)
+	frame -= numpy.array([ 104, 117, 123 ]).astype(numpy.float32)
+	frame = numpy.expand_dims(frame, axis = 0)
+	return frame
+
+
+def analyse_frame(frame : Frame) -> bool:
+	content_analyser = get_content_analyser()
+	frame = prepare_frame(frame)
+	probability = content_analyser.run(None,
+	{
+		'input:0': frame
+	})[0][0][1]
+	return probability > MAX_PROBABILITY
+
+
+@lru_cache(maxsize = None)
+def analyse_image(image_path : str) -> bool:
+	frame = read_image(image_path)
+	return analyse_frame(frame)
+
+
+@lru_cache(maxsize = None)
+def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
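+	# sample roughly one frame per second across the requested range and track the percentage of sampled frames that get flagged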
+	video_frame_total = count_video_frame_total(video_path)
+	fps = detect_fps(video_path)
+	frame_range = range(start_frame or 0, end_frame or video_frame_total)
+	rate = 0.0
+	counter = 0
+	with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+		for frame_number in frame_range:
+			if frame_number % int(fps) == 0:
+				frame = get_video_frame(video_path, frame_number)
+				if analyse_frame(frame):
+					counter += 1
+			rate = counter * int(fps) / len(frame_range) * 100
+			progress.update()
+			progress.set_postfix(rate = rate)
+	return rate > MAX_RATE
diff --git a/facefusion/core.py b/facefusion/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..9936e5692d311044cd64cbd23e4eb830e417764e
--- /dev/null
+++ b/facefusion/core.py
@@ -0,0 +1,318 @@
+import os
+
+os.environ['OMP_NUM_THREADS'] = '1'
+
+import signal
+import ssl
+import sys
+import warnings
+import platform
+import shutil
+import onnxruntime
+from argparse import ArgumentParser, HelpFormatter
+
+import facefusion.choices
+import facefusion.globals
+from facefusion.face_analyser import get_one_face, get_average_face
+from facefusion.face_store import get_reference_faces, append_reference_face
+from facefusion.vision import get_video_frame, detect_fps, read_image, read_static_images
+from facefusion import face_analyser, face_masker, content_analyser, metadata, logger, wording
+from facefusion.content_analyser import analyse_image, analyse_video
+from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
+from facefusion.common_helper import create_metavar
+from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
+from facefusion.normalizer import normalize_output_path, normalize_padding
+from facefusion.filesystem import is_image, is_video, list_module_names, get_temp_frame_paths, create_temp, move_temp, clear_temp
+from facefusion.ffmpeg import extract_frames, compress_image, merge_video, restore_audio
+
+onnxruntime.set_default_logger_severity(3)
+warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
+warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
+
+if platform.system().lower() == 'darwin':
+    ssl._create_default_https_context = ssl._create_unverified_context
+
+
+def cli() -> None:
+    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
+    program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
+    # general
+    program.add_argument('-s', '--source', action = 'append', help = wording.get('source_help'), dest = 'source_paths')
+    program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
+    program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
+    program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
+    # misc
+    group_misc = program.add_argument_group('misc')
+    group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true')
+    group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true')
+    group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = 'info', choices = logger.get_log_levels())
+    # execution
+    execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
+    group_execution = program.add_argument_group('execution')
+    group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = [ 'cpu' ], choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
+    group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = 4, choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
+    group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = 1, choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
+    group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), type = int, choices = facefusion.choices.max_memory_range, metavar = create_metavar(facefusion.choices.max_memory_range))
+    # face analyser
+    group_face_analyser = program.add_argument_group('face analyser')
+    group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = 'left-right', choices = facefusion.choices.face_analyser_orders)
+    group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), choices = facefusion.choices.face_analyser_ages)
+    group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), choices = facefusion.choices.face_analyser_genders)
+    group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = 'retinaface', choices = facefusion.choices.face_detector_models)
+    group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = '640x640', choices = facefusion.choices.face_detector_sizes)
+    group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = 0.5, choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
+    # face selector
+    group_face_selector = program.add_argument_group('face selector')
+    group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = 'reference', choices = facefusion.choices.face_selector_modes)
+    group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = 0)
+    group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = 0.6, choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
+    group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = 0)
+    # face mask
+    group_face_mask = program.add_argument_group('face mask')
+    group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = [ 'box' ], choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
+    group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = 0.3, choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
+    group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
+    group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = facefusion.choices.face_mask_regions, choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
+    # frame extraction
+    group_frame_extraction = program.add_argument_group('frame extraction')
+    group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int)
+    group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int)
+    group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = 'jpg', choices = facefusion.choices.temp_frame_formats)
+    group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = 100, choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
+    group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), action = 'store_true')
+    # output creation
+    group_output_creation = program.add_argument_group('output creation')
+    group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = 80, choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
+    group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = 'libx264', choices = facefusion.choices.output_video_encoders)
+    group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = 80, choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
+    group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), action = 'store_true')
+    group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true')
+    # frame processors
+    available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+    program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
+    group_frame_processors = program.add_argument_group('frame processors')
+    group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = [ 'face_swapper' ], nargs = '+')
+    for frame_processor in available_frame_processors:
+        frame_processor_module = load_frame_processor_module(frame_processor)
+        frame_processor_module.register_args(group_frame_processors)
+    # uis
+    group_uis = program.add_argument_group('uis')
+    group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), default = [ 'default' ], nargs = '+')
+    run(program)
+
+
+def apply_args(program : ArgumentParser) -> None:
+    args = program.parse_args()
+    # general
+    facefusion.globals.source_paths = args.source_paths
+    facefusion.globals.target_path = args.target_path
+    facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, args.output_path)
+    # misc
+    facefusion.globals.skip_download = args.skip_download
+    facefusion.globals.headless = args.headless
+    facefusion.globals.log_level = args.log_level
+    # execution
+    facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
+    facefusion.globals.execution_thread_count = args.execution_thread_count
+    facefusion.globals.execution_queue_count = args.execution_queue_count
+    facefusion.globals.max_memory = args.max_memory
+    # face analyser
+    facefusion.globals.face_analyser_order = args.face_analyser_order
+    facefusion.globals.face_analyser_age = args.face_analyser_age
+    facefusion.globals.face_analyser_gender = args.face_analyser_gender
+    facefusion.globals.face_detector_model = args.face_detector_model
+    facefusion.globals.face_detector_size = args.face_detector_size
+    facefusion.globals.face_detector_score = args.face_detector_score
+    # face selector
+    facefusion.globals.face_selector_mode = args.face_selector_mode
+    facefusion.globals.reference_face_position = args.reference_face_position
+    facefusion.globals.reference_face_distance = args.reference_face_distance
+    facefusion.globals.reference_frame_number = args.reference_frame_number
+    # face mask
+    facefusion.globals.face_mask_types = args.face_mask_types
+    facefusion.globals.face_mask_blur = args.face_mask_blur
+    facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
+    facefusion.globals.face_mask_regions = args.face_mask_regions
+    # frame extraction
+    facefusion.globals.trim_frame_start = args.trim_frame_start
+    facefusion.globals.trim_frame_end = args.trim_frame_end
+    facefusion.globals.temp_frame_format = args.temp_frame_format
+    facefusion.globals.temp_frame_quality = args.temp_frame_quality
+    facefusion.globals.keep_temp = args.keep_temp
+    # output creation
+    facefusion.globals.output_image_quality = args.output_image_quality
+    facefusion.globals.output_video_encoder = args.output_video_encoder
+    facefusion.globals.output_video_quality = args.output_video_quality
+    facefusion.globals.keep_fps = args.keep_fps
+    facefusion.globals.skip_audio = args.skip_audio
+    # frame processors
+    available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+    facefusion.globals.frame_processors = args.frame_processors
+    for frame_processor in available_frame_processors:
+        frame_processor_module = load_frame_processor_module(frame_processor)
+        frame_processor_module.apply_args(program)
+    # uis
+    facefusion.globals.ui_layouts = args.ui_layouts
+
+
+def run(program : ArgumentParser) -> None:
+    apply_args(program)
+    logger.init(facefusion.globals.log_level)
+    limit_resources()
+    if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check():
+        return
+    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+        if not frame_processor_module.pre_check():
+            return
+    if facefusion.globals.headless:
+        conditional_process()
+    else:
+        import facefusion.uis.core as ui
+
+        for ui_layout in ui.get_ui_layouts_modules(facefusion.globals.ui_layouts):
+            if not ui_layout.pre_check():
+                return
+        ui.launch()
+
+
+def destroy() -> None:
+    if facefusion.globals.target_path:
+        clear_temp(facefusion.globals.target_path)
+    sys.exit()
+
+
+def limit_resources() -> None:
+    if facefusion.globals.max_memory:
+        memory = facefusion.globals.max_memory * 1024 ** 3
+        if platform.system().lower() == 'darwin':
+            memory = facefusion.globals.max_memory * 1024 ** 6
+        if platform.system().lower() == 'windows':
+            import ctypes
+
+            kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
+            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
+        else:
+            import resource
+
+            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+
+
+def pre_check() -> bool:
+    if sys.version_info < (3, 9):
+        logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
+        print(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
+        return False
+    if not shutil.which('ffmpeg'):
+        logger.error(wording.get('ffmpeg_not_installed'), __name__.upper())
+        print(wording.get('ffmpeg_not_installed'), __name__.upper())
+        return False
+    return True
+
+
+def conditional_process() -> None:
+    conditional_append_reference_faces()
+    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+        if not frame_processor_module.pre_process('output'):
+            return
+    if is_image(facefusion.globals.target_path):
+        process_image()
+    if is_video(facefusion.globals.target_path):
+        process_video()
+
+
+def conditional_append_reference_faces() -> None:
+    if 'reference' in facefusion.globals.face_selector_mode and not get_reference_faces():
+        source_frames = read_static_images(facefusion.globals.source_paths)
+        source_face = get_average_face(source_frames)
+        if is_video(facefusion.globals.target_path):
+            reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+        else:
+            reference_frame = read_image(facefusion.globals.target_path)
+        reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+        append_reference_face('origin', reference_face)
+        if source_face and reference_face:
+            for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+                reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame)
+                reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+                append_reference_face(frame_processor_module.__name__, reference_face)
+
+
+def process_image() -> None:
+    if analyse_image(facefusion.globals.target_path):
+        return
+    shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+    # process frame
+    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+        logger.info(wording.get('processing'), frame_processor_module.NAME)
+        print(wording.get('processing'), frame_processor_module.NAME)
+        frame_processor_module.process_image(facefusion.globals.source_paths, facefusion.globals.output_path, facefusion.globals.output_path)
+        frame_processor_module.post_process()
+    # compress image
+    logger.info(wording.get('compressing_image'), __name__.upper())
+    print(wording.get('compressing_image'), __name__.upper())
+    if not compress_image(facefusion.globals.output_path):
+        logger.error(wording.get('compressing_image_failed'), __name__.upper())
+        print(wording.get('compressing_image_failed'), __name__.upper())
+    # validate image
+    if is_image(facefusion.globals.output_path):
+        logger.info(wording.get('processing_image_succeed'), __name__.upper())
+        print(wording.get('processing_image_succeed'), __name__.upper())
+    else:
+        logger.error(wording.get('processing_image_failed'), __name__.upper())
+        print(wording.get('processing_image_failed'), __name__.upper())
+
+
+def process_video() -> None:
+    if analyse_video(facefusion.globals.target_path, facefusion.globals.trim_frame_start, facefusion.globals.trim_frame_end):
+        return
+    fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+    # create temp
+    logger.info(wording.get('creating_temp'), __name__.upper())
+    print(wording.get('creating_temp'), __name__.upper())
+    create_temp(facefusion.globals.target_path)
+    # extract frames
+    logger.info(wording.get('extracting_frames_fps').format(fps = fps), __name__.upper())
+    print(wording.get('extracting_frames_fps').format(fps = fps), __name__.upper())
+    extract_frames(facefusion.globals.target_path, fps)
+    # process frame
+    temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+    if temp_frame_paths:
+        for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+            logger.info(wording.get('processing'), frame_processor_module.NAME)
+            print(wording.get('processing'), frame_processor_module.NAME)
+            frame_processor_module.process_video(facefusion.globals.source_paths, temp_frame_paths)
+            frame_processor_module.post_process()
+    else:
+        logger.error(wording.get('temp_frames_not_found'), __name__.upper())
+        print(wording.get('temp_frames_not_found'), __name__.upper())
+        return
+    # merge video
+    logger.info(wording.get('merging_video_fps').format(fps = fps), __name__.upper())
+    print(wording.get('merging_video_fps').format(fps = fps), __name__.upper())
+    if not merge_video(facefusion.globals.target_path, fps):
+        logger.error(wording.get('merging_video_failed'), __name__.upper())
+        print(wording.get('merging_video_failed'), __name__.upper())
+        return
+    # handle audio
+    if facefusion.globals.skip_audio:
+        logger.info(wording.get('skipping_audio'), __name__.upper())
+        print(wording.get('skipping_audio'), __name__.upper())
+        move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+    else:
+        logger.info(wording.get('restoring_audio'), __name__.upper())
+        print(wording.get('restoring_audio'), __name__.upper())
+        if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path):
+            logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
+            print(wording.get('restoring_audio_skipped'), __name__.upper())
+            move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+    # clear temp
+    logger.info(wording.get('clearing_temp'), __name__.upper())
+    print(wording.get('clearing_temp'), __name__.upper())
+    clear_temp(facefusion.globals.target_path)
+    # validate video
+    if is_video(facefusion.globals.output_path):
+        logger.info(wording.get('processing_video_succeed'), __name__.upper())
+        print(wording.get('processing_video_succeed'), __name__.upper())
+    else:
+        logger.error(wording.get('processing_video_failed'), __name__.upper())
+        print(wording.get('processing_video_failed'), __name__.upper())
diff --git a/facefusion/download.py b/facefusion/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..d50935f2df78386344a9376a8dddbd3267dbd65a
--- /dev/null
+++ b/facefusion/download.py
@@ -0,0 +1,44 @@
+import os
+import subprocess
+import urllib.request
+from typing import List
+from concurrent.futures import ThreadPoolExecutor
+from functools import lru_cache
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.filesystem import is_file
+
+
+def conditional_download(download_directory_path : str, urls : List[str]) -> None:
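+	# probe the download sizes up front, then let curl download in the background while the growing file size on disk drives the progress bar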
+	with ThreadPoolExecutor() as executor:
+		for url in urls:
+			executor.submit(get_download_size, url)
+	for url in urls:
+		download_file_path = os.path.join(download_directory_path, os.path.basename(url))
+		initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
+		total = get_download_size(url)
+		if initial < total:
+			with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+				subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
+				current = initial
+				while current < total:
+					if is_file(download_file_path):
+						current = os.path.getsize(download_file_path)
+						progress.update(current - progress.n)
+
+
+@lru_cache(maxsize = None)
+def get_download_size(url : str) -> int:
+	try:
+		response = urllib.request.urlopen(url, timeout = 10)
+		return int(response.getheader('Content-Length'))
+	except (OSError, ValueError):
+		return 0
+
+
+def is_download_done(url : str, file_path : str) -> bool:
+	if is_file(file_path):
+		return get_download_size(url) == os.path.getsize(file_path)
+	return False
diff --git a/facefusion/execution_helper.py b/facefusion/execution_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c66865a84c6dc8fa7893e6c2f099a62daaed85e
--- /dev/null
+++ b/facefusion/execution_helper.py
@@ -0,0 +1,22 @@
+from typing import List
+import onnxruntime
+
+
+def encode_execution_providers(execution_providers : List[str]) -> List[str]:
+	return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ]
+
+
+def decode_execution_providers(execution_providers : List[str]) -> List[str]:
+	available_execution_providers = onnxruntime.get_available_providers()
+	encoded_execution_providers = encode_execution_providers(available_execution_providers)
+	return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ]
+
+
+def map_device(execution_providers : List[str]) -> str:
+	if 'CoreMLExecutionProvider' in execution_providers:
+		return 'mps'
+	if 'CUDAExecutionProvider' in execution_providers or 'ROCMExecutionProvider' in execution_providers :
+		return 'cuda'
+	if 'OpenVINOExecutionProvider' in execution_providers:
+		return 'mkl'
+	return 'cpu'
diff --git a/facefusion/face_analyser.py b/facefusion/face_analyser.py
new file mode 100644
index 0000000000000000000000000000000000000000..06960e4eeca474ca12e47c3dbfc5cfe8d2b69dd6
--- /dev/null
+++ b/facefusion/face_analyser.py
@@ -0,0 +1,347 @@
+from typing import Any, Optional, List, Tuple
+import threading
+import cv2
+import numpy
+import onnxruntime
+
+import facefusion.globals
+from facefusion.download import conditional_download
+from facefusion.face_store import get_static_faces, set_static_faces
+from facefusion.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
+from facefusion.filesystem import resolve_relative_path
+from facefusion.typing import Frame, Face, FaceSet, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelSet, Bbox, Kps, Score, Embedding
+from facefusion.vision import resize_frame_dimension
+
+FACE_ANALYSER = None
+THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : ModelSet =\
+{
+	'face_detector_retinaface':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/retinaface_10g.onnx',
+		'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx')
+	},
+	'face_detector_yunet':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx',
+		'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
+	},
+	'face_recognizer_arcface_blendswap':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
+		'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
+	},
+	'face_recognizer_arcface_inswapper':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
+		'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
+	},
+	'face_recognizer_arcface_simswap':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_simswap.onnx',
+		'path': resolve_relative_path('../.assets/models/arcface_simswap.onnx')
+	},
+	'gender_age':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gender_age.onnx',
+		'path': resolve_relative_path('../.assets/models/gender_age.onnx')
+	}
+}
+
+
+def get_face_analyser() -> Any:
+	global FACE_ANALYSER
+
+	with THREAD_LOCK:
+		if FACE_ANALYSER is None:
+			if facefusion.globals.face_detector_model == 'retinaface':
+				face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = facefusion.globals.execution_providers)
+			if facefusion.globals.face_detector_model == 'yunet':
+				face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
+			if facefusion.globals.face_recognizer_model == 'arcface_blendswap':
+				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = facefusion.globals.execution_providers)
+			if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
+				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = facefusion.globals.execution_providers)
+			if facefusion.globals.face_recognizer_model == 'arcface_simswap':
+				face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = facefusion.globals.execution_providers)
+			gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = facefusion.globals.execution_providers)
+			FACE_ANALYSER =\
+			{
+				'face_detector': face_detector,
+				'face_recognizer': face_recognizer,
+				'gender_age': gender_age
+			}
+	return FACE_ANALYSER
+
+
+def clear_face_analyser() -> Any:
+	global FACE_ANALYSER
+
+	FACE_ANALYSER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_urls =\
+		[
+			MODELS.get('face_detector_retinaface').get('url'),
+			MODELS.get('face_detector_yunet').get('url'),
+			MODELS.get('face_recognizer_arcface_inswapper').get('url'),
+			MODELS.get('face_recognizer_arcface_simswap').get('url'),
+			MODELS.get('gender_age').get('url')
+		]
+		conditional_download(download_directory_path, model_urls)
+	return True
+
+
+def extract_faces(frame : Frame) -> List[Face]:
+	face_detector_width, face_detector_height = map(int, facefusion.globals.face_detector_size.split('x'))
+	frame_height, frame_width, _ = frame.shape
+	temp_frame = resize_frame_dimension(frame, face_detector_width, face_detector_height)
+	temp_frame_height, temp_frame_width, _ = temp_frame.shape
+	ratio_height = frame_height / temp_frame_height
+	ratio_width = frame_width / temp_frame_width
+	if facefusion.globals.face_detector_model == 'retinaface':
+		bbox_list, kps_list, score_list = detect_with_retinaface(temp_frame, temp_frame_height, temp_frame_width, face_detector_height, face_detector_width, ratio_height, ratio_width)
+		return create_faces(frame, bbox_list, kps_list, score_list)
+	elif facefusion.globals.face_detector_model == 'yunet':
+		bbox_list, kps_list, score_list = detect_with_yunet(temp_frame, temp_frame_height, temp_frame_width, ratio_height, ratio_width)
+		return create_faces(frame, bbox_list, kps_list, score_list)
+	return []
+
+
+def detect_with_retinaface(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, face_detector_height : int, face_detector_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
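+	# decode the RetinaFace outputs: each feature stride (8, 16, 32) yields score, bbox-distance and keypoint-distance maps that are matched against static anchors and rescaled to the original frame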
+	face_detector = get_face_analyser().get('face_detector')
+	bbox_list = []
+	kps_list = []
+	score_list = []
+	feature_strides = [ 8, 16, 32 ]
+	feature_map_channel = 3
+	anchor_total = 2
+	prepare_frame = numpy.zeros((face_detector_height, face_detector_width, 3))
+	prepare_frame[:temp_frame_height, :temp_frame_width, :] = temp_frame
+	temp_frame = (prepare_frame - 127.5) / 128.0
+	temp_frame = numpy.expand_dims(temp_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
+	with THREAD_SEMAPHORE:
+		detections = face_detector.run(None,
+		{
+			face_detector.get_inputs()[0].name: temp_frame
+		})
+	for index, feature_stride in enumerate(feature_strides):
+		keep_indices = numpy.where(detections[index] >= facefusion.globals.face_detector_score)[0]
+		if keep_indices.any():
+			stride_height = face_detector_height // feature_stride
+			stride_width = face_detector_width // feature_stride
+			anchors = create_static_anchors(feature_stride, anchor_total, stride_height, stride_width)
+			bbox_raw = (detections[index + feature_map_channel] * feature_stride)
+			kps_raw = detections[index + feature_map_channel * 2] * feature_stride
+			for bbox in distance_to_bbox(anchors, bbox_raw)[keep_indices]:
+				bbox_list.append(numpy.array(
+				[
+					bbox[0] * ratio_width,
+					bbox[1] * ratio_height,
+					bbox[2] * ratio_width,
+					bbox[3] * ratio_height
+				]))
+			for kps in distance_to_kps(anchors, kps_raw)[keep_indices]:
+				kps_list.append(kps * [ ratio_width, ratio_height ])
+			for score in detections[index][keep_indices]:
+				score_list.append(score[0])
+	return bbox_list, kps_list, score_list
+
+
+def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_width : int, ratio_height : float, ratio_width : float) -> Tuple[List[Bbox], List[Kps], List[Score]]:
+	face_detector = get_face_analyser().get('face_detector')
+	face_detector.setInputSize((temp_frame_width, temp_frame_height))
+	face_detector.setScoreThreshold(facefusion.globals.face_detector_score)
+	bbox_list = []
+	kps_list = []
+	score_list = []
+	with THREAD_SEMAPHORE:
+		_, detections = face_detector.detect(temp_frame)
+	if detections.any():
+		for detection in detections:
+			bbox_list.append(numpy.array(
+			[
+				detection[0] * ratio_width,
+				detection[1] * ratio_height,
+				(detection[0] + detection[2]) * ratio_width,
+				(detection[1] + detection[3]) * ratio_height
+			]))
+			kps_list.append(detection[4:14].reshape((5, 2)) * [ ratio_width, ratio_height])
+			score_list.append(detection[14])
+	return bbox_list, kps_list, score_list
+
+
+def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face]:
+	faces = []
+	if facefusion.globals.face_detector_score > 0:
+		sort_indices = numpy.argsort(-numpy.array(score_list))
+		bbox_list = [ bbox_list[index] for index in sort_indices ]
+		kps_list = [ kps_list[index] for index in sort_indices ]
+		score_list = [ score_list[index] for index in sort_indices ]
+		keep_indices = apply_nms(bbox_list, 0.4)
+		for index in keep_indices:
+			bbox = bbox_list[index]
+			kps = kps_list[index]
+			score = score_list[index]
+			embedding, normed_embedding = calc_embedding(frame, kps)
+			gender, age = detect_gender_age(frame, kps)
+			faces.append(Face(
+				bbox = bbox,
+				kps = kps,
+				score = score,
+				embedding = embedding,
+				normed_embedding = normed_embedding,
+				gender = gender,
+				age = age
+			))
+	return faces
+
+
+def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
+	face_recognizer = get_face_analyser().get('face_recognizer')
+	crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_112_v2', (112, 112))
+	crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
+	crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
+	crop_frame = numpy.expand_dims(crop_frame, axis = 0)
+	embedding = face_recognizer.run(None,
+	{
+		face_recognizer.get_inputs()[0].name: crop_frame
+	})[0]
+	embedding = embedding.ravel()
+	normed_embedding = embedding / numpy.linalg.norm(embedding)
+	return embedding, normed_embedding
+
+
+def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]:
+	gender_age = get_face_analyser().get('gender_age')
+	crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_112_v2', (96, 96))
+	crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32)
+	prediction = gender_age.run(None,
+	{
+		gender_age.get_inputs()[0].name: crop_frame
+	})[0][0]
+	gender = int(numpy.argmax(prediction[:2]))
+	age = int(numpy.round(prediction[2] * 100))
+	return gender, age
+
+
+def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
+	many_faces = get_many_faces(frame)
+	if many_faces:
+		try:
+			return many_faces[position]
+		except IndexError:
+			return many_faces[-1]
+	return None
+
+
+def get_average_face(frames : List[Frame], position : int = 0) -> Optional[Face]:
+	average_face = None
+	faces = []
+	embedding_list = []
+	normed_embedding_list = []
+	for frame in frames:
+		face = get_one_face(frame, position)
+		if face:
+			faces.append(face)
+			embedding_list.append(face.embedding)
+			normed_embedding_list.append(face.normed_embedding)
+	if faces:
+		average_face = Face(
+			bbox = faces[0].bbox,
+			kps = faces[0].kps,
+			score = faces[0].score,
+			embedding = numpy.mean(embedding_list, axis = 0),
+			normed_embedding = numpy.mean(normed_embedding_list, axis = 0),
+			gender = faces[0].gender,
+			age = faces[0].age
+		)
+	return average_face
+
+
+def get_many_faces(frame : Frame) -> List[Face]:
+	try:
+		faces_cache = get_static_faces(frame)
+		if faces_cache:
+			faces = faces_cache
+		else:
+			faces = extract_faces(frame)
+			set_static_faces(frame, faces)
+		if facefusion.globals.face_analyser_order:
+			faces = sort_by_order(faces, facefusion.globals.face_analyser_order)
+		if facefusion.globals.face_analyser_age:
+			faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
+		if facefusion.globals.face_analyser_gender:
+			faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
+		return faces
+	except (AttributeError, ValueError):
+		return []
+
+
+def find_similar_faces(frame : Frame, reference_faces : FaceSet, face_distance : float) -> List[Face]:
+	similar_faces : List[Face] = []
+	many_faces = get_many_faces(frame)
+
+	if reference_faces:
+		for reference_set in reference_faces:
+			if not similar_faces:
+				for reference_face in reference_faces[reference_set]:
+					for face in many_faces:
+						if compare_faces(face, reference_face, face_distance):
+							similar_faces.append(face)
+	return similar_faces
+
+
+def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
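+	# compare faces via the cosine distance between their normed embeddings; smaller values indicate greater similarity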
+	if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
+		current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
+		return current_face_distance < face_distance
+	return False
+
+
+def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
+	if order == 'left-right':
+		return sorted(faces, key = lambda face: face.bbox[0])
+	if order == 'right-left':
+		return sorted(faces, key = lambda face: face.bbox[0], reverse = True)
+	if order == 'top-bottom':
+		return sorted(faces, key = lambda face: face.bbox[1])
+	if order == 'bottom-top':
+		return sorted(faces, key = lambda face: face.bbox[1], reverse = True)
+	if order == 'small-large':
+		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]))
+	if order == 'large-small':
+		return sorted(faces, key = lambda face: (face.bbox[2] - face.bbox[0]) * (face.bbox[3] - face.bbox[1]), reverse = True)
+	if order == 'best-worst':
+		return sorted(faces, key = lambda face: face.score, reverse = True)
+	if order == 'worst-best':
+		return sorted(faces, key = lambda face: face.score)
+	return faces
+
+
+def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
+	filter_faces = []
+	for face in faces:
+		if face.age < 13 and age == 'child':
+			filter_faces.append(face)
+		elif face.age < 19 and age == 'teen':
+			filter_faces.append(face)
+		elif face.age < 60 and age == 'adult':
+			filter_faces.append(face)
+		elif face.age > 59 and age == 'senior':
+			filter_faces.append(face)
+	return filter_faces
+
+
+def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
+	filter_faces = []
+	for face in faces:
+		if face.gender == 0 and gender == 'female':
+			filter_faces.append(face)
+		if face.gender == 1 and gender == 'male':
+			filter_faces.append(face)
+	return filter_faces
diff --git a/facefusion/face_helper.py b/facefusion/face_helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce7940fd9a5ecfd6ca5aa32e9d749d2628c6fa80
--- /dev/null
+++ b/facefusion/face_helper.py
@@ -0,0 +1,111 @@
+from typing import Any, Dict, Tuple, List
+from cv2.typing import Size
+from functools import lru_cache
+import cv2
+import numpy
+
+from facefusion.typing import Bbox, Kps, Frame, Mask, Matrix, Template
+
+TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
+{
+	'arcface_112_v1': numpy.array(
+	[
+		[ 39.7300, 51.1380 ],
+		[ 72.2700, 51.1380 ],
+		[ 56.0000, 68.4930 ],
+		[ 42.4630, 87.0100 ],
+		[ 69.5370, 87.0100 ]
+	]),
+	'arcface_112_v2': numpy.array(
+	[
+		[ 38.2946, 51.6963 ],
+		[ 73.5318, 51.5014 ],
+		[ 56.0252, 71.7366 ],
+		[ 41.5493, 92.3655 ],
+		[ 70.7299, 92.2041 ]
+	]),
+	'arcface_128_v2': numpy.array(
+	[
+		[ 46.2946, 51.6963 ],
+		[ 81.5318, 51.5014 ],
+		[ 64.0252, 71.7366 ],
+		[ 49.5493, 92.3655 ],
+		[ 78.7299, 92.2041 ]
+	]),
+	'ffhq_512': numpy.array(
+	[
+		[ 192.98138, 239.94708 ],
+		[ 318.90277, 240.1936 ],
+		[ 256.63416, 314.01935 ],
+		[ 201.26117, 371.41043 ],
+		[ 313.08905, 371.15118 ]
+	])
+}
+
+
+def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
+	normed_template = TEMPLATES.get(template) * size[1] / size[0]
+	affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
+	crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
+	return crop_frame, affine_matrix
+
+
+def paste_back(temp_frame : Frame, crop_frame : Frame, crop_mask : Mask, affine_matrix : Matrix) -> Frame:
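+	# warp the processed crop and its mask back into the original frame and alpha-blend each channel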
+	inverse_matrix = cv2.invertAffineTransform(affine_matrix)
+	temp_frame_size = temp_frame.shape[:2][::-1]
+	inverse_crop_mask = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size).clip(0, 1)
+	inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
+	paste_frame = temp_frame.copy()
+	paste_frame[:, :, 0] = inverse_crop_mask * inverse_crop_frame[:, :, 0] + (1 - inverse_crop_mask) * temp_frame[:, :, 0]
+	paste_frame[:, :, 1] = inverse_crop_mask * inverse_crop_frame[:, :, 1] + (1 - inverse_crop_mask) * temp_frame[:, :, 1]
+	paste_frame[:, :, 2] = inverse_crop_mask * inverse_crop_frame[:, :, 2] + (1 - inverse_crop_mask) * temp_frame[:, :, 2]
+	return paste_frame
+
+
+@lru_cache(maxsize = None)
+def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
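+	# build anchor center coordinates on a regular grid spaced by the feature stride;
+	# cached with lru_cache because the grid only depends on the stride and feature map size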
+	y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
+	anchors = numpy.stack((y, x), axis = -1)
+	anchors = (anchors * feature_stride).reshape((-1, 2))
+	anchors = numpy.stack([ anchors ] * anchor_total, axis = 1).reshape((-1, 2))
+	return anchors
+
+
+def distance_to_bbox(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Bbox:
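+	# convert per-anchor edge distances into absolute (x1, y1, x2, y2) coordinates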
+	x1 = points[:, 0] - distance[:, 0]
+	y1 = points[:, 1] - distance[:, 1]
+	x2 = points[:, 0] + distance[:, 2]
+	y2 = points[:, 1] + distance[:, 3]
+	bbox = numpy.column_stack([ x1, y1, x2, y2 ])
+	return bbox
+
+
+def distance_to_kps(points : numpy.ndarray[Any, Any], distance : numpy.ndarray[Any, Any]) -> Kps:
+	x = points[:, 0::2] + distance[:, 0::2]
+	y = points[:, 1::2] + distance[:, 1::2]
+	kps = numpy.stack((x, y), axis = -1)
+	return kps
+
+
+def apply_nms(bbox_list : List[Bbox], iou_threshold : float) -> List[int]:
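+	# greedy non-maximum suppression: keep a box, drop the remaining boxes whose IoU with it exceeds the threshold, repeat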
+	keep_indices = []
+	dimension_list = numpy.reshape(bbox_list, (-1, 4))
+	x1 = dimension_list[:, 0]
+	y1 = dimension_list[:, 1]
+	x2 = dimension_list[:, 2]
+	y2 = dimension_list[:, 3]
+	areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+	indices = numpy.arange(len(bbox_list))
+	while indices.size > 0:
+		index = indices[0]
+		remain_indices = indices[1:]
+		keep_indices.append(index)
+		xx1 = numpy.maximum(x1[index], x1[remain_indices])
+		yy1 = numpy.maximum(y1[index], y1[remain_indices])
+		xx2 = numpy.minimum(x2[index], x2[remain_indices])
+		yy2 = numpy.minimum(y2[index], y2[remain_indices])
+		width = numpy.maximum(0, xx2 - xx1 + 1)
+		height = numpy.maximum(0, yy2 - yy1 + 1)
+		iou = width * height / (areas[index] + areas[remain_indices] - width * height)
+		indices = indices[numpy.where(iou <= iou_threshold)[0] + 1]
+	return keep_indices
diff --git a/facefusion/face_masker.py b/facefusion/face_masker.py
new file mode 100644
index 0000000000000000000000000000000000000000..96d877b760f7cfe0763ae2b2f50881b644452709
--- /dev/null
+++ b/facefusion/face_masker.py
@@ -0,0 +1,128 @@
+from typing import Any, Dict, List
+from cv2.typing import Size
+from functools import lru_cache
+import threading
+import cv2
+import numpy
+import onnxruntime
+
+import facefusion.globals
+from facefusion.typing import Frame, Mask, Padding, FaceMaskRegion, ModelSet
+from facefusion.filesystem import resolve_relative_path
+from facefusion.download import conditional_download
+
+FACE_OCCLUDER = None
+FACE_PARSER = None
+THREAD_LOCK : threading.Lock = threading.Lock()
+MODELS : ModelSet =\
+{
+	'face_occluder':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_occluder.onnx',
+		'path': resolve_relative_path('../.assets/models/face_occluder.onnx')
+	},
+	'face_parser':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_parser.onnx',
+		'path': resolve_relative_path('../.assets/models/face_parser.onnx')
+	}
+}
+FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\
+{
+	'skin': 1,
+	'left-eyebrow': 2,
+	'right-eyebrow': 3,
+	'left-eye': 4,
+	'right-eye': 5,
+	'eye-glasses': 6,
+	'nose': 10,
+	'mouth': 11,
+	'upper-lip': 12,
+	'lower-lip': 13
+}
+
+
+def get_face_occluder() -> Any:
+	global FACE_OCCLUDER
+
+	with THREAD_LOCK:
+		if FACE_OCCLUDER is None:
+			model_path = MODELS.get('face_occluder').get('path')
+			FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+	return FACE_OCCLUDER
+
+
+def get_face_parser() -> Any:
+	global FACE_PARSER
+
+	with THREAD_LOCK:
+		if FACE_PARSER is None:
+			model_path = MODELS.get('face_parser').get('path')
+			FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+	return FACE_PARSER
+
+
+def clear_face_occluder() -> None:
+	global FACE_OCCLUDER
+
+	FACE_OCCLUDER = None
+
+
+def clear_face_parser() -> None:
+	global FACE_PARSER
+
+	FACE_PARSER = None
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_urls =\
+		[
+			MODELS.get('face_occluder').get('url'),
+			MODELS.get('face_parser').get('url'),
+		]
+		conditional_download(download_directory_path, model_urls)
+	return True
+
+
+@lru_cache(maxsize = None)
+def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Mask:
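+	# face_mask_padding holds percentages in (top, right, bottom, left) order;
+	# the padded border is zeroed and the edge feathered with a Gaussian blur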
+	blur_amount = int(crop_size[0] * 0.5 * face_mask_blur)
+	blur_area = max(blur_amount // 2, 1)
+	box_mask = numpy.ones(crop_size, numpy.float32)
+	box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0
+	box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0
+	box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0
+	box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0
+	if blur_amount > 0:
+		box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25)
+	return box_mask
+
+
+def create_occlusion_mask(crop_frame : Frame) -> Mask:
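+	# resize the crop to the occluder input size, normalise to [0, 1], run the model and resize the predicted mask back to the crop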
+	face_occluder = get_face_occluder()
+	prepare_frame = cv2.resize(crop_frame, face_occluder.get_inputs()[0].shape[1:3][::-1])
+	prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32) / 255
+	prepare_frame = prepare_frame.transpose(0, 1, 2, 3)
+	occlusion_mask = face_occluder.run(None,
+	{
+		face_occluder.get_inputs()[0].name: prepare_frame
+	})[0][0]
+	occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32)
+	occlusion_mask = cv2.resize(occlusion_mask, crop_frame.shape[:2][::-1])
+	return occlusion_mask
+
+
+def create_region_mask(crop_frame : Frame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
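+	# the parser takes a flipped 512x512 RGB crop normalised to [-1, 1];
+	# the argmax labels are matched against the selected FACE_MASK_REGIONS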
+	face_parser = get_face_parser()
+	prepare_frame = cv2.flip(cv2.resize(crop_frame, (512, 512)), 1)
+	prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1
+	prepare_frame = prepare_frame.transpose(0, 3, 1, 2)
+	region_mask = face_parser.run(None,
+	{
+		face_parser.get_inputs()[0].name: prepare_frame
+	})[0][0]
+	region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ])
+	region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_frame.shape[:2][::-1])
+	return region_mask
diff --git a/facefusion/face_store.py b/facefusion/face_store.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f0dfa4dc44f5fbc6ab663cedadacec4b2f951f5
--- /dev/null
+++ b/facefusion/face_store.py
@@ -0,0 +1,47 @@
+from typing import Optional, List
+import hashlib
+
+from facefusion.typing import Frame, Face, FaceStore, FaceSet
+
+FACE_STORE : FaceStore =\
+{
+	'static_faces': {},
+	'reference_faces': {}
+}
+
+
+def get_static_faces(frame : Frame) -> Optional[List[Face]]:
+	frame_hash = create_frame_hash(frame)
+	if frame_hash in FACE_STORE['static_faces']:
+		return FACE_STORE['static_faces'][frame_hash]
+	return None
+
+
+def set_static_faces(frame : Frame, faces : List[Face]) -> None:
+	frame_hash = create_frame_hash(frame)
+	if frame_hash:
+		FACE_STORE['static_faces'][frame_hash] = faces
+
+
+def clear_static_faces() -> None:
+	FACE_STORE['static_faces'] = {}
+
+
+def create_frame_hash(frame : Frame) -> Optional[str]:
+	return hashlib.sha1(frame.tobytes()).hexdigest() if frame.any() else None
+
+
+def get_reference_faces() -> Optional[FaceSet]:
+	if FACE_STORE['reference_faces']:
+		return FACE_STORE['reference_faces']
+	return None
+
+
+def append_reference_face(name : str, face : Face) -> None:
+	if name not in FACE_STORE['reference_faces']:
+		FACE_STORE['reference_faces'][name] = []
+	FACE_STORE['reference_faces'][name].append(face)
+
+
+def clear_reference_faces() -> None:
+	FACE_STORE['reference_faces'] = {}
diff --git a/facefusion/ffmpeg.py b/facefusion/ffmpeg.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cbb38e862f99c779563094b1553cd820ca9962a
--- /dev/null
+++ b/facefusion/ffmpeg.py
@@ -0,0 +1,81 @@
+from typing import List
+import subprocess
+
+import facefusion.globals
+from facefusion import logger
+from facefusion.filesystem import get_temp_frames_pattern, get_temp_output_video_path
+from facefusion.vision import detect_fps
+
+
+def run_ffmpeg(args : List[str]) -> bool:
+	commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
+	commands.extend(args)
+	try:
+		subprocess.run(commands, stderr = subprocess.PIPE, check = True)
+		return True
+	except subprocess.CalledProcessError as exception:
+		logger.debug(exception.stderr.decode().strip(), __name__.upper())
+		return False
+
+
+def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
+	commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
+	commands.extend(args)
+	return subprocess.Popen(commands, stdin = subprocess.PIPE)
+
+
+def extract_frames(target_path : str, fps : float) -> bool:
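+	# map temp_frame_quality (0-100) onto ffmpeg's -q:v scale, where lower values mean higher quality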
+	temp_frame_compression = round(31 - (facefusion.globals.temp_frame_quality * 0.31))
+	trim_frame_start = facefusion.globals.trim_frame_start
+	trim_frame_end = facefusion.globals.trim_frame_end
+	temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
+	commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_compression), '-pix_fmt', 'rgb24' ]
+	if trim_frame_start is not None and trim_frame_end is not None:
+		commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
+	elif trim_frame_start is not None:
+		commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ])
+	elif trim_frame_end is not None:
+		commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
+	else:
+		commands.extend([ '-vf', 'fps=' + str(fps) ])
+	commands.extend([ '-vsync', '0', temp_frames_pattern ])
+	return run_ffmpeg(commands)
+
+
+def compress_image(output_path : str) -> bool:
+	output_image_compression = round(31 - (facefusion.globals.output_image_quality * 0.31))
+	commands = [ '-hwaccel', 'auto', '-i', output_path, '-q:v', str(output_image_compression), '-y', output_path ]
+	return run_ffmpeg(commands)
+
+
+def merge_video(target_path : str, fps : float) -> bool:
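+	# re-encode the extracted frames into a video; output_video_quality (0-100) maps onto the encoder specific -crf / -cq scale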
+	temp_output_video_path = get_temp_output_video_path(target_path)
+	temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
+	commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', temp_frames_pattern, '-c:v', facefusion.globals.output_video_encoder ]
+	if facefusion.globals.output_video_encoder in [ 'libx264', 'libx265' ]:
+		output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51))
+		commands.extend([ '-crf', str(output_video_compression) ])
+	if facefusion.globals.output_video_encoder in [ 'libvpx-vp9' ]:
+		output_video_compression = round(63 - (facefusion.globals.output_video_quality * 0.63))
+		commands.extend([ '-crf', str(output_video_compression) ])
+	if facefusion.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
+		output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51))
+		commands.extend([ '-cq', str(output_video_compression) ])
+	commands.extend([ '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_output_video_path ])
+	return run_ffmpeg(commands)
+
+
+def restore_audio(target_path : str, output_path : str) -> bool:
+	fps = detect_fps(target_path)
+	trim_frame_start = facefusion.globals.trim_frame_start
+	trim_frame_end = facefusion.globals.trim_frame_end
+	temp_output_video_path = get_temp_output_video_path(target_path)
+	commands = [ '-hwaccel', 'auto', '-i', temp_output_video_path ]
+	if trim_frame_start is not None:
+		start_time = trim_frame_start / fps
+		commands.extend([ '-ss', str(start_time) ])
+	if trim_frame_end is not None:
+		end_time = trim_frame_end / fps
+		commands.extend([ '-to', str(end_time) ])
+	commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ])
+	return run_ffmpeg(commands)
diff --git a/facefusion/filesystem.py b/facefusion/filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce9728195f594d177c33f1b31e907f0b1a47115a
--- /dev/null
+++ b/facefusion/filesystem.py
@@ -0,0 +1,91 @@
+from typing import List, Optional
+import glob
+import os
+import shutil
+import tempfile
+import filetype
+from pathlib import Path
+
+import facefusion.globals
+
+TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'facefusion')
+TEMP_OUTPUT_VIDEO_NAME = 'temp.mp4'
+
+
+def get_temp_frame_paths(target_path : str) -> List[str]:
+	temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
+	return sorted(glob.glob(temp_frames_pattern))
+
+
+def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
+	temp_directory_path = get_temp_directory_path(target_path)
+	return os.path.join(temp_directory_path, temp_frame_prefix + '.' + facefusion.globals.temp_frame_format)
+
+
+def get_temp_directory_path(target_path : str) -> str:
+	target_name, _ = os.path.splitext(os.path.basename(target_path))
+	return os.path.join(TEMP_DIRECTORY_PATH, target_name)
+
+
+def get_temp_output_video_path(target_path : str) -> str:
+	temp_directory_path = get_temp_directory_path(target_path)
+	return os.path.join(temp_directory_path, TEMP_OUTPUT_VIDEO_NAME)
+
+
+def create_temp(target_path : str) -> None:
+	temp_directory_path = get_temp_directory_path(target_path)
+	Path(temp_directory_path).mkdir(parents = True, exist_ok = True)
+
+
+def move_temp(target_path : str, output_path : str) -> None:
+	temp_output_video_path = get_temp_output_video_path(target_path)
+	if is_file(temp_output_video_path):
+		if is_file(output_path):
+			os.remove(output_path)
+		shutil.move(temp_output_video_path, output_path)
+
+
+def clear_temp(target_path : str) -> None:
+	temp_directory_path = get_temp_directory_path(target_path)
+	parent_directory_path = os.path.dirname(temp_directory_path)
+	if not facefusion.globals.keep_temp and is_directory(temp_directory_path):
+		shutil.rmtree(temp_directory_path)
+	if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
+		os.rmdir(parent_directory_path)
+
+
+def is_file(file_path : str) -> bool:
+	return bool(file_path and os.path.isfile(file_path))
+
+
+def is_directory(directory_path : str) -> bool:
+	return bool(directory_path and os.path.isdir(directory_path))
+
+
+def is_image(image_path : str) -> bool:
+	if is_file(image_path):
+		return filetype.helpers.is_image(image_path)
+	return False
+
+
+def are_images(image_paths : List[str]) -> bool:
+	if image_paths:
+		return all(is_image(image_path) for image_path in image_paths)
+	return False
+
+
+def is_video(video_path : str) -> bool:
+	if is_file(video_path):
+		return filetype.helpers.is_video(video_path)
+	return False
+
+
+def resolve_relative_path(path : str) -> str:
+	return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
+
+
+def list_module_names(path : str) -> Optional[List[str]]:
+	if os.path.exists(path):
+		files = os.listdir(path)
+		return [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ]
+	return None
diff --git a/facefusion/globals.py b/facefusion/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe7aed3f57e0ce1eb3c101c9140a4f58c43fdcd9
--- /dev/null
+++ b/facefusion/globals.py
@@ -0,0 +1,51 @@
+from typing import List, Optional
+
+from facefusion.typing import LogLevel, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding
+
+# general
+source_paths : Optional[List[str]] = None
+target_path : Optional[str] = None
+output_path : Optional[str] = None
+# misc
+skip_download : Optional[bool] = None
+headless : Optional[bool] = None
+log_level : Optional[LogLevel] = None
+# execution
+execution_providers : List[str] = []
+execution_thread_count : Optional[int] = None
+execution_queue_count : Optional[int] = None
+max_memory : Optional[int] = None
+# face analyser
+face_analyser_order : Optional[FaceAnalyserOrder] = None
+face_analyser_age : Optional[FaceAnalyserAge] = None
+face_analyser_gender : Optional[FaceAnalyserGender] = None
+face_detector_model : Optional[FaceDetectorModel] = None
+face_detector_size : Optional[str] = None
+face_detector_score : Optional[float] = None
+face_recognizer_model : Optional[FaceRecognizerModel] = None
+# face selector
+face_selector_mode : Optional[FaceSelectorMode] = None
+reference_face_position : Optional[int] = None
+reference_face_distance : Optional[float] = None
+reference_frame_number : Optional[int] = None
+# face mask
+face_mask_types : Optional[List[FaceMaskType]] = None
+face_mask_blur : Optional[float] = None
+face_mask_padding : Optional[Padding] = None
+face_mask_regions : Optional[List[FaceMaskRegion]] = None
+# frame extraction
+trim_frame_start : Optional[int] = None
+trim_frame_end : Optional[int] = None
+temp_frame_format : Optional[TempFrameFormat] = None
+temp_frame_quality : Optional[int] = None
+keep_temp : Optional[bool] = None
+# output creation
+output_image_quality : Optional[int] = None
+output_video_encoder : Optional[OutputVideoEncoder] = None
+output_video_quality : Optional[int] = None
+keep_fps : Optional[bool] = None
+skip_audio : Optional[bool] = None
+# frame processors
+frame_processors : List[str] = []
+# uis
+ui_layouts : List[str] = []
diff --git a/facefusion/installer.py b/facefusion/installer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b1d56349fb74dccb41352202ea2e5a6e0060432
--- /dev/null
+++ b/facefusion/installer.py
@@ -0,0 +1,92 @@
+from typing import Dict, Tuple
+import sys
+import os
+import platform
+import tempfile
+import subprocess
+from argparse import ArgumentParser, HelpFormatter
+
+subprocess.call([ 'pip', 'install', 'inquirer', '-q' ])
+
+import inquirer
+
+from facefusion import metadata, wording
+
+TORCH : Dict[str, str] =\
+{
+	'default': 'default',
+	'cpu': 'cpu'
+}
+ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
+{
+	'default': ('onnxruntime', '1.16.3')
+}
+if platform.system().lower() == 'linux' or platform.system().lower() == 'windows':
+	TORCH['cuda'] = 'cu118'
+	TORCH['cuda-nightly'] = 'cu121'
+	ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.16.3')
+	ONNXRUNTIMES['cuda-nightly'] = ('ort-nightly-gpu', '1.17.0.dev20231205004')
+	ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.16.0')
+if platform.system().lower() == 'linux':
+	TORCH['rocm'] = 'rocm5.6'
+	ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.16.3')
+if platform.system().lower() == 'darwin':
+	ONNXRUNTIMES['coreml-legacy'] = ('onnxruntime-coreml', '1.13.1')
+	ONNXRUNTIMES['coreml-silicon'] = ('onnxruntime-silicon', '1.16.0')
+if platform.system().lower() == 'windows':
+	ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.16.3')
+
+
+def cli() -> None:
+	program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
+	program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), choices = TORCH.keys())
+	program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
+	program.add_argument('--skip-venv', help = wording.get('skip_venv_help'), action = 'store_true')
+	program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
+	run(program)
+
+
+def run(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
+
+	if not args.skip_venv:
+		os.environ['PIP_REQUIRE_VIRTUALENV'] = '1'
+	if args.torch and args.onnxruntime:
+		answers =\
+		{
+			'torch': args.torch,
+			'onnxruntime': args.onnxruntime
+		}
+	else:
+		answers = inquirer.prompt(
+		[
+			inquirer.List('torch', message = wording.get('install_dependency_help').format(dependency = 'torch'), choices = list(TORCH.keys())),
+			inquirer.List('onnxruntime', message = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = list(ONNXRUNTIMES.keys()))
+		])
+	if answers:
+		torch = answers['torch']
+		torch_wheel = TORCH[torch]
+		onnxruntime = answers['onnxruntime']
+		onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]
+
+		subprocess.call([ 'pip', 'uninstall', 'torch', '-y', '-q' ])
+		if torch_wheel == 'default':
+			subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
+		else:
+			subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
+		if onnxruntime == 'rocm':
+			if python_id in [ 'cp39', 'cp310', 'cp311' ]:
+				wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+rocm56-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
+				wheel_path = os.path.join(tempfile.gettempdir(), wheel_name)
+				wheel_url = 'https://download.onnxruntime.ai/' + wheel_name
+				subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ])
+				subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
+				subprocess.call([ 'pip', 'install', wheel_path ])
+				os.remove(wheel_path)
+		else:
+			subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
+			if onnxruntime == 'cuda-nightly':
+				subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-12-nightly/pypi/simple' ])
+			else:
+				subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
diff --git a/facefusion/logger.py b/facefusion/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..56fe367982c5a9b96a9e85f38e5cfc66384132f8
--- /dev/null
+++ b/facefusion/logger.py
@@ -0,0 +1,39 @@
+from typing import Dict
+from logging import basicConfig, getLogger, Logger, DEBUG, INFO, WARNING, ERROR
+
+from facefusion.typing import LogLevel
+
+
+def init(log_level : LogLevel) -> None:
+	basicConfig(format = None)
+	get_package_logger().setLevel(get_log_levels()[log_level])
+
+
+def get_package_logger() -> Logger:
+	return getLogger('facefusion')
+
+
+def debug(message : str, scope : str) -> None:
+	get_package_logger().debug('[' + scope + '] ' + message)
+
+
+def info(message : str, scope : str) -> None:
+	get_package_logger().info('[' + scope + '] ' + message)
+
+
+def warn(message : str, scope : str) -> None:
+	get_package_logger().warning('[' + scope + '] ' + message)
+
+
+def error(message : str, scope : str) -> None:
+	get_package_logger().error('[' + scope + '] ' + message)
+
+
+def get_log_levels() -> Dict[LogLevel, int]:
+	return\
+	{
+		'error': ERROR,
+		'warn': WARNING,
+		'info': INFO,
+		'debug': DEBUG
+	}
diff --git a/facefusion/metadata.py b/facefusion/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..181bc418f4e1acfa803dfe49c1706060697d147b
--- /dev/null
+++ b/facefusion/metadata.py
@@ -0,0 +1,13 @@
+METADATA =\
+{
+	'name': 'FaceFusion',
+	'description': 'Next generation face swapper and enhancer',
+	'version': '2.1.3',
+	'license': 'MIT',
+	'author': 'Henry Ruhs',
+	'url': 'https://facefusion.io'
+}
+
+
+def get(key : str) -> str:
+	return METADATA[key]
diff --git a/facefusion/normalizer.py b/facefusion/normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..eee93e57d836480a22ce8d604e2cf99a80451a6f
--- /dev/null
+++ b/facefusion/normalizer.py
@@ -0,0 +1,34 @@
+from typing import List, Optional
+import os
+
+from facefusion.filesystem import is_file, is_directory
+from facefusion.typing import Padding
+
+
+def normalize_output_path(source_paths : List[str], target_path : str, output_path : str) -> Optional[str]:
+	if is_file(target_path) and is_directory(output_path):
+		target_name, target_extension = os.path.splitext(os.path.basename(target_path))
+		if source_paths and is_file(source_paths[0]):
+			source_name, _ = os.path.splitext(os.path.basename(source_paths[0]))
+			return os.path.join(output_path, source_name + '-' + target_name + target_extension)
+		return os.path.join(output_path, target_name + target_extension)
+	if is_file(target_path) and output_path:
+		_, target_extension = os.path.splitext(os.path.basename(target_path))
+		output_name, output_extension = os.path.splitext(os.path.basename(output_path))
+		output_directory_path = os.path.dirname(output_path)
+		if is_directory(output_directory_path) and output_extension:
+			return os.path.join(output_directory_path, output_name + target_extension)
+		return None
+	return output_path
+
+
+def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:
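+	# expand shorthand padding like CSS: 1, 2, 3 or 4 values become (top, right, bottom, left)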
+	if padding and len(padding) == 1:
+		return tuple([ padding[0], padding[0], padding[0], padding[0] ]) # type: ignore[return-value]
+	if padding and len(padding) == 2:
+		return tuple([ padding[0], padding[1], padding[0], padding[1] ]) # type: ignore[return-value]
+	if padding and len(padding) == 3:
+		return tuple([ padding[0], padding[1], padding[2], padding[1] ]) # type: ignore[return-value]
+	if padding and len(padding) == 4:
+		return tuple(padding) # type: ignore[return-value]
+	return None
diff --git a/facefusion/processors/__init__.py b/facefusion/processors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/facefusion/processors/frame/__init__.py b/facefusion/processors/frame/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/facefusion/processors/frame/choices.py b/facefusion/processors/frame/choices.py
new file mode 100644
index 0000000000000000000000000000000000000000..64e35c4731892350e3544b80d478f5f774048211
--- /dev/null
+++ b/facefusion/processors/frame/choices.py
@@ -0,0 +1,13 @@
+from typing import List
+import numpy
+
+from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
+
+face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
+face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
+frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
+
+face_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
+frame_enhancer_blend_range : List[int] = numpy.arange(0, 101, 1).tolist()
+
+face_debugger_items : List[FaceDebuggerItem] = [ 'bbox', 'kps', 'face-mask', 'score' ]
diff --git a/facefusion/processors/frame/core.py b/facefusion/processors/frame/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdbbe883ed3c1878b155d7b3c4624ae34a69886a
--- /dev/null
+++ b/facefusion/processors/frame/core.py
@@ -0,0 +1,98 @@
+import sys
+import importlib
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from queue import Queue
+from types import ModuleType
+from typing import Any, List
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion.typing import Process_Frames
+from facefusion.execution_helper import encode_execution_providers
+from facefusion import logger, wording
+
+FRAME_PROCESSORS_MODULES : List[ModuleType] = []
+FRAME_PROCESSORS_METHODS =\
+[
+	'get_frame_processor',
+	'clear_frame_processor',
+	'get_options',
+	'set_options',
+	'register_args',
+	'apply_args',
+	'pre_check',
+	'pre_process',
+	'get_reference_frame',
+	'process_frame',
+	'process_frames',
+	'process_image',
+	'process_video',
+	'post_process'
+]
+
+
+def load_frame_processor_module(frame_processor : str) -> Any:
+	try:
+		frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
+		for method_name in FRAME_PROCESSORS_METHODS:
+			if not hasattr(frame_processor_module, method_name):
+				raise NotImplementedError
+	except ModuleNotFoundError as exception:
+		logger.debug(exception.msg, __name__.upper())
+		sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
+	except NotImplementedError:
+		sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
+	return frame_processor_module
+
+
+def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
+	global FRAME_PROCESSORS_MODULES
+
+	if not FRAME_PROCESSORS_MODULES:
+		for frame_processor in frame_processors:
+			frame_processor_module = load_frame_processor_module(frame_processor)
+			FRAME_PROCESSORS_MODULES.append(frame_processor_module)
+	return FRAME_PROCESSORS_MODULES
+
+
+def clear_frame_processors_modules() -> None:
+	global FRAME_PROCESSORS_MODULES
+
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		frame_processor_module.clear_frame_processor()
+	FRAME_PROCESSORS_MODULES = []
+
+
+def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
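+	# split the frame paths into chunks sized by execution_thread_count and execution_queue_count
+	# and process them in a thread pool while updating a shared progress bar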
+	with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+		progress.set_postfix(
+		{
+			'execution_providers': encode_execution_providers(facefusion.globals.execution_providers),
+			'execution_thread_count': facefusion.globals.execution_thread_count,
+			'execution_queue_count': facefusion.globals.execution_queue_count
+		})
+		with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
+			futures = []
+			queue_temp_frame_paths : Queue[str] = create_queue(temp_frame_paths)
+			queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
+			while not queue_temp_frame_paths.empty():
+				payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
+				future = executor.submit(process_frames, source_paths, payload_temp_frame_paths, progress.update)
+				futures.append(future)
+			for future_done in as_completed(futures):
+				future_done.result()
+
+
+def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
+	queue : Queue[str] = Queue()
+	for frame_path in temp_frame_paths:
+		queue.put(frame_path)
+	return queue
+
+
+def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
+	queues = []
+	for _ in range(queue_per_future):
+		if not queue.empty():
+			queues.append(queue.get())
+	return queues
diff --git a/facefusion/processors/frame/globals.py b/facefusion/processors/frame/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..526b85732dadd0b8b8eb1ad04454b75968854fc7
--- /dev/null
+++ b/facefusion/processors/frame/globals.py
@@ -0,0 +1,10 @@
+from typing import List, Optional
+
+from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
+
+face_swapper_model : Optional[FaceSwapperModel] = None
+face_enhancer_model : Optional[FaceEnhancerModel] = None
+face_enhancer_blend : Optional[int] = None
+frame_enhancer_model : Optional[FrameEnhancerModel] = None
+frame_enhancer_blend : Optional[int] = None
+face_debugger_items : Optional[List[FaceDebuggerItem]] = None
diff --git a/facefusion/processors/frame/modules/__init__.py b/facefusion/processors/frame/modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/facefusion/processors/frame/modules/face_debugger.py b/facefusion/processors/frame/modules/face_debugger.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ec4e9e42f432a63573dbc1bdb4faa9ebe84fb65
--- /dev/null
+++ b/facefusion/processors/frame/modules/face_debugger.py
@@ -0,0 +1,142 @@
+from typing import Any, List, Literal
+from argparse import ArgumentParser
+import cv2
+import numpy
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import wording
+from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
+from facefusion.face_store import get_reference_faces
+from facefusion.content_analyser import clear_content_analyser
+from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode
+from facefusion.vision import read_image, read_static_image, read_static_images, write_image
+from facefusion.face_helper import warp_face
+from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
+from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
+
+NAME = __name__.upper()
+
+
+def get_frame_processor() -> None:
+	pass
+
+
+def clear_frame_processor() -> None:
+	pass
+
+
+def get_options(key : Literal['model']) -> None:
+	pass
+
+
+def set_options(key : Literal['model'], value : Any) -> None:
+	pass
+
+
+def register_args(program : ArgumentParser) -> None:
+	program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
+
+
+def apply_args(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	frame_processors_globals.face_debugger_items = args.face_debugger_items
+
+
+def pre_check() -> bool:
+	return True
+
+
+def pre_process(mode : ProcessMode) -> bool:
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+	clear_face_analyser()
+	clear_content_analyser()
+	clear_face_occluder()
+	clear_face_parser()
+	read_static_image.cache_clear()
+
+
+def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
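+	# draw the selected debugger items (bounding box, mask contours, landmarks, detection score) onto the frame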
+	primary_color = (0, 0, 255)
+	secondary_color = (0, 255, 0)
+	bounding_box = target_face.bbox.astype(numpy.int32)
+	if 'bbox' in frame_processors_globals.face_debugger_items:
+		cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
+	if 'face-mask' in frame_processors_globals.face_debugger_items:
+		crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_128_v2', (128, 512))
+		inverse_matrix = cv2.invertAffineTransform(affine_matrix)
+		temp_frame_size = temp_frame.shape[:2][::-1]
+		crop_mask_list = []
+		if 'box' in facefusion.globals.face_mask_types:
+			crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], 0, facefusion.globals.face_mask_padding))
+		if 'occlusion' in facefusion.globals.face_mask_types:
+			crop_mask_list.append(create_occlusion_mask(crop_frame))
+		if 'region' in facefusion.globals.face_mask_types:
+			crop_mask_list.append(create_region_mask(crop_frame, facefusion.globals.face_mask_regions))
+		crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
+		crop_mask = (crop_mask * 255).astype(numpy.uint8)
+		inverse_mask_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size)
+		inverse_mask_frame_edges = cv2.threshold(inverse_mask_frame, 100, 255, cv2.THRESH_BINARY)[1]
+		inverse_mask_frame_edges[inverse_mask_frame_edges > 0] = 255
+		inverse_mask_contours = cv2.findContours(inverse_mask_frame_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
+		cv2.drawContours(temp_frame, inverse_mask_contours, -1, primary_color, 2)
+	if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
+		if 'kps' in frame_processors_globals.face_debugger_items:
+			kps = target_face.kps.astype(numpy.int32)
+			for index in range(kps.shape[0]):
+				cv2.circle(temp_frame, (kps[index][0], kps[index][1]), 3, primary_color, -1)
+		if 'score' in frame_processors_globals.face_debugger_items:
+			score_text = str(round(target_face.score, 2))
+			score_position = (bounding_box[0] + 10, bounding_box[1] + 20)
+			cv2.putText(temp_frame, score_text, score_position, cv2.FONT_HERSHEY_SIMPLEX, 0.5, secondary_color, 2)
+	return temp_frame
+
+
+def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+	pass
+
+
+def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+	if 'reference' in facefusion.globals.face_selector_mode:
+		similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
+		if similar_faces:
+			for similar_face in similar_faces:
+				temp_frame = debug_face(source_face, similar_face, temp_frame)
+	if 'one' in facefusion.globals.face_selector_mode:
+		target_face = get_one_face(temp_frame)
+		if target_face:
+			temp_frame = debug_face(source_face, target_face, temp_frame)
+	if 'many' in facefusion.globals.face_selector_mode:
+		many_faces = get_many_faces(temp_frame)
+		if many_faces:
+			for target_face in many_faces:
+				temp_frame = debug_face(source_face, target_face, temp_frame)
+	return temp_frame
+
+
+def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+	source_frames = read_static_images(source_paths)
+	source_face = get_average_face(source_frames)
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = read_image(temp_frame_path)
+		result_frame = process_frame(source_face, reference_faces, temp_frame)
+		write_image(temp_frame_path, result_frame)
+		update_progress()
+
+
+def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
+	source_frames = read_static_images(source_paths)
+	source_face = get_average_face(source_frames)
+	target_frame = read_static_image(target_path)
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	result_frame = process_frame(source_face, reference_faces, target_frame)
+	write_image(output_path, result_frame)
+
+
+def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
+	frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
diff --git a/facefusion/processors/frame/modules/face_enhancer.py b/facefusion/processors/frame/modules/face_enhancer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9997b3e449c1b156e6926fd660f2370627ff13f
--- /dev/null
+++ b/facefusion/processors/frame/modules/face_enhancer.py
@@ -0,0 +1,249 @@
+from typing import Any, List, Literal, Optional
+from argparse import ArgumentParser
+import cv2
+import threading
+import numpy
+import onnxruntime
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import logger, wording
+from facefusion.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
+from facefusion.face_helper import warp_face, paste_back
+from facefusion.content_analyser import clear_content_analyser
+from facefusion.face_store import get_reference_faces
+from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
+from facefusion.common_helper import create_metavar
+from facefusion.filesystem import is_file, is_image, is_video, resolve_relative_path
+from facefusion.download import conditional_download, is_download_done
+from facefusion.vision import read_image, read_static_image, write_image
+from facefusion.processors.frame import globals as frame_processors_globals
+from facefusion.processors.frame import choices as frame_processors_choices
+from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder
+
+FRAME_PROCESSOR = None
+THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
+THREAD_LOCK : threading.Lock = threading.Lock()
+NAME = __name__.upper()
+MODELS : ModelSet =\
+{
+	'codeformer':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
+		'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 512)
+	},
+	'gfpgan_1.2':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
+		'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 512)
+	},
+	'gfpgan_1.3':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
+		'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 512)
+	},
+	'gfpgan_1.4':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
+		'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 512)
+	},
+	'gpen_bfr_256':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
+		'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
+		'template': 'arcface_128_v2',
+		'size': (128, 256)
+	},
+	'gpen_bfr_512':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
+		'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 512)
+	},
+	'restoreformer':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
+		'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 512)
+	}
+}
+OPTIONS : Optional[OptionsWithModel] = None
+
+
+def get_frame_processor() -> Any:
+	global FRAME_PROCESSOR
+
+	with THREAD_LOCK:
+		if FRAME_PROCESSOR is None:
+			model_path = get_options('model').get('path')
+			FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+	return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+	global FRAME_PROCESSOR
+
+	FRAME_PROCESSOR = None
+
+
+def get_options(key : Literal['model']) -> Any:
+	global OPTIONS
+
+	if OPTIONS is None:
+		OPTIONS =\
+		{
+			'model': MODELS[frame_processors_globals.face_enhancer_model]
+		}
+	return OPTIONS.get(key)
+
+
+def set_options(key : Literal['model'], value : Any) -> None:
+	global OPTIONS
+
+	OPTIONS[key] = value
+
+
+def register_args(program : ArgumentParser) -> None:
+	program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
+	program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
+
+
+def apply_args(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	frame_processors_globals.face_enhancer_model = args.face_enhancer_model
+	frame_processors_globals.face_enhancer_blend = args.face_enhancer_blend
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = get_options('model').get('url')
+		conditional_download(download_directory_path, [ model_url ])
+	return True
+
+
+def pre_process(mode : ProcessMode) -> bool:
+	model_url = get_options('model').get('url')
+	model_path = get_options('model').get('path')
+	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+		return False
+	elif not is_file(model_path):
+		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+		return False
+	if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+		return False
+	if mode == 'output' and not facefusion.globals.output_path:
+		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+		return False
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+	clear_face_analyser()
+	clear_content_analyser()
+	clear_face_occluder()
+	read_static_image.cache_clear()
+
+
+def enhance_face(target_face : Face, temp_frame : Frame) -> Frame:
+	frame_processor = get_frame_processor()
+	model_template = get_options('model').get('template')
+	model_size = get_options('model').get('size')
+	crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
+	crop_mask_list =\
+	[
+		create_static_box_mask(crop_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, (0, 0, 0, 0))
+	]
+	if 'occlusion' in facefusion.globals.face_mask_types:
+		crop_mask_list.append(create_occlusion_mask(crop_frame))
+	crop_frame = prepare_crop_frame(crop_frame)
+	frame_processor_inputs = {}
+	for frame_processor_input in frame_processor.get_inputs():
+		if frame_processor_input.name == 'input':
+			frame_processor_inputs[frame_processor_input.name] = crop_frame
+		if frame_processor_input.name == 'weight':
+			frame_processor_inputs[frame_processor_input.name] = numpy.array([ 1 ], dtype = numpy.double)
+	with THREAD_SEMAPHORE:
+		crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
+	crop_frame = normalize_crop_frame(crop_frame)
+	crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
+	paste_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
+	temp_frame = blend_frame(temp_frame, paste_frame)
+	return temp_frame
+
+
+def prepare_crop_frame(crop_frame : Frame) -> Frame:
+	crop_frame = crop_frame[:, :, ::-1] / 255.0
+	crop_frame = (crop_frame - 0.5) / 0.5
+	crop_frame = numpy.expand_dims(crop_frame.transpose(2, 0, 1), axis = 0).astype(numpy.float32)
+	return crop_frame
+
+
+def normalize_crop_frame(crop_frame : Frame) -> Frame:
+	crop_frame = numpy.clip(crop_frame, -1, 1)
+	crop_frame = (crop_frame + 1) / 2
+	crop_frame = crop_frame.transpose(1, 2, 0)
+	crop_frame = (crop_frame * 255.0).round()
+	crop_frame = crop_frame.astype(numpy.uint8)[:, :, ::-1]
+	return crop_frame
+
+
+def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
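+	# face_enhancer_blend is a percentage: 100 keeps only the enhanced face, 0 keeps the original frame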
+	face_enhancer_blend = 1 - (frame_processors_globals.face_enhancer_blend / 100)
+	temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
+	return temp_frame
+
+
+def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Optional[Frame]:
+	return enhance_face(target_face, temp_frame)
+
+
+def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+	if 'reference' in facefusion.globals.face_selector_mode:
+		similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
+		if similar_faces:
+			for similar_face in similar_faces:
+				temp_frame = enhance_face(similar_face, temp_frame)
+	if 'one' in facefusion.globals.face_selector_mode:
+		target_face = get_one_face(temp_frame)
+		if target_face:
+			temp_frame = enhance_face(target_face, temp_frame)
+	if 'many' in facefusion.globals.face_selector_mode:
+		many_faces = get_many_faces(temp_frame)
+		if many_faces:
+			for target_face in many_faces:
+				temp_frame = enhance_face(target_face, temp_frame)
+	return temp_frame
+
+
+def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = read_image(temp_frame_path)
+		result_frame = process_frame(None, reference_faces, temp_frame)
+		write_image(temp_frame_path, result_frame)
+		update_progress()
+
+
+def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	target_frame = read_static_image(target_path)
+	result_frame = process_frame(None, reference_faces, target_frame)
+	write_image(output_path, result_frame)
+
+
+def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
+	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
diff --git a/facefusion/processors/frame/modules/face_swapper.py b/facefusion/processors/frame/modules/face_swapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..5df02348aa4ffd21107cbc6c58eeedda42905525
--- /dev/null
+++ b/facefusion/processors/frame/modules/face_swapper.py
@@ -0,0 +1,302 @@
+from typing import Any, List, Literal, Optional
+from argparse import ArgumentParser
+import threading
+import numpy
+import onnx
+import onnxruntime
+from onnx import numpy_helper
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import logger, wording
+from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
+from facefusion.face_helper import warp_face, paste_back
+from facefusion.face_store import get_reference_faces
+from facefusion.content_analyser import clear_content_analyser
+from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, Embedding
+from facefusion.filesystem import is_file, is_image, are_images, is_video, resolve_relative_path
+from facefusion.download import conditional_download, is_download_done
+from facefusion.vision import read_image, read_static_image, read_static_images, write_image
+from facefusion.processors.frame import globals as frame_processors_globals
+from facefusion.processors.frame import choices as frame_processors_choices
+from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
+
+FRAME_PROCESSOR = None
+MODEL_MATRIX = None
+THREAD_LOCK : threading.Lock = threading.Lock()
+NAME = __name__.upper()
+MODELS : ModelSet =\
+{
+	'blendswap_256':
+	{
+		'type': 'blendswap',
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendswap_256.onnx',
+		'path': resolve_relative_path('../.assets/models/blendswap_256.onnx'),
+		'template': 'ffhq_512',
+		'size': (512, 256),
+		'mean': [ 0.0, 0.0, 0.0 ],
+		'standard_deviation': [ 1.0, 1.0, 1.0 ]
+	},
+	'inswapper_128':
+	{
+		'type': 'inswapper',
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
+		'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
+		'template': 'arcface_128_v2',
+		'size': (128, 128),
+		'mean': [ 0.0, 0.0, 0.0 ],
+		'standard_deviation': [ 1.0, 1.0, 1.0 ]
+	},
+	'inswapper_128_fp16':
+	{
+		'type': 'inswapper',
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
+		'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
+		'template': 'arcface_128_v2',
+		'size': (128, 128),
+		'mean': [ 0.0, 0.0, 0.0 ],
+		'standard_deviation': [ 1.0, 1.0, 1.0 ]
+	},
+	'simswap_256':
+	{
+		'type': 'simswap',
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
+		'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
+		'template': 'arcface_112_v1',
+		'size': (112, 256),
+		'mean': [ 0.485, 0.456, 0.406 ],
+		'standard_deviation': [ 0.229, 0.224, 0.225 ]
+	},
+	'simswap_512_unofficial':
+	{
+		'type': 'simswap',
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
+		'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
+		'template': 'arcface_112_v1',
+		'size': (112, 512),
+		'mean': [ 0.0, 0.0, 0.0 ],
+		'standard_deviation': [ 1.0, 1.0, 1.0 ]
+	}
+}
+OPTIONS : Optional[OptionsWithModel] = None
+
+
+def get_frame_processor() -> Any:
+	global FRAME_PROCESSOR
+
+	with THREAD_LOCK:
+		if FRAME_PROCESSOR is None:
+			model_path = get_options('model').get('path')
+			FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
+	return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+	global FRAME_PROCESSOR
+
+	FRAME_PROCESSOR = None
+
+
+def get_model_matrix() -> Any:
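+	# the embedding projection matrix is stored as the last initializer of the ONNX graph and reused by prepare_source_embedding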
+	global MODEL_MATRIX
+
+	with THREAD_LOCK:
+		if MODEL_MATRIX is None:
+			model_path = get_options('model').get('path')
+			model = onnx.load(model_path)
+			MODEL_MATRIX = numpy_helper.to_array(model.graph.initializer[-1])
+	return MODEL_MATRIX
+
+
+def clear_model_matrix() -> None:
+	global MODEL_MATRIX
+
+	MODEL_MATRIX = None
+
+
+def get_options(key : Literal['model']) -> Any:
+	global OPTIONS
+
+	if OPTIONS is None:
+		OPTIONS =\
+		{
+			'model': MODELS[frame_processors_globals.face_swapper_model]
+		}
+	return OPTIONS.get(key)
+
+
+def set_options(key : Literal['model'], value : Any) -> None:
+	global OPTIONS
+
+	OPTIONS[key] = value
+
+
+def register_args(program : ArgumentParser) -> None:
+	program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)
+
+
+def apply_args(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	frame_processors_globals.face_swapper_model = args.face_swapper_model
+	if args.face_swapper_model == 'blendswap_256':
+		facefusion.globals.face_recognizer_model = 'arcface_blendswap'
+	if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
+		facefusion.globals.face_recognizer_model = 'arcface_inswapper'
+	if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
+		facefusion.globals.face_recognizer_model = 'arcface_simswap'
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = get_options('model').get('url')
+		conditional_download(download_directory_path, [ model_url ])
+	return True
+
+
+def pre_process(mode : ProcessMode) -> bool:
+	model_url = get_options('model').get('url')
+	model_path = get_options('model').get('path')
+	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+		return False
+	elif not is_file(model_path):
+		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+		return False
+	if not are_images(facefusion.globals.source_paths):
+		logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
+		return False
+	for source_frame in read_static_images(facefusion.globals.source_paths):
+		if not get_one_face(source_frame):
+			logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
+			return False
+	if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+		logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+		return False
+	if mode == 'output' and not facefusion.globals.output_path:
+		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+		return False
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+	clear_model_matrix()
+	clear_face_analyser()
+	clear_content_analyser()
+	clear_face_occluder()
+	clear_face_parser()
+	read_static_image.cache_clear()
+
+
+def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+	frame_processor = get_frame_processor()
+	model_template = get_options('model').get('template')
+	model_size = get_options('model').get('size')
+	model_type = get_options('model').get('type')
+	crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
+	crop_mask_list = []
+	if 'box' in facefusion.globals.face_mask_types:
+		crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding))
+	if 'occlusion' in facefusion.globals.face_mask_types:
+		crop_mask_list.append(create_occlusion_mask(crop_frame))
+	crop_frame = prepare_crop_frame(crop_frame)
+	frame_processor_inputs = {}
+	for frame_processor_input in frame_processor.get_inputs():
+		if frame_processor_input.name == 'source':
+			if model_type == 'blendswap':
+				frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
+			else:
+				frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
+		if frame_processor_input.name == 'target':
+			frame_processor_inputs[frame_processor_input.name] = crop_frame
+	crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
+	crop_frame = normalize_crop_frame(crop_frame)
+	if 'region' in facefusion.globals.face_mask_types:
+		crop_mask_list.append(create_region_mask(crop_frame, facefusion.globals.face_mask_regions))
+	crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
+	temp_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
+	return temp_frame
+
+
+def prepare_source_frame(source_face : Face) -> Frame:
+	source_frame = read_static_image(facefusion.globals.source_paths[0])
+	source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_112_v2', (112, 112))
+	source_frame = source_frame[:, :, ::-1] / 255.0
+	source_frame = source_frame.transpose(2, 0, 1)
+	source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
+	return source_frame
+
+
+def prepare_source_embedding(source_face : Face) -> Embedding:
+	model_type = get_options('model').get('type')
+	if model_type == 'inswapper':
+		model_matrix = get_model_matrix()
+		source_embedding = source_face.embedding.reshape((1, -1))
+		source_embedding = numpy.dot(source_embedding, model_matrix) / numpy.linalg.norm(source_embedding)
+	else:
+		source_embedding = source_face.normed_embedding.reshape(1, -1)
+	return source_embedding
+
+
+def prepare_crop_frame(crop_frame : Frame) -> Frame:
+	model_mean = get_options('model').get('mean')
+	model_standard_deviation = get_options('model').get('standard_deviation')
+	crop_frame = crop_frame[:, :, ::-1] / 255.0
+	crop_frame = (crop_frame - model_mean) / model_standard_deviation
+	crop_frame = crop_frame.transpose(2, 0, 1)
+	crop_frame = numpy.expand_dims(crop_frame, axis = 0).astype(numpy.float32)
+	return crop_frame
+
+
+def normalize_crop_frame(crop_frame : Frame) -> Frame:
+	crop_frame = crop_frame.transpose(1, 2, 0)
+	crop_frame = (crop_frame * 255.0).round()
+	crop_frame = crop_frame[:, :, ::-1].astype(numpy.uint8)
+	return crop_frame
+
+
+def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+	return swap_face(source_face, target_face, temp_frame)
+
+
+def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+	if 'reference' in facefusion.globals.face_selector_mode:
+		similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
+		if similar_faces:
+			for similar_face in similar_faces:
+				temp_frame = swap_face(source_face, similar_face, temp_frame)
+	if 'one' in facefusion.globals.face_selector_mode:
+		target_face = get_one_face(temp_frame)
+		if target_face:
+			temp_frame = swap_face(source_face, target_face, temp_frame)
+	if 'many' in facefusion.globals.face_selector_mode:
+		many_faces = get_many_faces(temp_frame)
+		if many_faces:
+			for target_face in many_faces:
+				temp_frame = swap_face(source_face, target_face, temp_frame)
+	return temp_frame
+
+
+def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+	source_frames = read_static_images(source_paths)
+	source_face = get_average_face(source_frames)
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = read_image(temp_frame_path)
+		result_frame = process_frame(source_face, reference_faces, temp_frame)
+		write_image(temp_frame_path, result_frame)
+		update_progress()
+
+
+def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
+	source_frames = read_static_images(source_paths)
+	source_face = get_average_face(source_frames)
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	target_frame = read_static_image(target_path)
+	result_frame = process_frame(source_face, reference_faces, target_frame)
+	write_image(output_path, result_frame)
+
+
+def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
+	frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
diff --git a/facefusion/processors/frame/modules/frame_enhancer.py b/facefusion/processors/frame/modules/frame_enhancer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e5cca57cbb30e7dd5e97d26d837a30b46222254
--- /dev/null
+++ b/facefusion/processors/frame/modules/frame_enhancer.py
@@ -0,0 +1,172 @@
+from typing import Any, List, Literal, Optional
+from argparse import ArgumentParser
+import threading
+import cv2
+from basicsr.archs.rrdbnet_arch import RRDBNet
+from realesrgan import RealESRGANer
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import logger, wording
+from facefusion.face_analyser import clear_face_analyser
+from facefusion.content_analyser import clear_content_analyser
+from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
+from facefusion.common_helper import create_metavar
+from facefusion.execution_helper import map_device
+from facefusion.filesystem import is_file, resolve_relative_path
+from facefusion.download import conditional_download, is_download_done
+from facefusion.vision import read_image, read_static_image, write_image
+from facefusion.processors.frame import globals as frame_processors_globals
+from facefusion.processors.frame import choices as frame_processors_choices
+
+FRAME_PROCESSOR = None
+THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
+THREAD_LOCK : threading.Lock = threading.Lock()
+NAME = __name__.upper()
+MODELS : ModelSet =\
+{
+	'real_esrgan_x2plus':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x2plus.pth',
+		'path': resolve_relative_path('../.assets/models/real_esrgan_x2plus.pth'),
+		'scale': 2
+	},
+	'real_esrgan_x4plus':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrgan_x4plus.pth',
+		'path': resolve_relative_path('../.assets/models/real_esrgan_x4plus.pth'),
+		'scale': 4
+	},
+	'real_esrnet_x4plus':
+	{
+		'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/real_esrnet_x4plus.pth',
+		'path': resolve_relative_path('../.assets/models/real_esrnet_x4plus.pth'),
+		'scale': 4
+	}
+}
+OPTIONS : Optional[OptionsWithModel] = None
+
+
+def get_frame_processor() -> Any:
+	global FRAME_PROCESSOR
+
+	with THREAD_LOCK:
+		if FRAME_PROCESSOR is None:
+			model_path = get_options('model').get('path')
+			model_scale = get_options('model').get('scale')
+			FRAME_PROCESSOR = RealESRGANer(
+				model_path = model_path,
+				model = RRDBNet(
+					num_in_ch = 3,
+					num_out_ch = 3,
+					scale = model_scale
+				),
+				device = map_device(facefusion.globals.execution_providers),
+				scale = model_scale
+			)
+	return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+	global FRAME_PROCESSOR
+
+	FRAME_PROCESSOR = None
+
+
+def get_options(key : Literal['model']) -> Any:
+	global OPTIONS
+
+	if OPTIONS is None:
+		OPTIONS =\
+		{
+			'model': MODELS[frame_processors_globals.frame_enhancer_model]
+		}
+	return OPTIONS.get(key)
+
+
+def set_options(key : Literal['model'], value : Any) -> None:
+	global OPTIONS
+
+	OPTIONS[key] = value
+
+
+def register_args(program : ArgumentParser) -> None:
+	program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
+	program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
+
+
+def apply_args(program : ArgumentParser) -> None:
+	args = program.parse_args()
+	frame_processors_globals.frame_enhancer_model = args.frame_enhancer_model
+	frame_processors_globals.frame_enhancer_blend = args.frame_enhancer_blend
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		download_directory_path = resolve_relative_path('../.assets/models')
+		model_url = get_options('model').get('url')
+		conditional_download(download_directory_path, [ model_url ])
+	return True
+
+
+def pre_process(mode : ProcessMode) -> bool:
+	model_url = get_options('model').get('url')
+	model_path = get_options('model').get('path')
+	if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
+		logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
+		return False
+	elif not is_file(model_path):
+		logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
+		return False
+	if mode == 'output' and not facefusion.globals.output_path:
+		logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
+		return False
+	return True
+
+
+def post_process() -> None:
+	clear_frame_processor()
+	clear_face_analyser()
+	clear_content_analyser()
+	read_static_image.cache_clear()
+
+
+def enhance_frame(temp_frame : Frame) -> Frame:
+	with THREAD_SEMAPHORE:
+		paste_frame, _ = get_frame_processor().enhance(temp_frame)
+		temp_frame = blend_frame(temp_frame, paste_frame)
+	return temp_frame
+
+
+def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
+	frame_enhancer_blend = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
+	paste_frame_height, paste_frame_width = paste_frame.shape[0:2]
+	temp_frame = cv2.resize(temp_frame, (paste_frame_width, paste_frame_height))
+	temp_frame = cv2.addWeighted(temp_frame, frame_enhancer_blend, paste_frame, 1 - frame_enhancer_blend, 0)
+	return temp_frame
+
+
+def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+	pass
+
+
+def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+	return enhance_frame(temp_frame)
+
+
+def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
+	for temp_frame_path in temp_frame_paths:
+		temp_frame = read_image(temp_frame_path)
+		result_frame = process_frame(None, None, temp_frame)
+		write_image(temp_frame_path, result_frame)
+		update_progress()
+
+
+def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
+	target_frame = read_static_image(target_path)
+	result = process_frame(None, None, target_frame)
+	write_image(output_path, result)
+
+
+def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
+	frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
diff --git a/facefusion/processors/frame/typings.py b/facefusion/processors/frame/typings.py
new file mode 100644
index 0000000000000000000000000000000000000000..a397eef74f530a0f645ebd1d6737994719a9ba9e
--- /dev/null
+++ b/facefusion/processors/frame/typings.py
@@ -0,0 +1,7 @@
+from typing import Literal
+
+FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
+FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
+FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
+
+FaceDebuggerItem = Literal['bbox', 'kps', 'face-mask', 'score']
diff --git a/facefusion/typing.py b/facefusion/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..2964040fb394a2a0f624f40e4f3aad0c12b38226
--- /dev/null
+++ b/facefusion/typing.py
@@ -0,0 +1,51 @@
+from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
+from collections import namedtuple
+import numpy
+
+Bbox = numpy.ndarray[Any, Any]
+Kps = numpy.ndarray[Any, Any]
+Score = float
+Embedding = numpy.ndarray[Any, Any]
+Face = namedtuple('Face',
+[
+	'bbox',
+	'kps',
+	'score',
+	'embedding',
+	'normed_embedding',
+	'gender',
+	'age'
+])
+FaceSet = Dict[str, List[Face]]
+FaceStore = TypedDict('FaceStore',
+{
+	'static_faces' : FaceSet,
+	'reference_faces' : FaceSet
+})
+Frame = numpy.ndarray[Any, Any]
+Mask = numpy.ndarray[Any, Any]
+Matrix = numpy.ndarray[Any, Any]
+Padding = Tuple[int, int, int, int]
+
+Update_Process = Callable[[], None]
+Process_Frames = Callable[[List[str], List[str], Update_Process], None]
+LogLevel = Literal['error', 'warn', 'info', 'debug']
+Template = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512']
+ProcessMode = Literal['output', 'preview', 'stream']
+FaceSelectorMode = Literal['reference', 'one', 'many']
+FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
+FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
+FaceAnalyserGender = Literal['male', 'female']
+FaceDetectorModel = Literal['retinaface', 'yunet']
+FaceRecognizerModel = Literal['arcface_blendswap', 'arcface_inswapper', 'arcface_simswap']
+FaceMaskType = Literal['box', 'occlusion', 'region']
+FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip']
+TempFrameFormat = Literal['jpg', 'png']
+OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
+
+ModelValue = Dict[str, Any]
+ModelSet = Dict[str, ModelValue]
+OptionsWithModel = TypedDict('OptionsWithModel',
+{
+	'model' : ModelValue
+})
diff --git a/facefusion/uis/__init__.py b/facefusion/uis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/facefusion/uis/assets/fixes.css b/facefusion/uis/assets/fixes.css
new file mode 100644
index 0000000000000000000000000000000000000000..f65a7cfd3e3e34111a09a9100c6714ff49558615
--- /dev/null
+++ b/facefusion/uis/assets/fixes.css
@@ -0,0 +1,7 @@
+:root:root:root button:not([class])
+{
+	border-radius: 0.375rem;
+	float: left;
+	overflow: hidden;
+	width: 100%;
+}
diff --git a/facefusion/uis/assets/overrides.css b/facefusion/uis/assets/overrides.css
new file mode 100644
index 0000000000000000000000000000000000000000..86ca371d5a146c1a28e5bc188f3771b6ebc1d263
--- /dev/null
+++ b/facefusion/uis/assets/overrides.css
@@ -0,0 +1,44 @@
+:root:root:root input[type="number"]
+{
+	max-width: 6rem;
+}
+
+:root:root:root [type="checkbox"],
+:root:root:root [type="radio"]
+{
+	border-radius: 50%;
+	height: 1.125rem;
+	width: 1.125rem;
+}
+
+:root:root:root input[type="range"]
+{
+	height: 0.5rem;
+}
+
+:root:root:root input[type="range"]::-moz-range-thumb,
+:root:root:root input[type="range"]::-webkit-slider-thumb
+{
+	background: var(--neutral-300);
+	border: unset;
+	border-radius: 50%;
+	height: 1.125rem;
+	width: 1.125rem;
+}
+
+:root:root:root input[type="range"]::-webkit-slider-thumb
+{
+	margin-top: 0.375rem;
+}
+
+:root:root:root .grid-wrap.fixed-height
+{
+	min-height: unset;
+}
+
+:root:root:root .grid-container
+{
+	grid-auto-rows: minmax(5em, 1fr);
+	grid-template-columns: repeat(var(--grid-cols), minmax(5em, 1fr));
+	grid-template-rows: repeat(var(--grid-rows), minmax(5em, 1fr));
+}
diff --git a/facefusion/uis/choices.py b/facefusion/uis/choices.py
new file mode 100644
index 0000000000000000000000000000000000000000..92ae5491260c816d7bd86e2c4ee8b6fd5d43b4bc
--- /dev/null
+++ b/facefusion/uis/choices.py
@@ -0,0 +1,7 @@
+from typing import List
+
+from facefusion.uis.typing import WebcamMode
+
+common_options : List[str] = [ 'keep-fps', 'keep-temp', 'skip-audio', 'skip-download' ]
+webcam_modes : List[WebcamMode] = [ 'inline', 'udp', 'v4l2' ]
+webcam_resolutions : List[str] = [ '320x240', '640x480', '800x600', '1024x768', '1280x720', '1280x960', '1920x1080', '2560x1440', '3840x2160' ]
diff --git a/facefusion/uis/components/__init__.py b/facefusion/uis/components/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/facefusion/uis/components/about.py b/facefusion/uis/components/about.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2c52caa1bbb88262ea2137e71ccd940dcf1acbf
--- /dev/null
+++ b/facefusion/uis/components/about.py
@@ -0,0 +1,23 @@
+from typing import Optional
+import gradio
+
+from facefusion import metadata, wording
+
+ABOUT_BUTTON : Optional[gradio.HTML] = None
+DONATE_BUTTON : Optional[gradio.HTML] = None
+
+
+def render() -> None:
+	global ABOUT_BUTTON
+	global DONATE_BUTTON
+
+	ABOUT_BUTTON = gradio.Button(
+		value = metadata.get('name') + ' ' + metadata.get('version'),
+		variant = 'primary',
+		link = metadata.get('url')
+	)
+	DONATE_BUTTON = gradio.Button(
+		value = wording.get('donate_button_label'),
+		link = 'https://donate.facefusion.io',
+		size = 'sm'
+	)
diff --git a/facefusion/uis/components/benchmark.py b/facefusion/uis/components/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5c08be2730e35e2a59d35461f3556794c1a8e7
--- /dev/null
+++ b/facefusion/uis/components/benchmark.py
@@ -0,0 +1,132 @@
+from typing import Any, Optional, List, Dict, Generator
+import time
+import tempfile
+import statistics
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.face_analyser import get_face_analyser
+from facefusion.face_store import clear_static_faces
+from facefusion.processors.frame.core import get_frame_processors_modules
+from facefusion.vision import count_video_frame_total
+from facefusion.core import limit_resources, conditional_process
+from facefusion.normalizer import normalize_output_path
+from facefusion.filesystem import clear_temp
+from facefusion.uis.core import get_ui_component
+
+BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
+BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
+BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
+BENCHMARKS : Dict[str, str] =\
+{
+	'240p': '.assets/examples/target-240p.mp4',
+	'360p': '.assets/examples/target-360p.mp4',
+	'540p': '.assets/examples/target-540p.mp4',
+	'720p': '.assets/examples/target-720p.mp4',
+	'1080p': '.assets/examples/target-1080p.mp4',
+	'1440p': '.assets/examples/target-1440p.mp4',
+	'2160p': '.assets/examples/target-2160p.mp4'
+}
+
+
+def render() -> None:
+	global BENCHMARK_RESULTS_DATAFRAME
+	global BENCHMARK_START_BUTTON
+	global BENCHMARK_CLEAR_BUTTON
+
+	BENCHMARK_RESULTS_DATAFRAME = gradio.Dataframe(
+		label = wording.get('benchmark_results_dataframe_label'),
+		headers =
+		[
+			'target_path',
+			'benchmark_cycles',
+			'average_run',
+			'fastest_run',
+			'slowest_run',
+			'relative_fps'
+		],
+		datatype =
+		[
+			'str',
+			'number',
+			'number',
+			'number',
+			'number',
+			'number'
+		]
+	)
+	BENCHMARK_START_BUTTON = gradio.Button(
+		value = wording.get('start_button_label'),
+		variant = 'primary',
+		size = 'sm'
+	)
+	BENCHMARK_CLEAR_BUTTON = gradio.Button(
+		value = wording.get('clear_button_label'),
+		size = 'sm'
+	)
+
+
+def listen() -> None:
+	benchmark_runs_checkbox_group = get_ui_component('benchmark_runs_checkbox_group')
+	benchmark_cycles_slider = get_ui_component('benchmark_cycles_slider')
+	if benchmark_runs_checkbox_group and benchmark_cycles_slider:
+		BENCHMARK_START_BUTTON.click(start, inputs = [ benchmark_runs_checkbox_group, benchmark_cycles_slider ], outputs = BENCHMARK_RESULTS_DATAFRAME)
+	BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULTS_DATAFRAME)
+
+
+def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
+	facefusion.globals.source_paths = [ '.assets/examples/source.jpg' ]
+	target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
+	benchmark_results = []
+	if target_paths:
+		pre_process()
+		for target_path in target_paths:
+			benchmark_results.append(benchmark(target_path, benchmark_cycles))
+			yield benchmark_results
+		post_process()
+
+
+def pre_process() -> None:
+	limit_resources()
+	get_face_analyser()
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		frame_processor_module.get_frame_processor()
+
+
+def post_process() -> None:
+	clear_static_faces()
+
+
+def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
+	process_times = []
+	total_fps = 0.0
+	for i in range(benchmark_cycles):
+		facefusion.globals.target_path = target_path
+		facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, tempfile.gettempdir())
+		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+		start_time = time.perf_counter()
+		conditional_process()
+		end_time = time.perf_counter()
+		process_time = end_time - start_time
+		total_fps += video_frame_total / process_time
+		process_times.append(process_time)
+	average_run = round(statistics.mean(process_times), 2)
+	fastest_run = round(min(process_times), 2)
+	slowest_run = round(max(process_times), 2)
+	relative_fps = round(total_fps / benchmark_cycles, 2)
+	return\
+	[
+		facefusion.globals.target_path,
+		benchmark_cycles,
+		average_run,
+		fastest_run,
+		slowest_run,
+		relative_fps
+	]
+
+
+def clear() -> gradio.Dataframe:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	return gradio.Dataframe(value = None)
diff --git a/facefusion/uis/components/benchmark_options.py b/facefusion/uis/components/benchmark_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..75767a88817c1b709f0177d74d65976038fd8746
--- /dev/null
+++ b/facefusion/uis/components/benchmark_options.py
@@ -0,0 +1,29 @@
+from typing import Optional
+import gradio
+
+from facefusion import wording
+from facefusion.uis.core import register_ui_component
+from facefusion.uis.components.benchmark import BENCHMARKS
+
+BENCHMARK_RUNS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+BENCHMARK_CYCLES_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global BENCHMARK_RUNS_CHECKBOX_GROUP
+	global BENCHMARK_CYCLES_SLIDER
+
+	BENCHMARK_RUNS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+		label = wording.get('benchmark_runs_checkbox_group_label'),
+		value = list(BENCHMARKS.keys()),
+		choices = list(BENCHMARKS.keys())
+	)
+	BENCHMARK_CYCLES_SLIDER = gradio.Slider(
+		label = wording.get('benchmark_cycles_slider_label'),
+		value = 3,
+		step = 1,
+		minimum = 1,
+		maximum = 10
+	)
+	register_ui_component('benchmark_runs_checkbox_group', BENCHMARK_RUNS_CHECKBOX_GROUP)
+	register_ui_component('benchmark_cycles_slider', BENCHMARK_CYCLES_SLIDER)
diff --git a/facefusion/uis/components/common_options.py b/facefusion/uis/components/common_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b3e2d3971a9c0c865ea88b7e4125524fb2cbe5c
--- /dev/null
+++ b/facefusion/uis/components/common_options.py
@@ -0,0 +1,38 @@
+from typing import Optional, List
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.uis import choices as uis_choices
+
+COMMON_OPTIONS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+	global COMMON_OPTIONS_CHECKBOX_GROUP
+
+	value = []
+	if facefusion.globals.keep_fps:
+		value.append('keep-fps')
+	if facefusion.globals.keep_temp:
+		value.append('keep-temp')
+	if facefusion.globals.skip_audio:
+		value.append('skip-audio')
+	if facefusion.globals.skip_download:
+		value.append('skip-download')
+	COMMON_OPTIONS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+		label = wording.get('common_options_checkbox_group_label'),
+		choices = uis_choices.common_options,
+		value = value
+	)
+
+
+def listen() -> None:
+	COMMON_OPTIONS_CHECKBOX_GROUP.change(update, inputs = COMMON_OPTIONS_CHECKBOX_GROUP)
+
+
+def update(common_options : List[str]) -> None:
+	facefusion.globals.keep_fps = 'keep-fps' in common_options
+	facefusion.globals.keep_temp = 'keep-temp' in common_options
+	facefusion.globals.skip_audio = 'skip-audio' in common_options
+	facefusion.globals.skip_download = 'skip-download' in common_options
diff --git a/facefusion/uis/components/execution.py b/facefusion/uis/components/execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8df28fd062f4615a54b69078bfc7faf636f038d
--- /dev/null
+++ b/facefusion/uis/components/execution.py
@@ -0,0 +1,34 @@
+from typing import List, Optional
+import gradio
+import onnxruntime
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.face_analyser import clear_face_analyser
+from facefusion.processors.frame.core import clear_frame_processors_modules
+from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
+
+EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+	global EXECUTION_PROVIDERS_CHECKBOX_GROUP
+
+	EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+		label = wording.get('execution_providers_checkbox_group_label'),
+		choices = encode_execution_providers(onnxruntime.get_available_providers()),
+		value = encode_execution_providers(facefusion.globals.execution_providers)
+	)
+
+
+def listen() -> None:
+	EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP)
+
+
+def update_execution_providers(execution_providers : List[str]) -> gradio.CheckboxGroup:
+	clear_face_analyser()
+	clear_frame_processors_modules()
+	if not execution_providers:
+		execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
+	facefusion.globals.execution_providers = decode_execution_providers(execution_providers)
+	return gradio.CheckboxGroup(value = execution_providers)
diff --git a/facefusion/uis/components/execution_queue_count.py b/facefusion/uis/components/execution_queue_count.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc8a3c87d3b86490b8e290a939cc1fc029ac7fb0
--- /dev/null
+++ b/facefusion/uis/components/execution_queue_count.py
@@ -0,0 +1,28 @@
+from typing import Optional
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+
+EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global EXECUTION_QUEUE_COUNT_SLIDER
+
+	EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
+		label = wording.get('execution_queue_count_slider_label'),
+		value = facefusion.globals.execution_queue_count,
+		step = facefusion.choices.execution_queue_count_range[1] - facefusion.choices.execution_queue_count_range[0],
+		minimum = facefusion.choices.execution_queue_count_range[0],
+		maximum = facefusion.choices.execution_queue_count_range[-1]
+	)
+
+
+def listen() -> None:
+	EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER)
+
+
+def update_execution_queue_count(execution_queue_count : int = 1) -> None:
+	facefusion.globals.execution_queue_count = execution_queue_count
diff --git a/facefusion/uis/components/execution_thread_count.py b/facefusion/uis/components/execution_thread_count.py
new file mode 100644
index 0000000000000000000000000000000000000000..615d164215f663a1e49cc122c270e32731a6f3dc
--- /dev/null
+++ b/facefusion/uis/components/execution_thread_count.py
@@ -0,0 +1,29 @@
+from typing import Optional
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+
+EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global EXECUTION_THREAD_COUNT_SLIDER
+
+	EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
+		label = wording.get('execution_thread_count_slider_label'),
+		value = facefusion.globals.execution_thread_count,
+		step = facefusion.choices.execution_thread_count_range[1] - facefusion.choices.execution_thread_count_range[0],
+		minimum = facefusion.choices.execution_thread_count_range[0],
+		maximum = facefusion.choices.execution_thread_count_range[-1]
+	)
+
+
+def listen() -> None:
+	EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER)
+
+
+def update_execution_thread_count(execution_thread_count : int = 1) -> None:
+	facefusion.globals.execution_thread_count = execution_thread_count
+
diff --git a/facefusion/uis/components/face_analyser.py b/facefusion/uis/components/face_analyser.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf0b23b2c0100f641a57ba041b58421a015f2800
--- /dev/null
+++ b/facefusion/uis/components/face_analyser.py
@@ -0,0 +1,98 @@
+from typing import Optional
+
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+from facefusion.typing import FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel
+from facefusion.uis.core import register_ui_component
+
+FACE_ANALYSER_ORDER_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_DETECTOR_SIZE_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_DETECTOR_SCORE_SLIDER : Optional[gradio.Slider] = None
+FACE_DETECTOR_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+
+
+def render() -> None:
+	global FACE_ANALYSER_ORDER_DROPDOWN
+	global FACE_ANALYSER_AGE_DROPDOWN
+	global FACE_ANALYSER_GENDER_DROPDOWN
+	global FACE_DETECTOR_SIZE_DROPDOWN
+	global FACE_DETECTOR_SCORE_SLIDER
+	global FACE_DETECTOR_MODEL_DROPDOWN
+
+	with gradio.Row():
+		FACE_ANALYSER_ORDER_DROPDOWN = gradio.Dropdown(
+			label = wording.get('face_analyser_order_dropdown_label'),
+			choices = facefusion.choices.face_analyser_orders,
+			value = facefusion.globals.face_analyser_order
+		)
+		FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
+			label = wording.get('face_analyser_age_dropdown_label'),
+			choices = [ 'none' ] + facefusion.choices.face_analyser_ages,
+			value = facefusion.globals.face_analyser_age or 'none'
+		)
+		FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
+			label = wording.get('face_analyser_gender_dropdown_label'),
+			choices = [ 'none' ] + facefusion.choices.face_analyser_genders,
+			value = facefusion.globals.face_analyser_gender or 'none'
+		)
+	FACE_DETECTOR_MODEL_DROPDOWN = gradio.Dropdown(
+		label = wording.get('face_detector_model_dropdown_label'),
+		choices = facefusion.choices.face_detector_models,
+		value = facefusion.globals.face_detector_model
+	)
+	FACE_DETECTOR_SIZE_DROPDOWN = gradio.Dropdown(
+		label = wording.get('face_detector_size_dropdown_label'),
+		choices = facefusion.choices.face_detector_sizes,
+		value = facefusion.globals.face_detector_size
+	)
+	FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
+		label = wording.get('face_detector_score_slider_label'),
+		value = facefusion.globals.face_detector_score,
+		step = facefusion.choices.face_detector_score_range[1] - facefusion.choices.face_detector_score_range[0],
+		minimum = facefusion.choices.face_detector_score_range[0],
+		maximum = facefusion.choices.face_detector_score_range[-1]
+	)
+	register_ui_component('face_analyser_order_dropdown', FACE_ANALYSER_ORDER_DROPDOWN)
+	register_ui_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
+	register_ui_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN)
+	register_ui_component('face_detector_model_dropdown', FACE_DETECTOR_MODEL_DROPDOWN)
+	register_ui_component('face_detector_size_dropdown', FACE_DETECTOR_SIZE_DROPDOWN)
+	register_ui_component('face_detector_score_slider', FACE_DETECTOR_SCORE_SLIDER)
+
+
+def listen() -> None:
+	FACE_ANALYSER_ORDER_DROPDOWN.select(update_face_analyser_order, inputs = FACE_ANALYSER_ORDER_DROPDOWN)
+	FACE_ANALYSER_AGE_DROPDOWN.select(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN)
+	FACE_ANALYSER_GENDER_DROPDOWN.select(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN)
+	FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN)
+	FACE_DETECTOR_SIZE_DROPDOWN.select(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN)
+	FACE_DETECTOR_SCORE_SLIDER.change(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER)
+
+
+def update_face_analyser_order(face_analyser_order : FaceAnalyserOrder) -> None:
+	facefusion.globals.face_analyser_order = face_analyser_order if face_analyser_order != 'none' else None
+
+
+def update_face_analyser_age(face_analyser_age : FaceAnalyserAge) -> None:
+	facefusion.globals.face_analyser_age = face_analyser_age if face_analyser_age != 'none' else None
+
+
+def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None:
+	facefusion.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None
+
+
+def update_face_detector_model(face_detector_model : FaceDetectorModel) -> None:
+	facefusion.globals.face_detector_model = face_detector_model
+
+
+def update_face_detector_size(face_detector_size : str) -> None:
+	facefusion.globals.face_detector_size = face_detector_size
+
+
+def update_face_detector_score(face_detector_score : float) -> None:
+	facefusion.globals.face_detector_score = face_detector_score
diff --git a/facefusion/uis/components/face_masker.py b/facefusion/uis/components/face_masker.py
new file mode 100644
index 0000000000000000000000000000000000000000..978a019958d7552742754e2d79661800f795a8ea
--- /dev/null
+++ b/facefusion/uis/components/face_masker.py
@@ -0,0 +1,123 @@
+from typing import Optional, Tuple, List
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+from facefusion.typing import FaceMaskType, FaceMaskRegion
+from facefusion.uis.core import register_ui_component
+
+FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None
+FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None
+FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None
+FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None
+FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None
+FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None
+FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None
+FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+	global FACE_MASK_TYPES_CHECKBOX_GROUP
+	global FACE_MASK_BLUR_SLIDER
+	global FACE_MASK_BOX_GROUP
+	global FACE_MASK_REGION_GROUP
+	global FACE_MASK_PADDING_TOP_SLIDER
+	global FACE_MASK_PADDING_RIGHT_SLIDER
+	global FACE_MASK_PADDING_BOTTOM_SLIDER
+	global FACE_MASK_PADDING_LEFT_SLIDER
+	global FACE_MASK_REGION_CHECKBOX_GROUP
+
+	has_box_mask = 'box' in facefusion.globals.face_mask_types
+	has_region_mask = 'region' in facefusion.globals.face_mask_types
+	FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
+		label = wording.get('face_mask_types_checkbox_group_label'),
+		choices = facefusion.choices.face_mask_types,
+		value = facefusion.globals.face_mask_types
+	)
+	with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP:
+		FACE_MASK_BLUR_SLIDER = gradio.Slider(
+			label = wording.get('face_mask_blur_slider_label'),
+			step = facefusion.choices.face_mask_blur_range[1] - facefusion.choices.face_mask_blur_range[0],
+			minimum = facefusion.choices.face_mask_blur_range[0],
+			maximum = facefusion.choices.face_mask_blur_range[-1],
+			value = facefusion.globals.face_mask_blur
+		)
+		with gradio.Row():
+			FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider(
+				label = wording.get('face_mask_padding_top_slider_label'),
+				step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+				minimum = facefusion.choices.face_mask_padding_range[0],
+				maximum = facefusion.choices.face_mask_padding_range[-1],
+				value = facefusion.globals.face_mask_padding[0]
+			)
+			FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider(
+				label = wording.get('face_mask_padding_right_slider_label'),
+				step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+				minimum = facefusion.choices.face_mask_padding_range[0],
+				maximum = facefusion.choices.face_mask_padding_range[-1],
+				value = facefusion.globals.face_mask_padding[1]
+			)
+		with gradio.Row():
+			FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider(
+				label = wording.get('face_mask_padding_bottom_slider_label'),
+				step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+				minimum = facefusion.choices.face_mask_padding_range[0],
+				maximum = facefusion.choices.face_mask_padding_range[-1],
+				value = facefusion.globals.face_mask_padding[2]
+			)
+			FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider(
+				label = wording.get('face_mask_padding_left_slider_label'),
+				step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
+				minimum = facefusion.choices.face_mask_padding_range[0],
+				maximum = facefusion.choices.face_mask_padding_range[-1],
+				value = facefusion.globals.face_mask_padding[3]
+			)
+	with gradio.Row():
+		FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup(
+			label = wording.get('face_mask_region_checkbox_group_label'),
+			choices = facefusion.choices.face_mask_regions,
+			value = facefusion.globals.face_mask_regions,
+			visible = has_region_mask
+		)
+	register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP)
+	register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER)
+	register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER)
+	register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER)
+	register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER)
+	register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER)
+	register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP)
+
+
+def listen() -> None:
+	FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ])
+	FACE_MASK_BLUR_SLIDER.change(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
+	FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP)
+	face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]
+	for face_mask_padding_slider in face_mask_padding_sliders:
+		face_mask_padding_slider.change(update_face_mask_padding, inputs = face_mask_padding_sliders)
+
+
+def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]:
+	if not face_mask_types:
+		face_mask_types = facefusion.choices.face_mask_types
+	facefusion.globals.face_mask_types = face_mask_types
+	has_box_mask = 'box' in face_mask_types
+	has_region_mask = 'region' in face_mask_types
+	return gradio.CheckboxGroup(value = face_mask_types), gradio.Group(visible = has_box_mask), gradio.CheckboxGroup(visible = has_region_mask)
+
+
+def update_face_mask_blur(face_mask_blur : float) -> None:
+	facefusion.globals.face_mask_blur = face_mask_blur
+
+
+def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None:
+	facefusion.globals.face_mask_padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left)
+
+
+def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup:
+	if not face_mask_regions:
+		face_mask_regions = facefusion.choices.face_mask_regions
+	facefusion.globals.face_mask_regions = face_mask_regions
+	return gradio.CheckboxGroup(value = face_mask_regions)
diff --git a/facefusion/uis/components/face_selector.py b/facefusion/uis/components/face_selector.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ebf3deae972cda921e349734eed16ce0f6ef1c
--- /dev/null
+++ b/facefusion/uis/components/face_selector.py
@@ -0,0 +1,164 @@
+from typing import List, Optional, Tuple, Any, Dict
+
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+from facefusion.face_store import clear_static_faces, clear_reference_faces
+from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color
+from facefusion.face_analyser import get_many_faces
+from facefusion.typing import Frame, FaceSelectorMode
+from facefusion.filesystem import is_image, is_video
+from facefusion.uis.core import get_ui_component, register_ui_component
+from facefusion.uis.typing import ComponentName
+
+FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
+REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
+REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global FACE_SELECTOR_MODE_DROPDOWN
+	global REFERENCE_FACE_POSITION_GALLERY
+	global REFERENCE_FACE_DISTANCE_SLIDER
+
+	reference_face_gallery_args : Dict[str, Any] =\
+	{
+		'label': wording.get('reference_face_gallery_label'),
+		'object_fit': 'cover',
+		'columns': 8,
+		'allow_preview': False,
+		'visible': 'reference' in facefusion.globals.face_selector_mode
+	}
+	if is_image(facefusion.globals.target_path):
+		reference_frame = read_static_image(facefusion.globals.target_path)
+		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
+	if is_video(facefusion.globals.target_path):
+		reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
+	FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
+		label = wording.get('face_selector_mode_dropdown_label'),
+		choices = facefusion.choices.face_selector_modes,
+		value = facefusion.globals.face_selector_mode
+	)
+	REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
+	REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
+		label = wording.get('reference_face_distance_slider_label'),
+		value = facefusion.globals.reference_face_distance,
+		step = facefusion.choices.reference_face_distance_range[1] - facefusion.choices.reference_face_distance_range[0],
+		minimum = facefusion.choices.reference_face_distance_range[0],
+		maximum = facefusion.choices.reference_face_distance_range[-1],
+		visible = 'reference' in facefusion.globals.face_selector_mode
+	)
+	register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
+	register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
+	register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
+
+
+def listen() -> None:
+	FACE_SELECTOR_MODE_DROPDOWN.select(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
+	REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
+	REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
+	multi_component_names : List[ComponentName] =\
+	[
+		'target_image',
+		'target_video'
+	]
+	for component_name in multi_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			for method in [ 'upload', 'change', 'clear' ]:
+				getattr(component, method)(update_reference_face_position)
+				getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+	change_one_component_names : List[ComponentName] =\
+	[
+		'face_analyser_order_dropdown',
+		'face_analyser_age_dropdown',
+		'face_analyser_gender_dropdown'
+	]
+	for component_name in change_one_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+	change_two_component_names : List[ComponentName] =\
+	[
+		'face_detector_model_dropdown',
+		'face_detector_size_dropdown',
+		'face_detector_score_slider'
+	]
+	for component_name in change_two_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+	preview_frame_slider = get_ui_component('preview_frame_slider')
+	if preview_frame_slider:
+		preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)
+		preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
+
+
+def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
+	if face_selector_mode == 'reference':
+		facefusion.globals.face_selector_mode = face_selector_mode
+		return gradio.Gallery(visible = True), gradio.Slider(visible = True)
+	if face_selector_mode == 'one':
+		facefusion.globals.face_selector_mode = face_selector_mode
+		return gradio.Gallery(visible = False), gradio.Slider(visible = False)
+	if face_selector_mode == 'many':
+		facefusion.globals.face_selector_mode = face_selector_mode
+		return gradio.Gallery(visible = False), gradio.Slider(visible = False)
+
+
+def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
+	clear_reference_faces()
+	clear_static_faces()
+	update_reference_face_position(event.index)
+	return update_reference_position_gallery()
+
+
+def update_reference_face_position(reference_face_position : int = 0) -> None:
+	facefusion.globals.reference_face_position = reference_face_position
+
+
+def update_reference_face_distance(reference_face_distance : float) -> None:
+	facefusion.globals.reference_face_distance = reference_face_distance
+
+
+def update_reference_frame_number(reference_frame_number : int) -> None:
+	facefusion.globals.reference_frame_number = reference_frame_number
+
+
+def clear_and_update_reference_position_gallery() -> gradio.Gallery:
+	clear_reference_faces()
+	clear_static_faces()
+	return update_reference_position_gallery()
+
+
+def update_reference_position_gallery() -> gradio.Gallery:
+	gallery_frames = []
+	if is_image(facefusion.globals.target_path):
+		reference_frame = read_static_image(facefusion.globals.target_path)
+		gallery_frames = extract_gallery_frames(reference_frame)
+	if is_video(facefusion.globals.target_path):
+		reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		gallery_frames = extract_gallery_frames(reference_frame)
+	if gallery_frames:
+		return gradio.Gallery(value = gallery_frames)
+	return gradio.Gallery(value = None)
+
+
+def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
+	crop_frames = []
+	faces = get_many_faces(reference_frame)
+	for face in faces:
+		start_x, start_y, end_x, end_y = map(int, face.bbox)
+		padding_x = int((end_x - start_x) * 0.25)
+		padding_y = int((end_y - start_y) * 0.25)
+		start_x = max(0, start_x - padding_x)
+		start_y = max(0, start_y - padding_y)
+		end_x = max(0, end_x + padding_x)
+		end_y = max(0, end_y + padding_y)
+		crop_frame = reference_frame[start_y:end_y, start_x:end_x]
+		crop_frame = normalize_frame_color(crop_frame)
+		crop_frames.append(crop_frame)
+	return crop_frames
diff --git a/facefusion/uis/components/frame_processors.py b/facefusion/uis/components/frame_processors.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac687349b33e5806a05e91a44f6c01507b5e5ee5
--- /dev/null
+++ b/facefusion/uis/components/frame_processors.py
@@ -0,0 +1,40 @@
+from typing import List, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
+from facefusion.filesystem import list_module_names
+from facefusion.uis.core import register_ui_component
+
+FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+	global FRAME_PROCESSORS_CHECKBOX_GROUP
+
+	FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+		label = wording.get('frame_processors_checkbox_group_label'),
+		choices = sort_frame_processors(facefusion.globals.frame_processors),
+		value = facefusion.globals.frame_processors
+	)
+	register_ui_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP)
+
+
+def listen() -> None:
+	FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP)
+
+
+def update_frame_processors(frame_processors : List[str]) -> gradio.CheckboxGroup:
+	facefusion.globals.frame_processors = frame_processors
+	clear_frame_processors_modules()
+	for frame_processor in frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		if not frame_processor_module.pre_check():
+			return gradio.CheckboxGroup()
+	return gradio.CheckboxGroup(value = frame_processors, choices = sort_frame_processors(frame_processors))
+
+
+def sort_frame_processors(frame_processors : List[str]) -> List[str]:
+	available_frame_processors = list_module_names('facefusion/processors/frame/modules')
+	return sorted(available_frame_processors, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors))
diff --git a/facefusion/uis/components/frame_processors_options.py b/facefusion/uis/components/frame_processors_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..40f73ca6669293904ee31b270949f890a0c92dd0
--- /dev/null
+++ b/facefusion/uis/components/frame_processors_options.py
@@ -0,0 +1,141 @@
+from typing import List, Optional, Tuple
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.processors.frame.core import load_frame_processor_module
+from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
+from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
+from facefusion.uis.core import get_ui_component, register_ui_component
+
+FACE_SWAPPER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+FACE_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
+FRAME_ENHANCER_MODEL_DROPDOWN : Optional[gradio.Dropdown] = None
+FRAME_ENHANCER_BLEND_SLIDER : Optional[gradio.Slider] = None
+FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
+
+
+def render() -> None:
+	global FACE_SWAPPER_MODEL_DROPDOWN
+	global FACE_ENHANCER_MODEL_DROPDOWN
+	global FACE_ENHANCER_BLEND_SLIDER
+	global FRAME_ENHANCER_MODEL_DROPDOWN
+	global FRAME_ENHANCER_BLEND_SLIDER
+	global FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP
+
+	FACE_SWAPPER_MODEL_DROPDOWN = gradio.Dropdown(
+		label = wording.get('face_swapper_model_dropdown_label'),
+		choices = frame_processors_choices.face_swapper_models,
+		value = frame_processors_globals.face_swapper_model,
+		visible = 'face_swapper' in facefusion.globals.frame_processors
+	)
+	FACE_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
+		label = wording.get('face_enhancer_model_dropdown_label'),
+		choices = frame_processors_choices.face_enhancer_models,
+		value = frame_processors_globals.face_enhancer_model,
+		visible = 'face_enhancer' in facefusion.globals.frame_processors
+	)
+	FACE_ENHANCER_BLEND_SLIDER = gradio.Slider(
+		label = wording.get('face_enhancer_blend_slider_label'),
+		value = frame_processors_globals.face_enhancer_blend,
+		step = frame_processors_choices.face_enhancer_blend_range[1] - frame_processors_choices.face_enhancer_blend_range[0],
+		minimum = frame_processors_choices.face_enhancer_blend_range[0],
+		maximum = frame_processors_choices.face_enhancer_blend_range[-1],
+		visible = 'face_enhancer' in facefusion.globals.frame_processors
+	)
+	FRAME_ENHANCER_MODEL_DROPDOWN = gradio.Dropdown(
+		label = wording.get('frame_enhancer_model_dropdown_label'),
+		choices = frame_processors_choices.frame_enhancer_models,
+		value = frame_processors_globals.frame_enhancer_model,
+		visible = 'frame_enhancer' in facefusion.globals.frame_processors
+	)
+	FRAME_ENHANCER_BLEND_SLIDER = gradio.Slider(
+		label = wording.get('frame_enhancer_blend_slider_label'),
+		value = frame_processors_globals.frame_enhancer_blend,
+		step = frame_processors_choices.frame_enhancer_blend_range[1] - frame_processors_choices.frame_enhancer_blend_range[0],
+		minimum = frame_processors_choices.frame_enhancer_blend_range[0],
+		maximum = frame_processors_choices.frame_enhancer_blend_range[-1],
+		visible = 'frame_enhancer' in facefusion.globals.frame_processors
+	)
+	FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP = gradio.CheckboxGroup(
+		label = wording.get('face_debugger_items_checkbox_group_label'),
+		choices = frame_processors_choices.face_debugger_items,
+		value = frame_processors_globals.face_debugger_items,
+		visible = 'face_debugger' in facefusion.globals.frame_processors
+	)
+
+	register_ui_component('face_swapper_model_dropdown', FACE_SWAPPER_MODEL_DROPDOWN)
+	register_ui_component('face_enhancer_model_dropdown', FACE_ENHANCER_MODEL_DROPDOWN)
+	register_ui_component('face_enhancer_blend_slider', FACE_ENHANCER_BLEND_SLIDER)
+	register_ui_component('frame_enhancer_model_dropdown', FRAME_ENHANCER_MODEL_DROPDOWN)
+	register_ui_component('frame_enhancer_blend_slider', FRAME_ENHANCER_BLEND_SLIDER)
+	register_ui_component('face_debugger_items_checkbox_group', FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
+
+
+def listen() -> None:
+	FACE_SWAPPER_MODEL_DROPDOWN.change(update_face_swapper_model, inputs = FACE_SWAPPER_MODEL_DROPDOWN, outputs = FACE_SWAPPER_MODEL_DROPDOWN)
+	FACE_ENHANCER_MODEL_DROPDOWN.change(update_face_enhancer_model, inputs = FACE_ENHANCER_MODEL_DROPDOWN, outputs = FACE_ENHANCER_MODEL_DROPDOWN)
+	FACE_ENHANCER_BLEND_SLIDER.change(update_face_enhancer_blend, inputs = FACE_ENHANCER_BLEND_SLIDER)
+	FRAME_ENHANCER_MODEL_DROPDOWN.change(update_frame_enhancer_model, inputs = FRAME_ENHANCER_MODEL_DROPDOWN, outputs = FRAME_ENHANCER_MODEL_DROPDOWN)
+	FRAME_ENHANCER_BLEND_SLIDER.change(update_frame_enhancer_blend, inputs = FRAME_ENHANCER_BLEND_SLIDER)
+	FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP.change(update_face_debugger_items, inputs = FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP)
+	frame_processors_checkbox_group = get_ui_component('frame_processors_checkbox_group')
+	if frame_processors_checkbox_group:
+		frame_processors_checkbox_group.change(toggle_face_swapper_model, inputs = frame_processors_checkbox_group, outputs = [ FACE_SWAPPER_MODEL_DROPDOWN, FACE_ENHANCER_MODEL_DROPDOWN, FACE_ENHANCER_BLEND_SLIDER, FRAME_ENHANCER_MODEL_DROPDOWN, FRAME_ENHANCER_BLEND_SLIDER, FACE_DEBUGGER_ITEMS_CHECKBOX_GROUP ])
+
+
+def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
+	frame_processors_globals.face_swapper_model = face_swapper_model
+	if face_swapper_model == 'blendswap_256':
+		facefusion.globals.face_recognizer_model = 'arcface_blendswap'
+	if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16':
+		facefusion.globals.face_recognizer_model = 'arcface_inswapper'
+	if face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial':
+		facefusion.globals.face_recognizer_model = 'arcface_simswap'
+	face_swapper_module = load_frame_processor_module('face_swapper')
+	face_swapper_module.clear_frame_processor()
+	face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
+	if not face_swapper_module.pre_check():
+		return gradio.Dropdown()
+	return gradio.Dropdown(value = face_swapper_model)
+
+
+def update_face_enhancer_model(face_enhancer_model : FaceEnhancerModel) -> gradio.Dropdown:
+	frame_processors_globals.face_enhancer_model = face_enhancer_model
+	face_enhancer_module = load_frame_processor_module('face_enhancer')
+	face_enhancer_module.clear_frame_processor()
+	face_enhancer_module.set_options('model', face_enhancer_module.MODELS[face_enhancer_model])
+	if not face_enhancer_module.pre_check():
+		return gradio.Dropdown()
+	return gradio.Dropdown(value = face_enhancer_model)
+
+
+def update_face_enhancer_blend(face_enhancer_blend : int) -> None:
+	frame_processors_globals.face_enhancer_blend = face_enhancer_blend
+
+
+def update_frame_enhancer_model(frame_enhancer_model : FrameEnhancerModel) -> gradio.Dropdown:
+	frame_processors_globals.frame_enhancer_model = frame_enhancer_model
+	frame_enhancer_module = load_frame_processor_module('frame_enhancer')
+	frame_enhancer_module.clear_frame_processor()
+	frame_enhancer_module.set_options('model', frame_enhancer_module.MODELS[frame_enhancer_model])
+	if not frame_enhancer_module.pre_check():
+		return gradio.Dropdown()
+	return gradio.Dropdown(value = frame_enhancer_model)
+
+
+def update_frame_enhancer_blend(frame_enhancer_blend : int) -> None:
+	frame_processors_globals.frame_enhancer_blend = frame_enhancer_blend
+
+
+def update_face_debugger_items(face_debugger_items : List[FaceDebuggerItem]) -> None:
+	frame_processors_globals.face_debugger_items = face_debugger_items
+
+
+def toggle_face_swapper_model(frame_processors : List[str]) -> Tuple[gradio.Dropdown, gradio.Dropdown, gradio.Slider, gradio.Dropdown, gradio.Slider, gradio.CheckboxGroup]:
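+	# show or hide the options of each processor based on the selected frame processors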
+	has_face_swapper = 'face_swapper' in frame_processors
+	has_face_enhancer = 'face_enhancer' in frame_processors
+	has_frame_enhancer = 'frame_enhancer' in frame_processors
+	has_face_debugger = 'face_debugger' in frame_processors
+	return gradio.Dropdown(visible = has_face_swapper), gradio.Dropdown(visible = has_face_enhancer), gradio.Slider(visible = has_face_enhancer), gradio.Dropdown(visible = has_frame_enhancer), gradio.Slider(visible = has_frame_enhancer), gradio.CheckboxGroup(visible = has_face_debugger)
diff --git a/facefusion/uis/components/limit_resources.py b/facefusion/uis/components/limit_resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..6703cf1a0b45a16cb0b7fde03e193662ef2d7210
--- /dev/null
+++ b/facefusion/uis/components/limit_resources.py
@@ -0,0 +1,27 @@
+from typing import Optional
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+
+MAX_MEMORY_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global MAX_MEMORY_SLIDER
+
+	MAX_MEMORY_SLIDER = gradio.Slider(
+		label = wording.get('max_memory_slider_label'),
+		step = facefusion.choices.max_memory_range[1] - facefusion.choices.max_memory_range[0],
+		minimum = facefusion.choices.max_memory_range[0],
+		maximum = facefusion.choices.max_memory_range[-1]
+	)
+
+
+def listen() -> None:
+	MAX_MEMORY_SLIDER.change(update_max_memory, inputs = MAX_MEMORY_SLIDER)
+
+
+def update_max_memory(max_memory : int) -> None:
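+	# treat a value of 0 as no limit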
+	facefusion.globals.max_memory = max_memory if max_memory > 0 else None
diff --git a/facefusion/uis/components/output.py b/facefusion/uis/components/output.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d7388577eaa7333d01af689a8b0473653de7e2e
--- /dev/null
+++ b/facefusion/uis/components/output.py
@@ -0,0 +1,62 @@
+from typing import Tuple, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.core import limit_resources, conditional_process
+from facefusion.uis.core import get_ui_component
+from facefusion.normalizer import normalize_output_path
+from facefusion.filesystem import is_image, is_video, clear_temp
+
+OUTPUT_IMAGE : Optional[gradio.Image] = None
+OUTPUT_VIDEO : Optional[gradio.Video] = None
+OUTPUT_START_BUTTON : Optional[gradio.Button] = None
+OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None
+
+
+def render() -> None:
+	global OUTPUT_IMAGE
+	global OUTPUT_VIDEO
+	global OUTPUT_START_BUTTON
+	global OUTPUT_CLEAR_BUTTON
+
+	OUTPUT_IMAGE = gradio.Image(
+		label = wording.get('output_image_or_video_label'),
+		visible = False
+	)
+	OUTPUT_VIDEO = gradio.Video(
+		label = wording.get('output_image_or_video_label')
+	)
+	OUTPUT_START_BUTTON = gradio.Button(
+		value = wording.get('start_button_label'),
+		variant = 'primary',
+		size = 'sm'
+	)
+	OUTPUT_CLEAR_BUTTON = gradio.Button(
+		value = wording.get('clear_button_label'),
+		size = 'sm'
+	)
+
+
+def listen() -> None:
+	output_path_textbox = get_ui_component('output_path_textbox')
+	if output_path_textbox:
+		OUTPUT_START_BUTTON.click(start, inputs = output_path_textbox, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
+	OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ])
+
+
+def start(output_path : str) -> Tuple[gradio.Image, gradio.Video]:
+	facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, output_path)
+	limit_resources()
+	conditional_process()
+	if is_image(facefusion.globals.output_path):
+		return gradio.Image(value = facefusion.globals.output_path, visible = True), gradio.Video(value = None, visible = False)
+	if is_video(facefusion.globals.output_path):
+		return gradio.Image(value = None, visible = False), gradio.Video(value = facefusion.globals.output_path, visible = True)
+	return gradio.Image(), gradio.Video()
+
+
+def clear() -> Tuple[gradio.Image, gradio.Video]:
+	if facefusion.globals.target_path:
+		clear_temp(facefusion.globals.target_path)
+	return gradio.Image(value = None), gradio.Video(value = None)
diff --git a/facefusion/uis/components/output_options.py b/facefusion/uis/components/output_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b32a1103c57028ed02885831ee7138db0c93f62
--- /dev/null
+++ b/facefusion/uis/components/output_options.py
@@ -0,0 +1,94 @@
+from typing import Optional, Tuple, List
+import tempfile
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+from facefusion.typing import OutputVideoEncoder
+from facefusion.filesystem import is_image, is_video
+from facefusion.uis.typing import ComponentName
+from facefusion.uis.core import get_ui_component, register_ui_component
+
+OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None
+OUTPUT_IMAGE_QUALITY_SLIDER : Optional[gradio.Slider] = None
+OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None
+OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global OUTPUT_PATH_TEXTBOX
+	global OUTPUT_IMAGE_QUALITY_SLIDER
+	global OUTPUT_VIDEO_ENCODER_DROPDOWN
+	global OUTPUT_VIDEO_QUALITY_SLIDER
+
+	OUTPUT_PATH_TEXTBOX = gradio.Textbox(
+		label = wording.get('output_path_textbox_label'),
+		value = facefusion.globals.output_path or tempfile.gettempdir(),
+		max_lines = 1
+	)
+	OUTPUT_IMAGE_QUALITY_SLIDER = gradio.Slider(
+		label = wording.get('output_image_quality_slider_label'),
+		value = facefusion.globals.output_image_quality,
+		step = facefusion.choices.output_image_quality_range[1] - facefusion.choices.output_image_quality_range[0],
+		minimum = facefusion.choices.output_image_quality_range[0],
+		maximum = facefusion.choices.output_image_quality_range[-1],
+		visible = is_image(facefusion.globals.target_path)
+	)
+	OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown(
+		label = wording.get('output_video_encoder_dropdown_label'),
+		choices = facefusion.choices.output_video_encoders,
+		value = facefusion.globals.output_video_encoder,
+		visible = is_video(facefusion.globals.target_path)
+	)
+	OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider(
+		label = wording.get('output_video_quality_slider_label'),
+		value = facefusion.globals.output_video_quality,
+		step = facefusion.choices.output_video_quality_range[1] - facefusion.choices.output_video_quality_range[0],
+		minimum = facefusion.choices.output_video_quality_range[0],
+		maximum = facefusion.choices.output_video_quality_range[-1],
+		visible = is_video(facefusion.globals.target_path)
+	)
+	register_ui_component('output_path_textbox', OUTPUT_PATH_TEXTBOX)
+
+
+def listen() -> None:
+	OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX)
+	OUTPUT_IMAGE_QUALITY_SLIDER.change(update_output_image_quality, inputs = OUTPUT_IMAGE_QUALITY_SLIDER)
+	OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN)
+	OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER)
+	multi_component_names : List[ComponentName] =\
+	[
+		'source_image',
+		'target_image',
+		'target_video'
+	]
+	for component_name in multi_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			for method in [ 'upload', 'change', 'clear' ]:
+				getattr(component, method)(remote_update, outputs = [ OUTPUT_IMAGE_QUALITY_SLIDER, OUTPUT_VIDEO_ENCODER_DROPDOWN, OUTPUT_VIDEO_QUALITY_SLIDER ])
+
+
+def remote_update() -> Tuple[gradio.Slider, gradio.Dropdown, gradio.Slider]:
+	if is_image(facefusion.globals.target_path):
+		return gradio.Slider(visible = True), gradio.Dropdown(visible = False), gradio.Slider(visible = False)
+	if is_video(facefusion.globals.target_path):
+		return gradio.Slider(visible = False), gradio.Dropdown(visible = True), gradio.Slider(visible = True)
+	return gradio.Slider(visible = False), gradio.Dropdown(visible = False), gradio.Slider(visible = False)
+
+
+def update_output_path(output_path : str) -> None:
+	facefusion.globals.output_path = output_path
+
+
+def update_output_image_quality(output_image_quality : int) -> None:
+	facefusion.globals.output_image_quality = output_image_quality
+
+
+def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> None:
+	facefusion.globals.output_video_encoder = output_video_encoder
+
+
+def update_output_video_quality(output_video_quality : int) -> None:
+	facefusion.globals.output_video_quality = output_video_quality
diff --git a/facefusion/uis/components/preview.py b/facefusion/uis/components/preview.py
new file mode 100644
index 0000000000000000000000000000000000000000..66588745f596a66a766f3121e9b20597f778740d
--- /dev/null
+++ b/facefusion/uis/components/preview.py
@@ -0,0 +1,173 @@
+from typing import Any, Dict, List, Optional
+import cv2
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.core import conditional_append_reference_faces
+from facefusion.face_store import clear_static_faces, get_reference_faces, clear_reference_faces
+from facefusion.typing import Frame, Face, FaceSet
+from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_dimension, read_static_image, read_static_images
+from facefusion.face_analyser import get_average_face, clear_face_analyser
+from facefusion.content_analyser import analyse_frame
+from facefusion.processors.frame.core import load_frame_processor_module
+from facefusion.filesystem import is_image, is_video
+from facefusion.uis.typing import ComponentName
+from facefusion.uis.core import get_ui_component, register_ui_component
+
+PREVIEW_IMAGE : Optional[gradio.Image] = None
+PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global PREVIEW_IMAGE
+	global PREVIEW_FRAME_SLIDER
+
+	preview_image_args: Dict[str, Any] =\
+	{
+		'label': wording.get('preview_image_label'),
+		'interactive': False
+	}
+	preview_frame_slider_args: Dict[str, Any] =\
+	{
+		'label': wording.get('preview_frame_slider_label'),
+		'step': 1,
+		'minimum': 0,
+		'maximum': 100,
+		'visible': False
+	}
+	conditional_append_reference_faces()
+	source_frames = read_static_images(facefusion.globals.source_paths)
+	source_face = get_average_face(source_frames)
+	reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	if is_image(facefusion.globals.target_path):
+		target_frame = read_static_image(facefusion.globals.target_path)
+		preview_frame = process_preview_frame(source_face, reference_faces, target_frame)
+		preview_image_args['value'] = normalize_frame_color(preview_frame)
+	if is_video(facefusion.globals.target_path):
+		temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
+		preview_frame = process_preview_frame(source_face, reference_faces, temp_frame)
+		preview_image_args['value'] = normalize_frame_color(preview_frame)
+		preview_image_args['visible'] = True
+		preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
+		preview_frame_slider_args['maximum'] = count_video_frame_total(facefusion.globals.target_path)
+		preview_frame_slider_args['visible'] = True
+	PREVIEW_IMAGE = gradio.Image(**preview_image_args)
+	PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args)
+	register_ui_component('preview_frame_slider', PREVIEW_FRAME_SLIDER)
+
+
+def listen() -> None:
+	PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
+	multi_one_component_names : List[ComponentName] =\
+	[
+		'source_image',
+		'target_image',
+		'target_video'
+	]
+	for component_name in multi_one_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			for method in [ 'upload', 'change', 'clear' ]:
+				getattr(component, method)(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
+	multi_two_component_names : List[ComponentName] =\
+	[
+		'target_image',
+		'target_video'
+	]
+	for component_name in multi_two_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			for method in [ 'upload', 'change', 'clear' ]:
+				getattr(component, method)(update_preview_frame_slider, outputs = PREVIEW_FRAME_SLIDER)
+	select_component_names : List[ComponentName] =\
+	[
+		'reference_face_position_gallery',
+		'face_analyser_order_dropdown',
+		'face_analyser_age_dropdown',
+		'face_analyser_gender_dropdown'
+	]
+	for component_name in select_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
+	change_one_component_names : List[ComponentName] =\
+	[
+		'face_debugger_items_checkbox_group',
+		'face_enhancer_model_dropdown',
+		'face_enhancer_blend_slider',
+		'frame_enhancer_model_dropdown',
+		'frame_enhancer_blend_slider',
+		'face_selector_mode_dropdown',
+		'reference_face_distance_slider',
+		'face_mask_types_checkbox_group',
+		'face_mask_blur_slider',
+		'face_mask_padding_top_slider',
+		'face_mask_padding_bottom_slider',
+		'face_mask_padding_left_slider',
+		'face_mask_padding_right_slider',
+		'face_mask_region_checkbox_group'
+	]
+	for component_name in change_one_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
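+	# changes to these components invalidate cached faces, so the preview is cleared before updating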
+	change_two_component_names : List[ComponentName] =\
+	[
+		'frame_processors_checkbox_group',
+		'face_swapper_model_dropdown',
+		'face_detector_model_dropdown',
+		'face_detector_size_dropdown',
+		'face_detector_score_slider'
+	]
+	for component_name in change_two_component_names:
+		component = get_ui_component(component_name)
+		if component:
+			component.change(clear_and_update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
+
+
+def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
+	clear_face_analyser()
+	clear_reference_faces()
+	clear_static_faces()
+	return update_preview_image(frame_number)
+
+
+def update_preview_image(frame_number : int = 0) -> gradio.Image:
+	conditional_append_reference_faces()
+	source_frames = read_static_images(facefusion.globals.source_paths)
+	source_face = get_average_face(source_frames)
+	reference_face = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
+	if is_image(facefusion.globals.target_path):
+		target_frame = read_static_image(facefusion.globals.target_path)
+		preview_frame = process_preview_frame(source_face, reference_face, target_frame)
+		preview_frame = normalize_frame_color(preview_frame)
+		return gradio.Image(value = preview_frame)
+	if is_video(facefusion.globals.target_path):
+		temp_frame = get_video_frame(facefusion.globals.target_path, frame_number)
+		preview_frame = process_preview_frame(source_face, reference_face, temp_frame)
+		preview_frame = normalize_frame_color(preview_frame)
+		return gradio.Image(value = preview_frame)
+	return gradio.Image(value = None)
+
+
+def update_preview_frame_slider() -> gradio.Slider:
+	if is_video(facefusion.globals.target_path):
+		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+		return gradio.Slider(maximum = video_frame_total, visible = True)
+	return gradio.Slider(value = None, maximum = None, visible = False)
+
+
+def process_preview_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
+	temp_frame = resize_frame_dimension(temp_frame, 640, 640)
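+	# blur the whole preview when the content analyser flags the frame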
+	if analyse_frame(temp_frame):
+		return cv2.GaussianBlur(temp_frame, (99, 99), 0)
+	for frame_processor in facefusion.globals.frame_processors:
+		frame_processor_module = load_frame_processor_module(frame_processor)
+		if frame_processor_module.pre_process('preview'):
+			temp_frame = frame_processor_module.process_frame(
+				source_face,
+				reference_faces,
+				temp_frame
+			)
+	return temp_frame
diff --git a/facefusion/uis/components/source.py b/facefusion/uis/components/source.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fd7a6e76a50805d483e207990c857f99d53bc70
--- /dev/null
+++ b/facefusion/uis/components/source.py
@@ -0,0 +1,49 @@
+from typing import Optional, List
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.uis.typing import File
+from facefusion.filesystem import are_images
+from facefusion.uis.core import register_ui_component
+
+SOURCE_FILE : Optional[gradio.File] = None
+SOURCE_IMAGE : Optional[gradio.Image] = None
+
+
+def render() -> None:
+	global SOURCE_FILE
+	global SOURCE_IMAGE
+
+	are_source_images = are_images(facefusion.globals.source_paths)
+	SOURCE_FILE = gradio.File(
+		file_count = 'multiple',
+		file_types =
+		[
+			'.png',
+			'.jpg',
+			'.webp'
+		],
+		label = wording.get('source_file_label'),
+		value = facefusion.globals.source_paths if are_source_images else None
+	)
+	source_file_names = [ source_file_value['name'] for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None
+	SOURCE_IMAGE = gradio.Image(
+		value = source_file_names[0] if are_source_images else None,
+		visible = are_source_images,
+		show_label = False
+	)
+	register_ui_component('source_image', SOURCE_IMAGE)
+
+
+def listen() -> None:
+	SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE)
+
+
+def update(files : List[File]) -> gradio.Image:
+	file_names = [ file.name for file in files ] if files else None
+	if are_images(file_names):
+		facefusion.globals.source_paths = file_names
+		return gradio.Image(value = file_names[0], visible = True)
+	facefusion.globals.source_paths = None
+	return gradio.Image(value = None, visible = False)
diff --git a/facefusion/uis/components/target.py b/facefusion/uis/components/target.py
new file mode 100644
index 0000000000000000000000000000000000000000..307b670c9827bafc43da150dac2585ed0bf5a011
--- /dev/null
+++ b/facefusion/uis/components/target.py
@@ -0,0 +1,63 @@
+from typing import Tuple, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.face_store import clear_static_faces, clear_reference_faces
+from facefusion.uis.typing import File
+from facefusion.filesystem import is_image, is_video
+from facefusion.uis.core import register_ui_component
+
+TARGET_FILE : Optional[gradio.File] = None
+TARGET_IMAGE : Optional[gradio.Image] = None
+TARGET_VIDEO : Optional[gradio.Video] = None
+
+
+def render() -> None:
+	global TARGET_FILE
+	global TARGET_IMAGE
+	global TARGET_VIDEO
+
+	is_target_image = is_image(facefusion.globals.target_path)
+	is_target_video = is_video(facefusion.globals.target_path)
+	TARGET_FILE = gradio.File(
+		label = wording.get('target_file_label'),
+		file_count = 'single',
+		file_types =
+		[
+			'.png',
+			'.jpg',
+			'.webp',
+			'.mp4'
+		],
+		value = facefusion.globals.target_path if is_target_image or is_target_video else None
+	)
+	TARGET_IMAGE = gradio.Image(
+		value = TARGET_FILE.value['name'] if is_target_image else None,
+		visible = is_target_image,
+		show_label = False
+	)
+	TARGET_VIDEO = gradio.Video(
+		value = TARGET_FILE.value['name'] if is_target_video else None,
+		visible = is_target_video,
+		show_label = False
+	)
+	register_ui_component('target_image', TARGET_IMAGE)
+	register_ui_component('target_video', TARGET_VIDEO)
+
+
+def listen() -> None:
+	TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
+
+
+def update(file : File) -> Tuple[gradio.Image, gradio.Video]:
+	clear_reference_faces()
+	clear_static_faces()
+	if file and is_image(file.name):
+		facefusion.globals.target_path = file.name
+		return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False)
+	if file and is_video(file.name):
+		facefusion.globals.target_path = file.name
+		return gradio.Image(value = None, visible = False), gradio.Video(value = file.name, visible = True)
+	facefusion.globals.target_path = None
+	return gradio.Image(value = None, visible = False), gradio.Video(value = None, visible = False)
diff --git a/facefusion/uis/components/temp_frame.py b/facefusion/uis/components/temp_frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..d07f8365c73d75ef547d336f1a712cca2bdc6135
--- /dev/null
+++ b/facefusion/uis/components/temp_frame.py
@@ -0,0 +1,55 @@
+from typing import Optional, Tuple
+import gradio
+
+import facefusion.globals
+import facefusion.choices
+from facefusion import wording
+from facefusion.typing import TempFrameFormat
+from facefusion.filesystem import is_video
+from facefusion.uis.core import get_ui_component
+
+TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
+TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global TEMP_FRAME_FORMAT_DROPDOWN
+	global TEMP_FRAME_QUALITY_SLIDER
+
+	TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown(
+		label = wording.get('temp_frame_format_dropdown_label'),
+		choices = facefusion.choices.temp_frame_formats,
+		value = facefusion.globals.temp_frame_format,
+		visible = is_video(facefusion.globals.target_path)
+	)
+	TEMP_FRAME_QUALITY_SLIDER = gradio.Slider(
+		label = wording.get('temp_frame_quality_slider_label'),
+		value = facefusion.globals.temp_frame_quality,
+		step = facefusion.choices.temp_frame_quality_range[1] - facefusion.choices.temp_frame_quality_range[0],
+		minimum = facefusion.choices.temp_frame_quality_range[0],
+		maximum = facefusion.choices.temp_frame_quality_range[-1],
+		visible = is_video(facefusion.globals.target_path)
+	)
+
+
+def listen() -> None:
+	TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN)
+	TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER)
+	target_video = get_ui_component('target_video')
+	if target_video:
+		for method in [ 'upload', 'change', 'clear' ]:
+			getattr(target_video, method)(remote_update, outputs = [ TEMP_FRAME_FORMAT_DROPDOWN, TEMP_FRAME_QUALITY_SLIDER ])
+
+
+def remote_update() -> Tuple[gradio.Dropdown, gradio.Slider]:
+	if is_video(facefusion.globals.target_path):
+		return gradio.Dropdown(visible = True), gradio.Slider(visible = True)
+	return gradio.Dropdown(visible = False), gradio.Slider(visible = False)
+
+
+def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> None:
+	facefusion.globals.temp_frame_format = temp_frame_format
+
+
+def update_temp_frame_quality(temp_frame_quality : int) -> None:
+	facefusion.globals.temp_frame_quality = temp_frame_quality
diff --git a/facefusion/uis/components/trim_frame.py b/facefusion/uis/components/trim_frame.py
new file mode 100644
index 0000000000000000000000000000000000000000..10d6089a5fad9c4f86563dceae11ccdf746a232d
--- /dev/null
+++ b/facefusion/uis/components/trim_frame.py
@@ -0,0 +1,71 @@
+from typing import Any, Dict, Tuple, Optional
+import gradio
+
+import facefusion.globals
+from facefusion import wording
+from facefusion.vision import count_video_frame_total
+from facefusion.filesystem import is_video
+from facefusion.uis.core import get_ui_component
+
+TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
+TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global TRIM_FRAME_START_SLIDER
+	global TRIM_FRAME_END_SLIDER
+
+	trim_frame_start_slider_args : Dict[str, Any] =\
+	{
+		'label': wording.get('trim_frame_start_slider_label'),
+		'step': 1,
+		'minimum': 0,
+		'maximum': 100,
+		'visible': False
+	}
+	trim_frame_end_slider_args : Dict[str, Any] =\
+	{
+		'label': wording.get('trim_frame_end_slider_label'),
+		'step': 1,
+		'minimum': 0,
+		'maximum': 100,
+		'visible': False
+	}
+	if is_video(facefusion.globals.target_path):
+		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+		trim_frame_start_slider_args['value'] = facefusion.globals.trim_frame_start or 0
+		trim_frame_start_slider_args['maximum'] = video_frame_total
+		trim_frame_start_slider_args['visible'] = True
+		trim_frame_end_slider_args['value'] = facefusion.globals.trim_frame_end or video_frame_total
+		trim_frame_end_slider_args['maximum'] = video_frame_total
+		trim_frame_end_slider_args['visible'] = True
+	with gradio.Row():
+		TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args)
+		TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args)
+
+
+def listen() -> None:
+	TRIM_FRAME_START_SLIDER.change(update_trim_frame_start, inputs = TRIM_FRAME_START_SLIDER)
+	TRIM_FRAME_END_SLIDER.change(update_trim_frame_end, inputs = TRIM_FRAME_END_SLIDER)
+	target_video = get_ui_component('target_video')
+	if target_video:
+		for method in [ 'upload', 'change', 'clear' ]:
+			getattr(target_video, method)(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ])
+
+
+def remote_update() -> Tuple[gradio.Slider, gradio.Slider]:
+	if is_video(facefusion.globals.target_path):
+		video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+		facefusion.globals.trim_frame_start = None
+		facefusion.globals.trim_frame_end = None
+		return gradio.Slider(value = 0, maximum = video_frame_total, visible = True), gradio.Slider(value = video_frame_total, maximum = video_frame_total, visible = True)
+	return gradio.Slider(value = None, maximum = None, visible = False), gradio.Slider(value = None, maximum = None, visible = False)
+
+
+def update_trim_frame_start(trim_frame_start : int) -> None:
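+	# a start frame of 0 is stored as None (no trimming)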
+	facefusion.globals.trim_frame_start = trim_frame_start if trim_frame_start > 0 else None
+
+
+def update_trim_frame_end(trim_frame_end : int) -> None:
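+	# an end frame at or beyond the total frame count is stored as None (no trimming)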
+	video_frame_total = count_video_frame_total(facefusion.globals.target_path)
+	facefusion.globals.trim_frame_end = trim_frame_end if trim_frame_end < video_frame_total else None
diff --git a/facefusion/uis/components/webcam.py b/facefusion/uis/components/webcam.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1217a6da8a282f6ed0c76c1e1335a429cc8dfc0
--- /dev/null
+++ b/facefusion/uis/components/webcam.py
@@ -0,0 +1,155 @@
+from typing import Optional, Generator, Deque
+from concurrent.futures import ThreadPoolExecutor
+from collections import deque
+import os
+import platform
+import subprocess
+import cv2
+import gradio
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import logger, wording
+from facefusion.content_analyser import analyse_stream
+from facefusion.typing import Frame, Face
+from facefusion.face_analyser import get_average_face
+from facefusion.processors.frame.core import get_frame_processors_modules
+from facefusion.ffmpeg import open_ffmpeg
+from facefusion.vision import normalize_frame_color, read_static_images
+from facefusion.uis.typing import StreamMode, WebcamMode
+from facefusion.uis.core import get_ui_component
+
+WEBCAM_CAPTURE : Optional[cv2.VideoCapture] = None
+WEBCAM_IMAGE : Optional[gradio.Image] = None
+WEBCAM_START_BUTTON : Optional[gradio.Button] = None
+WEBCAM_STOP_BUTTON : Optional[gradio.Button] = None
+
+
+def get_webcam_capture() -> Optional[cv2.VideoCapture]:
+	global WEBCAM_CAPTURE
+
+	if WEBCAM_CAPTURE is None:
+		if platform.system().lower() == 'windows':
+			webcam_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
+		else:
+			webcam_capture = cv2.VideoCapture(0)
+		if webcam_capture and webcam_capture.isOpened():
+			WEBCAM_CAPTURE = webcam_capture
+	return WEBCAM_CAPTURE
+
+
+def clear_webcam_capture() -> None:
+	global WEBCAM_CAPTURE
+
+	if WEBCAM_CAPTURE:
+		WEBCAM_CAPTURE.release()
+	WEBCAM_CAPTURE = None
+
+
+def render() -> None:
+	global WEBCAM_IMAGE
+	global WEBCAM_START_BUTTON
+	global WEBCAM_STOP_BUTTON
+
+	WEBCAM_IMAGE = gradio.Image(
+		label = wording.get('webcam_image_label')
+	)
+	WEBCAM_START_BUTTON = gradio.Button(
+		value = wording.get('start_button_label'),
+		variant = 'primary',
+		size = 'sm'
+	)
+	WEBCAM_STOP_BUTTON = gradio.Button(
+		value = wording.get('stop_button_label'),
+		size = 'sm'
+	)
+
+
+def listen() -> None:
+	start_event = None
+	webcam_mode_radio = get_ui_component('webcam_mode_radio')
+	webcam_resolution_dropdown = get_ui_component('webcam_resolution_dropdown')
+	webcam_fps_slider = get_ui_component('webcam_fps_slider')
+	if webcam_mode_radio and webcam_resolution_dropdown and webcam_fps_slider:
+		start_event = WEBCAM_START_BUTTON.click(start, inputs = [ webcam_mode_radio, webcam_resolution_dropdown, webcam_fps_slider ], outputs = WEBCAM_IMAGE)
+	WEBCAM_STOP_BUTTON.click(stop, cancels = start_event)
+	source_image = get_ui_component('source_image')
+	if source_image:
+		for method in [ 'upload', 'change', 'clear' ]:
+			getattr(source_image, method)(stop, cancels = start_event)
+
+
+def start(webcam_mode : WebcamMode, resolution : str, fps : float) -> Generator[Frame, None, None]:
+	facefusion.globals.face_selector_mode = 'one'
+	facefusion.globals.face_analyser_order = 'large-small'
+	source_frames = read_static_images(facefusion.globals.source_paths)
+	source_face = get_average_face(source_frames)
+	stream = None
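+	# udp and v4l2 modes pipe the processed frames into ffmpeg instead of rendering them inline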
+	if webcam_mode in [ 'udp', 'v4l2' ]:
+		stream = open_stream(webcam_mode, resolution, fps) # type: ignore[arg-type]
+	webcam_width, webcam_height = map(int, resolution.split('x'))
+	webcam_capture = get_webcam_capture()
+	if webcam_capture and webcam_capture.isOpened():
+		webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) # type: ignore[attr-defined]
+		webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
+		webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
+		webcam_capture.set(cv2.CAP_PROP_FPS, fps)
+		for capture_frame in multi_process_capture(source_face, webcam_capture, fps):
+			if webcam_mode == 'inline':
+				yield normalize_frame_color(capture_frame)
+			else:
+				try:
+					stream.stdin.write(capture_frame.tobytes())
+				except Exception:
+					clear_webcam_capture()
+				yield None
+
+
+def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, fps : float) -> Generator[Frame, None, None]:
+	with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+		with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
+			futures = []
+			deque_capture_frames : Deque[Frame] = deque()
+			while webcam_capture and webcam_capture.isOpened():
+				_, capture_frame = webcam_capture.read()
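+				# stop streaming once the content analyser flags a captured frame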
+				if analyse_stream(capture_frame, fps):
+					return
+				future = executor.submit(process_stream_frame, source_face, capture_frame)
+				futures.append(future)
+				for future_done in [ future for future in futures if future.done() ]:
+					capture_frame = future_done.result()
+					deque_capture_frames.append(capture_frame)
+					futures.remove(future_done)
+				while deque_capture_frames:
+					progress.update()
+					yield deque_capture_frames.popleft()
+
+
+def stop() -> gradio.Image:
+	clear_webcam_capture()
+	return gradio.Image(value = None)
+
+
+def process_stream_frame(source_face : Face, temp_frame : Frame) -> Frame:
+	for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+		if frame_processor_module.pre_process('stream'):
+			temp_frame = frame_processor_module.process_frame(
+				source_face,
+				None,
+				temp_frame
+			)
+	return temp_frame
+
+
+def open_stream(stream_mode : StreamMode, resolution : str, fps : float) -> subprocess.Popen[bytes]:
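+	# raw bgr24 frames are written to the stdin of ffmpeg at the given resolution and frame rate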
+	commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', resolution, '-r', str(fps), '-i', '-' ]
+	if stream_mode == 'udp':
+		commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ])
+	if stream_mode == 'v4l2':
+		try:
+			device_name = os.listdir('/sys/devices/virtual/video4linux')[0]
+			if device_name:
+				commands.extend([ '-f', 'v4l2', '/dev/' + device_name ])
+		except FileNotFoundError:
+			logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__.upper())
+	return open_ffmpeg(commands)
diff --git a/facefusion/uis/components/webcam_options.py b/facefusion/uis/components/webcam_options.py
new file mode 100644
index 0000000000000000000000000000000000000000..edb245c811ff85c6ceffa58f35ecae692b357cb6
--- /dev/null
+++ b/facefusion/uis/components/webcam_options.py
@@ -0,0 +1,37 @@
+from typing import Optional
+import gradio
+
+from facefusion import wording
+from facefusion.uis import choices as uis_choices
+from facefusion.uis.core import register_ui_component
+
+WEBCAM_MODE_RADIO : Optional[gradio.Radio] = None
+WEBCAM_RESOLUTION_DROPDOWN : Optional[gradio.Dropdown] = None
+WEBCAM_FPS_SLIDER : Optional[gradio.Slider] = None
+
+
+def render() -> None:
+	global WEBCAM_MODE_RADIO
+	global WEBCAM_RESOLUTION_DROPDOWN
+	global WEBCAM_FPS_SLIDER
+
+	WEBCAM_MODE_RADIO = gradio.Radio(
+		label = wording.get('webcam_mode_radio_label'),
+		choices = uis_choices.webcam_modes,
+		value = 'inline'
+	)
+	WEBCAM_RESOLUTION_DROPDOWN = gradio.Dropdown(
+		label = wording.get('webcam_resolution_dropdown'),
+		choices = uis_choices.webcam_resolutions,
+		value = uis_choices.webcam_resolutions[0]
+	)
+	WEBCAM_FPS_SLIDER = gradio.Slider(
+		label = wording.get('webcam_fps_slider'),
+		value = 25,
+		step = 1,
+		minimum = 1,
+		maximum = 60
+	)
+	register_ui_component('webcam_mode_radio', WEBCAM_MODE_RADIO)
+	register_ui_component('webcam_resolution_dropdown', WEBCAM_RESOLUTION_DROPDOWN)
+	register_ui_component('webcam_fps_slider', WEBCAM_FPS_SLIDER)
diff --git a/facefusion/uis/core.py b/facefusion/uis/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f7b6cd0a8f48b1250b3cd103644f547da8d63ae
--- /dev/null
+++ b/facefusion/uis/core.py
@@ -0,0 +1,131 @@
+from typing import Dict, Optional, Any, List
+from types import ModuleType
+import importlib
+import sys
+import gradio
+
+import facefusion.globals
+from facefusion import metadata, logger, wording
+from facefusion.uis.typing import Component, ComponentName
+from facefusion.filesystem import resolve_relative_path
+
+UI_COMPONENTS: Dict[ComponentName, Component] = {}
+UI_LAYOUT_MODULES : List[ModuleType] = []
+UI_LAYOUT_METHODS =\
+[
+	'pre_check',
+	'pre_render',
+	'render',
+	'listen',
+	'run'
+]
+
+
+def load_ui_layout_module(ui_layout : str) -> Any:
+	try:
+		ui_layout_module = importlib.import_module('facefusion.uis.layouts.' + ui_layout)
+		for method_name in UI_LAYOUT_METHODS:
+			if not hasattr(ui_layout_module, method_name):
+				raise NotImplementedError
+	except ModuleNotFoundError as exception:
+		logger.debug(exception.msg, __name__.upper())
+		sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout))
+	except NotImplementedError:
+		sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout))
+	return ui_layout_module
+
+
+def get_ui_layouts_modules(ui_layouts : List[str]) -> List[ModuleType]:
+	global UI_LAYOUT_MODULES
+
+	if not UI_LAYOUT_MODULES:
+		for ui_layout in ui_layouts:
+			ui_layout_module = load_ui_layout_module(ui_layout)
+			UI_LAYOUT_MODULES.append(ui_layout_module)
+	return UI_LAYOUT_MODULES
+
+
+def get_ui_component(name : ComponentName) -> Optional[Component]:
+	if name in UI_COMPONENTS:
+		return UI_COMPONENTS[name]
+	return None
+
+
+def register_ui_component(name : ComponentName, component: Component) -> None:
+	UI_COMPONENTS[name] = component
+
+
+def launch() -> None:
+	with gradio.Blocks(theme = get_theme(), css = get_css(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui:
+		for ui_layout in facefusion.globals.ui_layouts:
+			ui_layout_module = load_ui_layout_module(ui_layout)
+			if ui_layout_module.pre_render():
+				ui_layout_module.render()
+				ui_layout_module.listen()
+
+	for ui_layout in facefusion.globals.ui_layouts:
+		ui_layout_module = load_ui_layout_module(ui_layout)
+		ui_layout_module.run(ui)
+
+
+def get_theme() -> gradio.Theme:
+	return gradio.themes.Base(
+		primary_hue = gradio.themes.colors.red,
+		secondary_hue = gradio.themes.colors.neutral,
+		font = gradio.themes.GoogleFont('Open Sans')
+	).set(
+		background_fill_primary = '*neutral_100',
+		block_background_fill = 'white',
+		block_border_width = '0',
+		block_label_background_fill = '*primary_100',
+		block_label_background_fill_dark = '*primary_600',
+		block_label_border_width = 'none',
+		block_label_margin = '0.5rem',
+		block_label_radius = '*radius_md',
+		block_label_text_color = '*primary_500',
+		block_label_text_color_dark = 'white',
+		block_label_text_weight = '600',
+		block_title_background_fill = '*primary_100',
+		block_title_background_fill_dark = '*primary_600',
+		block_title_padding = '*block_label_padding',
+		block_title_radius = '*block_label_radius',
+		block_title_text_color = '*primary_500',
+		block_title_text_size = '*text_sm',
+		block_title_text_weight = '600',
+		block_padding = '0.5rem',
+		border_color_primary = 'transparent',
+		border_color_primary_dark = 'transparent',
+		button_large_padding = '2rem 0.5rem',
+		button_large_text_weight = 'normal',
+		button_primary_background_fill = '*primary_500',
+		button_primary_text_color = 'white',
+		button_secondary_background_fill = 'white',
+		button_secondary_border_color = 'transparent',
+		button_secondary_border_color_dark = 'transparent',
+		button_secondary_border_color_hover = 'transparent',
+		button_secondary_border_color_hover_dark = 'transparent',
+		button_secondary_text_color = '*neutral_800',
+		button_small_padding = '0.75rem',
+		checkbox_background_color = '*neutral_200',
+		checkbox_background_color_selected = '*primary_600',
+		checkbox_background_color_selected_dark = '*primary_700',
+		checkbox_border_color_focus = '*primary_500',
+		checkbox_border_color_focus_dark = '*primary_600',
+		checkbox_border_color_selected = '*primary_600',
+		checkbox_border_color_selected_dark = '*primary_700',
+		checkbox_label_background_fill = '*neutral_50',
+		checkbox_label_background_fill_hover = '*neutral_50',
+		checkbox_label_background_fill_selected = '*primary_500',
+		checkbox_label_background_fill_selected_dark = '*primary_600',
+		checkbox_label_text_color_selected = 'white',
+		input_background_fill = '*neutral_50',
+		shadow_drop = 'none',
+		slider_color = '*primary_500',
+		slider_color_dark = '*primary_600'
+	)
+
+
+def get_css() -> str:
+	fixes_css_path = resolve_relative_path('uis/assets/fixes.css')
+	overrides_css_path = resolve_relative_path('uis/assets/overrides.css')
+	return open(fixes_css_path, 'r').read() + open(overrides_css_path, 'r').read()
diff --git a/facefusion/uis/layouts/benchmark.py b/facefusion/uis/layouts/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae9c3202771c2cf06ac4d11483968f9c7ad51423
--- /dev/null
+++ b/facefusion/uis/layouts/benchmark.py
@@ -0,0 +1,63 @@
+import gradio
+
+import facefusion.globals
+from facefusion.download import conditional_download
+from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, benchmark_options, benchmark
+
+
+def pre_check() -> bool:
+	if not facefusion.globals.skip_download:
+		conditional_download('.assets/examples',
+		[
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-540p.mp4',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-720p.mp4',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1440p.mp4',
+			'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-2160p.mp4'
+		])
+		return True
+	return False
+
+
+def pre_render() -> bool:
+	return True
+
+
+def render() -> gradio.Blocks:
+	with gradio.Blocks() as layout:
+		with gradio.Row():
+			with gradio.Column(scale = 2):
+				with gradio.Blocks():
+					about.render()
+				with gradio.Blocks():
+					frame_processors.render()
+					frame_processors_options.render()
+				with gradio.Blocks():
+					execution.render()
+					execution_thread_count.render()
+					execution_queue_count.render()
+				with gradio.Blocks():
+					limit_resources.render()
+				with gradio.Blocks():
+					benchmark_options.render()
+			with gradio.Column(scale = 5):
+				with gradio.Blocks():
+					benchmark.render()
+	return layout
+
+
+def listen() -> None:
+	frame_processors.listen()
+	frame_processors_options.listen()
+	execution.listen()
+	execution_thread_count.listen()
+	execution_queue_count.listen()
+	limit_resources.listen()
+	benchmark.listen()
+
+
+def run(ui : gradio.Blocks) -> None:
+	ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False)
diff --git a/facefusion/uis/layouts/default.py b/facefusion/uis/layouts/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..4537297e053d4ce2a3ce8334f146afc81a7d73b9
--- /dev/null
+++ b/facefusion/uis/layouts/default.py
@@ -0,0 +1,77 @@
+import gradio
+
+from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, temp_frame, output_options, common_options, source, target, output, preview, trim_frame, face_analyser, face_selector, face_masker
+
+
+def pre_check() -> bool:
+	return True
+
+
+def pre_render() -> bool:
+	return True
+
+
+def render() -> gradio.Blocks:
+	with gradio.Blocks() as layout:
+		with gradio.Row():
+			with gradio.Column(scale = 2):
+				with gradio.Blocks():
+					about.render()
+				with gradio.Blocks():
+					frame_processors.render()
+					frame_processors_options.render()
+				with gradio.Blocks():
+					execution.render()
+					execution_thread_count.render()
+					execution_queue_count.render()
+				with gradio.Blocks():
+					limit_resources.render()
+				with gradio.Blocks():
+					temp_frame.render()
+				with gradio.Blocks():
+					output_options.render()
+				with gradio.Blocks():
+					common_options.render()
+			with gradio.Column(scale = 2):
+				with gradio.Blocks():
+					source.render()
+				with gradio.Blocks():
+					target.render()
+				with gradio.Blocks():
+					output.render()
+			with gradio.Column(scale = 3):
+				with gradio.Blocks():
+					preview.render()
+				with gradio.Blocks():
+					trim_frame.render()
+				with gradio.Blocks():
+					face_selector.render()
+				with gradio.Blocks():
+					face_masker.render()
+				with gradio.Blocks():
+					face_analyser.render()
+	return layout
+
+
+def listen() -> None:
+	frame_processors.listen()
+	frame_processors_options.listen()
+	execution.listen()
+	execution_thread_count.listen()
+	execution_queue_count.listen()
+	limit_resources.listen()
+	temp_frame.listen()
+	output_options.listen()
+	common_options.listen()
+	source.listen()
+	target.listen()
+	output.listen()
+	preview.listen()
+	trim_frame.listen()
+	face_selector.listen()
+	face_masker.listen()
+	face_analyser.listen()
+
+
+def run(ui : gradio.Blocks) -> None:
+	ui.launch(show_api = False)
diff --git a/facefusion/uis/layouts/webcam.py b/facefusion/uis/layouts/webcam.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5b6e184fbbec96b58e1d05474bbeab473229356
--- /dev/null
+++ b/facefusion/uis/layouts/webcam.py
@@ -0,0 +1,46 @@
+import gradio
+
+from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, webcam_options, source, webcam
+
+
+def pre_check() -> bool:
+	return True
+
+
+def pre_render() -> bool:
+	return True
+
+
+def render() -> gradio.Blocks:
+	with gradio.Blocks() as layout:
+		with gradio.Row():
+			with gradio.Column(scale = 2):
+				with gradio.Blocks():
+					about.render()
+				with gradio.Blocks():
+					frame_processors.render()
+					frame_processors_options.render()
+				with gradio.Blocks():
+					execution.render()
+					execution_thread_count.render()
+				with gradio.Blocks():
+					webcam_options.render()
+				with gradio.Blocks():
+					source.render()
+			with gradio.Column(scale = 5):
+				with gradio.Blocks():
+					webcam.render()
+	return layout
+
+
+def listen() -> None:
+	frame_processors.listen()
+	frame_processors_options.listen()
+	execution.listen()
+	execution_thread_count.listen()
+	source.listen()
+	webcam.listen()
+
+
+def run(ui : gradio.Blocks) -> None:
+	ui.queue(concurrency_count = 2, api_open = False).launch(show_api = False)
diff --git a/facefusion/uis/typing.py b/facefusion/uis/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2c57d326df2430571c60a6c498e992a1c1ba2f4
--- /dev/null
+++ b/facefusion/uis/typing.py
@@ -0,0 +1,43 @@
+from typing import Literal, Any, IO, Union
+import gradio
+
+File = IO[Any]
+Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider]
+ComponentName = Literal\
+[
+	'source_image',
+	'target_image',
+	'target_video',
+	'preview_frame_slider',
+	'face_selector_mode_dropdown',
+	'reference_face_position_gallery',
+	'reference_face_distance_slider',
+	'face_analyser_order_dropdown',
+	'face_analyser_age_dropdown',
+	'face_analyser_gender_dropdown',
+	'face_detector_model_dropdown',
+	'face_detector_size_dropdown',
+	'face_detector_score_slider',
+	'face_mask_types_checkbox_group',
+	'face_mask_blur_slider',
+	'face_mask_padding_top_slider',
+	'face_mask_padding_bottom_slider',
+	'face_mask_padding_left_slider',
+	'face_mask_padding_right_slider',
+	'face_mask_region_checkbox_group',
+	'frame_processors_checkbox_group',
+	'face_swapper_model_dropdown',
+	'face_enhancer_model_dropdown',
+	'face_enhancer_blend_slider',
+	'frame_enhancer_model_dropdown',
+	'frame_enhancer_blend_slider',
+	'face_debugger_items_checkbox_group',
+	'output_path_textbox',
+	'benchmark_runs_checkbox_group',
+	'benchmark_cycles_slider',
+	'webcam_mode_radio',
+	'webcam_resolution_dropdown',
+	'webcam_fps_slider'
+]
+WebcamMode = Literal['inline', 'udp', 'v4l2']
+StreamMode = Literal['udp', 'v4l2']
diff --git a/facefusion/vision.py b/facefusion/vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..4706bf7ce92b95985199fc30cdeb0c7dfd822e8f
--- /dev/null
+++ b/facefusion/vision.py
@@ -0,0 +1,75 @@
+from typing import Optional, List
+from functools import lru_cache
+import cv2
+
+from facefusion.typing import Frame
+
+
+def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]:
+	if video_path:
+		video_capture = cv2.VideoCapture(video_path)
+		if video_capture.isOpened():
+			frame_total = video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
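+			# cap the seek position at the total frame count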
+			video_capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
+			has_frame, frame = video_capture.read()
+			video_capture.release()
+			if has_frame:
+				return frame
+	return None
+
+
+def detect_fps(video_path : str) -> Optional[float]:
+	if video_path:
+		video_capture = cv2.VideoCapture(video_path)
+		if video_capture.isOpened():
+			return video_capture.get(cv2.CAP_PROP_FPS)
+	return None
+
+
+def count_video_frame_total(video_path : str) -> int:
+	if video_path:
+		video_capture = cv2.VideoCapture(video_path)
+		if video_capture.isOpened():
+			video_frame_total = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
+			video_capture.release()
+			return video_frame_total
+	return 0
+
+
+def normalize_frame_color(frame : Frame) -> Frame:
+	return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+
+def resize_frame_dimension(frame : Frame, max_width : int, max_height : int) -> Frame:
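+	# downscale to fit within max_width and max_height while preserving the aspect ratio; smaller frames are returned unchanged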
+	height, width = frame.shape[:2]
+	if height > max_height or width > max_width:
+		scale = min(max_height / height, max_width / width)
+		new_width = int(width * scale)
+		new_height = int(height * scale)
+		return cv2.resize(frame, (new_width, new_height))
+	return frame
+
+
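+# cache decoded images so repeated reads of the same path skip disk access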
+@lru_cache(maxsize = 128)
+def read_static_image(image_path : str) -> Optional[Frame]:
+	return read_image(image_path)
+
+
+def read_static_images(image_paths : List[str]) -> Optional[List[Frame]]:
+	frames = []
+	if image_paths:
+		for image_path in image_paths:
+			frames.append(read_static_image(image_path))
+	return frames
+
+
+def read_image(image_path : str) -> Optional[Frame]:
+	if image_path:
+		return cv2.imread(image_path)
+	return None
+
+
+def write_image(image_path : str, frame : Frame) -> bool:
+	if image_path:
+		return cv2.imwrite(image_path, frame)
+	return False
diff --git a/facefusion/wording.py b/facefusion/wording.py
new file mode 100644
index 0000000000000000000000000000000000000000..78f3cd3df122303c4427c874814d99dfc41f1867
--- /dev/null
+++ b/facefusion/wording.py
@@ -0,0 +1,136 @@
+WORDING =\
+{
+	'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',
+	'ffmpeg_not_installed': 'FFMpeg is not installed',
+	'install_dependency_help': 'select the variant of {dependency} to install',
+	'skip_venv_help': 'skip the virtual environment check',
+	'source_help': 'select a source image',
+	'target_help': 'select a target image or video',
+	'output_help': 'specify the output file or directory',
+	'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)',
+	'frame_processor_model_help': 'choose the model for the frame processor',
+	'frame_processor_blend_help': 'specify the blend amount for the frame processor',
+	'face_debugger_items_help': 'specify the face debugger items (choices: {choices})',
+	'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)',
+	'keep_fps_help': 'preserve the frames per second (fps) of the target',
+	'keep_temp_help': 'retain temporary frames after processing',
+	'skip_audio_help': 'omit audio from the target',
+	'face_analyser_order_help': 'specify the order used for the face analyser',
+	'face_analyser_age_help': 'specify the age used for the face analyser',
+	'face_analyser_gender_help': 'specify the gender used for the face analyser',
+	'face_detector_model_help': 'specify the model used for the face detector',
+	'face_detector_size_help': 'specify the size threshold used for the face detector',
+	'face_detector_score_help': 'specify the score threshold used for the face detector',
+	'face_selector_mode_help': 'specify the mode for the face selector',
+	'reference_face_position_help': 'specify the position of the reference face',
+	'reference_face_distance_help': 'specify the distance between the reference face and the target face',
+	'reference_frame_number_help': 'specify the number of the reference frame',
+	'face_mask_types_help': 'choose from the available face mask types (choices: {choices})',
+	'face_mask_blur_help': 'specify the blur amount for face mask',
+	'face_mask_padding_help': 'specify the face mask padding (top, right, bottom, left) in percent',
+	'face_mask_regions_help': 'choose from the available face mask regions (choices: {choices})',
+	'trim_frame_start_help': 'specify the start frame for extraction',
+	'trim_frame_end_help': 'specify the end frame for extraction',
+	'temp_frame_format_help': 'specify the image format used for frame extraction',
+	'temp_frame_quality_help': 'specify the image quality used for frame extraction',
+	'output_image_quality_help': 'specify the quality used for the output image',
+	'output_video_encoder_help': 'specify the encoder used for the output video',
+	'output_video_quality_help': 'specify the quality used for the output video',
+	'max_memory_help': 'specify the maximum amount of ram to be used (in gb)',
+	'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)',
+	'execution_thread_count_help': 'specify the number of execution threads',
+	'execution_queue_count_help': 'specify the number of execution queries',
+	'skip_download_help': 'omit automatic downloads and lookups',
+	'headless_help': 'run the program in headless mode',
+	'log_level_help': 'choose from the available log levels',
+	'creating_temp': 'Creating temporary resources',
+	'extracting_frames_fps': 'Extracting frames with {fps} FPS',
+	'analysing': 'Analysing',
+	'processing': 'Processing',
+	'downloading': 'Downloading',
+	'temp_frames_not_found': 'Temporary frames not found',
+	'compressing_image': 'Compressing image',
+	'compressing_image_failed': 'Compressing image failed',
+	'merging_video_fps': 'Merging video with {fps} FPS',
+	'merging_video_failed': 'Merging video failed',
+	'skipping_audio': 'Skipping audio',
+	'restoring_audio': 'Restoring audio',
+	'restoring_audio_skipped': 'Restoring audio skipped',
+	'clearing_temp': 'Clearing temporary resources',
+	'processing_image_succeed': 'Processing to image succeeded',
+	'processing_image_failed': 'Processing to image failed',
+	'processing_video_succeed': 'Processing to video succeeded',
+	'processing_video_failed': 'Processing to video failed',
+	'model_download_not_done': 'Download of the model is not done',
+	'model_file_not_present': 'File of the model is not present',
+	'select_image_source': 'Select an image for source path',
+	'select_image_or_video_target': 'Select an image or video for target path',
+	'select_file_or_directory_output': 'Select a file or directory for output path',
+	'no_source_face_detected': 'No source face detected',
+	'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded',
+	'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly',
+	'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded',
+	'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly',
+	'stream_not_loaded': 'Stream {stream_mode} could not be loaded',
+	'donate_button_label': 'DONATE',
+	'start_button_label': 'START',
+	'stop_button_label': 'STOP',
+	'clear_button_label': 'CLEAR',
+	'benchmark_runs_checkbox_group_label': 'BENCHMARK RUNS',
+	'benchmark_results_dataframe_label': 'BENCHMARK RESULTS',
+	'benchmark_cycles_slider_label': 'BENCHMARK CYCLES',
+	'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS',
+	'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT',
+	'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT',
+	'face_analyser_order_dropdown_label': 'FACE ANALYSER ORDER',
+	'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE',
+	'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER',
+	'face_detector_model_dropdown_label': 'FACE DETECTOR MODEL',
+	'face_detector_size_dropdown_label': 'FACE DETECTOR SIZE',
+	'face_detector_score_slider_label': 'FACE DETECTOR SCORE',
+	'face_selector_mode_dropdown_label': 'FACE SELECTOR MODE',
+	'reference_face_gallery_label': 'REFERENCE FACE',
+	'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE',
+	'face_mask_types_checkbox_group_label': 'FACE MASK TYPES',
+	'face_mask_blur_slider_label': 'FACE MASK BLUR',
+	'face_mask_padding_top_slider_label': 'FACE MASK PADDING TOP',
+	'face_mask_padding_bottom_slider_label': 'FACE MASK PADDING BOTTOM',
+	'face_mask_padding_left_slider_label': 'FACE MASK PADDING LEFT',
+	'face_mask_padding_right_slider_label': 'FACE MASK PADDING RIGHT',
+	'face_mask_region_checkbox_group_label': 'FACE MASK REGIONS',
+	'max_memory_slider_label': 'MAX MEMORY',
+	'output_image_or_video_label': 'OUTPUT',
+	'output_path_textbox_label': 'OUTPUT PATH',
+	'output_image_quality_slider_label': 'OUTPUT IMAGE QUALITY',
+	'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER',
+	'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY',
+	'preview_image_label': 'PREVIEW',
+	'preview_frame_slider_label': 'PREVIEW FRAME',
+	'frame_processors_checkbox_group_label': 'FRAME PROCESSORS',
+	'face_swapper_model_dropdown_label': 'FACE SWAPPER MODEL',
+	'face_enhancer_model_dropdown_label': 'FACE ENHANCER MODEL',
+	'face_enhancer_blend_slider_label': 'FACE ENHANCER BLEND',
+	'frame_enhancer_model_dropdown_label': 'FRAME ENHANCER MODEL',
+	'frame_enhancer_blend_slider_label': 'FRAME ENHANCER BLEND',
+	'face_debugger_items_checkbox_group_label': 'FACE DEBUGGER ITEMS',
+	'common_options_checkbox_group_label': 'OPTIONS',
+	'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT',
+	'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY',
+	'trim_frame_start_slider_label': 'TRIM FRAME START',
+	'trim_frame_end_slider_label': 'TRIM FRAME END',
+	'source_file_label': 'SOURCE',
+	'target_file_label': 'TARGET',
+	'webcam_image_label': 'WEBCAM',
+	'webcam_mode_radio_label': 'WEBCAM MODE',
+	'webcam_resolution_dropdown': 'WEBCAM RESOLUTION',
+	'webcam_fps_slider': 'WEBCAM FPS',
+	'point': '.',
+	'comma': ',',
+	'colon': ':',
+	'question_mark': '?',
+	'exclamation_mark': '!'
+}
+
+
+def get(key : str) -> str:
+	return WORDING[key]
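
A minimal usage sketch of the `wording` module defined above: `get` returns the raw template and callers fill placeholders such as `{fps}` or `{frame_processor}` via `str.format`. The values below are illustrative only.

```python
# Minimal sketch with illustrative values; wording.get returns the template string as-is.
from facefusion import wording

print(wording.get('extracting_frames_fps').format(fps = 25))
# Extracting frames with 25 FPS

print(wording.get('frame_processor_not_loaded').format(frame_processor = 'face_swapper'))
# Frame processor face_swapper could not be loaded
```
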
diff --git a/install.py b/install.py
index 16fad5a662df1ccbd6b16dc27ca4fbb169b5b35a..307f686fa5ed6409029975433c9c76a6a735c656 100644
--- a/install.py
+++ b/install.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-from DeepFakeAI import installer
+from facefusion import installer
 
 if __name__ == '__main__':
 	installer.cli()
diff --git a/run.py b/run.py
index 35b03f055c871fc8dde66ee8171412fae23d3ab9..3b796757e894649c07f2f23d42563319e0880eec 100644
--- a/run.py
+++ b/run.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-from DeepFakeAI import core
+from facefusion import core
 
 if __name__ == '__main__':
     core.cli()
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 9733d904a7f56cd190d3b949701d970588ee4e58..0935222d5ee24ecd175a3327bc8a2add59ebe0b5 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -2,16 +2,16 @@ import subprocess
 import sys
 import pytest
 
-from DeepFakeAI import wording
-from DeepFakeAI.download import conditional_download
+from facefusion import wording
+from facefusion.download import conditional_download
 
 
 @pytest.fixture(scope = 'module', autouse = True)
 def before_all() -> None:
 	conditional_download('.assets/examples',
 	[
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg',
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1080p.mp4'
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-1080p.mp4'
 	])
 	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-1080p.mp4', '-vframes', '1', '.assets/examples/target-1080p.jpg' ])
 
diff --git a/tests/test_common_helper.py b/tests/test_common_helper.py
index b9c2f501008e225015527fa5b72e49bbcd621354..40ef7f320d356dbbe4d480f57e96408631e830fd 100644
--- a/tests/test_common_helper.py
+++ b/tests/test_common_helper.py
@@ -1,4 +1,4 @@
-from DeepFakeAI.common_helper import create_metavar, create_range
+from facefusion.common_helper import create_metavar, create_range
 
 
 def test_create_metavar() -> None:
diff --git a/tests/test_download.py b/tests/test_download.py
index e5e84a96b52aa6344434c312a16b0c2e55ed4aa2..f80c44b89bc32bf2a51c002f2ab9118ee8d6150b 100644
--- a/tests/test_download.py
+++ b/tests/test_download.py
@@ -1,23 +1,23 @@
 import pytest
 
-from DeepFakeAI.download import conditional_download, get_download_size, is_download_done
+from facefusion.download import conditional_download, get_download_size, is_download_done
 
 
 @pytest.fixture(scope = 'module', autouse = True)
 def before_all() -> None:
 	conditional_download('.assets/examples',
 	[
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4'
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4'
 	])
 
 
 def test_get_download_size() -> None:
-	assert get_download_size('https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4') == 191675
-	assert get_download_size('https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-360p.mp4') == 370732
+	assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4') == 191675
+	assert get_download_size('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-360p.mp4') == 370732
 	assert get_download_size('invalid') == 0
 
 
 def test_is_download_done() -> None:
-	assert is_download_done('https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4', '.assets/examples/target-240p.mp4') is True
-	assert is_download_done('https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4','invalid') is False
+	assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4', '.assets/examples/target-240p.mp4') is True
+	assert is_download_done('https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4','invalid') is False
 	assert is_download_done('invalid', 'invalid') is False
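
A hedged sketch of the download helpers exercised by the test above. It assumes `conditional_download` skips files that already exist locally; this is inferred from its name and how the fixtures use it, not confirmed here.

```python
# Sketch only; the skip-if-present behaviour of conditional_download is an assumption.
from facefusion.download import conditional_download, is_download_done

ASSET_URL = 'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4'

conditional_download('.assets/examples', [ ASSET_URL ])
if is_download_done(ASSET_URL, '.assets/examples/target-240p.mp4'):
	print('example asset is ready')
```
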
diff --git a/tests/test_execution_helper.py b/tests/test_execution_helper.py
index bd60d15ea86309f382c4fb40d43b366122b8e7b6..5d199123e5d2d37b0689dd37769125491f028d06 100644
--- a/tests/test_execution_helper.py
+++ b/tests/test_execution_helper.py
@@ -1,4 +1,4 @@
-from DeepFakeAI.execution_helper import encode_execution_providers, decode_execution_providers
+from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
 
 
 def test_encode_execution_providers() -> None:
diff --git a/tests/test_ffmpeg.py b/tests/test_ffmpeg.py
index dc79f3e1dbb4f56a01222b64fcfd0dd7ad322779..b67ed7599ccb784a114cf60511ce19e334dfe6ea 100644
--- a/tests/test_ffmpeg.py
+++ b/tests/test_ffmpeg.py
@@ -2,18 +2,18 @@ import glob
 import subprocess
 import pytest
 
-import DeepFakeAI.globals
-from DeepFakeAI.filesystem import get_temp_directory_path, create_temp, clear_temp
-from DeepFakeAI.download import conditional_download
-from DeepFakeAI.ffmpeg import extract_frames
+import facefusion.globals
+from facefusion.filesystem import get_temp_directory_path, create_temp, clear_temp
+from facefusion.download import conditional_download
+from facefusion.ffmpeg import extract_frames
 
 
 @pytest.fixture(scope = 'module', autouse = True)
 def before_all() -> None:
 	conditional_download('.assets/examples',
 	[
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg',
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4'
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4'
 	])
 	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ])
 	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ])
@@ -22,10 +22,10 @@ def before_all() -> None:
 
 @pytest.fixture(scope = 'function', autouse = True)
 def before_each() -> None:
-	DeepFakeAI.globals.trim_frame_start = None
-	DeepFakeAI.globals.trim_frame_end = None
-	DeepFakeAI.globals.temp_frame_quality = 80
-	DeepFakeAI.globals.temp_frame_format = 'jpg'
+	facefusion.globals.trim_frame_start = None
+	facefusion.globals.trim_frame_end = None
+	facefusion.globals.temp_frame_quality = 80
+	facefusion.globals.temp_frame_format = 'jpg'
 
 
 def test_extract_frames() -> None:
@@ -46,7 +46,7 @@ def test_extract_frames() -> None:
 
 
 def test_extract_frames_with_trim_start() -> None:
-	DeepFakeAI.globals.trim_frame_start = 224
+	facefusion.globals.trim_frame_start = 224
 	data_provider =\
 	[
 		('.assets/examples/target-240p-25fps.mp4', 55),
@@ -64,8 +64,8 @@ def test_extract_frames_with_trim_start() -> None:
 
 
 def test_extract_frames_with_trim_start_and_trim_end() -> None:
-	DeepFakeAI.globals.trim_frame_start = 124
-	DeepFakeAI.globals.trim_frame_end = 224
+	facefusion.globals.trim_frame_start = 124
+	facefusion.globals.trim_frame_end = 224
 	data_provider =\
 	[
 		('.assets/examples/target-240p-25fps.mp4', 120),
@@ -83,7 +83,7 @@ def test_extract_frames_with_trim_start_and_trim_end() -> None:
 
 
 def test_extract_frames_with_trim_end() -> None:
-	DeepFakeAI.globals.trim_frame_end = 100
+	facefusion.globals.trim_frame_end = 100
 	data_provider =\
 	[
 		('.assets/examples/target-240p-25fps.mp4', 120),
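
For orientation, a hedged sketch of the flow these ffmpeg tests exercise: set the renamed `facefusion.globals` trim values, create the temporary directory, then extract frames. The `extract_frames(target_path, fps)` signature and the hard-coded fps are assumptions based on the fixtures above, not taken from the module itself.

```python
# Assumed flow and signature; values mirror the test fixtures above.
import facefusion.globals
from facefusion.filesystem import get_temp_directory_path, create_temp, clear_temp
from facefusion.ffmpeg import extract_frames

target_path = '.assets/examples/target-240p-25fps.mp4'
facefusion.globals.temp_frame_format = 'jpg'
facefusion.globals.temp_frame_quality = 80
facefusion.globals.trim_frame_start = 124  # first frame to extract
facefusion.globals.trim_frame_end = 224    # last frame to extract

create_temp(target_path)
extract_frames(target_path, 25)  # assumed signature: (target_path, fps)
print(get_temp_directory_path(target_path))  # temporary frames land here
clear_temp(target_path)
```
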
diff --git a/tests/test_filesystem.py b/tests/test_filesystem.py
index c21def4f8510c870223c54a23cff5d4403c6cd6f..c50474214966aaf9d6952a05a1a1a76f0ebfcf80 100644
--- a/tests/test_filesystem.py
+++ b/tests/test_filesystem.py
@@ -1,4 +1,4 @@
-from DeepFakeAI.filesystem import is_file, is_directory, is_image, are_images, is_video
+from facefusion.filesystem import is_file, is_directory, is_image, are_images, is_video
 
 
 def test_is_file() -> None:
diff --git a/tests/test_normalizer.py b/tests/test_normalizer.py
index 521fd802964e73e602f1ef4c7fa256aef234bc22..0547e12d90ecbd527a433aa8f9ec96e8d1d72548 100644
--- a/tests/test_normalizer.py
+++ b/tests/test_normalizer.py
@@ -1,6 +1,6 @@
 import platform
 
-from DeepFakeAI.normalizer import normalize_output_path, normalize_padding
+from facefusion.normalizer import normalize_output_path, normalize_padding
 
 
 def test_normalize_output_path() -> None:
diff --git a/tests/test_vision.py b/tests/test_vision.py
index 2ab0b3b33cc8de0c936292e1a96f06885228e8f0..5b51e62992191dd386ad58fc87fb584b04dfe8a2 100644
--- a/tests/test_vision.py
+++ b/tests/test_vision.py
@@ -1,16 +1,16 @@
 import subprocess
 import pytest
 
-from DeepFakeAI.download import conditional_download
-from DeepFakeAI.vision import get_video_frame, detect_fps, count_video_frame_total
+from facefusion.download import conditional_download
+from facefusion.vision import get_video_frame, detect_fps, count_video_frame_total
 
 
 @pytest.fixture(scope = 'module', autouse = True)
 def before_all() -> None:
 	conditional_download('.assets/examples',
 	[
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg',
-		'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4'
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/source.jpg',
+		'https://github.com/facefusion/facefusion-assets/releases/download/examples/target-240p.mp4'
 	])
 	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ])
 	subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ])