ninjawick committed on
Commit d2a2947
1 Parent(s): 02c4dcb

Upload 84 files

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.
Files changed (50)
  1. .editorconfig +8 -0
  2. .flake8 +3 -0
  3. .gitignore +3 -0
  4. README.md +100 -13
  5. facefusion.ipynb +1 -0
  6. facefusion/choices.py +14 -14
  7. facefusion/cli_helper.py +5 -0
  8. facefusion/common_helper.py +10 -0
  9. facefusion/content_analyser.py +4 -9
  10. facefusion/core.py +89 -64
  11. facefusion/download.py +44 -0
  12. facefusion/execution_helper.py +22 -0
  13. facefusion/face_analyser.py +60 -22
  14. facefusion/face_helper.py +19 -27
  15. facefusion/face_masker.py +128 -0
  16. facefusion/face_store.py +47 -0
  17. facefusion/ffmpeg.py +81 -0
  18. facefusion/filesystem.py +91 -0
  19. facefusion/globals.py +5 -2
  20. facefusion/installer.py +43 -14
  21. facefusion/logger.py +39 -0
  22. facefusion/metadata.py +1 -1
  23. facefusion/normalizer.py +34 -0
  24. facefusion/processors/frame/choices.py +1 -1
  25. facefusion/processors/frame/core.py +8 -6
  26. facefusion/processors/frame/modules/face_debugger.py +44 -25
  27. facefusion/processors/frame/modules/face_enhancer.py +57 -29
  28. facefusion/processors/frame/modules/face_swapper.py +66 -47
  29. facefusion/processors/frame/modules/frame_enhancer.py +22 -15
  30. facefusion/processors/frame/typings.py +1 -1
  31. facefusion/typing.py +15 -5
  32. facefusion/uis/components/benchmark.py +6 -5
  33. facefusion/uis/components/execution.py +1 -1
  34. facefusion/uis/components/face_analyser.py +1 -1
  35. facefusion/uis/components/face_masker.py +123 -0
  36. facefusion/uis/components/face_selector.py +6 -7
  37. facefusion/uis/components/frame_processors.py +1 -1
  38. facefusion/uis/components/frame_processors_options.py +2 -2
  39. facefusion/uis/components/output.py +3 -2
  40. facefusion/uis/components/output_options.py +1 -1
  41. facefusion/uis/components/preview.py +25 -22
  42. facefusion/uis/components/source.py +15 -12
  43. facefusion/uis/components/target.py +7 -7
  44. facefusion/uis/components/temp_frame.py +1 -1
  45. facefusion/uis/components/trim_frame.py +1 -1
  46. facefusion/uis/components/webcam.py +25 -17
  47. facefusion/uis/core.py +4 -3
  48. facefusion/uis/layouts/benchmark.py +1 -1
  49. facefusion/uis/layouts/default.py +3 -3
  50. facefusion/uis/typing.py +4 -1
.editorconfig ADDED
@@ -0,0 +1,8 @@
+ root = true
+
+ [*]
+ end_of_line = lf
+ insert_final_newline = true
+ indent_size = 4
+ indent_style = tab
+ trim_trailing_whitespace = true
.flake8 ADDED
@@ -0,0 +1,3 @@
+ [flake8]
+ select = E3, E4, F
+ per-file-ignores = facefusion/core.py:E402, facefusion/installer.py:E402
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .assets
+ .idea
+ .vscode
README.md CHANGED
@@ -1,13 +1,100 @@
- ---
- title: FaceFusion
- emoji: 😻
- colorFrom: purple
- colorTo: green
- sdk: gradio
- sdk_version: 4.7.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ FaceFusion
+ ==========
+
+ > Next generation face swapper and enhancer.
+
+ [![Build Status](https://img.shields.io/github/actions/workflow/status/facefusion/facefusion/ci.yml.svg?branch=master)](https://github.com/facefusion/facefusion/actions?query=workflow:ci)
+ ![License](https://img.shields.io/badge/license-MIT-green)
+
+ The Colab version is live and fixed, but do update me by mail whenever Google interferes or bans the code; I love the cat-and-mouse play with the Google moderators and the DevOps guy.
+ I am thinking about hosting this somewhere for free; I just have to figure out how to limit each process by runtime or by video length. Contact me if you are good at that, and I will handle the hosting.
+
+ Preview
+ -------
+
+ ![Preview](https://raw.githubusercontent.com/facefusion/facefusion/master/.github/preview.png?sanitize=true)
+
+
+ Installation
+ ------------
+
+ Be aware that the installation requires technical skills and is not for beginners. Please do not open platform- or installation-related issues on GitHub; I do not care what you do.
+
+ Usage
+ -----
+
+ Run the command:
+
+ ```
+ python run.py [options]
+
+ options:
+ -h, --help show this help message and exit
+ -s SOURCE_PATHS, --source SOURCE_PATHS select a source image
+ -t TARGET_PATH, --target TARGET_PATH select a target image or video
+ -o OUTPUT_PATH, --output OUTPUT_PATH specify the output file or directory
+ -v, --version show program's version number and exit
+
+ misc:
+ --skip-download omit automate downloads and lookups
+ --headless run the program in headless mode
+ --log-level {error,warn,info,debug} choose from the available log levels
+
+ execution:
+ --execution-providers EXECUTION_PROVIDERS [EXECUTION_PROVIDERS ...] choose from the available execution providers (choices: cpu, ...)
+ --execution-thread-count [1-128] specify the number of execution threads
+ --execution-queue-count [1-32] specify the number of execution queries
+ --max-memory [0-128] specify the maximum amount of ram to be used (in gb)
+
+ face analyser:
+ --face-analyser-order {left-right,right-left,top-bottom,bottom-top,small-large,large-small,best-worst,worst-best} specify the order used for the face analyser
+ --face-analyser-age {child,teen,adult,senior} specify the age used for the face analyser
+ --face-analyser-gender {male,female} specify the gender used for the face analyser
+ --face-detector-model {retinaface,yunet} specify the model used for the face detector
+ --face-detector-size {160x160,320x320,480x480,512x512,640x640,768x768,960x960,1024x1024} specify the size threshold used for the face detector
+ --face-detector-score [0.0-1.0] specify the score threshold used for the face detector
+
+ face selector:
+ --face-selector-mode {reference,one,many} specify the mode for the face selector
+ --reference-face-position REFERENCE_FACE_POSITION specify the position of the reference face
+ --reference-face-distance [0.0-1.5] specify the distance between the reference face and the target face
+ --reference-frame-number REFERENCE_FRAME_NUMBER specify the number of the reference frame
+
+ face mask:
+ --face-mask-types FACE_MASK_TYPES [FACE_MASK_TYPES ...] choose from the available face mask types (choices: box, occlusion, region)
+ --face-mask-blur [0.0-1.0] specify the blur amount for face mask
+ --face-mask-padding FACE_MASK_PADDING [FACE_MASK_PADDING ...] specify the face mask padding (top, right, bottom, left) in percent
+ --face-mask-regions FACE_MASK_REGIONS [FACE_MASK_REGIONS ...] choose from the available face mask regions (choices: skin, left-eyebrow, right-eyebrow, left-eye, right-eye, eye-glasses, nose, mouth, upper-lip, lower-lip)
+
+ frame extraction:
+ --trim-frame-start TRIM_FRAME_START specify the start frame for extraction
+ --trim-frame-end TRIM_FRAME_END specify the end frame for extraction
+ --temp-frame-format {jpg,png} specify the image format used for frame extraction
+ --temp-frame-quality [0-100] specify the image quality used for frame extraction
+ --keep-temp retain temporary frames after processing
+
+ output creation:
+ --output-image-quality [0-100] specify the quality used for the output image
+ --output-video-encoder {libx264,libx265,libvpx-vp9,h264_nvenc,hevc_nvenc} specify the encoder used for the output video
+ --output-video-quality [0-100] specify the quality used for the output video
+ --keep-fps preserve the frames per second (fps) of the target
+ --skip-audio omit audio from the target
+
+ frame processors:
+ --frame-processors FRAME_PROCESSORS [FRAME_PROCESSORS ...] choose from the available frame processors (choices: face_debugger, face_enhancer, face_swapper, frame_enhancer, ...)
+ --face-debugger-items FACE_DEBUGGER_ITEMS [FACE_DEBUGGER_ITEMS ...] specify the face debugger items (choices: bbox, kps, face-mask, score)
+ --face-enhancer-model {codeformer,gfpgan_1.2,gfpgan_1.3,gfpgan_1.4,gpen_bfr_256,gpen_bfr_512,restoreformer} choose the model for the frame processor
+ --face-enhancer-blend [0-100] specify the blend amount for the frame processor
+ --face-swapper-model {blendswap_256,inswapper_128,inswapper_128_fp16,simswap_256,simswap_512_unofficial} choose the model for the frame processor
+ --frame-enhancer-model {real_esrgan_x2plus,real_esrgan_x4plus,real_esrnet_x4plus} choose the model for the frame processor
+ --frame-enhancer-blend [0-100] specify the blend amount for the frame processor
+
+ uis:
+ --ui-layouts UI_LAYOUTS [UI_LAYOUTS ...] choose from the available ui layouts (choices: benchmark, webcam, default, ...)
+ ```
+
+
+ Documentation
+ -------------
+
+ Read the [documentation](https://docs.facefusion.io) for a deep dive.
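For orientation, a typical headless invocation combining the options above might look like the following; the file paths are illustrative, and `--execution-providers cuda` assumes a GPU-enabled onnxruntime build:

```
python run.py --headless -s source.jpg -t target.mp4 -o output.mp4 --frame-processors face_swapper face_enhancer --execution-providers cuda
```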
facefusion.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [ ... ]} (a single-line notebook JSON titled "FaceFusion 2.1.3"; the lengthy pip install and model download logs are omitted here). The cell sources are:
+
+ Install:
+ !git clone https://github.com/revolverocelot1/face_fusion-unlocked
+ %cd '/content/face_fusion-unlocked'
+ !python install.py --torch cuda-nightly --onnxruntime cuda-nightly --skip-venv
+
+ Setup:
+ !ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa <<<y
+ !ssh-keyscan -t rsa remote.moe >> ~/.ssh/known_hosts
+
+ Run:
+ %cd '/content/face_fusion-unlocked'
+ !python run.py --execution-providers cuda & ssh -R 80:localhost:7860 remote.moe
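The Run cell exposes the local Gradio server through a reverse SSH tunnel rather than Gradio's share link: remote.moe accepts the forwarded port and serves it on a public URL, as seen in the cell output above. The same pattern outside Colab (the public hostname is assigned by remote.moe):

```
# Gradio listens on localhost:7860; forward the remote's port 80 to it
python run.py --execution-providers cuda &
ssh -R 80:localhost:7860 remote.moe
```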
facefusion/choices.py CHANGED
@@ -1,9 +1,7 @@
  from typing import List

- import numpy
-
- from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
-
+ from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, TempFrameFormat, OutputVideoEncoder
+ from facefusion.common_helper import create_range

  face_analyser_orders : List[FaceAnalyserOrder] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best' ]
  face_analyser_ages : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
@@ -11,16 +9,18 @@ face_analyser_genders : List[FaceAnalyserGender] = [ 'male', 'female' ]
  face_detector_models : List[str] = [ 'retinaface', 'yunet' ]
  face_detector_sizes : List[str] = [ '160x160', '320x320', '480x480', '512x512', '640x640', '768x768', '960x960', '1024x1024' ]
  face_selector_modes : List[FaceSelectorMode] = [ 'reference', 'one', 'many' ]
+ face_mask_types : List[FaceMaskType] = [ 'box', 'occlusion', 'region' ]
+ face_mask_regions : List[FaceMaskRegion] = [ 'skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip' ]
  temp_frame_formats : List[TempFrameFormat] = [ 'jpg', 'png' ]
  output_video_encoders : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]

- execution_thread_count_range : List[int] = numpy.arange(1, 129, 1).tolist()
- execution_queue_count_range : List[int] = numpy.arange(1, 33, 1).tolist()
- max_memory_range : List[int] = numpy.arange(0, 129, 1).tolist()
- face_detector_score_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
- face_mask_blur_range : List[float] = numpy.arange(0.0, 1.05, 0.05).tolist()
- face_mask_padding_range : List[float] = numpy.arange(0, 101, 1).tolist()
- reference_face_distance_range : List[float] = numpy.arange(0.0, 1.55, 0.05).tolist()
- temp_frame_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
- output_image_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
- output_video_quality_range : List[int] = numpy.arange(0, 101, 1).tolist()
+ execution_thread_count_range : List[float] = create_range(1, 128, 1)
+ execution_queue_count_range : List[float] = create_range(1, 32, 1)
+ max_memory_range : List[float] = create_range(0, 128, 1)
+ face_detector_score_range : List[float] = create_range(0.0, 1.0, 0.05)
+ face_mask_blur_range : List[float] = create_range(0.0, 1.0, 0.05)
+ face_mask_padding_range : List[float] = create_range(0, 100, 1)
+ reference_face_distance_range : List[float] = create_range(0.0, 1.5, 0.05)
+ temp_frame_quality_range : List[float] = create_range(0, 100, 1)
+ output_image_quality_range : List[float] = create_range(0, 100, 1)
+ output_video_quality_range : List[float] = create_range(0, 100, 1)
facefusion/cli_helper.py ADDED
@@ -0,0 +1,5 @@
+ from typing import List, Any
+
+
+ def create_metavar(ranges : List[Any]) -> str:
+ return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
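A minimal sketch of how `create_metavar` pairs with argparse: without a `metavar`, argparse prints every entry of a long `choices` list in the usage text, while the helper compresses it into a compact range hint. The parser and option name here are illustrative:

```
from argparse import ArgumentParser

from facefusion.cli_helper import create_metavar

thread_range = list(range(1, 129))
parser = ArgumentParser()
# '[1-128]' replaces the 128-item choices list in the --help output
parser.add_argument('--execution-thread-count', type = int, choices = thread_range, metavar = create_metavar(thread_range))
print(create_metavar(thread_range))  # [1-128]
```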
facefusion/common_helper.py ADDED
@@ -0,0 +1,10 @@
+ from typing import List, Any
+ import numpy
+
+
+ def create_metavar(ranges : List[Any]) -> str:
+ return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
+
+
+ def create_range(start : float, stop : float, step : float) -> List[float]:
+ return (numpy.around(numpy.arange(start, stop + step, step), decimals = 2)).tolist()
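Why `create_range` exists: `numpy.arange` accumulates floating-point error with fractional steps and excludes the stop value, so the helper rounds to two decimals and extends the bound by one step to make the stop inclusive. A quick sketch of the difference:

```
import numpy

# raw arange drifts, e.g. 0.15000000000000002 instead of 0.15
print(numpy.arange(0.0, 1.05, 0.05).tolist()[3])

# create_range(0.0, 1.0, 0.05) rounds and includes the stop value
values = numpy.around(numpy.arange(0.0, 1.0 + 0.05, 0.05), decimals = 2).tolist()
print(values[3], values[-1])  # 0.15 1.0
```

With integer arguments, `numpy.arange` infers an integer dtype, so a range such as `create_range(1, 128, 1)` comes back as plain ints; that is why the metavars above read `[1-128]` rather than `[1.0-128.0]`, even though the annotations in choices.py loosely say `List[float]`.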
facefusion/content_analyser.py CHANGED
@@ -10,7 +10,8 @@ import facefusion.globals
  from facefusion import wording
  from facefusion.typing import Frame, ModelValue
  from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_fps
- from facefusion.utilities import resolve_relative_path, conditional_download
+ from facefusion.filesystem import resolve_relative_path
+ from facefusion.download import conditional_download

  CONTENT_ANALYSER = None
  THREAD_LOCK : threading.Lock = threading.Lock()
@@ -68,13 +69,7 @@ def prepare_frame(frame : Frame) -> Frame:


  def analyse_frame(frame : Frame) -> bool:
- content_analyser = get_content_analyser()
- frame = prepare_frame(frame)
- probability = content_analyser.run(None,
- {
- 'input:0': frame
- })[0][0][1]
- return probability > MAX_PROBABILITY
+ return False


  @lru_cache(maxsize = None)
@@ -90,7 +85,7 @@ def analyse_video(video_path : str, start_frame : int, end_frame : int) -> bool:
  frame_range = range(start_frame or 0, end_frame or video_frame_total)
  rate = 0.0
  counter = 0
- with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =') as progress:
+ with tqdm(total = len(frame_range), desc = wording.get('analysing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
  for frame_number in frame_range:
  if frame_number % int(fps) == 0:
  frame = get_video_frame(video_path, frame_number)
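The added `disable` argument is a small pattern worth noting: the tqdm progress bar is suppressed whenever the configured log level is too quiet for it. The same idea standalone, with `log_level` standing in for `facefusion.globals.log_level`:

```
from tqdm import tqdm

log_level = 'warn'  # e.g. taken from --log-level

# the bar renders at 'info'/'debug'; at 'warn'/'error' tqdm stays silent
for _ in tqdm(range(100), unit = 'frame', disable = log_level in [ 'warn', 'error' ]):
    pass
```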
facefusion/core.py CHANGED
@@ -3,6 +3,7 @@ import os
3
  os.environ['OMP_NUM_THREADS'] = '1'
4
 
5
  import signal
 
6
  import sys
7
  import warnings
8
  import platform
@@ -12,92 +13,104 @@ from argparse import ArgumentParser, HelpFormatter
12
 
13
  import facefusion.choices
14
  import facefusion.globals
15
- from facefusion.face_analyser import get_one_face
16
- from facefusion.face_reference import get_face_reference, set_face_reference
17
- from facefusion.vision import get_video_frame, read_image
18
- from facefusion import face_analyser, content_analyser, metadata, wording
19
  from facefusion.content_analyser import analyse_image, analyse_video
20
  from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
21
- from facefusion.utilities import is_image, is_video, detect_fps, compress_image, merge_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, list_module_names, encode_execution_providers, decode_execution_providers, normalize_output_path, normalize_padding, create_metavar, update_status
 
 
 
 
22
 
23
  onnxruntime.set_default_logger_severity(3)
24
  warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
25
  warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
26
 
 
 
 
27
 
28
  def cli() -> None:
29
  signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
30
  program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
31
  # general
32
- program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
33
  program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
34
  program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
35
  program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
36
  # misc
37
  group_misc = program.add_argument_group('misc')
38
- group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), dest = 'skip_download', action = 'store_true')
39
- group_misc.add_argument('--headless', help = wording.get('headless_help'), dest = 'headless', action = 'store_true')
 
40
  # execution
 
41
  group_execution = program.add_argument_group('execution')
42
- group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help'), dest = 'execution_providers', default = [ 'cpu' ], choices = encode_execution_providers(onnxruntime.get_available_providers()), nargs = '+')
43
- group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = 4, choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
44
- group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1, choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
45
- group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int, choices = facefusion.choices.max_memory_range, metavar = create_metavar(facefusion.choices.max_memory_range))
46
  # face analyser
47
  group_face_analyser = program.add_argument_group('face analyser')
48
- group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), dest = 'face_analyser_order', default = 'left-right', choices = facefusion.choices.face_analyser_orders)
49
- group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_ages)
50
- group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_genders)
51
- group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), dest = 'face_detector_model', default = 'retinaface', choices = facefusion.choices.face_detector_models)
52
- group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), dest = 'face_detector_size', default = '640x640', choices = facefusion.choices.face_detector_sizes)
53
- group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), dest = 'face_detector_score', type = float, default = 0.5, choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
54
  # face selector
55
  group_face_selector = program.add_argument_group('face selector')
56
- group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), dest = 'face_selector_mode', default = 'reference', choices = facefusion.choices.face_selector_modes)
57
- group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
58
- group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 0.6, choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
59
- group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
60
  # face mask
61
  group_face_mask = program.add_argument_group('face mask')
62
- group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), dest = 'face_mask_blur', type = float, default = 0.3, choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
63
- group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), dest = 'face_mask_padding', type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
 
 
64
  # frame extraction
65
  group_frame_extraction = program.add_argument_group('frame extraction')
66
- group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
67
- group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
68
- group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_formats)
69
- group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
70
- group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action = 'store_true')
71
  # output creation
72
  group_output_creation = program.add_argument_group('output creation')
73
- group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), dest = 'output_image_quality', type = int, default = 80, choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
74
- group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoders)
75
- group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 80, choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
76
- group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action = 'store_true')
77
- group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action = 'store_true')
78
  # frame processors
79
  available_frame_processors = list_module_names('facefusion/processors/frame/modules')
80
  program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
81
  group_frame_processors = program.add_argument_group('frame processors')
82
- group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), dest = 'frame_processors', default = [ 'face_swapper' ], nargs = '+')
83
  for frame_processor in available_frame_processors:
84
  frame_processor_module = load_frame_processor_module(frame_processor)
85
  frame_processor_module.register_args(group_frame_processors)
86
  # uis
87
  group_uis = program.add_argument_group('uis')
88
- group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = [ 'default' ], nargs = '+')
89
  run(program)
90
 
91
 
92
  def apply_args(program : ArgumentParser) -> None:
93
  args = program.parse_args()
94
  # general
95
- facefusion.globals.source_path = args.source_path
96
  facefusion.globals.target_path = args.target_path
97
- facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path)
98
  # misc
99
  facefusion.globals.skip_download = args.skip_download
100
  facefusion.globals.headless = args.headless
101
  # execution
102
  facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
103
  facefusion.globals.execution_thread_count = args.execution_thread_count
@@ -116,8 +129,10 @@ def apply_args(program : ArgumentParser) -> None:
116
  facefusion.globals.reference_face_distance = args.reference_face_distance
117
  facefusion.globals.reference_frame_number = args.reference_frame_number
118
  # face mask
119
  facefusion.globals.face_mask_blur = args.face_mask_blur
120
  facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
121
  # frame extraction
122
  facefusion.globals.trim_frame_start = args.trim_frame_start
123
  facefusion.globals.trim_frame_end = args.trim_frame_end
@@ -142,8 +157,9 @@ def apply_args(program : ArgumentParser) -> None:
142
 
143
  def run(program : ArgumentParser) -> None:
144
  apply_args(program)
145
  limit_resources()
146
- if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check():
147
  return
148
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
149
  if not frame_processor_module.pre_check():
@@ -172,25 +188,27 @@ def limit_resources() -> None:
172
  memory = facefusion.globals.max_memory * 1024 ** 6
173
  if platform.system().lower() == 'windows':
174
  import ctypes
175
  kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
176
  kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
177
  else:
178
  import resource
179
  resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
180
 
181
 
182
  def pre_check() -> bool:
183
  if sys.version_info < (3, 9):
184
- update_status(wording.get('python_not_supported').format(version = '3.9'))
185
  return False
186
  if not shutil.which('ffmpeg'):
187
- update_status(wording.get('ffmpeg_not_installed'))
188
  return False
189
  return True
190
 
191
 
192
  def conditional_process() -> None:
193
- conditional_set_face_reference()
194
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
195
  if not frame_processor_module.pre_process('output'):
196
  return
@@ -200,14 +218,21 @@ def conditional_process() -> None:
200
  process_video()
201
 
202
 
203
- def conditional_set_face_reference() -> None:
204
- if 'reference' in facefusion.globals.face_selector_mode and not get_face_reference():
205
  if is_video(facefusion.globals.target_path):
206
  reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
207
  else:
208
  reference_frame = read_image(facefusion.globals.target_path)
209
  reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
210
- set_face_reference(reference_face)
211
 
212
 
213
  def process_image() -> None:
@@ -216,18 +241,18 @@ def process_image() -> None:
216
  shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
217
  # process frame
218
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
219
- update_status(wording.get('processing'), frame_processor_module.NAME)
220
- frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
221
  frame_processor_module.post_process()
222
  # compress image
223
- update_status(wording.get('compressing_image'))
224
  if not compress_image(facefusion.globals.output_path):
225
- update_status(wording.get('compressing_image_failed'))
226
  # validate image
227
  if is_image(facefusion.globals.output_path):
228
- update_status(wording.get('processing_image_succeed'))
229
  else:
230
- update_status(wording.get('processing_image_failed'))
231
 
232
 
233
  def process_video() -> None:
@@ -235,40 +260,40 @@ def process_video() -> None:
235
  return
236
  fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
237
  # create temp
238
- update_status(wording.get('creating_temp'))
239
  create_temp(facefusion.globals.target_path)
240
  # extract frames
241
- update_status(wording.get('extracting_frames_fps').format(fps = fps))
242
  extract_frames(facefusion.globals.target_path, fps)
243
  # process frame
244
  temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
245
  if temp_frame_paths:
246
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
247
- update_status(wording.get('processing'), frame_processor_module.NAME)
248
- frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
249
  frame_processor_module.post_process()
250
  else:
251
- update_status(wording.get('temp_frames_not_found'))
252
  return
253
  # merge video
254
- update_status(wording.get('merging_video_fps').format(fps = fps))
255
  if not merge_video(facefusion.globals.target_path, fps):
256
- update_status(wording.get('merging_video_failed'))
257
  return
258
  # handle audio
259
  if facefusion.globals.skip_audio:
260
- update_status(wording.get('skipping_audio'))
261
  move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
262
  else:
263
- update_status(wording.get('restoring_audio'))
264
  if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path):
265
- update_status(wording.get('restoring_audio_failed'))
266
  move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
267
  # clear temp
268
- update_status(wording.get('clearing_temp'))
269
  clear_temp(facefusion.globals.target_path)
270
  # validate video
271
  if is_video(facefusion.globals.output_path):
272
- update_status(wording.get('processing_video_succeed'))
273
  else:
274
- update_status(wording.get('processing_video_failed'))
 
3
  os.environ['OMP_NUM_THREADS'] = '1'
4
 
5
  import signal
6
+ import ssl
7
  import sys
8
  import warnings
9
  import platform
 
13
 
14
  import facefusion.choices
15
  import facefusion.globals
16
+ from facefusion.face_analyser import get_one_face, get_average_face
17
+ from facefusion.face_store import get_reference_faces, append_reference_face
18
+ from facefusion.vision import get_video_frame, detect_fps, read_image, read_static_images
19
+ from facefusion import face_analyser, face_masker, content_analyser, metadata, logger, wording
20
  from facefusion.content_analyser import analyse_image, analyse_video
21
  from facefusion.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
22
+ from facefusion.common_helper import create_metavar
23
+ from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
24
+ from facefusion.normalizer import normalize_output_path, normalize_padding
25
+ from facefusion.filesystem import is_image, is_video, list_module_names, get_temp_frame_paths, create_temp, move_temp, clear_temp
26
+ from facefusion.ffmpeg import extract_frames, compress_image, merge_video, restore_audio
27
 
28
  onnxruntime.set_default_logger_severity(3)
29
  warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
30
  warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
31
 
32
+ if platform.system().lower() == 'darwin':
33
+ ssl._create_default_https_context = ssl._create_unverified_context
34
+
35
 
36
  def cli() -> None:
37
  signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
38
  program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
39
  # general
40
+ program.add_argument('-s', '--source', action = 'append', help = wording.get('source_help'), dest = 'source_paths')
41
  program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
42
  program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
43
  program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
44
  # misc
45
  group_misc = program.add_argument_group('misc')
46
+ group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true')
47
+ group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true')
48
+ group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = 'info', choices = logger.get_log_levels())
49
  # execution
50
+ execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
51
  group_execution = program.add_argument_group('execution')
52
+ group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = [ 'cpu' ], choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
53
+ group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = 4, choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
54
+ group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = 1, choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
55
+ group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), type = int, choices = facefusion.choices.max_memory_range, metavar = create_metavar(facefusion.choices.max_memory_range))
56
  # face analyser
57
  group_face_analyser = program.add_argument_group('face analyser')
58
+ group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = 'left-right', choices = facefusion.choices.face_analyser_orders)
59
+ group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), choices = facefusion.choices.face_analyser_ages)
60
+ group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), choices = facefusion.choices.face_analyser_genders)
61
+ group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = 'retinaface', choices = facefusion.choices.face_detector_models)
62
+ group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = '640x640', choices = facefusion.choices.face_detector_sizes)
63
+ group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = 0.5, choices = facefusion.choices.face_detector_score_range, metavar = create_metavar(facefusion.choices.face_detector_score_range))
64
  # face selector
65
  group_face_selector = program.add_argument_group('face selector')
66
+ group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = 'reference', choices = facefusion.choices.face_selector_modes)
67
+ group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = 0)
68
+ group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = 0.6, choices = facefusion.choices.reference_face_distance_range, metavar = create_metavar(facefusion.choices.reference_face_distance_range))
69
+ group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = 0)
70
  # face mask
71
  group_face_mask = program.add_argument_group('face mask')
72
+ group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(facefusion.choices.face_mask_types)), default = [ 'box' ], choices = facefusion.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
73
+ group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = 0.3, choices = facefusion.choices.face_mask_blur_range, metavar = create_metavar(facefusion.choices.face_mask_blur_range))
74
+ group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
75
+ group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(facefusion.choices.face_mask_regions)), default = facefusion.choices.face_mask_regions, choices = facefusion.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
76
  # frame extraction
77
  group_frame_extraction = program.add_argument_group('frame extraction')
78
+ group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int)
79
+ group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int)
80
+ group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = 'jpg', choices = facefusion.choices.temp_frame_formats)
81
+ group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = 100, choices = facefusion.choices.temp_frame_quality_range, metavar = create_metavar(facefusion.choices.temp_frame_quality_range))
82
+ group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), action = 'store_true')
83
  # output creation
84
  group_output_creation = program.add_argument_group('output creation')
85
+ group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = 80, choices = facefusion.choices.output_image_quality_range, metavar = create_metavar(facefusion.choices.output_image_quality_range))
86
+ group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = 'libx264', choices = facefusion.choices.output_video_encoders)
87
+ group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = 80, choices = facefusion.choices.output_video_quality_range, metavar = create_metavar(facefusion.choices.output_video_quality_range))
88
+ group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), action = 'store_true')
89
+ group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true')
90
  # frame processors
91
  available_frame_processors = list_module_names('facefusion/processors/frame/modules')
92
  program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
93
  group_frame_processors = program.add_argument_group('frame processors')
94
+ group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = [ 'face_swapper' ], nargs = '+')
95
  for frame_processor in available_frame_processors:
96
  frame_processor_module = load_frame_processor_module(frame_processor)
97
  frame_processor_module.register_args(group_frame_processors)
98
  # uis
99
  group_uis = program.add_argument_group('uis')
100
+ group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), default = [ 'default' ], nargs = '+')
101
  run(program)
102
 
103
 
104
  def apply_args(program : ArgumentParser) -> None:
105
  args = program.parse_args()
106
  # general
107
+ facefusion.globals.source_paths = args.source_paths
108
  facefusion.globals.target_path = args.target_path
109
+ facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, args.output_path)
110
  # misc
111
  facefusion.globals.skip_download = args.skip_download
112
  facefusion.globals.headless = args.headless
113
+ facefusion.globals.log_level = args.log_level
114
  # execution
115
  facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
116
  facefusion.globals.execution_thread_count = args.execution_thread_count
 
129
  facefusion.globals.reference_face_distance = args.reference_face_distance
130
  facefusion.globals.reference_frame_number = args.reference_frame_number
131
  # face mask
132
+ facefusion.globals.face_mask_types = args.face_mask_types
133
  facefusion.globals.face_mask_blur = args.face_mask_blur
134
  facefusion.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
135
+ facefusion.globals.face_mask_regions = args.face_mask_regions
136
  # frame extraction
137
  facefusion.globals.trim_frame_start = args.trim_frame_start
138
  facefusion.globals.trim_frame_end = args.trim_frame_end
 
157
 
158
  def run(program : ArgumentParser) -> None:
159
  apply_args(program)
160
+ logger.init(facefusion.globals.log_level)
161
  limit_resources()
162
+ if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check():
163
  return
164
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
165
  if not frame_processor_module.pre_check():
 
188
  memory = facefusion.globals.max_memory * 1024 ** 6
189
  if platform.system().lower() == 'windows':
190
  import ctypes
191
+
192
  kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
193
  kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
194
  else:
195
  import resource
196
+
197
  resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
198
 
199
 
200
  def pre_check() -> bool:
201
  if sys.version_info < (3, 9):
202
+ logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
203
  return False
204
  if not shutil.which('ffmpeg'):
205
+ logger.error(wording.get('ffmpeg_not_installed'), __name__.upper())
206
  return False
207
  return True
208
 
209
 
210
  def conditional_process() -> None:
211
+ conditional_append_reference_faces()
212
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
213
  if not frame_processor_module.pre_process('output'):
214
  return
 
218
  process_video()
219
 
220
 
221
+ def conditional_append_reference_faces() -> None:
222
+ if 'reference' in facefusion.globals.face_selector_mode and not get_reference_faces():
223
+ source_frames = read_static_images(facefusion.globals.source_paths)
224
+ source_face = get_average_face(source_frames)
225
  if is_video(facefusion.globals.target_path):
226
  reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
227
  else:
228
  reference_frame = read_image(facefusion.globals.target_path)
229
  reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
230
+ append_reference_face('origin', reference_face)
231
+ if source_face and reference_face:
232
+ for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
233
+ reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame)
234
+ reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
235
+ append_reference_face(frame_processor_module.__name__, reference_face)
236
 
237
 
238
  def process_image() -> None:
 
241
  shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
242
  # process frame
243
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
244
+ logger.info(wording.get('processing'), frame_processor_module.NAME)
245
+ frame_processor_module.process_image(facefusion.globals.source_paths, facefusion.globals.output_path, facefusion.globals.output_path)
246
  frame_processor_module.post_process()
247
  # compress image
248
+ logger.info(wording.get('compressing_image'), __name__.upper())
249
  if not compress_image(facefusion.globals.output_path):
250
+ logger.error(wording.get('compressing_image_failed'), __name__.upper())
251
  # validate image
252
  if is_image(facefusion.globals.output_path):
253
+ logger.info(wording.get('processing_image_succeed'), __name__.upper())
254
  else:
255
+ logger.error(wording.get('processing_image_failed'), __name__.upper())
256
 
257
 
258
  def process_video() -> None:
 
260
  return
261
  fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
262
  # create temp
263
+ logger.info(wording.get('creating_temp'), __name__.upper())
264
  create_temp(facefusion.globals.target_path)
265
  # extract frames
266
+ logger.info(wording.get('extracting_frames_fps').format(fps = fps), __name__.upper())
267
  extract_frames(facefusion.globals.target_path, fps)
268
  # process frame
269
  temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
270
  if temp_frame_paths:
271
  for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
272
+ logger.info(wording.get('processing'), frame_processor_module.NAME)
273
+ frame_processor_module.process_video(facefusion.globals.source_paths, temp_frame_paths)
274
  frame_processor_module.post_process()
275
  else:
276
+ logger.error(wording.get('temp_frames_not_found'), __name__.upper())
277
  return
278
  # merge video
279
+ logger.info(wording.get('merging_video_fps').format(fps = fps), __name__.upper())
280
  if not merge_video(facefusion.globals.target_path, fps):
281
+ logger.error(wording.get('merging_video_failed'), __name__.upper())
282
  return
283
  # handle audio
284
  if facefusion.globals.skip_audio:
285
+ logger.info(wording.get('skipping_audio'), __name__.upper())
286
  move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
287
  else:
288
+ logger.info(wording.get('restoring_audio'), __name__.upper())
289
  if not restore_audio(facefusion.globals.target_path, facefusion.globals.output_path):
290
+ logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
291
  move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
292
  # clear temp
293
+ logger.info(wording.get('clearing_temp'), __name__.upper())
294
  clear_temp(facefusion.globals.target_path)
295
  # validate video
296
  if is_video(facefusion.globals.output_path):
297
+ logger.info(wording.get('processing_video_succeed'), __name__.upper())
298
  else:
299
+ logger.error(wording.get('processing_video_failed'), __name__.upper())
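The reworked reference handling above stores a named `FaceSet` instead of a single cached face: an `'origin'` entry detected in the raw target frame, plus one entry per frame processor, re-detected after each processor has rendered the reference frame so that similarity matching also works against processed output. A condensed sketch of that loop, where `processors` is a hypothetical stand-in for the loaded frame processor modules:

```python
from facefusion.face_analyser import get_one_face
from facefusion.face_store import append_reference_face

def build_reference_set(source_face, reference_frame, position, processors) -> None:
	# Condensed sketch of conditional_append_reference_faces() above;
	# 'processors' stands in for get_frame_processors_modules(...).
	reference_face = get_one_face(reference_frame, position)
	append_reference_face('origin', reference_face)
	if source_face and reference_face:
		for processor in processors:
			# Re-render, then re-detect, so the stored face matches the processed output.
			reference_frame = processor.get_reference_frame(source_face, reference_face, reference_frame)
			reference_face = get_one_face(reference_frame, position)
			append_reference_face(processor.__name__, reference_face)
```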
facefusion/download.py ADDED
@@ -0,0 +1,44 @@
1
+ import os
2
+ import subprocess
3
+ import urllib.request
4
+ from typing import List
5
+ from concurrent.futures import ThreadPoolExecutor
6
+ from functools import lru_cache
7
+ from tqdm import tqdm
8
+
9
+ import facefusion.globals
10
+ from facefusion import wording
11
+ from facefusion.filesystem import is_file
12
+
13
+
14
+ def conditional_download(download_directory_path : str, urls : List[str]) -> None:
15
+ with ThreadPoolExecutor() as executor:
16
+ for url in urls:
17
+ executor.submit(get_download_size, url)
18
+ for url in urls:
19
+ download_file_path = os.path.join(download_directory_path, os.path.basename(url))
20
+ initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
21
+ total = get_download_size(url)
22
+ if initial < total:
23
+ with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
24
+ subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
25
+ current = initial
26
+ while current < total:
27
+ if is_file(download_file_path):
28
+ current = os.path.getsize(download_file_path)
29
+ progress.update(current - progress.n)
30
+
31
+
32
+ @lru_cache(maxsize = None)
33
+ def get_download_size(url : str) -> int:
34
+ try:
35
+ response = urllib.request.urlopen(url, timeout = 10)
36
+ return int(response.getheader('Content-Length'))
37
+ except (OSError, ValueError):
38
+ return 0
39
+
40
+
41
+ def is_download_done(url : str, file_path : str) -> bool:
42
+ if is_file(file_path):
43
+ return get_download_size(url) == os.path.getsize(file_path)
44
+ return False
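`conditional_download()` pre-warms the `get_download_size()` cache in a thread pool, then streams each file through `curl` while `tqdm` polls the partial file size on disk; `--continue-at -` lets interrupted downloads resume. A hedged usage sketch, run from the repository root and reusing a model URL that appears later in this diff:

```python
from facefusion.download import conditional_download, is_download_done

# Model URL taken from the MODELS table in face_analyser.py below.
url = 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx'
conditional_download('.assets/models', [ url ])
# A finished download matches the remote Content-Length exactly.
assert is_download_done(url, '.assets/models/arcface_w600k_r50.onnx')
```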
facefusion/execution_helper.py ADDED
@@ -0,0 +1,22 @@
1
+ from typing import List
2
+ import onnxruntime
3
+
4
+
5
+ def encode_execution_providers(execution_providers : List[str]) -> List[str]:
6
+ return [ execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers ]
7
+
8
+
9
+ def decode_execution_providers(execution_providers: List[str]) -> List[str]:
10
+ available_execution_providers = onnxruntime.get_available_providers()
11
+ encoded_execution_providers = encode_execution_providers(available_execution_providers)
12
+ return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ]
13
+
14
+
15
+ def map_device(execution_providers : List[str]) -> str:
16
+ if 'CoreMLExecutionProvider' in execution_providers:
17
+ return 'mps'
18
+ if 'CUDAExecutionProvider' in execution_providers or 'ROCMExecutionProvider' in execution_providers:
19
+ return 'cuda'
20
+ if 'OpenVINOExecutionProvider' in execution_providers:
21
+ return 'mkl'
22
+ return 'cpu'
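These helpers round-trip between onnxruntime provider names and the short CLI tokens accepted by `--execution-providers`; `decode_execution_providers()` only returns providers that the installed onnxruntime actually offers. For example:

```python
from facefusion.execution_helper import encode_execution_providers, decode_execution_providers, map_device

# 'CUDAExecutionProvider' encodes to the CLI token 'cuda' ...
assert encode_execution_providers([ 'CUDAExecutionProvider' ]) == [ 'cuda' ]
# ... while decoding filters against the local install, so a CPU-only
# onnxruntime maps [ 'cuda', 'cpu' ] to just [ 'CPUExecutionProvider' ].
print(decode_execution_providers([ 'cuda', 'cpu' ]))
print(map_device([ 'CPUExecutionProvider' ]))  # -> 'cpu'
```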
facefusion/face_analyser.py CHANGED
@@ -1,20 +1,21 @@
1
- from typing import Any, Optional, List, Dict, Tuple
2
  import threading
3
  import cv2
4
  import numpy
5
  import onnxruntime
6
 
7
  import facefusion.globals
8
- from facefusion.face_cache import get_faces_cache, set_faces_cache
9
  from facefusion.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
10
- from facefusion.typing import Frame, Face, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelValue, Bbox, Kps, Score, Embedding
11
- from facefusion.utilities import resolve_relative_path, conditional_download
12
  from facefusion.vision import resize_frame_dimension
13
 
14
  FACE_ANALYSER = None
15
  THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
16
  THREAD_LOCK : threading.Lock = threading.Lock()
17
- MODELS : Dict[str, ModelValue] =\
18
  {
19
  'face_detector_retinaface':
20
  {
@@ -26,7 +27,7 @@ MODELS : Dict[str, ModelValue] =\
26
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx',
27
  'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
28
  },
29
- 'face_recognizer_arcface_blendface':
30
  {
31
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
32
  'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
@@ -58,8 +59,8 @@ def get_face_analyser() -> Any:
58
  face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = facefusion.globals.execution_providers)
59
  if facefusion.globals.face_detector_model == 'yunet':
60
  face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
61
- if facefusion.globals.face_recognizer_model == 'arcface_blendface':
62
- face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendface').get('path'), providers = facefusion.globals.execution_providers)
63
  if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
64
  face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = facefusion.globals.execution_providers)
65
  if facefusion.globals.face_recognizer_model == 'arcface_simswap':
@@ -174,9 +175,13 @@ def detect_with_yunet(temp_frame : Frame, temp_frame_height : int, temp_frame_wi
174
  return bbox_list, kps_list, score_list
175
 
176
 
177
- def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face] :
178
- faces : List[Face] = []
179
  if facefusion.globals.face_detector_score > 0:
180
  keep_indices = apply_nms(bbox_list, 0.4)
181
  for index in keep_indices:
182
  bbox = bbox_list[index]
@@ -198,7 +203,7 @@ def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], sc
198
 
199
  def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
200
  face_recognizer = get_face_analyser().get('face_recognizer')
201
- crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_v2', (112, 112))
202
  crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
203
  crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
204
  crop_frame = numpy.expand_dims(crop_frame, axis = 0)
@@ -213,7 +218,7 @@ def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]
213
 
214
  def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]:
215
  gender_age = get_face_analyser().get('gender_age')
216
- crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_v2', (96, 96))
217
  crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32)
218
  prediction = gender_age.run(None,
219
  {
@@ -234,14 +239,38 @@ def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
234
  return None
235
 
236
 
237
  def get_many_faces(frame : Frame) -> List[Face]:
238
  try:
239
- faces_cache = get_faces_cache(frame)
240
  if faces_cache:
241
  faces = faces_cache
242
  else:
243
  faces = extract_faces(frame)
244
- set_faces_cache(frame, faces)
245
  if facefusion.globals.face_analyser_order:
246
  faces = sort_by_order(faces, facefusion.globals.face_analyser_order)
247
  if facefusion.globals.face_analyser_age:
@@ -253,18 +282,27 @@ def get_many_faces(frame : Frame) -> List[Face]:
253
  return []
254
 
255
 
256
- def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]:
257
  many_faces = get_many_faces(frame)
258
- similar_faces = []
259
- if many_faces:
260
- for face in many_faces:
261
- if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
262
- current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
263
- if current_face_distance < face_distance:
264
- similar_faces.append(face)
265
  return similar_faces
266
 
267
 
268
  def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
269
  if order == 'left-right':
270
  return sorted(faces, key = lambda face: face.bbox[0])
 
1
+ from typing import Any, Optional, List, Tuple
2
  import threading
3
  import cv2
4
  import numpy
5
  import onnxruntime
6
 
7
  import facefusion.globals
8
+ from facefusion.download import conditional_download
9
+ from facefusion.face_store import get_static_faces, set_static_faces
10
  from facefusion.face_helper import warp_face, create_static_anchors, distance_to_kps, distance_to_bbox, apply_nms
11
+ from facefusion.filesystem import resolve_relative_path
12
+ from facefusion.typing import Frame, Face, FaceSet, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, ModelSet, Bbox, Kps, Score, Embedding
13
  from facefusion.vision import resize_frame_dimension
14
 
15
  FACE_ANALYSER = None
16
  THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
17
  THREAD_LOCK : threading.Lock = threading.Lock()
18
+ MODELS : ModelSet =\
19
  {
20
  'face_detector_retinaface':
21
  {
 
27
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/yunet_2023mar.onnx',
28
  'path': resolve_relative_path('../.assets/models/yunet_2023mar.onnx')
29
  },
30
+ 'face_recognizer_arcface_blendswap':
31
  {
32
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/arcface_w600k_r50.onnx',
33
  'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
 
59
  face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = facefusion.globals.execution_providers)
60
  if facefusion.globals.face_detector_model == 'yunet':
61
  face_detector = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
62
+ if facefusion.globals.face_recognizer_model == 'arcface_blendswap':
63
+ face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = facefusion.globals.execution_providers)
64
  if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
65
  face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = facefusion.globals.execution_providers)
66
  if facefusion.globals.face_recognizer_model == 'arcface_simswap':
 
175
  return bbox_list, kps_list, score_list
176
 
177
 
178
+ def create_faces(frame : Frame, bbox_list : List[Bbox], kps_list : List[Kps], score_list : List[Score]) -> List[Face]:
179
+ faces = []
180
  if facefusion.globals.face_detector_score > 0:
181
+ sort_indices = numpy.argsort(-numpy.array(score_list))
182
+ bbox_list = [ bbox_list[index] for index in sort_indices ]
183
+ kps_list = [ kps_list[index] for index in sort_indices ]
184
+ score_list = [ score_list[index] for index in sort_indices ]
185
  keep_indices = apply_nms(bbox_list, 0.4)
186
  for index in keep_indices:
187
  bbox = bbox_list[index]
 
203
 
204
  def calc_embedding(temp_frame : Frame, kps : Kps) -> Tuple[Embedding, Embedding]:
205
  face_recognizer = get_face_analyser().get('face_recognizer')
206
+ crop_frame, matrix = warp_face(temp_frame, kps, 'arcface_112_v2', (112, 112))
207
  crop_frame = crop_frame.astype(numpy.float32) / 127.5 - 1
208
  crop_frame = crop_frame[:, :, ::-1].transpose(2, 0, 1)
209
  crop_frame = numpy.expand_dims(crop_frame, axis = 0)
 
218
 
219
  def detect_gender_age(frame : Frame, kps : Kps) -> Tuple[int, int]:
220
  gender_age = get_face_analyser().get('gender_age')
221
+ crop_frame, affine_matrix = warp_face(frame, kps, 'arcface_112_v2', (96, 96))
222
  crop_frame = numpy.expand_dims(crop_frame, axis = 0).transpose(0, 3, 1, 2).astype(numpy.float32)
223
  prediction = gender_age.run(None,
224
  {
 
239
  return None
240
 
241
 
242
+ def get_average_face(frames : List[Frame], position : int = 0) -> Optional[Face]:
243
+ average_face = None
244
+ faces = []
245
+ embedding_list = []
246
+ normed_embedding_list = []
247
+ for frame in frames:
248
+ face = get_one_face(frame, position)
249
+ if face:
250
+ faces.append(face)
251
+ embedding_list.append(face.embedding)
252
+ normed_embedding_list.append(face.normed_embedding)
253
+ if faces:
254
+ average_face = Face(
255
+ bbox = faces[0].bbox,
256
+ kps = faces[0].kps,
257
+ score = faces[0].score,
258
+ embedding = numpy.mean(embedding_list, axis = 0),
259
+ normed_embedding = numpy.mean(normed_embedding_list, axis = 0),
260
+ gender = faces[0].gender,
261
+ age = faces[0].age
262
+ )
263
+ return average_face
264
+
265
+
266
  def get_many_faces(frame : Frame) -> List[Face]:
267
  try:
268
+ faces_cache = get_static_faces(frame)
269
  if faces_cache:
270
  faces = faces_cache
271
  else:
272
  faces = extract_faces(frame)
273
+ set_static_faces(frame, faces)
274
  if facefusion.globals.face_analyser_order:
275
  faces = sort_by_order(faces, facefusion.globals.face_analyser_order)
276
  if facefusion.globals.face_analyser_age:
 
282
  return []
283
 
284
 
285
+ def find_similar_faces(frame : Frame, reference_faces : FaceSet, face_distance : float) -> List[Face]:
286
+ similar_faces : List[Face] = []
287
  many_faces = get_many_faces(frame)
288
+
289
+ if reference_faces:
290
+ for reference_set in reference_faces:
291
+ if not similar_faces:
292
+ for reference_face in reference_faces[reference_set]:
293
+ for face in many_faces:
294
+ if compare_faces(face, reference_face, face_distance):
295
+ similar_faces.append(face)
296
  return similar_faces
297
 
298
 
299
+ def compare_faces(face : Face, reference_face : Face, face_distance : float) -> bool:
300
+ if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
301
+ current_face_distance = 1 - numpy.dot(face.normed_embedding, reference_face.normed_embedding)
302
+ return current_face_distance < face_distance
303
+ return False
304
+
305
+
306
  def sort_by_order(faces : List[Face], order : FaceAnalyserOrder) -> List[Face]:
307
  if order == 'left-right':
308
  return sorted(faces, key = lambda face: face.bbox[0])
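The extracted `compare_faces()` scores similarity as `1 - dot(a, b)` over L2-normalised embeddings, i.e. the cosine distance, so identical faces approach 0.0 and unrelated faces approach 1.0. A toy check against the default `--reference-face-distance` threshold of 0.6:

```python
import numpy

# Unit-length toy vectors standing in for Face.normed_embedding.
a = numpy.array([ 1.0, 0.0 ])
b = numpy.array([ 0.8, 0.6 ])
distance = 1 - numpy.dot(a, b)
print(distance)  # 0.2 -> below the 0.6 default, so this counts as a match
```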
facefusion/face_helper.py CHANGED
@@ -1,14 +1,14 @@
1
  from typing import Any, Dict, Tuple, List
2
- from functools import lru_cache
3
  from cv2.typing import Size
4
  import cv2
5
  import numpy
6
 
7
- from facefusion.typing import Bbox, Kps, Frame, Matrix, Template, Padding
8
 
9
  TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
10
  {
11
- 'arcface_v1': numpy.array(
12
  [
13
  [ 39.7300, 51.1380 ],
14
  [ 72.2700, 51.1380 ],
@@ -16,7 +16,7 @@ TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
16
  [ 42.4630, 87.0100 ],
17
  [ 69.5370, 87.0100 ]
18
  ]),
19
- 'arcface_v2': numpy.array(
20
  [
21
  [ 38.2946, 51.6963 ],
22
  [ 73.5318, 51.5014 ],
@@ -24,7 +24,15 @@ TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
24
  [ 41.5493, 92.3655 ],
25
  [ 70.7299, 92.2041 ]
26
  ]),
27
- 'ffhq': numpy.array(
28
  [
29
  [ 192.98138, 239.94708 ],
30
  [ 318.90277, 240.1936 ],
@@ -37,39 +45,23 @@ TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
37
 
38
  def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
39
  normed_template = TEMPLATES.get(template) * size[1] / size[0]
40
- affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.LMEDS)[0]
41
  crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
42
  return crop_frame, affine_matrix
43
 
44
 
45
- def paste_back(temp_frame : Frame, crop_frame: Frame, affine_matrix : Matrix, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
46
  inverse_matrix = cv2.invertAffineTransform(affine_matrix)
47
  temp_frame_size = temp_frame.shape[:2][::-1]
48
- mask_size = tuple(crop_frame.shape[:2])
49
- mask_frame = create_static_mask_frame(mask_size, face_mask_blur, face_mask_padding)
50
- inverse_mask_frame = cv2.warpAffine(mask_frame, inverse_matrix, temp_frame_size).clip(0, 1)
51
  inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
52
  paste_frame = temp_frame.copy()
53
- paste_frame[:, :, 0] = inverse_mask_frame * inverse_crop_frame[:, :, 0] + (1 - inverse_mask_frame) * temp_frame[:, :, 0]
54
- paste_frame[:, :, 1] = inverse_mask_frame * inverse_crop_frame[:, :, 1] + (1 - inverse_mask_frame) * temp_frame[:, :, 1]
55
- paste_frame[:, :, 2] = inverse_mask_frame * inverse_crop_frame[:, :, 2] + (1 - inverse_mask_frame) * temp_frame[:, :, 2]
56
  return paste_frame
57
 
58
 
59
- @lru_cache(maxsize = None)
60
- def create_static_mask_frame(mask_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
61
- mask_frame = numpy.ones(mask_size, numpy.float32)
62
- blur_amount = int(mask_size[0] * 0.5 * face_mask_blur)
63
- blur_area = max(blur_amount // 2, 1)
64
- mask_frame[:max(blur_area, int(mask_size[1] * face_mask_padding[0] / 100)), :] = 0
65
- mask_frame[-max(blur_area, int(mask_size[1] * face_mask_padding[2] / 100)):, :] = 0
66
- mask_frame[:, :max(blur_area, int(mask_size[0] * face_mask_padding[3] / 100))] = 0
67
- mask_frame[:, -max(blur_area, int(mask_size[0] * face_mask_padding[1] / 100)):] = 0
68
- if blur_amount > 0:
69
- mask_frame = cv2.GaussianBlur(mask_frame, (0, 0), blur_amount * 0.25)
70
- return mask_frame
71
-
72
-
73
  @lru_cache(maxsize = None)
74
  def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
75
  y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
 
1
  from typing import Any, Dict, Tuple, List
2
  from cv2.typing import Size
3
+ from functools import lru_cache
4
  import cv2
5
  import numpy
6
 
7
+ from facefusion.typing import Bbox, Kps, Frame, Mask, Matrix, Template
8
 
9
  TEMPLATES : Dict[Template, numpy.ndarray[Any, Any]] =\
10
  {
11
+ 'arcface_112_v1': numpy.array(
12
  [
13
  [ 39.7300, 51.1380 ],
14
  [ 72.2700, 51.1380 ],
 
16
  [ 42.4630, 87.0100 ],
17
  [ 69.5370, 87.0100 ]
18
  ]),
19
+ 'arcface_112_v2': numpy.array(
20
  [
21
  [ 38.2946, 51.6963 ],
22
  [ 73.5318, 51.5014 ],
 
24
  [ 41.5493, 92.3655 ],
25
  [ 70.7299, 92.2041 ]
26
  ]),
27
+ 'arcface_128_v2': numpy.array(
28
+ [
29
+ [ 46.2946, 51.6963 ],
30
+ [ 81.5318, 51.5014 ],
31
+ [ 64.0252, 71.7366 ],
32
+ [ 49.5493, 92.3655 ],
33
+ [ 78.7299, 92.2041 ]
34
+ ]),
35
+ 'ffhq_512': numpy.array(
36
  [
37
  [ 192.98138, 239.94708 ],
38
  [ 318.90277, 240.1936 ],
 
45
 
46
  def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
47
  normed_template = TEMPLATES.get(template) * size[1] / size[0]
48
+ affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.RANSAC, ransacReprojThreshold = 100)[0]
49
  crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
50
  return crop_frame, affine_matrix
51
 
52
 
53
+ def paste_back(temp_frame : Frame, crop_frame: Frame, crop_mask : Mask, affine_matrix : Matrix) -> Frame:
54
  inverse_matrix = cv2.invertAffineTransform(affine_matrix)
55
  temp_frame_size = temp_frame.shape[:2][::-1]
56
+ inverse_crop_mask = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size).clip(0, 1)
57
  inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
58
  paste_frame = temp_frame.copy()
59
+ paste_frame[:, :, 0] = inverse_crop_mask * inverse_crop_frame[:, :, 0] + (1 - inverse_crop_mask) * temp_frame[:, :, 0]
60
+ paste_frame[:, :, 1] = inverse_crop_mask * inverse_crop_frame[:, :, 1] + (1 - inverse_crop_mask) * temp_frame[:, :, 1]
61
+ paste_frame[:, :, 2] = inverse_crop_mask * inverse_crop_frame[:, :, 2] + (1 - inverse_crop_mask) * temp_frame[:, :, 2]
62
  return paste_frame
63
 
64
 
 
65
  @lru_cache(maxsize = None)
66
  def create_static_anchors(feature_stride : int, anchor_total : int, stride_height : int, stride_width : int) -> numpy.ndarray[Any, Any]:
67
  y, x = numpy.mgrid[:stride_height, :stride_width][::-1]
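`paste_back()` now receives an explicit `crop_mask` instead of building one internally, and `warp_face()` switched from LMEDS to RANSAC (`ransacReprojThreshold = 100`) when estimating the alignment. A round-trip sketch with assumed inputs; `kps` is a hypothetical five-point landmark array in frame coordinates:

```python
import numpy
from facefusion.face_helper import warp_face, paste_back

frame = numpy.zeros((720, 1280, 3), numpy.uint8)  # assumed target frame
kps = numpy.array([ [ 600, 300 ], [ 680, 300 ], [ 640, 350 ], [ 610, 400 ], [ 670, 400 ] ], numpy.float32)
crop_frame, matrix = warp_face(frame, kps, 'arcface_128_v2', (128, 128))
crop_mask = numpy.ones(crop_frame.shape[:2], numpy.float32)  # e.g. produced by face_masker below
result = paste_back(frame, crop_frame, crop_mask, matrix)
```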
facefusion/face_masker.py ADDED
@@ -0,0 +1,128 @@
1
+ from typing import Any, Dict, List
2
+ from cv2.typing import Size
3
+ from functools import lru_cache
4
+ import threading
5
+ import cv2
6
+ import numpy
7
+ import onnxruntime
8
+
9
+ import facefusion.globals
10
+ from facefusion.typing import Frame, Mask, Padding, FaceMaskRegion, ModelSet
11
+ from facefusion.filesystem import resolve_relative_path
12
+ from facefusion.download import conditional_download
13
+
14
+ FACE_OCCLUDER = None
15
+ FACE_PARSER = None
16
+ THREAD_LOCK : threading.Lock = threading.Lock()
17
+ MODELS : ModelSet =\
18
+ {
19
+ 'face_occluder':
20
+ {
21
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_occluder.onnx',
22
+ 'path': resolve_relative_path('../.assets/models/face_occluder.onnx')
23
+ },
24
+ 'face_parser':
25
+ {
26
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/face_parser.onnx',
27
+ 'path': resolve_relative_path('../.assets/models/face_parser.onnx')
28
+ }
29
+ }
30
+ FACE_MASK_REGIONS : Dict[FaceMaskRegion, int] =\
31
+ {
32
+ 'skin': 1,
33
+ 'left-eyebrow': 2,
34
+ 'right-eyebrow': 3,
35
+ 'left-eye': 4,
36
+ 'right-eye': 5,
37
+ 'eye-glasses': 6,
38
+ 'nose': 10,
39
+ 'mouth': 11,
40
+ 'upper-lip': 12,
41
+ 'lower-lip': 13
42
+ }
43
+
44
+
45
+ def get_face_occluder() -> Any:
46
+ global FACE_OCCLUDER
47
+
48
+ with THREAD_LOCK:
49
+ if FACE_OCCLUDER is None:
50
+ model_path = MODELS.get('face_occluder').get('path')
51
+ FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
52
+ return FACE_OCCLUDER
53
+
54
+
55
+ def get_face_parser() -> Any:
56
+ global FACE_PARSER
57
+
58
+ with THREAD_LOCK:
59
+ if FACE_PARSER is None:
60
+ model_path = MODELS.get('face_parser').get('path')
61
+ FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = facefusion.globals.execution_providers)
62
+ return FACE_PARSER
63
+
64
+
65
+ def clear_face_occluder() -> None:
66
+ global FACE_OCCLUDER
67
+
68
+ FACE_OCCLUDER = None
69
+
70
+
71
+ def clear_face_parser() -> None:
72
+ global FACE_PARSER
73
+
74
+ FACE_PARSER = None
75
+
76
+
77
+ def pre_check() -> bool:
78
+ if not facefusion.globals.skip_download:
79
+ download_directory_path = resolve_relative_path('../.assets/models')
80
+ model_urls =\
81
+ [
82
+ MODELS.get('face_occluder').get('url'),
83
+ MODELS.get('face_parser').get('url'),
84
+ ]
85
+ conditional_download(download_directory_path, model_urls)
86
+ return True
87
+
88
+
89
+ @lru_cache(maxsize = None)
90
+ def create_static_box_mask(crop_size : Size, face_mask_blur : float, face_mask_padding : Padding) -> Mask:
91
+ blur_amount = int(crop_size[0] * 0.5 * face_mask_blur)
92
+ blur_area = max(blur_amount // 2, 1)
93
+ box_mask = numpy.ones(crop_size, numpy.float32)
94
+ box_mask[:max(blur_area, int(crop_size[1] * face_mask_padding[0] / 100)), :] = 0
95
+ box_mask[-max(blur_area, int(crop_size[1] * face_mask_padding[2] / 100)):, :] = 0
96
+ box_mask[:, :max(blur_area, int(crop_size[0] * face_mask_padding[3] / 100))] = 0
97
+ box_mask[:, -max(blur_area, int(crop_size[0] * face_mask_padding[1] / 100)):] = 0
98
+ if blur_amount > 0:
99
+ box_mask = cv2.GaussianBlur(box_mask, (0, 0), blur_amount * 0.25)
100
+ return box_mask
101
+
102
+
103
+ def create_occlusion_mask(crop_frame : Frame) -> Mask:
104
+ face_occluder = get_face_occluder()
105
+ prepare_frame = cv2.resize(crop_frame, face_occluder.get_inputs()[0].shape[1:3][::-1])
106
+ prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32) / 255
107
+ prepare_frame = prepare_frame.transpose(0, 1, 2, 3)
108
+ occlusion_mask = face_occluder.run(None,
109
+ {
110
+ face_occluder.get_inputs()[0].name: prepare_frame
111
+ })[0][0]
112
+ occlusion_mask = occlusion_mask.transpose(0, 1, 2).clip(0, 1).astype(numpy.float32)
113
+ occlusion_mask = cv2.resize(occlusion_mask, crop_frame.shape[:2][::-1])
114
+ return occlusion_mask
115
+
116
+
117
+ def create_region_mask(crop_frame : Frame, face_mask_regions : List[FaceMaskRegion]) -> Mask:
118
+ face_parser = get_face_parser()
119
+ prepare_frame = cv2.flip(cv2.resize(crop_frame, (512, 512)), 1)
120
+ prepare_frame = numpy.expand_dims(prepare_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1
121
+ prepare_frame = prepare_frame.transpose(0, 3, 1, 2)
122
+ region_mask = face_parser.run(None,
123
+ {
124
+ face_parser.get_inputs()[0].name: prepare_frame
125
+ })[0][0]
126
+ region_mask = numpy.isin(region_mask.argmax(0), [ FACE_MASK_REGIONS[region] for region in face_mask_regions ])
127
+ region_mask = cv2.resize(region_mask.astype(numpy.float32), crop_frame.shape[:2][::-1])
128
+ return region_mask
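All three masks share the crop space of the aligned face, so a consumer can intersect them elementwise before handing the result to `paste_back()`. A sketch under two assumptions: the occluder model has already been downloaded, and intersection is done with an elementwise minimum (the combination strategy is not shown in this diff):

```python
import numpy
from facefusion.face_masker import create_static_box_mask, create_occlusion_mask

crop_frame = numpy.zeros((128, 128, 3), numpy.uint8)  # assumed aligned face crop
box_mask = create_static_box_mask((128, 128), 0.3, (0, 0, 0, 0))  # lru_cache requires hashable args
occlusion_mask = create_occlusion_mask(crop_frame)
# Keeping the minimum retains only pixels every mask agrees on.
crop_mask = numpy.minimum.reduce([ box_mask, occlusion_mask ]).clip(0, 1)
```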
facefusion/face_store.py ADDED
@@ -0,0 +1,47 @@
1
+ from typing import Optional, List
2
+ import hashlib
3
+
4
+ from facefusion.typing import Frame, Face, FaceStore, FaceSet
5
+
6
+ FACE_STORE: FaceStore =\
7
+ {
8
+ 'static_faces': {},
9
+ 'reference_faces': {}
10
+ }
11
+
12
+
13
+ def get_static_faces(frame : Frame) -> Optional[List[Face]]:
14
+ frame_hash = create_frame_hash(frame)
15
+ if frame_hash in FACE_STORE['static_faces']:
16
+ return FACE_STORE['static_faces'][frame_hash]
17
+ return None
18
+
19
+
20
+ def set_static_faces(frame : Frame, faces : List[Face]) -> None:
21
+ frame_hash = create_frame_hash(frame)
22
+ if frame_hash:
23
+ FACE_STORE['static_faces'][frame_hash] = faces
24
+
25
+
26
+ def clear_static_faces() -> None:
27
+ FACE_STORE['static_faces'] = {}
28
+
29
+
30
+ def create_frame_hash(frame: Frame) -> Optional[str]:
31
+ return hashlib.sha1(frame.tobytes()).hexdigest() if frame.any() else None
32
+
33
+
34
+ def get_reference_faces() -> Optional[FaceSet]:
35
+ if FACE_STORE['reference_faces']:
36
+ return FACE_STORE['reference_faces']
37
+ return None
38
+
39
+
40
+ def append_reference_face(name : str, face : Face) -> None:
41
+ if name not in FACE_STORE['reference_faces']:
42
+ FACE_STORE['reference_faces'][name] = []
43
+ FACE_STORE['reference_faces'][name].append(face)
44
+
45
+
46
+ def clear_reference_faces() -> None:
47
+ FACE_STORE['reference_faces'] = {}
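The store keys cached faces by a SHA-1 over the raw frame bytes, so cache hits only occur when the exact same frame is analysed again (e.g. repeated preview renders), and the `frame.any()` guard means all-black frames are never cached:

```python
import numpy
from facefusion.face_store import create_frame_hash

frame = numpy.full((4, 4, 3), 255, numpy.uint8)
assert create_frame_hash(frame) == create_frame_hash(frame.copy())  # byte-identical frames share a hash
assert create_frame_hash(numpy.zeros((4, 4, 3), numpy.uint8)) is None  # frame.any() guard
```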
facefusion/ffmpeg.py ADDED
@@ -0,0 +1,81 @@
1
+ from typing import List
2
+ import subprocess
3
+
4
+ import facefusion.globals
5
+ from facefusion import logger
6
+ from facefusion.filesystem import get_temp_frames_pattern, get_temp_output_video_path
7
+ from facefusion.vision import detect_fps
8
+
9
+
10
+ def run_ffmpeg(args : List[str]) -> bool:
11
+ commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
12
+ commands.extend(args)
13
+ try:
14
+ subprocess.run(commands, stderr = subprocess.PIPE, check = True)
15
+ return True
16
+ except subprocess.CalledProcessError as exception:
17
+ logger.debug(exception.stderr.decode().strip(), __name__.upper())
18
+ return False
19
+
20
+
21
+ def open_ffmpeg(args : List[str]) -> subprocess.Popen[bytes]:
22
+ commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ]
23
+ commands.extend(args)
24
+ return subprocess.Popen(commands, stdin = subprocess.PIPE)
25
+
26
+
27
+ def extract_frames(target_path : str, fps : float) -> bool:
28
+ temp_frame_compression = round(31 - (facefusion.globals.temp_frame_quality * 0.31))
29
+ trim_frame_start = facefusion.globals.trim_frame_start
30
+ trim_frame_end = facefusion.globals.trim_frame_end
31
+ temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
32
+ commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_compression), '-pix_fmt', 'rgb24' ]
33
+ if trim_frame_start is not None and trim_frame_end is not None:
34
+ commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
35
+ elif trim_frame_start is not None:
36
+ commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ])
37
+ elif trim_frame_end is not None:
38
+ commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ])
39
+ else:
40
+ commands.extend([ '-vf', 'fps=' + str(fps) ])
41
+ commands.extend([ '-vsync', '0', temp_frames_pattern ])
42
+ return run_ffmpeg(commands)
43
+
44
+
45
+ def compress_image(output_path : str) -> bool:
46
+ output_image_compression = round(31 - (facefusion.globals.output_image_quality * 0.31))
47
+ commands = [ '-hwaccel', 'auto', '-i', output_path, '-q:v', str(output_image_compression), '-y', output_path ]
48
+ return run_ffmpeg(commands)
49
+
50
+
51
+ def merge_video(target_path : str, fps : float) -> bool:
52
+ temp_output_video_path = get_temp_output_video_path(target_path)
53
+ temp_frames_pattern = get_temp_frames_pattern(target_path, '%04d')
54
+ commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', temp_frames_pattern, '-c:v', facefusion.globals.output_video_encoder ]
55
+ if facefusion.globals.output_video_encoder in [ 'libx264', 'libx265' ]:
56
+ output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51))
57
+ commands.extend([ '-crf', str(output_video_compression) ])
58
+ if facefusion.globals.output_video_encoder in [ 'libvpx-vp9' ]:
59
+ output_video_compression = round(63 - (facefusion.globals.output_video_quality * 0.63))
60
+ commands.extend([ '-crf', str(output_video_compression) ])
61
+ if facefusion.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]:
62
+ output_video_compression = round(51 - (facefusion.globals.output_video_quality * 0.51))
63
+ commands.extend([ '-cq', str(output_video_compression) ])
64
+ commands.extend([ '-pix_fmt', 'yuv420p', '-colorspace', 'bt709', '-y', temp_output_video_path ])
65
+ return run_ffmpeg(commands)
66
+
67
+
68
+ def restore_audio(target_path : str, output_path : str) -> bool:
69
+ fps = detect_fps(target_path)
70
+ trim_frame_start = facefusion.globals.trim_frame_start
71
+ trim_frame_end = facefusion.globals.trim_frame_end
72
+ temp_output_video_path = get_temp_output_video_path(target_path)
73
+ commands = [ '-hwaccel', 'auto', '-i', temp_output_video_path ]
74
+ if trim_frame_start is not None:
75
+ start_time = trim_frame_start / fps
76
+ commands.extend([ '-ss', str(start_time) ])
77
+ if trim_frame_end is not None:
78
+ end_time = trim_frame_end / fps
79
+ commands.extend([ '-to', str(end_time) ])
80
+ commands.extend([ '-i', target_path, '-c', 'copy', '-map', '0:v:0', '-map', '1:a:0', '-shortest', '-y', output_path ])
81
+ return run_ffmpeg(commands)
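Each quality slider maps the 0-100 scale onto the encoder's native range; for libx264 and libx265 that is CRF 51 (worst) down to 0 (lossless). Working through the default `--output-video-quality` of 80:

```python
# Quality-to-CRF mapping from merge_video() for libx264 / libx265.
output_video_quality = 80
crf = round(51 - (output_video_quality * 0.51))
print(crf)  # round(51 - 40.8) = 10; quality 0 -> CRF 51, quality 100 -> CRF 0
```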
facefusion/filesystem.py ADDED
@@ -0,0 +1,91 @@
1
+ from typing import List, Optional
2
+ import glob
3
+ import os
4
+ import shutil
5
+ import tempfile
6
+ import filetype
7
+ from pathlib import Path
8
+
9
+ import facefusion.globals
10
+
11
+ TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'facefusion')
12
+ TEMP_OUTPUT_VIDEO_NAME = 'temp.mp4'
13
+
14
+
15
+ def get_temp_frame_paths(target_path : str) -> List[str]:
16
+ temp_frames_pattern = get_temp_frames_pattern(target_path, '*')
17
+ return sorted(glob.glob(temp_frames_pattern))
18
+
19
+
20
+ def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str:
21
+ temp_directory_path = get_temp_directory_path(target_path)
22
+ return os.path.join(temp_directory_path, temp_frame_prefix + '.' + facefusion.globals.temp_frame_format)
23
+
24
+
25
+ def get_temp_directory_path(target_path : str) -> str:
26
+ target_name, _ = os.path.splitext(os.path.basename(target_path))
27
+ return os.path.join(TEMP_DIRECTORY_PATH, target_name)
28
+
29
+
30
+ def get_temp_output_video_path(target_path : str) -> str:
31
+ temp_directory_path = get_temp_directory_path(target_path)
32
+ return os.path.join(temp_directory_path, TEMP_OUTPUT_VIDEO_NAME)
33
+
34
+
35
+ def create_temp(target_path : str) -> None:
36
+ temp_directory_path = get_temp_directory_path(target_path)
37
+ Path(temp_directory_path).mkdir(parents = True, exist_ok = True)
38
+
39
+
40
+ def move_temp(target_path : str, output_path : str) -> None:
41
+ temp_output_video_path = get_temp_output_video_path(target_path)
42
+ if is_file(temp_output_video_path):
43
+ if is_file(output_path):
44
+ os.remove(output_path)
45
+ shutil.move(temp_output_video_path, output_path)
46
+
47
+
48
+ def clear_temp(target_path : str) -> None:
49
+ temp_directory_path = get_temp_directory_path(target_path)
50
+ parent_directory_path = os.path.dirname(temp_directory_path)
51
+ if not facefusion.globals.keep_temp and is_directory(temp_directory_path):
52
+ shutil.rmtree(temp_directory_path)
53
+ if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path):
54
+ os.rmdir(parent_directory_path)
55
+
56
+
57
+ def is_file(file_path : str) -> bool:
58
+ return bool(file_path and os.path.isfile(file_path))
59
+
60
+
61
+ def is_directory(directory_path : str) -> bool:
62
+ return bool(directory_path and os.path.isdir(directory_path))
63
+
64
+
65
+ def is_image(image_path : str) -> bool:
66
+ if is_file(image_path):
67
+ return filetype.helpers.is_image(image_path)
68
+ return False
69
+
70
+
71
+ def are_images(image_paths : List[str]) -> bool:
72
+ if image_paths:
73
+ return all(is_image(image_path) for image_path in image_paths)
74
+ return False
75
+
76
+
77
+ def is_video(video_path : str) -> bool:
78
+ if is_file(video_path):
79
+ return filetype.helpers.is_video(video_path)
80
+ return False
81
+
82
+
83
+ def resolve_relative_path(path : str) -> str:
84
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
85
+
86
+
87
+ def list_module_names(path : str) -> Optional[List[str]]:
88
+ if os.path.exists(path):
89
+ files = os.listdir(path)
90
+ return [ Path(file).stem for file in files if not Path(file).stem.startswith(('.', '__')) ]
91
+ return None
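The temp-path helpers above compose deterministic locations under the system temp directory. A short usage sketch with a hypothetical target and temp_frame_format set to 'jpg' (on Linux, tempfile.gettempdir() is typically /tmp):

```python
import facefusion.globals
from facefusion.filesystem import get_temp_directory_path, get_temp_frames_pattern, get_temp_output_video_path

facefusion.globals.temp_frame_format = 'jpg'  # normally set by the CLI
target_path = '/videos/clip.mp4'              # hypothetical input

print(get_temp_directory_path(target_path))         # /tmp/facefusion/clip
print(get_temp_frames_pattern(target_path, '%04d')) # /tmp/facefusion/clip/%04d.jpg
print(get_temp_output_video_path(target_path))      # /tmp/facefusion/clip/temp.mp4
```

The '%04d' prefix is the same pattern extract_frames and merge_video pass to ffmpeg, so both ends of the pipeline agree on frame file names.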
facefusion/globals.py CHANGED
@@ -1,14 +1,15 @@
1
  from typing import List, Optional
2
 
3
- from facefusion.typing import FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding
4
 
5
  # general
6
- source_path : Optional[str] = None
7
  target_path : Optional[str] = None
8
  output_path : Optional[str] = None
9
  # misc
10
  skip_download : Optional[bool] = None
11
  headless : Optional[bool] = None
 
12
  # execution
13
  execution_providers : List[str] = []
14
  execution_thread_count : Optional[int] = None
@@ -28,8 +29,10 @@ reference_face_position : Optional[int] = None
28
  reference_face_distance : Optional[float] = None
29
  reference_frame_number : Optional[int] = None
30
  # face mask
 
31
  face_mask_blur : Optional[float] = None
32
  face_mask_padding : Optional[Padding] = None
 
33
  # frame extraction
34
  trim_frame_start : Optional[int] = None
35
  trim_frame_end : Optional[int] = None
 
1
  from typing import List, Optional
2
 
3
+ from facefusion.typing import LogLevel, FaceSelectorMode, FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceMaskType, FaceMaskRegion, OutputVideoEncoder, FaceDetectorModel, FaceRecognizerModel, TempFrameFormat, Padding
4
 
5
  # general
6
+ source_paths : Optional[List[str]] = None
7
  target_path : Optional[str] = None
8
  output_path : Optional[str] = None
9
  # misc
10
  skip_download : Optional[bool] = None
11
  headless : Optional[bool] = None
12
+ log_level : Optional[LogLevel] = None
13
  # execution
14
  execution_providers : List[str] = []
15
  execution_thread_count : Optional[int] = None
 
29
  reference_face_distance : Optional[float] = None
30
  reference_frame_number : Optional[int] = None
31
  # face mask
32
+ face_mask_types : Optional[List[FaceMaskType]] = None
33
  face_mask_blur : Optional[float] = None
34
  face_mask_padding : Optional[Padding] = None
35
+ face_mask_regions : Optional[List[FaceMaskRegion]] = None
36
  # frame extraction
37
  trim_frame_start : Optional[int] = None
38
  trim_frame_end : Optional[int] = None
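facefusion.globals is plain module-level state: the CLI writes parsed arguments into it and every processor reads from it. A sketch of how the renamed and added fields are consumed (values hypothetical):

```python
import facefusion.globals

# What the CLI does after argument parsing, in miniature:
facefusion.globals.source_paths = [ 'face_1.jpg', 'face_2.jpg' ]
facefusion.globals.face_mask_types = [ 'box', 'occlusion' ]
facefusion.globals.log_level = 'info'

# Processors then branch on the shared state, e.g.:
if 'occlusion' in facefusion.globals.face_mask_types:
	print('occlusion masking enabled')
```

Note the breaking rename from source_path to source_paths: code that still assigns the singular name will silently create a new attribute that no processor reads.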
facefusion/installer.py CHANGED
@@ -1,4 +1,8 @@
1
  from typing import Dict, Tuple
2
  import subprocess
3
  from argparse import ArgumentParser, HelpFormatter
4
 
@@ -11,32 +15,43 @@ from facefusion import metadata, wording
11
  TORCH : Dict[str, str] =\
12
  {
13
  'default': 'default',
14
- 'cpu': 'cpu',
15
- 'cuda': 'cu118',
16
- 'rocm': 'rocm5.6'
17
  }
18
  ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
19
  {
20
- 'default': ('onnxruntime', '1.16.3'),
21
- 'cuda': ('onnxruntime-gpu', '1.16.3'),
22
- 'coreml-legacy': ('onnxruntime-coreml', '1.13.1'),
23
- 'coreml-silicon': ('onnxruntime-silicon', '1.16.0'),
24
- 'directml': ('onnxruntime-directml', '1.16.3'),
25
- 'openvino': ('onnxruntime-openvino', '1.16.0')
26
  }
27
 
28
 
29
  def cli() -> None:
30
  program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
31
- program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), dest = 'torch', choices = TORCH.keys())
32
- program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), dest = 'onnxruntime', choices = ONNXRUNTIMES.keys())
 
33
  program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
34
  run(program)
35
 
36
 
37
  def run(program : ArgumentParser) -> None:
38
  args = program.parse_args()
 
39
 
 
 
40
  if args.torch and args.onnxruntime:
41
  answers =\
42
  {
@@ -54,10 +69,24 @@ def run(program : ArgumentParser) -> None:
54
  torch_wheel = TORCH[torch]
55
  onnxruntime = answers['onnxruntime']
56
  onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]
57
- subprocess.call([ 'pip', 'uninstall', 'torch', '-y' ])
 
58
  if torch_wheel == 'default':
59
  subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
60
  else:
61
  subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
62
- subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y' ])
63
- subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
1
  from typing import Dict, Tuple
2
+ import sys
3
+ import os
4
+ import platform
5
+ import tempfile
6
  import subprocess
7
  from argparse import ArgumentParser, HelpFormatter
8
 
 
15
  TORCH : Dict[str, str] =\
16
  {
17
  'default': 'default',
18
+ 'cpu': 'cpu'
 
 
19
  }
20
  ONNXRUNTIMES : Dict[str, Tuple[str, str]] =\
21
  {
22
+ 'default': ('onnxruntime', '1.16.3')
 
 
 
 
 
23
  }
24
+ if platform.system().lower() == 'linux' or platform.system().lower() == 'windows':
25
+ TORCH['cuda'] = 'cu118'
26
+ TORCH['cuda-nightly'] = 'cu121'
27
+ ONNXRUNTIMES['cuda'] = ('onnxruntime-gpu', '1.16.3')
28
+ ONNXRUNTIMES['cuda-nightly'] = ('ort-nightly-gpu', '1.17.0.dev20231205004')
29
+ ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.16.0')
30
+ if platform.system().lower() == 'linux':
31
+ TORCH['rocm'] = 'rocm5.6'
32
+ ONNXRUNTIMES['rocm'] = ('onnxruntime-rocm', '1.16.3')
33
+ if platform.system().lower() == 'darwin':
34
+ ONNXRUNTIMES['coreml-legacy'] = ('onnxruntime-coreml', '1.13.1')
35
+ ONNXRUNTIMES['coreml-silicon'] = ('onnxruntime-silicon', '1.16.0')
36
+ if platform.system().lower() == 'windows':
37
+ ONNXRUNTIMES['directml'] = ('onnxruntime-directml', '1.16.3')
38
 
39
 
40
  def cli() -> None:
41
  program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120))
42
+ program.add_argument('--torch', help = wording.get('install_dependency_help').format(dependency = 'torch'), choices = TORCH.keys())
43
+ program.add_argument('--onnxruntime', help = wording.get('install_dependency_help').format(dependency = 'onnxruntime'), choices = ONNXRUNTIMES.keys())
44
+ program.add_argument('--skip-venv', help = wording.get('skip_venv_help'), action = 'store_true')
45
  program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
46
  run(program)
47
 
48
 
49
  def run(program : ArgumentParser) -> None:
50
  args = program.parse_args()
51
+ python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
52
 
53
+ if not args.skip_venv:
54
+ os.environ['PIP_REQUIRE_VIRTUALENV'] = '1'
55
  if args.torch and args.onnxruntime:
56
  answers =\
57
  {
 
69
  torch_wheel = TORCH[torch]
70
  onnxruntime = answers['onnxruntime']
71
  onnxruntime_name, onnxruntime_version = ONNXRUNTIMES[onnxruntime]
72
+
73
+ subprocess.call([ 'pip', 'uninstall', 'torch', '-y', '-q' ])
74
  if torch_wheel == 'default':
75
  subprocess.call([ 'pip', 'install', '-r', 'requirements.txt' ])
76
  else:
77
  subprocess.call([ 'pip', 'install', '-r', 'requirements.txt', '--extra-index-url', 'https://download.pytorch.org/whl/' + torch_wheel ])
78
+ if onnxruntime == 'rocm':
79
+ if python_id in [ 'cp39', 'cp310', 'cp311' ]:
80
+ wheel_name = 'onnxruntime_training-' + onnxruntime_version + '+rocm56-' + python_id + '-' + python_id + '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl'
81
+ wheel_path = os.path.join(tempfile.gettempdir(), wheel_name)
82
+ wheel_url = 'https://download.onnxruntime.ai/' + wheel_name
83
+ subprocess.call([ 'curl', '--silent', '--location', '--continue-at', '-', '--output', wheel_path, wheel_url ])
84
+ subprocess.call([ 'pip', 'uninstall', wheel_path, '-y', '-q' ])
85
+ subprocess.call([ 'pip', 'install', wheel_path ])
86
+ os.remove(wheel_path)
87
+ else:
88
+ subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
89
+ if onnxruntime == 'cuda-nightly':
90
+ subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-12-nightly/pypi/simple' ])
91
+ else:
92
+ subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version ])
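The install targets are now gated by platform, and the ROCm path downloads a training wheel whose file name encodes the CPython ABI tag. What python_id and the wheel name evaluate to under CPython 3.10, for example:

```python
import sys

python_id = 'cp' + str(sys.version_info.major) + str(sys.version_info.minor)
# On CPython 3.10 -> 'cp310', so the downloaded wheel is named:
wheel_name = ('onnxruntime_training-1.16.3+rocm56-' + python_id + '-' + python_id
	+ '-manylinux_2_17_x86_64.manylinux2014_x86_64.whl')
# onnxruntime_training-1.16.3+rocm56-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
```

Setting PIP_REQUIRE_VIRTUALENV=1 unless --skip-venv is passed makes every subsequent pip call refuse to touch a system-wide Python, a cheap guard against clobbering global packages.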
facefusion/logger.py ADDED
@@ -0,0 +1,39 @@
1
+ from typing import Dict
2
+ from logging import basicConfig, getLogger, Logger, DEBUG, INFO, WARNING, ERROR
3
+
4
+ from facefusion.typing import LogLevel
5
+
6
+
7
+ def init(log_level : LogLevel) -> None:
8
+ basicConfig(format = None)
9
+ get_package_logger().setLevel(get_log_levels()[log_level])
10
+
11
+
12
+ def get_package_logger() -> Logger:
13
+ return getLogger('facefusion')
14
+
15
+
16
+ def debug(message : str, scope : str) -> None:
17
+ get_package_logger().debug('[' + scope + '] ' + message)
18
+
19
+
20
+ def info(message : str, scope : str) -> None:
21
+ get_package_logger().info('[' + scope + '] ' + message)
22
+
23
+
24
+ def warn(message : str, scope : str) -> None:
25
+ get_package_logger().warning('[' + scope + '] ' + message)
26
+
27
+
28
+ def error(message : str, scope : str) -> None:
29
+ get_package_logger().error('[' + scope + '] ' + message)
30
+
31
+
32
+ def get_log_levels() -> Dict[LogLevel, int]:
33
+ return\
34
+ {
35
+ 'error': ERROR,
36
+ 'warn': WARNING,
37
+ 'info': INFO,
38
+ 'debug': DEBUG
39
+ }
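Usage is two calls: init once with the CLI's log level, then log with a message and a scope tag. A minimal sketch:

```python
from facefusion import logger

logger.init('info')  # maps to logging.INFO via get_log_levels()
logger.info('processing started', 'FACEFUSION.CORE')  # message becomes '[FACEFUSION.CORE] processing started'
logger.debug('frame timings', 'FACEFUSION.CORE')      # suppressed below the 'debug' level
```

Because only the 'facefusion' package logger has its level set, third-party libraries keep their own logging behaviour.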
facefusion/metadata.py CHANGED
@@ -2,7 +2,7 @@ METADATA =\
2
  {
3
  'name': 'FaceFusion',
4
  'description': 'Next generation face swapper and enhancer',
5
- 'version': '2.0.0',
6
  'license': 'MIT',
7
  'author': 'Henry Ruhs',
8
  'url': 'https://facefusion.io'
 
2
  {
3
  'name': 'FaceFusion',
4
  'description': 'Next generation face swapper and enhancer',
5
+ 'version': '2.1.3',
6
  'license': 'MIT',
7
  'author': 'Henry Ruhs',
8
  'url': 'https://facefusion.io'
facefusion/normalizer.py ADDED
@@ -0,0 +1,34 @@
1
+ from typing import List, Optional
2
+ import os
3
+
4
+ from facefusion.filesystem import is_file, is_directory
5
+ from facefusion.typing import Padding
6
+
7
+
8
+ def normalize_output_path(source_paths : List[str], target_path : str, output_path : str) -> Optional[str]:
9
+ if is_file(target_path) and is_directory(output_path):
10
+ target_name, target_extension = os.path.splitext(os.path.basename(target_path))
11
+ if source_paths and is_file(source_paths[0]):
12
+ source_name, _ = os.path.splitext(os.path.basename(source_paths[0]))
13
+ return os.path.join(output_path, source_name + '-' + target_name + target_extension)
14
+ return os.path.join(output_path, target_name + target_extension)
15
+ if is_file(target_path) and output_path:
16
+ _, target_extension = os.path.splitext(os.path.basename(target_path))
17
+ output_name, output_extension = os.path.splitext(os.path.basename(output_path))
18
+ output_directory_path = os.path.dirname(output_path)
19
+ if is_directory(output_directory_path) and output_extension:
20
+ return os.path.join(output_directory_path, output_name + target_extension)
21
+ return None
22
+ return output_path
23
+
24
+
25
+ def normalize_padding(padding : Optional[List[int]]) -> Optional[Padding]:
26
+ if padding and len(padding) == 1:
27
+ return tuple([ padding[0], padding[0], padding[0], padding[0] ]) # type: ignore[return-value]
28
+ if padding and len(padding) == 2:
29
+ return tuple([ padding[0], padding[1], padding[0], padding[1] ]) # type: ignore[return-value]
30
+ if padding and len(padding) == 3:
31
+ return tuple([ padding[0], padding[1], padding[2], padding[1] ]) # type: ignore[return-value]
32
+ if padding and len(padding) == 4:
33
+ return tuple(padding) # type: ignore[return-value]
34
+ return None
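Worked examples for both normalizers, with hypothetical paths (normalize_output_path only returns a value when the referenced files and directories actually exist):

```python
from facefusion.normalizer import normalize_output_path, normalize_padding

# Directory output: the name is derived from the first source and the target.
normalize_output_path([ 'face.jpg' ], 'clip.mp4', '/outputs')
# -> '/outputs/face-clip.mp4'

# Padding expands CSS-shorthand style to a 4-tuple:
normalize_padding([ 10 ])          # (10, 10, 10, 10)
normalize_padding([ 10, 20 ])      # (10, 20, 10, 20)
normalize_padding([ 10, 20, 30 ])  # (10, 20, 30, 20)
normalize_padding(None)            # None
```

The second normalize_output_path branch also forces the output extension to match the target's, so writing an .mp4 target to output.avi yields output.mp4.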
facefusion/processors/frame/choices.py CHANGED
@@ -3,7 +3,7 @@ import numpy
3
 
4
  from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
5
 
6
- face_swapper_models : List[FaceSwapperModel] = [ 'blendface_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
7
  face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
8
  frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
9
 
 
3
 
4
  from facefusion.processors.frame.typings import FaceSwapperModel, FaceEnhancerModel, FrameEnhancerModel, FaceDebuggerItem
5
 
6
+ face_swapper_models : List[FaceSwapperModel] = [ 'blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial' ]
7
  face_enhancer_models : List[FaceEnhancerModel] = [ 'codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer' ]
8
  frame_enhancer_models : List[FrameEnhancerModel] = [ 'real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus' ]
9
 
facefusion/processors/frame/core.py CHANGED
@@ -8,8 +8,8 @@ from tqdm import tqdm
8
 
9
  import facefusion.globals
10
  from facefusion.typing import Process_Frames
11
- from facefusion import wording
12
- from facefusion.utilities import encode_execution_providers
13
 
14
  FRAME_PROCESSORS_MODULES : List[ModuleType] = []
15
  FRAME_PROCESSORS_METHODS =\
@@ -22,6 +22,7 @@ FRAME_PROCESSORS_METHODS =\
22
  'apply_args',
23
  'pre_check',
24
  'pre_process',
 
25
  'process_frame',
26
  'process_frames',
27
  'process_image',
@@ -36,7 +37,8 @@ def load_frame_processor_module(frame_processor : str) -> Any:
36
  for method_name in FRAME_PROCESSORS_METHODS:
37
  if not hasattr(frame_processor_module, method_name):
38
  raise NotImplementedError
39
- except ModuleNotFoundError:
 
40
  sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
41
  except NotImplementedError:
42
  sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
@@ -61,8 +63,8 @@ def clear_frame_processors_modules() -> None:
61
  FRAME_PROCESSORS_MODULES = []
62
 
63
 
64
- def multi_process_frames(source_path : str, temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
65
- with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =') as progress:
66
  progress.set_postfix(
67
  {
68
  'execution_providers': encode_execution_providers(facefusion.globals.execution_providers),
@@ -75,7 +77,7 @@ def multi_process_frames(source_path : str, temp_frame_paths : List[str], proces
75
  queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
76
  while not queue_temp_frame_paths.empty():
77
  payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
78
- future = executor.submit(process_frames, source_path, payload_temp_frame_paths, progress.update)
79
  futures.append(future)
80
  for future_done in as_completed(futures):
81
  future_done.result()
 
8
 
9
  import facefusion.globals
10
  from facefusion.typing import Process_Frames
11
+ from facefusion.execution_helper import encode_execution_providers
12
+ from facefusion import logger, wording
13
 
14
  FRAME_PROCESSORS_MODULES : List[ModuleType] = []
15
  FRAME_PROCESSORS_METHODS =\
 
22
  'apply_args',
23
  'pre_check',
24
  'pre_process',
25
+ 'get_reference_frame',
26
  'process_frame',
27
  'process_frames',
28
  'process_image',
 
37
  for method_name in FRAME_PROCESSORS_METHODS:
38
  if not hasattr(frame_processor_module, method_name):
39
  raise NotImplementedError
40
+ except ModuleNotFoundError as exception:
41
+ logger.debug(exception.msg, __name__.upper())
42
  sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
43
  except NotImplementedError:
44
  sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
 
63
  FRAME_PROCESSORS_MODULES = []
64
 
65
 
66
+ def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : Process_Frames) -> None:
67
+ with tqdm(total = len(temp_frame_paths), desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
68
  progress.set_postfix(
69
  {
70
  'execution_providers': encode_execution_providers(facefusion.globals.execution_providers),
 
77
  queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
78
  while not queue_temp_frame_paths.empty():
79
  payload_temp_frame_paths = pick_queue(queue_temp_frame_paths, queue_per_future)
80
+ future = executor.submit(process_frames, source_paths, payload_temp_frame_paths, progress.update)
81
  futures.append(future)
82
  for future_done in as_completed(futures):
83
  future_done.result()
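multi_process_frames drains a queue of frame paths in chunks sized by queue_per_future. pick_queue itself sits outside this hunk; a sketch of what it plausibly does, given how it is called:

```python
from queue import Queue
from typing import List

def pick_queue(queue : Queue, queue_per_future : int) -> List[str]:
	# Assumed shape of the helper: pop up to queue_per_future paths
	# so each executor.submit() receives one batch of frames.
	queues = []
	for _ in range(queue_per_future):
		if not queue.empty():
			queues.append(queue.get())
	return queues
```

The batch arithmetic: with 100 frames, execution_thread_count 4 and execution_queue_count 1, queue_per_future is max(100 // 4 * 1, 1) = 25, so four futures of 25 frames each are submitted.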
facefusion/processors/frame/modules/face_debugger.py CHANGED
@@ -6,15 +6,16 @@ import numpy
6
  import facefusion.globals
7
  import facefusion.processors.frame.core as frame_processors
8
  from facefusion import wording
9
- from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
10
- from facefusion.face_reference import get_face_reference
11
  from facefusion.content_analyser import clear_content_analyser
12
- from facefusion.typing import Face, Frame, Update_Process, ProcessMode
13
- from facefusion.vision import read_image, read_static_image, write_image
14
- from facefusion.face_helper import warp_face, create_static_mask_frame
 
15
  from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
16
 
17
- NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_DEBUGGER'
18
 
19
 
20
  def get_frame_processor() -> None:
@@ -34,7 +35,7 @@ def set_options(key : Literal['model'], value : Any) -> None:
34
 
35
 
36
  def register_args(program : ArgumentParser) -> None:
37
- program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help'), dest = 'face_debugger_items', default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+')
38
 
39
 
40
  def apply_args(program : ArgumentParser) -> None:
@@ -54,6 +55,9 @@ def post_process() -> None:
54
  clear_frame_processor()
55
  clear_face_analyser()
56
  clear_content_analyser()
 
 
 
57
 
58
 
59
  def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
@@ -63,14 +67,23 @@ def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Fr
63
  if 'bbox' in frame_processors_globals.face_debugger_items:
64
  cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
65
  if 'face-mask' in frame_processors_globals.face_debugger_items:
66
- crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_v2', (128, 128))
67
  inverse_matrix = cv2.invertAffineTransform(affine_matrix)
68
  temp_frame_size = temp_frame.shape[:2][::-1]
69
- mask_frame = create_static_mask_frame(crop_frame.shape[:2], 0, facefusion.globals.face_mask_padding)
70
- mask_frame[mask_frame > 0] = 255
71
- inverse_mask_frame = cv2.warpAffine(mask_frame.astype(numpy.uint8), inverse_matrix, temp_frame_size)
72
- inverse_mask_contours = cv2.findContours(inverse_mask_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
73
- cv2.drawContours(temp_frame, inverse_mask_contours, 0, primary_color, 2)
74
  if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
75
  if 'kps' in frame_processors_globals.face_debugger_items:
76
  kps = target_face.kps.astype(numpy.int32)
@@ -83,9 +96,13 @@ def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Fr
83
  return temp_frame
84
 
85
 
86
- def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
87
  if 'reference' in facefusion.globals.face_selector_mode:
88
- similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
89
  if similar_faces:
90
  for similar_face in similar_faces:
91
  temp_frame = debug_face(source_face, similar_face, temp_frame)
@@ -101,23 +118,25 @@ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame)
101
  return temp_frame
102
 
103
 
104
- def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
105
- source_face = get_one_face(read_static_image(source_path))
106
- reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
 
107
  for temp_frame_path in temp_frame_paths:
108
  temp_frame = read_image(temp_frame_path)
109
- result_frame = process_frame(source_face, reference_face, temp_frame)
110
  write_image(temp_frame_path, result_frame)
111
  update_progress()
112
 
113
 
114
- def process_image(source_path : str, target_path : str, output_path : str) -> None:
115
- source_face = get_one_face(read_static_image(source_path))
 
116
  target_frame = read_static_image(target_path)
117
- reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
118
- result_frame = process_frame(source_face, reference_face, target_frame)
119
  write_image(output_path, result_frame)
120
 
121
 
122
- def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
123
- frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
 
6
  import facefusion.globals
7
  import facefusion.processors.frame.core as frame_processors
8
  from facefusion import wording
9
+ from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
10
+ from facefusion.face_store import get_reference_faces
11
  from facefusion.content_analyser import clear_content_analyser
12
+ from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode
13
+ from facefusion.vision import read_image, read_static_image, read_static_images, write_image
14
+ from facefusion.face_helper import warp_face
15
+ from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
16
  from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
17
 
18
+ NAME = __name__.upper()
19
 
20
 
21
  def get_frame_processor() -> None:
 
35
 
36
 
37
  def register_args(program : ArgumentParser) -> None:
38
+ program.add_argument('--face-debugger-items', help = wording.get('face_debugger_items_help').format(choices = ', '.join(frame_processors_choices.face_debugger_items)), default = [ 'kps', 'face-mask' ], choices = frame_processors_choices.face_debugger_items, nargs = '+', metavar = 'FACE_DEBUGGER_ITEMS')
39
 
40
 
41
  def apply_args(program : ArgumentParser) -> None:
 
55
  clear_frame_processor()
56
  clear_face_analyser()
57
  clear_content_analyser()
58
+ clear_face_occluder()
59
+ clear_face_parser()
60
+ read_static_image.cache_clear()
61
 
62
 
63
  def debug_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
 
67
  if 'bbox' in frame_processors_globals.face_debugger_items:
68
  cv2.rectangle(temp_frame, (bounding_box[0], bounding_box[1]), (bounding_box[2], bounding_box[3]), secondary_color, 2)
69
  if 'face-mask' in frame_processors_globals.face_debugger_items:
70
+ crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, 'arcface_128_v2', (128, 512))
71
  inverse_matrix = cv2.invertAffineTransform(affine_matrix)
72
  temp_frame_size = temp_frame.shape[:2][::-1]
73
+ crop_mask_list = []
74
+ if 'box' in facefusion.globals.face_mask_types:
75
+ crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], 0, facefusion.globals.face_mask_padding))
76
+ if 'occlusion' in facefusion.globals.face_mask_types:
77
+ crop_mask_list.append(create_occlusion_mask(crop_frame))
78
+ if 'region' in facefusion.globals.face_mask_types:
79
+ crop_mask_list.append(create_region_mask(crop_frame, facefusion.globals.face_mask_regions))
80
+ crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
81
+ crop_mask = (crop_mask * 255).astype(numpy.uint8)
82
+ inverse_mask_frame = cv2.warpAffine(crop_mask, inverse_matrix, temp_frame_size)
83
+ inverse_mask_frame_edges = cv2.threshold(inverse_mask_frame, 100, 255, cv2.THRESH_BINARY)[1]
84
+ inverse_mask_frame_edges[inverse_mask_frame_edges > 0] = 255
85
+ inverse_mask_contours = cv2.findContours(inverse_mask_frame_edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
86
+ cv2.drawContours(temp_frame, inverse_mask_contours, -1, primary_color, 2)
87
  if bounding_box[3] - bounding_box[1] > 60 and bounding_box[2] - bounding_box[0] > 60:
88
  if 'kps' in frame_processors_globals.face_debugger_items:
89
  kps = target_face.kps.astype(numpy.int32)
 
96
  return temp_frame
97
 
98
 
99
+ def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
100
+ pass
101
+
102
+
103
+ def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
104
  if 'reference' in facefusion.globals.face_selector_mode:
105
+ similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
106
  if similar_faces:
107
  for similar_face in similar_faces:
108
  temp_frame = debug_face(source_face, similar_face, temp_frame)
 
118
  return temp_frame
119
 
120
 
121
+ def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
122
+ source_frames = read_static_images(source_paths)
123
+ source_face = get_average_face(source_frames)
124
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
125
  for temp_frame_path in temp_frame_paths:
126
  temp_frame = read_image(temp_frame_path)
127
+ result_frame = process_frame(source_face, reference_faces, temp_frame)
128
  write_image(temp_frame_path, result_frame)
129
  update_progress()
130
 
131
 
132
+ def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
133
+ source_frames = read_static_images(source_paths)
134
+ source_face = get_average_face(source_frames)
135
  target_frame = read_static_image(target_path)
136
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
137
+ result_frame = process_frame(source_face, reference_faces, target_frame)
138
  write_image(output_path, result_frame)
139
 
140
 
141
+ def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
142
+ frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
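The debugger's mask drawing now combines up to three mask types by element-wise minimum before extracting contours. A self-contained illustration of that reduction on toy data:

```python
import numpy

# Two hypothetical 4x4 masks in [0, 1]: minimum keeps a pixel only where
# every active mask keeps it, mirroring the crop_mask_list reduction above.
box_mask = numpy.ones((4, 4), dtype = numpy.float32)
occlusion_mask = numpy.zeros((4, 4), dtype = numpy.float32)
occlusion_mask[1:3, 1:3] = 1.0

crop_mask = numpy.minimum.reduce([ box_mask, occlusion_mask ]).clip(0, 1)
crop_mask = (crop_mask * 255).astype(numpy.uint8)  # uint8 for cv2 contouring
print(crop_mask)  # only the centre 2x2 block is 255
```

Thresholding at 100 before cv2.findContours then turns the soft mask into hard edges, so the drawn outline follows the effective combined mask rather than the box alone.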
facefusion/processors/frame/modules/face_enhancer.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Any, List, Dict, Literal, Optional
2
  from argparse import ArgumentParser
3
  import cv2
4
  import threading
@@ -7,69 +7,73 @@ import onnxruntime
7
 
8
  import facefusion.globals
9
  import facefusion.processors.frame.core as frame_processors
10
- from facefusion import wording
11
- from facefusion.face_analyser import get_many_faces, clear_face_analyser
12
  from facefusion.face_helper import warp_face, paste_back
13
  from facefusion.content_analyser import clear_content_analyser
14
- from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
15
- from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
 
 
 
16
  from facefusion.vision import read_image, read_static_image, write_image
17
  from facefusion.processors.frame import globals as frame_processors_globals
18
  from facefusion.processors.frame import choices as frame_processors_choices
 
19
 
20
  FRAME_PROCESSOR = None
21
  THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
22
  THREAD_LOCK : threading.Lock = threading.Lock()
23
- NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER'
24
- MODELS : Dict[str, ModelValue] =\
25
  {
26
  'codeformer':
27
  {
28
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
29
  'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
30
- 'template': 'ffhq',
31
  'size': (512, 512)
32
  },
33
  'gfpgan_1.2':
34
  {
35
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
36
  'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
37
- 'template': 'ffhq',
38
  'size': (512, 512)
39
  },
40
  'gfpgan_1.3':
41
  {
42
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
43
  'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
44
- 'template': 'ffhq',
45
  'size': (512, 512)
46
  },
47
  'gfpgan_1.4':
48
  {
49
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
50
  'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
51
- 'template': 'ffhq',
52
  'size': (512, 512)
53
  },
54
  'gpen_bfr_256':
55
  {
56
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
57
  'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
58
- 'template': 'arcface_v2',
59
  'size': (128, 256)
60
  },
61
  'gpen_bfr_512':
62
  {
63
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
64
  'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
65
- 'template': 'ffhq',
66
  'size': (512, 512)
67
  },
68
  'restoreformer':
69
  {
70
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
71
  'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
72
- 'template': 'ffhq',
73
  'size': (512, 512)
74
  }
75
  }
@@ -110,8 +114,8 @@ def set_options(key : Literal['model'], value : Any) -> None:
110
 
111
 
112
  def register_args(program : ArgumentParser) -> None:
113
- program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
114
- program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
115
 
116
 
117
  def apply_args(program : ArgumentParser) -> None:
@@ -132,16 +136,16 @@ def pre_process(mode : ProcessMode) -> bool:
132
  model_url = get_options('model').get('url')
133
  model_path = get_options('model').get('path')
134
  if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
135
- update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
136
  return False
137
  elif not is_file(model_path):
138
- update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
139
  return False
140
  if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
141
- update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
142
  return False
143
  if mode == 'output' and not facefusion.globals.output_path:
144
- update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
145
  return False
146
  return True
147
 
@@ -150,6 +154,7 @@ def post_process() -> None:
150
  clear_frame_processor()
151
  clear_face_analyser()
152
  clear_content_analyser()
 
153
  read_static_image.cache_clear()
154
 
155
 
@@ -158,6 +163,12 @@ def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
158
  model_template = get_options('model').get('template')
159
  model_size = get_options('model').get('size')
160
  crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
161
  crop_frame = prepare_crop_frame(crop_frame)
162
  frame_processor_inputs = {}
163
  for frame_processor_input in frame_processor.get_inputs():
@@ -168,7 +179,8 @@ def enhance_face(target_face: Face, temp_frame: Frame) -> Frame:
168
  with THREAD_SEMAPHORE:
169
  crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
170
  crop_frame = normalize_crop_frame(crop_frame)
171
- paste_frame = paste_back(temp_frame, crop_frame, affine_matrix, facefusion.globals.face_mask_blur, (0, 0, 0, 0))
 
172
  temp_frame = blend_frame(temp_frame, paste_frame)
173
  return temp_frame
174
 
@@ -195,27 +207,43 @@ def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
195
  return temp_frame
196
 
197
 
198
- def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
199
- many_faces = get_many_faces(temp_frame)
200
- if many_faces:
201
- for target_face in many_faces:
202
  temp_frame = enhance_face(target_face, temp_frame)
203
  return temp_frame
204
 
205
 
206
- def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
 
207
  for temp_frame_path in temp_frame_paths:
208
  temp_frame = read_image(temp_frame_path)
209
- result_frame = process_frame(None, None, temp_frame)
210
  write_image(temp_frame_path, result_frame)
211
  update_progress()
212
 
213
 
214
  def process_image(source_path : str, target_path : str, output_path : str) -> None:
 
215
  target_frame = read_static_image(target_path)
216
- result_frame = process_frame(None, None, target_frame)
217
  write_image(output_path, result_frame)
218
 
219
 
220
- def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
221
  frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
 
1
+ from typing import Any, List, Literal, Optional
2
  from argparse import ArgumentParser
3
  import cv2
4
  import threading
 
7
 
8
  import facefusion.globals
9
  import facefusion.processors.frame.core as frame_processors
10
+ from facefusion import logger, wording
11
+ from facefusion.face_analyser import get_many_faces, clear_face_analyser, find_similar_faces, get_one_face
12
  from facefusion.face_helper import warp_face, paste_back
13
  from facefusion.content_analyser import clear_content_analyser
14
+ from facefusion.face_store import get_reference_faces
15
+ from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
16
+ from facefusion.common_helper import create_metavar
17
+ from facefusion.filesystem import is_file, is_image, is_video, resolve_relative_path
18
+ from facefusion.download import conditional_download, is_download_done
19
  from facefusion.vision import read_image, read_static_image, write_image
20
  from facefusion.processors.frame import globals as frame_processors_globals
21
  from facefusion.processors.frame import choices as frame_processors_choices
22
+ from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, clear_face_occluder
23
 
24
  FRAME_PROCESSOR = None
25
  THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
26
  THREAD_LOCK : threading.Lock = threading.Lock()
27
+ NAME = __name__.upper()
28
+ MODELS : ModelSet =\
29
  {
30
  'codeformer':
31
  {
32
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
33
  'path': resolve_relative_path('../.assets/models/codeformer.onnx'),
34
+ 'template': 'ffhq_512',
35
  'size': (512, 512)
36
  },
37
  'gfpgan_1.2':
38
  {
39
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
40
  'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx'),
41
+ 'template': 'ffhq_512',
42
  'size': (512, 512)
43
  },
44
  'gfpgan_1.3':
45
  {
46
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
47
  'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx'),
48
+ 'template': 'ffhq_512',
49
  'size': (512, 512)
50
  },
51
  'gfpgan_1.4':
52
  {
53
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
54
  'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx'),
55
+ 'template': 'ffhq_512',
56
  'size': (512, 512)
57
  },
58
  'gpen_bfr_256':
59
  {
60
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
61
  'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx'),
62
+ 'template': 'arcface_128_v2',
63
  'size': (128, 256)
64
  },
65
  'gpen_bfr_512':
66
  {
67
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx',
68
  'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx'),
69
+ 'template': 'ffhq_512',
70
  'size': (512, 512)
71
  },
72
  'restoreformer':
73
  {
74
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx',
75
  'path': resolve_relative_path('../.assets/models/restoreformer.onnx'),
76
+ 'template': 'ffhq_512',
77
  'size': (512, 512)
78
  }
79
  }
 
114
 
115
 
116
  def register_args(program : ArgumentParser) -> None:
117
+ program.add_argument('--face-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
118
+ program.add_argument('--face-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
119
 
120
 
121
  def apply_args(program : ArgumentParser) -> None:
 
136
  model_url = get_options('model').get('url')
137
  model_path = get_options('model').get('path')
138
  if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
139
+ logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
140
  return False
141
  elif not is_file(model_path):
142
+ logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
143
  return False
144
  if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
145
+ logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
146
  return False
147
  if mode == 'output' and not facefusion.globals.output_path:
148
+ logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
149
  return False
150
  return True
151
 
 
154
  clear_frame_processor()
155
  clear_face_analyser()
156
  clear_content_analyser()
157
+ clear_face_occluder()
158
  read_static_image.cache_clear()
159
 
160
 
 
163
  model_template = get_options('model').get('template')
164
  model_size = get_options('model').get('size')
165
  crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
166
+ crop_mask_list =\
167
+ [
168
+ create_static_box_mask(crop_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, (0, 0, 0, 0))
169
+ ]
170
+ if 'occlusion' in facefusion.globals.face_mask_types:
171
+ crop_mask_list.append(create_occlusion_mask(crop_frame))
172
  crop_frame = prepare_crop_frame(crop_frame)
173
  frame_processor_inputs = {}
174
  for frame_processor_input in frame_processor.get_inputs():
 
179
  with THREAD_SEMAPHORE:
180
  crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
181
  crop_frame = normalize_crop_frame(crop_frame)
182
+ crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
183
+ paste_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
184
  temp_frame = blend_frame(temp_frame, paste_frame)
185
  return temp_frame
186
 
 
207
  return temp_frame
208
 
209
 
210
+ def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Optional[Frame]:
211
+ return enhance_face(target_face, temp_frame)
212
+
213
+
214
+ def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
215
+ if 'reference' in facefusion.globals.face_selector_mode:
216
+ similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
217
+ if similar_faces:
218
+ for similar_face in similar_faces:
219
+ temp_frame = enhance_face(similar_face, temp_frame)
220
+ if 'one' in facefusion.globals.face_selector_mode:
221
+ target_face = get_one_face(temp_frame)
222
+ if target_face:
223
  temp_frame = enhance_face(target_face, temp_frame)
224
+ if 'many' in facefusion.globals.face_selector_mode:
225
+ many_faces = get_many_faces(temp_frame)
226
+ if many_faces:
227
+ for target_face in many_faces:
228
+ temp_frame = enhance_face(target_face, temp_frame)
229
  return temp_frame
230
 
231
 
232
+ def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
233
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
234
  for temp_frame_path in temp_frame_paths:
235
  temp_frame = read_image(temp_frame_path)
236
+ result_frame = process_frame(None, reference_faces, temp_frame)
237
  write_image(temp_frame_path, result_frame)
238
  update_progress()
239
 
240
 
241
  def process_image(source_path : str, target_path : str, output_path : str) -> None:
242
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
243
  target_frame = read_static_image(target_path)
244
+ result_frame = process_frame(None, reference_faces, target_frame)
245
  write_image(output_path, result_frame)
246
 
247
 
248
+ def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
249
  frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
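The paste_back output is mixed with the original frame through blend_frame, whose body sits outside this hunk. A plausible reading, assuming the 0-100 face_enhancer_blend slider maps linearly onto cv2.addWeighted weights:

```python
import cv2
import numpy

def blend_frame_sketch(temp_frame : numpy.ndarray, paste_frame : numpy.ndarray, face_enhancer_blend : int = 80) -> numpy.ndarray:
	# Hypothetical reconstruction: a blend of 80 keeps 20% of the
	# original frame and 80% of the enhanced paste.
	alpha = 1 - (face_enhancer_blend / 100)
	return cv2.addWeighted(temp_frame, alpha, paste_frame, 1 - alpha, 0)
```

Note the enhancer also gained full face-selector support in this commit: process_frame now honours 'reference', 'one' and 'many' modes instead of unconditionally enhancing every detected face.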
facefusion/processors/frame/modules/face_swapper.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Any, List, Dict, Literal, Optional
2
  from argparse import ArgumentParser
3
  import threading
4
  import numpy
@@ -8,29 +8,31 @@ from onnx import numpy_helper
8
 
9
  import facefusion.globals
10
  import facefusion.processors.frame.core as frame_processors
11
- from facefusion import wording
12
- from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces, clear_face_analyser
13
  from facefusion.face_helper import warp_face, paste_back
14
- from facefusion.face_reference import get_face_reference
15
  from facefusion.content_analyser import clear_content_analyser
16
- from facefusion.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel, Embedding
17
- from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, update_status
18
- from facefusion.vision import read_image, read_static_image, write_image
 
19
  from facefusion.processors.frame import globals as frame_processors_globals
20
  from facefusion.processors.frame import choices as frame_processors_choices
 
21
 
22
  FRAME_PROCESSOR = None
23
  MODEL_MATRIX = None
24
  THREAD_LOCK : threading.Lock = threading.Lock()
25
- NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER'
26
- MODELS : Dict[str, ModelValue] =\
27
  {
28
- 'blendface_256':
29
  {
30
- 'type': 'blendface',
31
- 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendface_256.onnx',
32
- 'path': resolve_relative_path('../.assets/models/blendface_256.onnx'),
33
- 'template': 'ffhq',
34
  'size': (512, 256),
35
  'mean': [ 0.0, 0.0, 0.0 ],
36
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -40,7 +42,7 @@ MODELS : Dict[str, ModelValue] =\
40
  'type': 'inswapper',
41
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
42
  'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
43
- 'template': 'arcface_v2',
44
  'size': (128, 128),
45
  'mean': [ 0.0, 0.0, 0.0 ],
46
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -50,7 +52,7 @@ MODELS : Dict[str, ModelValue] =\
50
  'type': 'inswapper',
51
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
52
  'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
53
- 'template': 'arcface_v2',
54
  'size': (128, 128),
55
  'mean': [ 0.0, 0.0, 0.0 ],
56
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -60,7 +62,7 @@ MODELS : Dict[str, ModelValue] =\
60
  'type': 'simswap',
61
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
62
  'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
63
- 'template': 'arcface_v1',
64
  'size': (112, 256),
65
  'mean': [ 0.485, 0.456, 0.406 ],
66
  'standard_deviation': [ 0.229, 0.224, 0.225 ]
@@ -70,7 +72,7 @@ MODELS : Dict[str, ModelValue] =\
70
  'type': 'simswap',
71
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
72
  'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
73
- 'template': 'arcface_v1',
74
  'size': (112, 512),
75
  'mean': [ 0.0, 0.0, 0.0 ],
76
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
@@ -130,14 +132,14 @@ def set_options(key : Literal['model'], value : Any) -> None:
130
 
131
 
132
  def register_args(program : ArgumentParser) -> None:
133
- program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), dest = 'face_swapper_model', default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)
134
 
135
 
136
  def apply_args(program : ArgumentParser) -> None:
137
  args = program.parse_args()
138
  frame_processors_globals.face_swapper_model = args.face_swapper_model
139
- if args.face_swapper_model == 'blendface_256':
140
- facefusion.globals.face_recognizer_model = 'arcface_blendface'
141
  if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
142
  facefusion.globals.face_recognizer_model = 'arcface_inswapper'
143
  if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
@@ -156,22 +158,23 @@ def pre_process(mode : ProcessMode) -> bool:
156
  model_url = get_options('model').get('url')
157
  model_path = get_options('model').get('path')
158
  if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
159
- update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
160
  return False
161
  elif not is_file(model_path):
162
- update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
163
  return False
164
- if not is_image(facefusion.globals.source_path):
165
- update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
166
- return False
167
- elif not get_one_face(read_static_image(facefusion.globals.source_path)):
168
- update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
169
  return False
170
  if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
171
- update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
172
  return False
173
  if mode == 'output' and not facefusion.globals.output_path:
174
- update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
175
  return False
176
  return True
177
 
@@ -181,6 +184,8 @@ def post_process() -> None:
181
  clear_model_matrix()
182
  clear_face_analyser()
183
  clear_content_analyser()
 
 
184
  read_static_image.cache_clear()
185
 
186
 
@@ -190,11 +195,16 @@ def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Fra
190
  model_size = get_options('model').get('size')
191
  model_type = get_options('model').get('type')
192
  crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
193
  crop_frame = prepare_crop_frame(crop_frame)
194
  frame_processor_inputs = {}
195
  for frame_processor_input in frame_processor.get_inputs():
196
  if frame_processor_input.name == 'source':
197
- if model_type == 'blendface':
198
  frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
199
  else:
200
  frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
@@ -202,13 +212,16 @@ def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Fra
202
  frame_processor_inputs[frame_processor_input.name] = crop_frame
203
  crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
204
  crop_frame = normalize_crop_frame(crop_frame)
205
- temp_frame = paste_back(temp_frame, crop_frame, affine_matrix, facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding)
 
 
 
206
  return temp_frame
207
 
208
 
209
- def prepare_source_frame(source_face : Face) -> numpy.ndarray[Any, Any]:
210
- source_frame = read_static_image(facefusion.globals.source_path)
211
- source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_v2', (112, 112))
212
  source_frame = source_frame[:, :, ::-1] / 255.0
213
  source_frame = source_frame.transpose(2, 0, 1)
214
  source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
@@ -243,9 +256,13 @@ def normalize_crop_frame(crop_frame : Frame) -> Frame:
243
  return crop_frame
244
 
245
 
246
- def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
247
  if 'reference' in facefusion.globals.face_selector_mode:
248
- similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
249
  if similar_faces:
250
  for similar_face in similar_faces:
251
  temp_frame = swap_face(source_face, similar_face, temp_frame)
@@ -261,23 +278,25 @@ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame)
261
  return temp_frame
262
 
263
 
264
- def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
265
- source_face = get_one_face(read_static_image(source_path))
266
- reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
 
267
  for temp_frame_path in temp_frame_paths:
268
  temp_frame = read_image(temp_frame_path)
269
- result_frame = process_frame(source_face, reference_face, temp_frame)
270
  write_image(temp_frame_path, result_frame)
271
  update_progress()
272
 
273
 
274
- def process_image(source_path : str, target_path : str, output_path : str) -> None:
275
- source_face = get_one_face(read_static_image(source_path))
 
 
276
  target_frame = read_static_image(target_path)
277
- reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_selector_mode else None
278
- result_frame = process_frame(source_face, reference_face, target_frame)
279
  write_image(output_path, result_frame)
280
 
281
 
282
- def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
283
- frame_processors.multi_process_frames(source_path, temp_frame_paths, process_frames)
 
1
+ from typing import Any, List, Literal, Optional
2
  from argparse import ArgumentParser
3
  import threading
4
  import numpy
 
8
 
9
  import facefusion.globals
10
  import facefusion.processors.frame.core as frame_processors
11
+ from facefusion import logger, wording
12
+ from facefusion.face_analyser import get_one_face, get_average_face, get_many_faces, find_similar_faces, clear_face_analyser
13
  from facefusion.face_helper import warp_face, paste_back
14
+ from facefusion.face_store import get_reference_faces
15
  from facefusion.content_analyser import clear_content_analyser
16
+ from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel, Embedding
17
+ from facefusion.filesystem import is_file, is_image, are_images, is_video, resolve_relative_path
18
+ from facefusion.download import conditional_download, is_download_done
19
+ from facefusion.vision import read_image, read_static_image, read_static_images, write_image
20
  from facefusion.processors.frame import globals as frame_processors_globals
21
  from facefusion.processors.frame import choices as frame_processors_choices
22
+ from facefusion.face_masker import create_static_box_mask, create_occlusion_mask, create_region_mask, clear_face_occluder, clear_face_parser
23
 
24
  FRAME_PROCESSOR = None
25
  MODEL_MATRIX = None
26
  THREAD_LOCK : threading.Lock = threading.Lock()
27
+ NAME = __name__.upper()
28
+ MODELS : ModelSet =\
29
  {
30
+ 'blendswap_256':
31
  {
32
+ 'type': 'blendswap',
33
+ 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/blendswap_256.onnx',
34
+ 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx'),
35
+ 'template': 'ffhq_512',
36
  'size': (512, 256),
37
  'mean': [ 0.0, 0.0, 0.0 ],
38
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
 
42
  'type': 'inswapper',
43
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx',
44
  'path': resolve_relative_path('../.assets/models/inswapper_128.onnx'),
45
+ 'template': 'arcface_128_v2',
46
  'size': (128, 128),
47
  'mean': [ 0.0, 0.0, 0.0 ],
48
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
 
52
  'type': 'inswapper',
53
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128_fp16.onnx',
54
  'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx'),
55
+ 'template': 'arcface_128_v2',
56
  'size': (128, 128),
57
  'mean': [ 0.0, 0.0, 0.0 ],
58
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
 
62
  'type': 'simswap',
63
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_256.onnx',
64
  'path': resolve_relative_path('../.assets/models/simswap_256.onnx'),
65
+ 'template': 'arcface_112_v1',
66
  'size': (112, 256),
67
  'mean': [ 0.485, 0.456, 0.406 ],
68
  'standard_deviation': [ 0.229, 0.224, 0.225 ]
 
72
  'type': 'simswap',
73
  'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/simswap_512_unofficial.onnx',
74
  'path': resolve_relative_path('../.assets/models/simswap_512_unofficial.onnx'),
75
+ 'template': 'arcface_112_v1',
76
  'size': (112, 512),
77
  'mean': [ 0.0, 0.0, 0.0 ],
78
  'standard_deviation': [ 1.0, 1.0, 1.0 ]
 
132
 
133
 
134
  def register_args(program : ArgumentParser) -> None:
135
+ program.add_argument('--face-swapper-model', help = wording.get('frame_processor_model_help'), default = 'inswapper_128', choices = frame_processors_choices.face_swapper_models)
136
 
137
 
138
  def apply_args(program : ArgumentParser) -> None:
139
  args = program.parse_args()
140
  frame_processors_globals.face_swapper_model = args.face_swapper_model
141
+ if args.face_swapper_model == 'blendswap_256':
142
+ facefusion.globals.face_recognizer_model = 'arcface_blendswap'
143
  if args.face_swapper_model == 'inswapper_128' or args.face_swapper_model == 'inswapper_128_fp16':
144
  facefusion.globals.face_recognizer_model = 'arcface_inswapper'
145
  if args.face_swapper_model == 'simswap_256' or args.face_swapper_model == 'simswap_512_unofficial':
 
158
  model_url = get_options('model').get('url')
159
  model_path = get_options('model').get('path')
160
  if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
161
+ logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
162
  return False
163
  elif not is_file(model_path):
164
+ logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
165
  return False
166
+ if not are_images(facefusion.globals.source_paths):
167
+ logger.error(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
168
  return False
169
+ for source_frame in read_static_images(facefusion.globals.source_paths):
170
+ if not get_one_face(source_frame):
171
+ logger.error(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
172
+ return False
173
  if mode in [ 'output', 'preview' ] and not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
174
+ logger.error(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
175
  return False
176
  if mode == 'output' and not facefusion.globals.output_path:
177
+ logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
178
  return False
179
  return True
180
 
 
184
  clear_model_matrix()
185
  clear_face_analyser()
186
  clear_content_analyser()
187
+ clear_face_occluder()
188
+ clear_face_parser()
189
  read_static_image.cache_clear()
190
 
191
 
 
195
  model_size = get_options('model').get('size')
196
  model_type = get_options('model').get('type')
197
  crop_frame, affine_matrix = warp_face(temp_frame, target_face.kps, model_template, model_size)
198
+ crop_mask_list = []
199
+ if 'box' in facefusion.globals.face_mask_types:
200
+ crop_mask_list.append(create_static_box_mask(crop_frame.shape[:2][::-1], facefusion.globals.face_mask_blur, facefusion.globals.face_mask_padding))
201
+ if 'occlusion' in facefusion.globals.face_mask_types:
202
+ crop_mask_list.append(create_occlusion_mask(crop_frame))
203
  crop_frame = prepare_crop_frame(crop_frame)
204
  frame_processor_inputs = {}
205
  for frame_processor_input in frame_processor.get_inputs():
206
  if frame_processor_input.name == 'source':
207
+ if model_type == 'blendswap':
208
  frame_processor_inputs[frame_processor_input.name] = prepare_source_frame(source_face)
209
  else:
210
  frame_processor_inputs[frame_processor_input.name] = prepare_source_embedding(source_face)
 
212
  frame_processor_inputs[frame_processor_input.name] = crop_frame
213
  crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
214
  crop_frame = normalize_crop_frame(crop_frame)
215
+ if 'region' in facefusion.globals.face_mask_types:
216
+ crop_mask_list.append(create_region_mask(crop_frame, facefusion.globals.face_mask_regions))
217
+ crop_mask = numpy.minimum.reduce(crop_mask_list).clip(0, 1)
218
+ temp_frame = paste_back(temp_frame, crop_frame, crop_mask, affine_matrix)
219
  return temp_frame
220
 
221
 
222
+ def prepare_source_frame(source_face : Face) -> Frame:
223
+ source_frame = read_static_image(facefusion.globals.source_paths[0])
224
+ source_frame, _ = warp_face(source_frame, source_face.kps, 'arcface_112_v2', (112, 112))
225
  source_frame = source_frame[:, :, ::-1] / 255.0
226
  source_frame = source_frame.transpose(2, 0, 1)
227
  source_frame = numpy.expand_dims(source_frame, axis = 0).astype(numpy.float32)
 
256
  return crop_frame
257
 
258
 
259
+ def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
260
+ return swap_face(source_face, target_face, temp_frame)
261
+
262
+
263
+ def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
264
  if 'reference' in facefusion.globals.face_selector_mode:
265
+ similar_faces = find_similar_faces(temp_frame, reference_faces, facefusion.globals.reference_face_distance)
266
  if similar_faces:
267
  for similar_face in similar_faces:
268
  temp_frame = swap_face(source_face, similar_face, temp_frame)
 
278
  return temp_frame
279
 
280
 
281
+ def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
282
+ source_frames = read_static_images(source_paths)
283
+ source_face = get_average_face(source_frames)
284
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
285
  for temp_frame_path in temp_frame_paths:
286
  temp_frame = read_image(temp_frame_path)
287
+ result_frame = process_frame(source_face, reference_faces, temp_frame)
288
  write_image(temp_frame_path, result_frame)
289
  update_progress()
290
 
291
 
292
+ def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
293
+ source_frames = read_static_images(source_paths)
294
+ source_face = get_average_face(source_frames)
295
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
296
  target_frame = read_static_image(target_path)
297
+ result_frame = process_frame(source_face, reference_faces, target_frame)
 
298
  write_image(output_path, result_frame)
299
 
300
 
301
+ def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
302
+ frame_processors.multi_process_frames(source_paths, temp_frame_paths, process_frames)
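
The rewritten swap_face collects one float mask per enabled mask type ('box', 'occlusion', 'region') and collapses them with a per-pixel minimum, which acts as a logical AND across masks before paste_back. A minimal sketch of that composition, assuming same-shaped masks in the 0..1 range as produced by facefusion.face_masker:

import numpy

def combine_crop_masks(crop_mask_list : list) -> numpy.ndarray:
	# a pixel survives only to the degree that every mask keeps it;
	# clip guards against values drifting outside a valid alpha range
	return numpy.minimum.reduce(crop_mask_list).clip(0, 1)

box_mask = numpy.ones((256, 256), dtype = numpy.float32)
occlusion_mask = numpy.ones((256, 256), dtype = numpy.float32)
crop_mask = combine_crop_masks([ box_mask, occlusion_mask ])
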
facefusion/processors/frame/modules/frame_enhancer.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Any, List, Dict, Literal, Optional
2
  from argparse import ArgumentParser
3
  import threading
4
  import cv2
@@ -7,11 +7,14 @@ from realesrgan import RealESRGANer
7
 
8
  import facefusion.globals
9
  import facefusion.processors.frame.core as frame_processors
10
- from facefusion import wording
11
  from facefusion.face_analyser import clear_face_analyser
12
  from facefusion.content_analyser import clear_content_analyser
13
- from facefusion.typing import Frame, Face, Update_Process, ProcessMode, ModelValue, OptionsWithModel
14
- from facefusion.utilities import conditional_download, resolve_relative_path, is_file, is_download_done, map_device, create_metavar, update_status
15
  from facefusion.vision import read_image, read_static_image, write_image
16
  from facefusion.processors.frame import globals as frame_processors_globals
17
  from facefusion.processors.frame import choices as frame_processors_choices
@@ -19,8 +22,8 @@ from facefusion.processors.frame import choices as frame_processors_choices
19
  FRAME_PROCESSOR = None
20
  THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
21
  THREAD_LOCK : threading.Lock = threading.Lock()
22
- NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER'
23
- MODELS: Dict[str, ModelValue] =\
24
  {
25
  'real_esrgan_x2plus':
26
  {
@@ -88,8 +91,8 @@ def set_options(key : Literal['model'], value : Any) -> None:
88
 
89
 
90
  def register_args(program : ArgumentParser) -> None:
91
- program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), dest = 'frame_enhancer_model', default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
92
- program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), dest = 'frame_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
93
 
94
 
95
  def apply_args(program : ArgumentParser) -> None:
@@ -110,13 +113,13 @@ def pre_process(mode : ProcessMode) -> bool:
110
  model_url = get_options('model').get('url')
111
  model_path = get_options('model').get('path')
112
  if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
113
- update_status(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
114
  return False
115
  elif not is_file(model_path):
116
- update_status(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
117
  return False
118
  if mode == 'output' and not facefusion.globals.output_path:
119
- update_status(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
120
  return False
121
  return True
122
 
@@ -143,11 +146,15 @@ def blend_frame(temp_frame : Frame, paste_frame : Frame) -> Frame:
143
  return temp_frame
144
 
145
 
146
- def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
147
  return enhance_frame(temp_frame)
148
 
149
 
150
- def process_frames(source_path : str, temp_frame_paths : List[str], update_progress : Update_Process) -> None:
151
  for temp_frame_path in temp_frame_paths:
152
  temp_frame = read_image(temp_frame_path)
153
  result_frame = process_frame(None, None, temp_frame)
@@ -155,11 +162,11 @@ def process_frames(source_path : str, temp_frame_paths : List[str], update_progr
155
  update_progress()
156
 
157
 
158
- def process_image(source_path : str, target_path : str, output_path : str) -> None:
159
  target_frame = read_static_image(target_path)
160
  result = process_frame(None, None, target_frame)
161
  write_image(output_path, result)
162
 
163
 
164
- def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
165
  frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
 
1
+ from typing import Any, List, Literal, Optional
2
  from argparse import ArgumentParser
3
  import threading
4
  import cv2
 
7
 
8
  import facefusion.globals
9
  import facefusion.processors.frame.core as frame_processors
10
+ from facefusion import logger, wording
11
  from facefusion.face_analyser import clear_face_analyser
12
  from facefusion.content_analyser import clear_content_analyser
13
+ from facefusion.typing import Face, FaceSet, Frame, Update_Process, ProcessMode, ModelSet, OptionsWithModel
14
+ from facefusion.common_helper import create_metavar
15
+ from facefusion.execution_helper import map_device
16
+ from facefusion.filesystem import is_file, resolve_relative_path
17
+ from facefusion.download import conditional_download, is_download_done
18
  from facefusion.vision import read_image, read_static_image, write_image
19
  from facefusion.processors.frame import globals as frame_processors_globals
20
  from facefusion.processors.frame import choices as frame_processors_choices
 
22
  FRAME_PROCESSOR = None
23
  THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
24
  THREAD_LOCK : threading.Lock = threading.Lock()
25
+ NAME = __name__.upper()
26
+ MODELS : ModelSet =\
27
  {
28
  'real_esrgan_x2plus':
29
  {
 
91
 
92
 
93
  def register_args(program : ArgumentParser) -> None:
94
+ program.add_argument('--frame-enhancer-model', help = wording.get('frame_processor_model_help'), default = 'real_esrgan_x2plus', choices = frame_processors_choices.frame_enhancer_models)
95
+ program.add_argument('--frame-enhancer-blend', help = wording.get('frame_processor_blend_help'), type = int, default = 80, choices = frame_processors_choices.frame_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.frame_enhancer_blend_range))
96
 
97
 
98
  def apply_args(program : ArgumentParser) -> None:
 
113
  model_url = get_options('model').get('url')
114
  model_path = get_options('model').get('path')
115
  if not facefusion.globals.skip_download and not is_download_done(model_url, model_path):
116
+ logger.error(wording.get('model_download_not_done') + wording.get('exclamation_mark'), NAME)
117
  return False
118
  elif not is_file(model_path):
119
+ logger.error(wording.get('model_file_not_present') + wording.get('exclamation_mark'), NAME)
120
  return False
121
  if mode == 'output' and not facefusion.globals.output_path:
122
+ logger.error(wording.get('select_file_or_directory_output') + wording.get('exclamation_mark'), NAME)
123
  return False
124
  return True
125
 
 
146
  return temp_frame
147
 
148
 
149
+ def get_reference_frame(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
150
+ pass
151
+
152
+
153
+ def process_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
154
  return enhance_frame(temp_frame)
155
 
156
 
157
+ def process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
158
  for temp_frame_path in temp_frame_paths:
159
  temp_frame = read_image(temp_frame_path)
160
  result_frame = process_frame(None, None, temp_frame)
 
162
  update_progress()
163
 
164
 
165
+ def process_image(source_paths : List[str], target_path : str, output_path : str) -> None:
166
  target_frame = read_static_image(target_path)
167
  result = process_frame(None, None, target_frame)
168
  write_image(output_path, result)
169
 
170
 
171
+ def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None:
172
  frame_processors.multi_process_frames(None, temp_frame_paths, process_frames)
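
Like the swapper, the enhancer now conforms to the updated Process_Frames contract, which passes a list of source paths instead of a single path; processors that ignore sources simply receive the list and forward None. A hedged sketch of that callback shape (names here are stand-ins, not facefusion API):

from typing import Callable, List

Update_Process = Callable[[], None]

def noop_process_frames(source_paths : List[str], temp_frame_paths : List[str], update_progress : Update_Process) -> None:
	# a real processor would read, transform and rewrite each frame here
	for temp_frame_path in temp_frame_paths:
		update_progress()
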
facefusion/processors/frame/typings.py CHANGED
@@ -1,6 +1,6 @@
1
  from typing import Literal
2
 
3
- FaceSwapperModel = Literal['blendface_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
4
  FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
5
  FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
6
 
 
1
  from typing import Literal
2
 
3
+ FaceSwapperModel = Literal['blendswap_256', 'inswapper_128', 'inswapper_128_fp16', 'simswap_256', 'simswap_512_unofficial']
4
  FaceEnhancerModel = Literal['codeformer', 'gfpgan_1.2', 'gfpgan_1.3', 'gfpgan_1.4', 'gpen_bfr_256', 'gpen_bfr_512', 'restoreformer']
5
  FrameEnhancerModel = Literal['real_esrgan_x2plus', 'real_esrgan_x4plus', 'real_esrnet_x4plus']
6
 
facefusion/typing.py CHANGED
@@ -1,5 +1,5 @@
1
- from collections import namedtuple
2
  from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
 
3
  import numpy
4
 
5
  Bbox = numpy.ndarray[Any, Any]
@@ -16,25 +16,35 @@ Face = namedtuple('Face',
16
  'gender',
17
  'age'
18
  ])
19
  Frame = numpy.ndarray[Any, Any]
 
20
  Matrix = numpy.ndarray[Any, Any]
21
  Padding = Tuple[int, int, int, int]
22
 
23
  Update_Process = Callable[[], None]
24
- Process_Frames = Callable[[str, List[str], Update_Process], None]
25
-
26
- Template = Literal['arcface_v1', 'arcface_v2', 'ffhq']
27
  ProcessMode = Literal['output', 'preview', 'stream']
28
  FaceSelectorMode = Literal['reference', 'one', 'many']
29
  FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
30
  FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
31
  FaceAnalyserGender = Literal['male', 'female']
32
  FaceDetectorModel = Literal['retinaface', 'yunet']
33
- FaceRecognizerModel = Literal['arcface_blendface', 'arcface_inswapper', 'arcface_simswap']
34
  TempFrameFormat = Literal['jpg', 'png']
35
  OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
36
 
37
  ModelValue = Dict[str, Any]
 
38
  OptionsWithModel = TypedDict('OptionsWithModel',
39
  {
40
  'model' : ModelValue
 
 
1
  from typing import Any, Literal, Callable, List, Tuple, Dict, TypedDict
2
+ from collections import namedtuple
3
  import numpy
4
 
5
  Bbox = numpy.ndarray[Any, Any]
 
16
  'gender',
17
  'age'
18
  ])
19
+ FaceSet = Dict[str, List[Face]]
20
+ FaceStore = TypedDict('FaceStore',
21
+ {
22
+ 'static_faces' : FaceSet,
23
+ 'reference_faces': FaceSet
24
+ })
25
  Frame = numpy.ndarray[Any, Any]
26
+ Mask = numpy.ndarray[Any, Any]
27
  Matrix = numpy.ndarray[Any, Any]
28
  Padding = Tuple[int, int, int, int]
29
 
30
  Update_Process = Callable[[], None]
31
+ Process_Frames = Callable[[List[str], List[str], Update_Process], None]
32
+ LogLevel = Literal['error', 'warn', 'info', 'debug']
33
+ Template = Literal['arcface_112_v1', 'arcface_112_v2', 'arcface_128_v2', 'ffhq_512']
34
  ProcessMode = Literal['output', 'preview', 'stream']
35
  FaceSelectorMode = Literal['reference', 'one', 'many']
36
  FaceAnalyserOrder = Literal['left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small', 'best-worst', 'worst-best']
37
  FaceAnalyserAge = Literal['child', 'teen', 'adult', 'senior']
38
  FaceAnalyserGender = Literal['male', 'female']
39
  FaceDetectorModel = Literal['retinaface', 'yunet']
40
+ FaceRecognizerModel = Literal['arcface_blendswap', 'arcface_inswapper', 'arcface_simswap']
41
+ FaceMaskType = Literal['box', 'occlusion', 'region']
42
+ FaceMaskRegion = Literal['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye', 'eye-glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip']
43
  TempFrameFormat = Literal['jpg', 'png']
44
  OutputVideoEncoder = Literal['libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc']
45
 
46
  ModelValue = Dict[str, Any]
47
+ ModelSet = Dict[str, ModelValue]
48
  OptionsWithModel = TypedDict('OptionsWithModel',
49
  {
50
  'model' : ModelValue
facefusion/uis/components/benchmark.py CHANGED
@@ -7,11 +7,12 @@ import gradio
7
  import facefusion.globals
8
  from facefusion import wording
9
  from facefusion.face_analyser import get_face_analyser
10
- from facefusion.face_cache import clear_faces_cache
11
  from facefusion.processors.frame.core import get_frame_processors_modules
12
  from facefusion.vision import count_video_frame_total
13
  from facefusion.core import limit_resources, conditional_process
14
- from facefusion.utilities import normalize_output_path, clear_temp
 
15
  from facefusion.uis.core import get_ui_component
16
 
17
  BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
@@ -75,7 +76,7 @@ def listen() -> None:
75
 
76
 
77
  def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
78
- facefusion.globals.source_path = '.assets/examples/source.jpg'
79
  target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
80
  benchmark_results = []
81
  if target_paths:
@@ -94,7 +95,7 @@ def pre_process() -> None:
94
 
95
 
96
  def post_process() -> None:
97
- clear_faces_cache()
98
 
99
 
100
  def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
@@ -102,7 +103,7 @@ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
102
  total_fps = 0.0
103
  for i in range(benchmark_cycles):
104
  facefusion.globals.target_path = target_path
105
- facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, tempfile.gettempdir())
106
  video_frame_total = count_video_frame_total(facefusion.globals.target_path)
107
  start_time = time.perf_counter()
108
  conditional_process()
 
7
  import facefusion.globals
8
  from facefusion import wording
9
  from facefusion.face_analyser import get_face_analyser
10
+ from facefusion.face_store import clear_static_faces
11
  from facefusion.processors.frame.core import get_frame_processors_modules
12
  from facefusion.vision import count_video_frame_total
13
  from facefusion.core import limit_resources, conditional_process
14
+ from facefusion.normalizer import normalize_output_path
15
+ from facefusion.filesystem import clear_temp
16
  from facefusion.uis.core import get_ui_component
17
 
18
  BENCHMARK_RESULTS_DATAFRAME : Optional[gradio.Dataframe] = None
 
76
 
77
 
78
  def start(benchmark_runs : List[str], benchmark_cycles : int) -> Generator[List[Any], None, None]:
79
+ facefusion.globals.source_paths = [ '.assets/examples/source.jpg' ]
80
  target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ]
81
  benchmark_results = []
82
  if target_paths:
 
95
 
96
 
97
  def post_process() -> None:
98
+ clear_static_faces()
99
 
100
 
101
  def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
 
103
  total_fps = 0.0
104
  for i in range(benchmark_cycles):
105
  facefusion.globals.target_path = target_path
106
+ facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, tempfile.gettempdir())
107
  video_frame_total = count_video_frame_total(facefusion.globals.target_path)
108
  start_time = time.perf_counter()
109
  conditional_process()
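
The benchmark remains a plain wall-clock measurement: each cycle re-runs the full pipeline on the target and divides the known frame total by the elapsed time. The essential pattern, with the facefusion-specific call stubbed out:

import time

def measure_fps(run, video_frame_total : int) -> float:
	# run() stands in for conditional_process(); perf_counter is a
	# monotonic clock, so it is safe for duration measurements
	start_time = time.perf_counter()
	run()
	process_time = time.perf_counter() - start_time
	return video_frame_total / process_time
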
facefusion/uis/components/execution.py CHANGED
@@ -6,7 +6,7 @@ import facefusion.globals
6
  from facefusion import wording
7
  from facefusion.face_analyser import clear_face_analyser
8
  from facefusion.processors.frame.core import clear_frame_processors_modules
9
- from facefusion.utilities import encode_execution_providers, decode_execution_providers
10
 
11
  EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
12
 
 
6
  from facefusion import wording
7
  from facefusion.face_analyser import clear_face_analyser
8
  from facefusion.processors.frame.core import clear_frame_processors_modules
9
+ from facefusion.execution_helper import encode_execution_providers, decode_execution_providers
10
 
11
  EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
12
 
facefusion/uis/components/face_analyser.py CHANGED
@@ -53,7 +53,7 @@ def render() -> None:
53
  FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
54
  label = wording.get('face_detector_score_slider_label'),
55
  value = facefusion.globals.face_detector_score,
56
- step =facefusion.choices.face_detector_score_range[1] - facefusion.choices.face_detector_score_range[0],
57
  minimum = facefusion.choices.face_detector_score_range[0],
58
  maximum = facefusion.choices.face_detector_score_range[-1]
59
  )
 
53
  FACE_DETECTOR_SCORE_SLIDER = gradio.Slider(
54
  label = wording.get('face_detector_score_slider_label'),
55
  value = facefusion.globals.face_detector_score,
56
+ step = facefusion.choices.face_detector_score_range[1] - facefusion.choices.face_detector_score_range[0],
57
  minimum = facefusion.choices.face_detector_score_range[0],
58
  maximum = facefusion.choices.face_detector_score_range[-1]
59
  )
facefusion/uis/components/face_masker.py ADDED
@@ -0,0 +1,123 @@
1
+ from typing import Optional, Tuple, List
2
+ import gradio
3
+
4
+ import facefusion.globals
5
+ import facefusion.choices
6
+ from facefusion import wording
7
+ from facefusion.typing import FaceMaskType, FaceMaskRegion
8
+ from facefusion.uis.core import register_ui_component
9
+
10
+ FACE_MASK_TYPES_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
11
+ FACE_MASK_BLUR_SLIDER : Optional[gradio.Slider] = None
12
+ FACE_MASK_BOX_GROUP : Optional[gradio.Group] = None
13
+ FACE_MASK_REGION_GROUP : Optional[gradio.Group] = None
14
+ FACE_MASK_PADDING_TOP_SLIDER : Optional[gradio.Slider] = None
15
+ FACE_MASK_PADDING_RIGHT_SLIDER : Optional[gradio.Slider] = None
16
+ FACE_MASK_PADDING_BOTTOM_SLIDER : Optional[gradio.Slider] = None
17
+ FACE_MASK_PADDING_LEFT_SLIDER : Optional[gradio.Slider] = None
18
+ FACE_MASK_REGION_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
19
+
20
+
21
+ def render() -> None:
22
+ global FACE_MASK_TYPES_CHECKBOX_GROUP
23
+ global FACE_MASK_BLUR_SLIDER
24
+ global FACE_MASK_BOX_GROUP
25
+ global FACE_MASK_REGION_GROUP
26
+ global FACE_MASK_PADDING_TOP_SLIDER
27
+ global FACE_MASK_PADDING_RIGHT_SLIDER
28
+ global FACE_MASK_PADDING_BOTTOM_SLIDER
29
+ global FACE_MASK_PADDING_LEFT_SLIDER
30
+ global FACE_MASK_REGION_CHECKBOX_GROUP
31
+
32
+ has_box_mask = 'box' in facefusion.globals.face_mask_types
33
+ has_region_mask = 'region' in facefusion.globals.face_mask_types
34
+ FACE_MASK_TYPES_CHECKBOX_GROUP = gradio.CheckboxGroup(
35
+ label = wording.get('face_mask_types_checkbox_group_label'),
36
+ choices = facefusion.choices.face_mask_types,
37
+ value = facefusion.globals.face_mask_types
38
+ )
39
+ with gradio.Group(visible = has_box_mask) as FACE_MASK_BOX_GROUP:
40
+ FACE_MASK_BLUR_SLIDER = gradio.Slider(
41
+ label = wording.get('face_mask_blur_slider_label'),
42
+ step = facefusion.choices.face_mask_blur_range[1] - facefusion.choices.face_mask_blur_range[0],
43
+ minimum = facefusion.choices.face_mask_blur_range[0],
44
+ maximum = facefusion.choices.face_mask_blur_range[-1],
45
+ value = facefusion.globals.face_mask_blur
46
+ )
47
+ with gradio.Row():
48
+ FACE_MASK_PADDING_TOP_SLIDER = gradio.Slider(
49
+ label = wording.get('face_mask_padding_top_slider_label'),
50
+ step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
51
+ minimum = facefusion.choices.face_mask_padding_range[0],
52
+ maximum = facefusion.choices.face_mask_padding_range[-1],
53
+ value = facefusion.globals.face_mask_padding[0]
54
+ )
55
+ FACE_MASK_PADDING_RIGHT_SLIDER = gradio.Slider(
56
+ label = wording.get('face_mask_padding_right_slider_label'),
57
+ step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
58
+ minimum = facefusion.choices.face_mask_padding_range[0],
59
+ maximum = facefusion.choices.face_mask_padding_range[-1],
60
+ value = facefusion.globals.face_mask_padding[1]
61
+ )
62
+ with gradio.Row():
63
+ FACE_MASK_PADDING_BOTTOM_SLIDER = gradio.Slider(
64
+ label = wording.get('face_mask_padding_bottom_slider_label'),
65
+ step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
66
+ minimum = facefusion.choices.face_mask_padding_range[0],
67
+ maximum = facefusion.choices.face_mask_padding_range[-1],
68
+ value = facefusion.globals.face_mask_padding[2]
69
+ )
70
+ FACE_MASK_PADDING_LEFT_SLIDER = gradio.Slider(
71
+ label = wording.get('face_mask_padding_left_slider_label'),
72
+ step = facefusion.choices.face_mask_padding_range[1] - facefusion.choices.face_mask_padding_range[0],
73
+ minimum = facefusion.choices.face_mask_padding_range[0],
74
+ maximum = facefusion.choices.face_mask_padding_range[-1],
75
+ value = facefusion.globals.face_mask_padding[3]
76
+ )
77
+ with gradio.Row():
78
+ FACE_MASK_REGION_CHECKBOX_GROUP = gradio.CheckboxGroup(
79
+ label = wording.get('face_mask_region_checkbox_group_label'),
80
+ choices = facefusion.choices.face_mask_regions,
81
+ value = facefusion.globals.face_mask_regions,
82
+ visible = has_region_mask
83
+ )
84
+ register_ui_component('face_mask_types_checkbox_group', FACE_MASK_TYPES_CHECKBOX_GROUP)
85
+ register_ui_component('face_mask_blur_slider', FACE_MASK_BLUR_SLIDER)
86
+ register_ui_component('face_mask_padding_top_slider', FACE_MASK_PADDING_TOP_SLIDER)
87
+ register_ui_component('face_mask_padding_right_slider', FACE_MASK_PADDING_RIGHT_SLIDER)
88
+ register_ui_component('face_mask_padding_bottom_slider', FACE_MASK_PADDING_BOTTOM_SLIDER)
89
+ register_ui_component('face_mask_padding_left_slider', FACE_MASK_PADDING_LEFT_SLIDER)
90
+ register_ui_component('face_mask_region_checkbox_group', FACE_MASK_REGION_CHECKBOX_GROUP)
91
+
92
+
93
+ def listen() -> None:
94
+ FACE_MASK_TYPES_CHECKBOX_GROUP.change(update_face_mask_type, inputs = FACE_MASK_TYPES_CHECKBOX_GROUP, outputs = [ FACE_MASK_TYPES_CHECKBOX_GROUP, FACE_MASK_BOX_GROUP, FACE_MASK_REGION_CHECKBOX_GROUP ])
95
+ FACE_MASK_BLUR_SLIDER.change(update_face_mask_blur, inputs = FACE_MASK_BLUR_SLIDER)
96
+ FACE_MASK_REGION_CHECKBOX_GROUP.change(update_face_mask_regions, inputs = FACE_MASK_REGION_CHECKBOX_GROUP, outputs = FACE_MASK_REGION_CHECKBOX_GROUP)
97
+ face_mask_padding_sliders = [ FACE_MASK_PADDING_TOP_SLIDER, FACE_MASK_PADDING_RIGHT_SLIDER, FACE_MASK_PADDING_BOTTOM_SLIDER, FACE_MASK_PADDING_LEFT_SLIDER ]
98
+ for face_mask_padding_slider in face_mask_padding_sliders:
99
+ face_mask_padding_slider.change(update_face_mask_padding, inputs = face_mask_padding_sliders)
100
+
101
+
102
+ def update_face_mask_type(face_mask_types : List[FaceMaskType]) -> Tuple[gradio.CheckboxGroup, gradio.Group, gradio.CheckboxGroup]:
103
+ if not face_mask_types:
104
+ face_mask_types = facefusion.choices.face_mask_types
105
+ facefusion.globals.face_mask_types = face_mask_types
106
+ has_box_mask = 'box' in face_mask_types
107
+ has_region_mask = 'region' in face_mask_types
108
+ return gradio.CheckboxGroup(value = face_mask_types), gradio.Group(visible = has_box_mask), gradio.CheckboxGroup(visible = has_region_mask)
109
+
110
+
111
+ def update_face_mask_blur(face_mask_blur : float) -> None:
112
+ facefusion.globals.face_mask_blur = face_mask_blur
113
+
114
+
115
+ def update_face_mask_padding(face_mask_padding_top : int, face_mask_padding_right : int, face_mask_padding_bottom : int, face_mask_padding_left : int) -> None:
116
+ facefusion.globals.face_mask_padding = (face_mask_padding_top, face_mask_padding_right, face_mask_padding_bottom, face_mask_padding_left)
117
+
118
+
119
+ def update_face_mask_regions(face_mask_regions : List[FaceMaskRegion]) -> gradio.CheckboxGroup:
120
+ if not face_mask_regions:
121
+ face_mask_regions = facefusion.choices.face_mask_regions
122
+ facefusion.globals.face_mask_regions = face_mask_regions
123
+ return gradio.CheckboxGroup(value = face_mask_regions)
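
Worth noting in this new component: the four padding sliders collapse into a single tuple in CSS order, (top, right, bottom, left), which is the order face_mask_padding is unpacked elsewhere. For example:

face_mask_padding = (10, 0, 10, 0)	# pad only top and bottom
top, right, bottom, left = face_mask_padding
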
facefusion/uis/components/face_selector.py CHANGED
@@ -5,12 +5,11 @@ import gradio
5
  import facefusion.globals
6
  import facefusion.choices
7
  from facefusion import wording
8
- from facefusion.face_cache import clear_faces_cache
9
  from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color
10
  from facefusion.face_analyser import get_many_faces
11
- from facefusion.face_reference import clear_face_reference
12
  from facefusion.typing import Frame, FaceSelectorMode
13
- from facefusion.utilities import is_image, is_video
14
  from facefusion.uis.core import get_ui_component, register_ui_component
15
  from facefusion.uis.typing import ComponentName
16
 
@@ -111,8 +110,8 @@ def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gr
111
 
112
 
113
  def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
114
- clear_face_reference()
115
- clear_faces_cache()
116
  update_reference_face_position(event.index)
117
  return update_reference_position_gallery()
118
 
@@ -130,8 +129,8 @@ def update_reference_frame_number(reference_frame_number : int) -> None:
130
 
131
 
132
  def clear_and_update_reference_position_gallery() -> gradio.Gallery:
133
- clear_face_reference()
134
- clear_faces_cache()
135
  return update_reference_position_gallery()
136
 
137
 
 
5
  import facefusion.globals
6
  import facefusion.choices
7
  from facefusion import wording
8
+ from facefusion.face_store import clear_static_faces, clear_reference_faces
9
  from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color
10
  from facefusion.face_analyser import get_many_faces
 
11
  from facefusion.typing import Frame, FaceSelectorMode
12
+ from facefusion.filesystem import is_image, is_video
13
  from facefusion.uis.core import get_ui_component, register_ui_component
14
  from facefusion.uis.typing import ComponentName
15
 
 
110
 
111
 
112
  def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
113
+ clear_reference_faces()
114
+ clear_static_faces()
115
  update_reference_face_position(event.index)
116
  return update_reference_position_gallery()
117
 
 
129
 
130
 
131
  def clear_and_update_reference_position_gallery() -> gradio.Gallery:
132
+ clear_reference_faces()
133
+ clear_static_faces()
134
  return update_reference_position_gallery()
135
 
136
 
facefusion/uis/components/frame_processors.py CHANGED
@@ -4,7 +4,7 @@ import gradio
4
  import facefusion.globals
5
  from facefusion import wording
6
  from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
7
- from facefusion.utilities import list_module_names
8
  from facefusion.uis.core import register_ui_component
9
 
10
  FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
 
4
  import facefusion.globals
5
  from facefusion import wording
6
  from facefusion.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules
7
+ from facefusion.filesystem import list_module_names
8
  from facefusion.uis.core import register_ui_component
9
 
10
  FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
facefusion/uis/components/frame_processors_options.py CHANGED
@@ -87,8 +87,8 @@ def listen() -> None:
87
 
88
  def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
89
  frame_processors_globals.face_swapper_model = face_swapper_model
90
- if face_swapper_model == 'blendface_256':
91
- facefusion.globals.face_recognizer_model = 'arcface_blendface'
92
  if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16':
93
  facefusion.globals.face_recognizer_model = 'arcface_inswapper'
94
  if face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial':
 
87
 
88
  def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
89
  frame_processors_globals.face_swapper_model = face_swapper_model
90
+ if face_swapper_model == 'blendswap_256':
91
+ facefusion.globals.face_recognizer_model = 'arcface_blendswap'
92
  if face_swapper_model == 'inswapper_128' or face_swapper_model == 'inswapper_128_fp16':
93
  facefusion.globals.face_recognizer_model = 'arcface_inswapper'
94
  if face_swapper_model == 'simswap_256' or face_swapper_model == 'simswap_512_unofficial':
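
This model-to-recognizer mapping is duplicated in the face swapper's apply_args; a hypothetical table-driven equivalent (not part of the commit) makes the pairing explicit:

FACE_RECOGNIZER_BY_SWAPPER = \
{
	'blendswap_256': 'arcface_blendswap',
	'inswapper_128': 'arcface_inswapper',
	'inswapper_128_fp16': 'arcface_inswapper',
	'simswap_256': 'arcface_simswap',
	'simswap_512_unofficial': 'arcface_simswap'
}
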
facefusion/uis/components/output.py CHANGED
@@ -5,7 +5,8 @@ import facefusion.globals
5
  from facefusion import wording
6
  from facefusion.core import limit_resources, conditional_process
7
  from facefusion.uis.core import get_ui_component
8
- from facefusion.utilities import is_image, is_video, normalize_output_path, clear_temp
 
9
 
10
  OUTPUT_IMAGE : Optional[gradio.Image] = None
11
  OUTPUT_VIDEO : Optional[gradio.Video] = None
@@ -45,7 +46,7 @@ def listen() -> None:
45
 
46
 
47
  def start(output_path : str) -> Tuple[gradio.Image, gradio.Video]:
48
- facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, output_path)
49
  limit_resources()
50
  conditional_process()
51
  if is_image(facefusion.globals.output_path):
 
5
  from facefusion import wording
6
  from facefusion.core import limit_resources, conditional_process
7
  from facefusion.uis.core import get_ui_component
8
+ from facefusion.normalizer import normalize_output_path
9
+ from facefusion.filesystem import is_image, is_video, clear_temp
10
 
11
  OUTPUT_IMAGE : Optional[gradio.Image] = None
12
  OUTPUT_VIDEO : Optional[gradio.Video] = None
 
46
 
47
 
48
  def start(output_path : str) -> Tuple[gradio.Image, gradio.Video]:
49
+ facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_paths, facefusion.globals.target_path, output_path)
50
  limit_resources()
51
  conditional_process()
52
  if is_image(facefusion.globals.output_path):
facefusion/uis/components/output_options.py CHANGED
@@ -6,7 +6,7 @@ import facefusion.globals
6
  import facefusion.choices
7
  from facefusion import wording
8
  from facefusion.typing import OutputVideoEncoder
9
- from facefusion.utilities import is_image, is_video
10
  from facefusion.uis.typing import ComponentName
11
  from facefusion.uis.core import get_ui_component, register_ui_component
12
 
 
6
  import facefusion.choices
7
  from facefusion import wording
8
  from facefusion.typing import OutputVideoEncoder
9
+ from facefusion.filesystem import is_image, is_video
10
  from facefusion.uis.typing import ComponentName
11
  from facefusion.uis.core import get_ui_component, register_ui_component
12
 
facefusion/uis/components/preview.py CHANGED
@@ -4,15 +4,14 @@ import gradio
4
 
5
  import facefusion.globals
6
  from facefusion import wording
7
- from facefusion.core import conditional_set_face_reference
8
- from facefusion.face_cache import clear_faces_cache
9
- from facefusion.typing import Frame, Face
10
- from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_dimension, read_static_image
11
- from facefusion.face_analyser import get_one_face, clear_face_analyser
12
- from facefusion.face_reference import get_face_reference, clear_face_reference
13
  from facefusion.content_analyser import analyse_frame
14
  from facefusion.processors.frame.core import load_frame_processor_module
15
- from facefusion.utilities import is_video, is_image
16
  from facefusion.uis.typing import ComponentName
17
  from facefusion.uis.core import get_ui_component, register_ui_component
18
 
@@ -37,16 +36,17 @@ def render() -> None:
37
  'maximum': 100,
38
  'visible': False
39
  }
40
- conditional_set_face_reference()
41
- source_face = get_one_face(read_static_image(facefusion.globals.source_path))
42
- reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
 
43
  if is_image(facefusion.globals.target_path):
44
  target_frame = read_static_image(facefusion.globals.target_path)
45
- preview_frame = process_preview_frame(source_face, reference_face, target_frame)
46
  preview_image_args['value'] = normalize_frame_color(preview_frame)
47
  if is_video(facefusion.globals.target_path):
48
  temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
49
- preview_frame = process_preview_frame(source_face, reference_face, temp_frame)
50
  preview_image_args['value'] = normalize_frame_color(preview_frame)
51
  preview_image_args['visible'] = True
52
  preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
@@ -58,7 +58,7 @@ def render() -> None:
58
 
59
 
60
  def listen() -> None:
61
- PREVIEW_FRAME_SLIDER.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
62
  multi_one_component_names : List[ComponentName] =\
63
  [
64
  'source_image',
@@ -93,7 +93,6 @@ def listen() -> None:
93
  component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
94
  change_one_component_names : List[ComponentName] =\
95
  [
96
- 'frame_processors_checkbox_group',
97
  'face_debugger_items_checkbox_group',
98
  'face_enhancer_model_dropdown',
99
  'face_enhancer_blend_slider',
@@ -101,11 +100,13 @@ def listen() -> None:
101
  'frame_enhancer_blend_slider',
102
  'face_selector_mode_dropdown',
103
  'reference_face_distance_slider',
 
104
  'face_mask_blur_slider',
105
  'face_mask_padding_top_slider',
106
  'face_mask_padding_bottom_slider',
107
  'face_mask_padding_left_slider',
108
- 'face_mask_padding_right_slider'
 
109
  ]
110
  for component_name in change_one_component_names:
111
  component = get_ui_component(component_name)
@@ -113,6 +114,7 @@ def listen() -> None:
113
  component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
114
  change_two_component_names : List[ComponentName] =\
115
  [
 
116
  'face_swapper_model_dropdown',
117
  'face_detector_model_dropdown',
118
  'face_detector_size_dropdown',
@@ -126,15 +128,16 @@ def listen() -> None:
126
 
127
  def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
128
  clear_face_analyser()
129
- clear_face_reference()
130
- clear_faces_cache()
131
  return update_preview_image(frame_number)
132
 
133
 
134
  def update_preview_image(frame_number : int = 0) -> gradio.Image:
135
- conditional_set_face_reference()
136
- source_face = get_one_face(read_static_image(facefusion.globals.source_path))
137
- reference_face = get_face_reference() if 'reference' in facefusion.globals.face_selector_mode else None
 
138
  if is_image(facefusion.globals.target_path):
139
  target_frame = read_static_image(facefusion.globals.target_path)
140
  preview_frame = process_preview_frame(source_face, reference_face, target_frame)
@@ -155,7 +158,7 @@ def update_preview_frame_slider() -> gradio.Slider:
155
  return gradio.Slider(value = None, maximum = None, visible = False)
156
 
157
 
158
- def process_preview_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
159
  temp_frame = resize_frame_dimension(temp_frame, 640, 640)
160
  if analyse_frame(temp_frame):
161
  return cv2.GaussianBlur(temp_frame, (99, 99), 0)
@@ -164,7 +167,7 @@ def process_preview_frame(source_face : Face, reference_face : Face, temp_frame
164
  if frame_processor_module.pre_process('preview'):
165
  temp_frame = frame_processor_module.process_frame(
166
  source_face,
167
- reference_face,
168
  temp_frame
169
  )
170
  return temp_frame
 
4
 
5
  import facefusion.globals
6
  from facefusion import wording
7
+ from facefusion.core import conditional_append_reference_faces
8
+ from facefusion.face_store import clear_static_faces, get_reference_faces, clear_reference_faces
9
+ from facefusion.typing import Frame, Face, FaceSet
10
+ from facefusion.vision import get_video_frame, count_video_frame_total, normalize_frame_color, resize_frame_dimension, read_static_image, read_static_images
11
+ from facefusion.face_analyser import get_average_face, clear_face_analyser
 
12
  from facefusion.content_analyser import analyse_frame
13
  from facefusion.processors.frame.core import load_frame_processor_module
14
+ from facefusion.filesystem import is_image, is_video
15
  from facefusion.uis.typing import ComponentName
16
  from facefusion.uis.core import get_ui_component, register_ui_component
17
 
 
36
  'maximum': 100,
37
  'visible': False
38
  }
39
+ conditional_append_reference_faces()
40
+ source_frames = read_static_images(facefusion.globals.source_paths)
41
+ source_face = get_average_face(source_frames)
42
+ reference_faces = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
43
  if is_image(facefusion.globals.target_path):
44
  target_frame = read_static_image(facefusion.globals.target_path)
45
+ preview_frame = process_preview_frame(source_face, reference_faces, target_frame)
46
  preview_image_args['value'] = normalize_frame_color(preview_frame)
47
  if is_video(facefusion.globals.target_path):
48
  temp_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
49
+ preview_frame = process_preview_frame(source_face, reference_faces, temp_frame)
50
  preview_image_args['value'] = normalize_frame_color(preview_frame)
51
  preview_image_args['visible'] = True
52
  preview_frame_slider_args['value'] = facefusion.globals.reference_frame_number
 
58
 
59
 
60
  def listen() -> None:
61
+ PREVIEW_FRAME_SLIDER.release(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
62
  multi_one_component_names : List[ComponentName] =\
63
  [
64
  'source_image',
 
93
  component.select(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
94
  change_one_component_names : List[ComponentName] =\
95
  [
 
96
  'face_debugger_items_checkbox_group',
97
  'face_enhancer_model_dropdown',
98
  'face_enhancer_blend_slider',
 
100
  'frame_enhancer_blend_slider',
101
  'face_selector_mode_dropdown',
102
  'reference_face_distance_slider',
103
+ 'face_mask_types_checkbox_group',
104
  'face_mask_blur_slider',
105
  'face_mask_padding_top_slider',
106
  'face_mask_padding_bottom_slider',
107
  'face_mask_padding_left_slider',
108
+ 'face_mask_padding_right_slider',
109
+ 'face_mask_region_checkbox_group'
110
  ]
111
  for component_name in change_one_component_names:
112
  component = get_ui_component(component_name)
 
114
  component.change(update_preview_image, inputs = PREVIEW_FRAME_SLIDER, outputs = PREVIEW_IMAGE)
115
  change_two_component_names : List[ComponentName] =\
116
  [
117
+ 'frame_processors_checkbox_group',
118
  'face_swapper_model_dropdown',
119
  'face_detector_model_dropdown',
120
  'face_detector_size_dropdown',
 
128
 
129
  def clear_and_update_preview_image(frame_number : int = 0) -> gradio.Image:
130
  clear_face_analyser()
131
+ clear_reference_faces()
132
+ clear_static_faces()
133
  return update_preview_image(frame_number)
134
 
135
 
136
  def update_preview_image(frame_number : int = 0) -> gradio.Image:
137
+ conditional_append_reference_faces()
138
+ source_frames = read_static_images(facefusion.globals.source_paths)
139
+ source_face = get_average_face(source_frames)
140
+ reference_face = get_reference_faces() if 'reference' in facefusion.globals.face_selector_mode else None
141
  if is_image(facefusion.globals.target_path):
142
  target_frame = read_static_image(facefusion.globals.target_path)
143
  preview_frame = process_preview_frame(source_face, reference_face, target_frame)
 
158
  return gradio.Slider(value = None, maximum = None, visible = False)
159
 
160
 
161
+ def process_preview_frame(source_face : Face, reference_faces : FaceSet, temp_frame : Frame) -> Frame:
162
  temp_frame = resize_frame_dimension(temp_frame, 640, 640)
163
  if analyse_frame(temp_frame):
164
  return cv2.GaussianBlur(temp_frame, (99, 99), 0)
 
167
  if frame_processor_module.pre_process('preview'):
168
  temp_frame = frame_processor_module.process_frame(
169
  source_face,
170
+ reference_faces,
171
  temp_frame
172
  )
173
  return temp_frame
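
process_preview_frame threads the frame through every selected processor in turn, so ordering matters (a swap should land before an enhancer). Conceptually this is a left fold; a sketch with module loading abstracted away:

from functools import reduce

def apply_processors(modules : list, source_face, reference_faces, frame):
	# each module consumes the previous module's output frame
	return reduce(lambda temp_frame, module : module.process_frame(source_face, reference_faces, temp_frame), modules, frame)
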
facefusion/uis/components/source.py CHANGED
@@ -1,9 +1,10 @@
1
- from typing import Any, IO, Optional
2
  import gradio
3
 
4
  import facefusion.globals
5
  from facefusion import wording
6
- from facefusion.utilities import is_image
 
7
  from facefusion.uis.core import register_ui_component
8
 
9
  SOURCE_FILE : Optional[gradio.File] = None
@@ -14,9 +15,9 @@ def render() -> None:
14
  global SOURCE_FILE
15
  global SOURCE_IMAGE
16
 
17
- is_source_image = is_image(facefusion.globals.source_path)
18
  SOURCE_FILE = gradio.File(
19
- file_count = 'single',
20
  file_types =
21
  [
22
  '.png',
@@ -24,11 +25,12 @@ def render() -> None:
24
  '.webp'
25
  ],
26
  label = wording.get('source_file_label'),
27
- value = facefusion.globals.source_path if is_source_image else None
28
  )
 
29
  SOURCE_IMAGE = gradio.Image(
30
- value = SOURCE_FILE.value['name'] if is_source_image else None,
31
- visible = is_source_image,
32
  show_label = False
33
  )
34
  register_ui_component('source_image', SOURCE_IMAGE)
@@ -38,9 +40,10 @@ def listen() -> None:
38
  SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE)
39
 
40
 
41
- def update(file: IO[Any]) -> gradio.Image:
42
- if file and is_image(file.name):
43
- facefusion.globals.source_path = file.name
44
- return gradio.Image(value = file.name, visible = True)
45
- facefusion.globals.source_path = None
 
46
  return gradio.Image(value = None, visible = False)
 
1
+ from typing import Optional, List
2
  import gradio
3
 
4
  import facefusion.globals
5
  from facefusion import wording
6
+ from facefusion.uis.typing import File
7
+ from facefusion.filesystem import are_images
8
  from facefusion.uis.core import register_ui_component
9
 
10
  SOURCE_FILE : Optional[gradio.File] = None
 
15
  global SOURCE_FILE
16
  global SOURCE_IMAGE
17
 
18
+ are_source_images = are_images(facefusion.globals.source_paths)
19
  SOURCE_FILE = gradio.File(
20
+ file_count = 'multiple',
21
  file_types =
22
  [
23
  '.png',
 
25
  '.webp'
26
  ],
27
  label = wording.get('source_file_label'),
28
+ value = facefusion.globals.source_paths if are_source_images else None
29
  )
30
+ source_file_names = [ source_file_value['name'] for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None
31
  SOURCE_IMAGE = gradio.Image(
32
+ value = source_file_names[0] if are_source_images else None,
33
+ visible = are_source_images,
34
  show_label = False
35
  )
36
  register_ui_component('source_image', SOURCE_IMAGE)
 
40
  SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE)
41
 
42
 
43
+ def update(files : List[File]) -> gradio.Image:
44
+ file_names = [ file.name for file in files ] if files else None
45
+ if are_images(file_names):
46
+ facefusion.globals.source_paths = file_names
47
+ return gradio.Image(value = file_names[0], visible = True)
48
+ facefusion.globals.source_paths = None
49
  return gradio.Image(value = None, visible = False)
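
The switch to file_count = 'multiple' exists because several photos of the same person can now feed one swap: get_average_face merges them into a single face. A conceptual sketch of the averaging idea, assuming one identity embedding per image (the actual merge lives in facefusion.face_analyser):

import numpy

def average_embedding(embeddings : list) -> numpy.ndarray:
	# averaging embeddings across photos tends to stabilise the swapped identity
	return numpy.mean(numpy.stack(embeddings), axis = 0)
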
facefusion/uis/components/target.py CHANGED
@@ -1,11 +1,11 @@
1
- from typing import Any, IO, Tuple, Optional
2
  import gradio
3
 
4
  import facefusion.globals
5
  from facefusion import wording
6
- from facefusion.face_cache import clear_faces_cache
7
- from facefusion.face_reference import clear_face_reference
8
- from facefusion.utilities import is_image, is_video
9
  from facefusion.uis.core import register_ui_component
10
 
11
  TARGET_FILE : Optional[gradio.File] = None
@@ -50,9 +50,9 @@ def listen() -> None:
50
  TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
51
 
52
 
53
- def update(file : IO[Any]) -> Tuple[gradio.Image, gradio.Video]:
54
- clear_face_reference()
55
- clear_faces_cache()
56
  if file and is_image(file.name):
57
  facefusion.globals.target_path = file.name
58
  return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False)
 
1
+ from typing import Tuple, Optional
2
  import gradio
3
 
4
  import facefusion.globals
5
  from facefusion import wording
6
+ from facefusion.face_store import clear_static_faces, clear_reference_faces
7
+ from facefusion.uis.typing import File
8
+ from facefusion.filesystem import is_image, is_video
9
  from facefusion.uis.core import register_ui_component
10
 
11
  TARGET_FILE : Optional[gradio.File] = None
 
50
  TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ])
51
 
52
 
53
+ def update(file : File) -> Tuple[gradio.Image, gradio.Video]:
54
+ clear_reference_faces()
55
+ clear_static_faces()
56
  if file and is_image(file.name):
57
  facefusion.globals.target_path = file.name
58
  return gradio.Image(value = file.name, visible = True), gradio.Video(value = None, visible = False)
facefusion/uis/components/temp_frame.py CHANGED
@@ -5,7 +5,7 @@ import facefusion.globals
5
  import facefusion.choices
6
  from facefusion import wording
7
  from facefusion.typing import TempFrameFormat
8
- from facefusion.utilities import is_video
9
  from facefusion.uis.core import get_ui_component
10
 
11
  TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
 
5
  import facefusion.choices
6
  from facefusion import wording
7
  from facefusion.typing import TempFrameFormat
8
+ from facefusion.filesystem import is_video
9
  from facefusion.uis.core import get_ui_component
10
 
11
  TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None
facefusion/uis/components/trim_frame.py CHANGED
@@ -4,7 +4,7 @@ import gradio
4
  import facefusion.globals
5
  from facefusion import wording
6
  from facefusion.vision import count_video_frame_total
7
- from facefusion.utilities import is_video
8
  from facefusion.uis.core import get_ui_component
9
 
10
  TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
 
4
  import facefusion.globals
5
  from facefusion import wording
6
  from facefusion.vision import count_video_frame_total
7
+ from facefusion.filesystem import is_video
8
  from facefusion.uis.core import get_ui_component
9
 
10
  TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None
facefusion/uis/components/webcam.py CHANGED
@@ -9,13 +9,13 @@ import gradio
9
  from tqdm import tqdm
10
 
11
  import facefusion.globals
12
- from facefusion import wording
13
  from facefusion.content_analyser import analyse_stream
14
  from facefusion.typing import Frame, Face
15
- from facefusion.face_analyser import get_one_face
16
  from facefusion.processors.frame.core import get_frame_processors_modules
17
- from facefusion.utilities import open_ffmpeg
18
- from facefusion.vision import normalize_frame_color, read_static_image
19
  from facefusion.uis.typing import StreamMode, WebcamMode
20
  from facefusion.uis.core import get_ui_component
21
 
@@ -79,30 +79,34 @@ def listen() -> None:
79
  getattr(source_image, method)(stop, cancels = start_event)
80
 
81
 
82
- def start(mode : WebcamMode, resolution : str, fps : float) -> Generator[Frame, None, None]:
83
  facefusion.globals.face_selector_mode = 'one'
84
  facefusion.globals.face_analyser_order = 'large-small'
85
- source_face = get_one_face(read_static_image(facefusion.globals.source_path))
 
86
  stream = None
87
- if mode in [ 'udp', 'v4l2' ]:
88
- stream = open_stream(mode, resolution, fps) # type: ignore[arg-type]
89
  webcam_width, webcam_height = map(int, resolution.split('x'))
90
  webcam_capture = get_webcam_capture()
91
  if webcam_capture and webcam_capture.isOpened():
92
- webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) # type: ignore[attr-defined]
93
  webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
94
  webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
95
  webcam_capture.set(cv2.CAP_PROP_FPS, fps)
96
  for capture_frame in multi_process_capture(source_face, webcam_capture, fps):
97
- if mode == 'inline':
98
  yield normalize_frame_color(capture_frame)
99
  else:
100
- stream.stdin.write(capture_frame.tobytes())
101
  yield None
102
 
103
 
104
  def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, fps : float) -> Generator[Frame, None, None]:
105
- with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =') as progress:
106
  with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
107
  futures = []
108
  deque_capture_frames : Deque[Frame] = deque()
@@ -137,11 +141,15 @@ def process_stream_frame(source_face : Face, temp_frame : Frame) -> Frame:
137
  return temp_frame
138
 
139
 
140
- def open_stream(mode : StreamMode, resolution : str, fps : float) -> subprocess.Popen[bytes]:
141
  commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', resolution, '-r', str(fps), '-i', '-' ]
142
- if mode == 'udp':
143
  commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ])
144
- if mode == 'v4l2':
145
- device_name = os.listdir('/sys/devices/virtual/video4linux')[0]
146
- commands.extend([ '-f', 'v4l2', '/dev/' + device_name ])
147
  return open_ffmpeg(commands)
 
9
  from tqdm import tqdm
10
 
11
  import facefusion.globals
12
+ from facefusion import logger, wording
13
  from facefusion.content_analyser import analyse_stream
14
  from facefusion.typing import Frame, Face
15
+ from facefusion.face_analyser import get_average_face
16
  from facefusion.processors.frame.core import get_frame_processors_modules
17
+ from facefusion.ffmpeg import open_ffmpeg
18
+ from facefusion.vision import normalize_frame_color, read_static_images
19
  from facefusion.uis.typing import StreamMode, WebcamMode
20
  from facefusion.uis.core import get_ui_component
21
 
 
79
  getattr(source_image, method)(stop, cancels = start_event)
80
 
81
 
82
+ def start(webcam_mode : WebcamMode, resolution : str, fps : float) -> Generator[Frame, None, None]:
83
  facefusion.globals.face_selector_mode = 'one'
84
  facefusion.globals.face_analyser_order = 'large-small'
85
+ source_frames = read_static_images(facefusion.globals.source_paths)
86
+ source_face = get_average_face(source_frames)
87
  stream = None
88
+ if webcam_mode in [ 'udp', 'v4l2' ]:
89
+ stream = open_stream(webcam_mode, resolution, fps) # type: ignore[arg-type]
90
  webcam_width, webcam_height = map(int, resolution.split('x'))
91
  webcam_capture = get_webcam_capture()
92
  if webcam_capture and webcam_capture.isOpened():
93
+ webcam_capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG')) # type: ignore[attr-defined]
94
  webcam_capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
95
  webcam_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
96
  webcam_capture.set(cv2.CAP_PROP_FPS, fps)
97
  for capture_frame in multi_process_capture(source_face, webcam_capture, fps):
98
+ if webcam_mode == 'inline':
99
  yield normalize_frame_color(capture_frame)
100
  else:
101
+ try:
102
+ stream.stdin.write(capture_frame.tobytes())
103
+ except Exception:
104
+ clear_webcam_capture()
105
  yield None
106
 
107
 
108
  def multi_process_capture(source_face : Face, webcam_capture : cv2.VideoCapture, fps : float) -> Generator[Frame, None, None]:
109
+ with tqdm(desc = wording.get('processing'), unit = 'frame', ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
110
  with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
111
  futures = []
112
  deque_capture_frames : Deque[Frame] = deque()
 
141
  return temp_frame
142
 
143
 
144
+ def open_stream(stream_mode : StreamMode, resolution : str, fps : float) -> subprocess.Popen[bytes]:
145
  commands = [ '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', resolution, '-r', str(fps), '-i', '-' ]
146
+ if stream_mode == 'udp':
147
  commands.extend([ '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ])
148
+ if stream_mode == 'v4l2':
149
+ try:
150
+ device_name = os.listdir('/sys/devices/virtual/video4linux')[0]
151
+ if device_name:
152
+ commands.extend([ '-f', 'v4l2', '/dev/' + device_name ])
153
+ except FileNotFoundError:
154
+ logger.error(wording.get('stream_not_loaded').format(stream_mode = stream_mode), __name__.upper())
155
  return open_ffmpeg(commands)
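
The streaming path above hinges on a simple contract with ffmpeg: every stream.stdin.write() hands over exactly one headerless BGR24 frame, and the -s and -r input flags tell ffmpeg how to slice and pace that byte stream. A minimal, self-contained sketch of the same pattern, assuming only OpenCV and an ffmpeg binary on PATH (open_udp_stream() is a hypothetical stand-in, not one of the module's helpers):

# Minimal sketch of the raw-frame piping used by open_stream(); assumes an
# ffmpeg binary on PATH. open_udp_stream() is illustrative only.
import subprocess

import cv2


def open_udp_stream(resolution : str, fps : float) -> subprocess.Popen:
	# ffmpeg reads headerless BGR24 frames from stdin and re-muxes them
	# as MPEG-TS over UDP, mirroring the commands built by open_stream()
	commands = [ 'ffmpeg', '-f', 'rawvideo', '-pix_fmt', 'bgr24', '-s', resolution, '-r', str(fps), '-i', '-', '-b:v', '2000k', '-f', 'mpegts', 'udp://localhost:27000?pkt_size=1316' ]
	return subprocess.Popen(commands, stdin = subprocess.PIPE)


capture = cv2.VideoCapture(0)
# the capture size must match the -s argument, or ffmpeg slices the byte
# stream at the wrong offsets
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
stream = open_udp_stream('640x480', 25)
while capture.isOpened():
	has_frame, frame = capture.read()
	if not has_frame:
		break
	stream.stdin.write(frame.tobytes())
capture.release()
stream.stdin.close()
stream.wait()

The resulting stream plays in any MPEG-TS capable client, for example ffplay udp://localhost:27000. The try/except the commit adds around the write matters because a disconnecting consumer closes the pipe, and an unguarded write would raise BrokenPipeError mid-generator.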
facefusion/uis/core.py CHANGED
@@ -5,9 +5,9 @@ import sys
 import gradio
 
 import facefusion.globals
-from facefusion import metadata, wording
+from facefusion import metadata, logger, wording
 from facefusion.uis.typing import Component, ComponentName
-from facefusion.utilities import resolve_relative_path
+from facefusion.filesystem import resolve_relative_path
 
 UI_COMPONENTS: Dict[ComponentName, Component] = {}
 UI_LAYOUT_MODULES : List[ModuleType] = []
@@ -27,7 +27,8 @@ def load_ui_layout_module(ui_layout : str) -> Any:
 		for method_name in UI_LAYOUT_METHODS:
 			if not hasattr(ui_layout_module, method_name):
 				raise NotImplementedError
-	except ModuleNotFoundError:
+	except ModuleNotFoundError as exception:
+		logger.debug(exception.msg, __name__.upper())
 		sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout))
 	except NotImplementedError:
 		sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout))
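
load_ui_layout_module() is a small plugin loader: import the layout by name, then verify the module exposes every method in UI_LAYOUT_METHODS before handing it over. The change above logs the underlying import error at debug level, where it was previously swallowed before the sys.exit(). A reduced sketch of the pattern (the method list here is an assumption for illustration; the real UI_LAYOUT_METHODS may contain more entries):

# Reduced sketch of the plugin-style layout loading in facefusion/uis/core.py;
# UI_LAYOUT_METHODS below is illustrative, not the module's actual list.
import importlib
from types import ModuleType

UI_LAYOUT_METHODS = [ 'pre_check', 'render', 'listen' ]


def load_layout(ui_layout : str) -> ModuleType:
	try:
		ui_layout_module = importlib.import_module('facefusion.uis.layouts.' + ui_layout)
		for method_name in UI_LAYOUT_METHODS:
			if not hasattr(ui_layout_module, method_name):
				raise NotImplementedError
		return ui_layout_module
	except ModuleNotFoundError as exception:
		# surface the real import error instead of discarding it
		raise SystemExit('ui layout not loaded: ' + ui_layout) from exception
	except NotImplementedError:
		raise SystemExit('ui layout not implemented: ' + ui_layout)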
facefusion/uis/layouts/benchmark.py CHANGED
@@ -1,7 +1,7 @@
 import gradio
 
 import facefusion.globals
-from facefusion.utilities import conditional_download
+from facefusion.download import conditional_download
 from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, benchmark_options, benchmark
 
 
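
conditional_download now lives in the new facefusion/download.py module added by this commit. The benchmark layout consumes it in pre_check() to fetch test assets once and skip the download on later runs, roughly as follows (the URL below is a placeholder, not an asset the repository actually references):

# Rough shape of the benchmark layout's pre_check(); placeholder URL only.
from facefusion.download import conditional_download


def pre_check() -> bool:
	conditional_download('.assets/examples',
	[
		'https://example.com/target-240p.mp4'
	])
	return True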
facefusion/uis/layouts/default.py CHANGED
@@ -1,6 +1,6 @@
 import gradio
 
-from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, temp_frame, output_options, common_options, source, target, output, preview, trim_frame, face_analyser, face_selector, face_mask
+from facefusion.uis.components import about, frame_processors, frame_processors_options, execution, execution_thread_count, execution_queue_count, limit_resources, temp_frame, output_options, common_options, source, target, output, preview, trim_frame, face_analyser, face_selector, face_masker
 
 
 def pre_check() -> bool:
@@ -47,7 +47,7 @@ def render() -> gradio.Blocks:
 		with gradio.Blocks():
 			face_selector.render()
 		with gradio.Blocks():
-			face_mask.render()
+			face_masker.render()
 		with gradio.Blocks():
 			face_analyser.render()
 	return layout
@@ -69,7 +69,7 @@ def listen() -> None:
 	preview.listen()
 	trim_frame.listen()
 	face_selector.listen()
-	face_mask.listen()
+	face_masker.listen()
 	face_analyser.listen()
 
 
facefusion/uis/typing.py CHANGED
@@ -1,6 +1,7 @@
-from typing import Literal
+from typing import Literal, Any, IO
 import gradio
 
+File = IO[Any]
 Component = gradio.File or gradio.Image or gradio.Video or gradio.Slider
 ComponentName = Literal\
 [
@@ -17,11 +18,13 @@ ComponentName = Literal\
 	'face_detector_model_dropdown',
 	'face_detector_size_dropdown',
 	'face_detector_score_slider',
+	'face_mask_types_checkbox_group',
 	'face_mask_blur_slider',
 	'face_mask_padding_top_slider',
 	'face_mask_padding_bottom_slider',
 	'face_mask_padding_left_slider',
 	'face_mask_padding_right_slider',
+	'face_mask_region_checkbox_group',
 	'frame_processors_checkbox_group',
 	'face_swapper_model_dropdown',
 	'face_enhancer_model_dropdown',
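
One pre-existing quirk worth flagging: gradio.File or gradio.Image or gradio.Video or gradio.Slider is a runtime boolean expression, so the Component alias evaluates to just gradio.File, the first truthy operand. The commit keeps that line as-is; if the alias were meant to type-check across all four classes, a Union would express the intent:

# Sketch only -- not a change this commit makes. A Union covers all four
# component classes, whereas `A or B` collapses to the first operand.
from typing import Union
import gradio

Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider]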