Upload 20 files
- .gitattributes +1 -0
- LivePortrait/README.md +234 -0
- LivePortrait/docs/inference.gif +0 -0
- LivePortrait/docs/showcase2.gif +3 -0
- LivePortrait/gitattributes +51 -0
- LivePortrait/gitignore +18 -0
- LivePortrait/gitkeep +0 -0
- LivePortrait/insightface/models/buffalo_l/2d106det.onnx +3 -0
- LivePortrait/insightface/models/buffalo_l/det_10g.onnx +3 -0
- LivePortrait/liveportrait/base_models/appearance_feature_extractor.pth +3 -0
- LivePortrait/liveportrait/base_models/motion_extractor.pth +3 -0
- LivePortrait/liveportrait/base_models/spade_generator.pth +3 -0
- LivePortrait/liveportrait/base_models/warping_module.pth +3 -0
- LivePortrait/liveportrait/landmark.onnx +3 -0
- LivePortrait/liveportrait/retargeting_models/stitching_retargeting_module.pth +3 -0
- LivePortrait/liveportrait_animals/base_models/appearance_feature_extractor.pth +3 -0
- LivePortrait/liveportrait_animals/base_models/motion_extractor.pth +3 -0
- LivePortrait/liveportrait_animals/base_models/spade_generator.pth +3 -0
- LivePortrait/liveportrait_animals/base_models/warping_module.pth +3 -0
- LivePortrait/liveportrait_animals/retargeting_models/stitching_retargeting_module.pth +3 -0
- LivePortrait/liveportrait_animals/xpose.pth +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+LivePortrait/docs/showcase2.gif filter=lfs diff=lfs merge=lfs -text
LivePortrait/README.md
ADDED
@@ -0,0 +1,234 @@
---
license: mit
library_name: liveportrait
---

<h1 align="center">LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control</h1>

<div align='center'>
<a href='https://github.com/cleardusk' target='_blank'><strong>Jianzhu Guo</strong></a><sup> 1†</sup>
<a href='https://github.com/Mystery099' target='_blank'><strong>Dingyun Zhang</strong></a><sup> 1,2</sup>
<a href='https://github.com/KwaiVGI' target='_blank'><strong>Xiaoqiang Liu</strong></a><sup> 1</sup>
<a href='https://github.com/zzzweakman' target='_blank'><strong>Zhizhou Zhong</strong></a><sup> 1,3</sup>
<a href='https://scholar.google.com.hk/citations?user=_8k1ubAAAAAJ' target='_blank'><strong>Yuan Zhang</strong></a><sup> 1</sup>
</div>

<div align='center'>
<a href='https://scholar.google.com/citations?user=P6MraaYAAAAJ' target='_blank'><strong>Pengfei Wan</strong></a><sup> 1</sup>
<a href='https://openreview.net/profile?id=~Di_ZHANG3' target='_blank'><strong>Di Zhang</strong></a><sup> 1</sup>
</div>

<div align='center'>
<sup>1 </sup>Kuaishou Technology  <sup>2 </sup>University of Science and Technology of China  <sup>3 </sup>Fudan University
</div>
<div align='center'>
<small><sup>†</sup> Corresponding author</small>
</div>

<div align="center" style="display: flex; justify-content: center; flex-wrap: wrap;">
<!-- <a href='LICENSE'><img src='https://img.shields.io/badge/license-MIT-yellow'></a> -->
<a href='https://arxiv.org/pdf/2407.03168'><img src='https://img.shields.io/badge/arXiv-LivePortrait-red'></a>
<a href='https://liveportrait.github.io'><img src='https://img.shields.io/badge/Project-LivePortrait-green'></a>
<a href='https://huggingface.co/spaces/KwaiVGI/liveportrait'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue'></a>
<a href="https://github.com/KwaiVGI/LivePortrait"><img src="https://img.shields.io/github/stars/KwaiVGI/LivePortrait"></a>
</div>
<br>

<p align="center">
<img src="./docs/showcase2.gif" alt="showcase">
🔥 For more results, visit our <a href="https://liveportrait.github.io/"><strong>homepage</strong></a> 🔥
</p>

## 🔥 Updates
- **`2024/08/02`**: 😸 We released a version of the **Animals model**, along with several other updates and improvements. Check out the details [**here**](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-08-02.md)!
- **`2024/07/25`**: 📦 Windows users can now download the package from [HuggingFace](https://huggingface.co/cleardusk/LivePortrait-Windows/tree/main) or [BaiduYun](https://pan.baidu.com/s/1FWsWqKe0eNfXrwjEhhCqlw?pwd=86q2). Simply unzip and double-click `run_windows.bat` to enjoy!
- **`2024/07/24`**: 🎨 We support pose editing for source portraits in the Gradio interface. We’ve also lowered the default detection threshold to increase recall. [Have fun](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-07-24.md)!
- **`2024/07/19`**: ✨ We support 🎞️ portrait video editing (aka v2v)! More to see [here](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-07-19.md).
- **`2024/07/17`**: 🍎 We support macOS with Apple Silicon, modified from [jeethu](https://github.com/jeethu)'s PR [#143](https://github.com/KwaiVGI/LivePortrait/pull/143).
- **`2024/07/10`**: 💪 We support audio and video concatenation, driving video auto-cropping, and template making to protect privacy. More to see [here](https://github.com/KwaiVGI/LivePortrait/blob/main/assets/docs/changelog/2024-07-10.md).
- **`2024/07/09`**: 🤗 We released the [HuggingFace Space](https://huggingface.co/spaces/KwaiVGI/liveportrait), thanks to the HF team and [Gradio](https://github.com/gradio-app/gradio)!
- **`2024/07/04`**: 😊 We released the initial version of the inference code and models. Continuous updates, stay tuned!
- **`2024/07/04`**: 🔥 We released the [homepage](https://liveportrait.github.io) and technical report on [arXiv](https://arxiv.org/pdf/2407.03168).

## Introduction 📖
This repo, named **LivePortrait**, contains the official PyTorch implementation of our paper [LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control](https://arxiv.org/pdf/2407.03168).
We are actively updating and improving this repository. If you find any bugs or have suggestions, feel free to raise issues or submit pull requests (PRs) 💖.

## Getting Started 🏁
### 1. Clone the code and prepare the environment
```bash
git clone https://github.com/KwaiVGI/LivePortrait
cd LivePortrait

# create env using conda
conda create -n LivePortrait python==3.9
conda activate LivePortrait

# install dependencies with pip
# for Linux and Windows users
pip install -r requirements.txt
# for macOS with Apple Silicon users
pip install -r requirements_macOS.txt
```

**Note:** make sure your system has [FFmpeg](https://ffmpeg.org/download.html) installed, including both `ffmpeg` and `ffprobe`!

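A quick way to confirm both binaries are on your `PATH` (an optional sanity check using standard shell tooling, not part of the original setup steps):

```bash
# both commands should print a path; a non-zero exit means something is missing
command -v ffmpeg ffprobe || echo "FFmpeg/ffprobe not found -- install FFmpeg first"
```
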
### 2. Download pretrained weights

The easiest way to download the pretrained weights is from HuggingFace:
```bash
# first, ensure git-lfs is installed, see: https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage
git lfs install
# clone and move the weights
git clone https://huggingface.co/KwaiVGI/LivePortrait temp_pretrained_weights
mv temp_pretrained_weights/* pretrained_weights/
rm -rf temp_pretrained_weights
```

Alternatively, you can download all pretrained weights from [Google Drive](https://drive.google.com/drive/folders/1UtKgzKjFAOmZkhNK-OYT0caJ_w2XAnib) or [Baidu Yun](https://pan.baidu.com/s/1MGctWmNla_vZxDbEp2Dtzw?pwd=z5cn). Unzip and place them in `./pretrained_weights`.
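
If you prefer not to clone the full model repo, the `huggingface_hub` CLI can download the weights directly into place (a sketch; the `--exclude` patterns are illustrative):

```bash
pip install "huggingface_hub[cli]"
huggingface-cli download KwaiVGI/LivePortrait --local-dir pretrained_weights --exclude "*.git*" "README.md"
```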

Ensure the directory structure matches, or contains, the following:
```text
pretrained_weights
├── insightface
│   └── models
│       └── buffalo_l
│           ├── 2d106det.onnx
│           └── det_10g.onnx
└── liveportrait
    ├── base_models
    │   ├── appearance_feature_extractor.pth
    │   ├── motion_extractor.pth
    │   ├── spade_generator.pth
    │   └── warping_module.pth
    ├── landmark.onnx
    └── retargeting_models
        └── stitching_retargeting_module.pth
```
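
To compare what actually landed on disk against the tree above, here is a minimal check using standard shell tools (not part of the original instructions):

```bash
# list all model files under pretrained_weights, sorted for easy comparison
find pretrained_weights -type f \( -name "*.pth" -o -name "*.onnx" \) | sort
```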

### 3. Inference 🚀

#### Fast hands-on
```bash
# For Linux and Windows
python inference.py

# For macOS with Apple Silicon (Intel is not supported); this may be ~20x slower than on an RTX 4090
PYTORCH_ENABLE_MPS_FALLBACK=1 python inference.py
```

If the script runs successfully, you will get an output mp4 file named `animations/s6--d0_concat.mp4`, which concatenates the driving video, the input image or video, and the generated result.

<p align="center">
<img src="./docs/inference.gif" alt="image">
</p>

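Since `ffprobe` is already required by the setup, you can optionally use it to verify the generated clip (this check is an addition, not part of the original guide):

```bash
# print the width, height, and duration of the generated video
ffprobe -v error -select_streams v:0 -show_entries stream=width,height,duration -of csv=p=0 animations/s6--d0_concat.mp4
```
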
Or, you can change the input by specifying the `-s` and `-d` arguments:

```bash
# source input is an image
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d0.mp4

# source input is a video ✨
python inference.py -s assets/examples/source/s13.mp4 -d assets/examples/driving/d0.mp4

# see all options
python inference.py -h
```

#### Driving video auto-cropping 📢📢📢
To use your own driving video, we **recommend**: ⬇️
- Crop it to a **1:1** aspect ratio (e.g., 512x512 or 256x256 pixels), or enable auto-cropping with `--flag_crop_driving_video`.
- Focus on the head area, similar to the example videos.
- Minimize shoulder movement.
- Make sure the first frame of the driving video is a frontal face with a **neutral expression**.

Below is an auto-cropping example using `--flag_crop_driving_video`:
```bash
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d13.mp4 --flag_crop_driving_video
```

If the auto-cropping results are not satisfactory, you can adjust the scale and offset with the `--scale_crop_driving_video` and `--vy_ratio_crop_driving_video` options (see the sketch below), or crop manually.

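For example, a sketch of tuning those two flags (only the flag names come from the project; the numeric values are illustrative, not recommended defaults):

```bash
# adjust the crop scale and the vertical offset of the auto-crop
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d13.mp4 \
    --flag_crop_driving_video --scale_crop_driving_video 2.0 --vy_ratio_crop_driving_video -0.1
```
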
#### Motion template making
You can also use the auto-generated motion template files ending with `.pkl` to speed up inference and **protect privacy**, for example:
```bash
python inference.py -s assets/examples/source/s9.jpg -d assets/examples/driving/d5.pkl # portrait animation
python inference.py -s assets/examples/source/s13.mp4 -d assets/examples/driving/d5.pkl # portrait video editing
```

### 4. Gradio interface 🤗

We also provide a Gradio <a href='https://github.com/gradio-app/gradio'><img src='https://img.shields.io/github/stars/gradio-app/gradio'></a> interface for a better experience; just run:

```bash
# For Linux and Windows users (and macOS with Intel, untested)
python app.py

# For macOS with Apple Silicon (Intel is not supported); this may be ~20x slower than on an RTX 4090
PYTORCH_ENABLE_MPS_FALLBACK=1 python app.py
```

You can specify the `--server_port`, `--share`, and `--server_name` arguments to suit your needs!

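For instance (only the flag names come from the project; the host and port values below are illustrative):

```bash
# serve on all interfaces with a custom port and a public Gradio share link
python app.py --server_name 0.0.0.0 --server_port 8890 --share
```
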
🚀 We also provide an acceleration option, `--flag_do_torch_compile`. The first inference triggers an optimization process (about one minute), making subsequent inferences 20-30% faster. Performance gains may vary with different CUDA versions.
```bash
# enable torch.compile for faster inference
python app.py --flag_do_torch_compile
```
**Note**: This method is not supported on Windows and macOS.

**Or, try it out effortlessly on [HuggingFace](https://huggingface.co/spaces/KwaiVGI/LivePortrait) 🤗**

### 5. Inference speed evaluation 🚀🚀🚀
We have also provided a script to evaluate the inference speed of each module:

```bash
# For NVIDIA GPU
python speed.py
```

Below are the results of inferring one frame on an RTX 4090 GPU using the native PyTorch framework with `torch.compile`:

| Model                             | Parameters (M) | Model Size (MB) | Inference (ms) |
|-----------------------------------|:--------------:|:---------------:|:--------------:|
| Appearance Feature Extractor      |      0.84      |       3.3       |      0.82      |
| Motion Extractor                  |     28.12      |       108       |      0.84      |
| Spade Generator                   |     55.37      |       212       |      7.59      |
| Warping Module                    |     45.53      |       174       |      5.21      |
| Stitching and Retargeting Modules |      0.23      |       2.3       |      0.31      |

*Note: The values for the Stitching and Retargeting Modules represent the combined parameter counts and total inference time of three sequential MLP networks.*

## Community Resources 🤗

Discover the invaluable resources contributed by our community to enhance your LivePortrait experience:

- [ComfyUI-LivePortraitKJ](https://github.com/kijai/ComfyUI-LivePortraitKJ) by [@kijai](https://github.com/kijai)
- [comfyui-liveportrait](https://github.com/shadowcz007/comfyui-liveportrait) by [@shadowcz007](https://github.com/shadowcz007)
- [LivePortrait In ComfyUI](https://www.youtube.com/watch?v=aFcS31OWMjE) by [@Benji](https://www.youtube.com/@TheFutureThinker)
- [LivePortrait hands-on tutorial](https://www.youtube.com/watch?v=uyjSTAOY7yI) by [@AI Search](https://www.youtube.com/@theAIsearch)
- [ComfyUI tutorial](https://www.youtube.com/watch?v=8-IcDDmiUMM) by [@Sebastian Kamph](https://www.youtube.com/@sebastiankamph)
- [Replicate Playground](https://replicate.com/fofr/live-portrait) and [cog-comfyui](https://github.com/fofr/cog-comfyui) by [@fofr](https://github.com/fofr)

And many more amazing contributions from our community!

## Acknowledgements 💐
We would like to thank the contributors of the [FOMM](https://github.com/AliaksandrSiarohin/first-order-model), [Open Facevid2vid](https://github.com/zhanglonghao1992/One-Shot_Free-View_Neural_Talking_Head_Synthesis), [SPADE](https://github.com/NVlabs/SPADE), and [InsightFace](https://github.com/deepinsight/insightface) repositories for their open research and contributions.

## Citation 💖
If you find LivePortrait useful for your research, you are welcome to 🌟 this repo and cite our work using the following BibTeX:
```bibtex
@article{guo2024liveportrait,
  title   = {LivePortrait: Efficient Portrait Animation with Stitching and Retargeting Control},
  author  = {Guo, Jianzhu and Zhang, Dingyun and Liu, Xiaoqiang and Zhong, Zhizhou and Zhang, Yuan and Wan, Pengfei and Zhang, Di},
  journal = {arXiv preprint arXiv:2407.03168},
  year    = {2024}
}
```

## Contact 📧
[**Jianzhu Guo (郭建珠)**](https://guojianzhu.com); **guojianzhu1994@gmail.com**
LivePortrait/docs/inference.gif
ADDED
LivePortrait/docs/showcase2.gif
ADDED
LivePortrait/gitattributes
ADDED
@@ -0,0 +1,51 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
liveportrait/retargeting_models/stitching_retargeting_module.pth filter=lfs diff=lfs merge=lfs -text
liveportrait/base_models/appearance_feature_extractor.pth filter=lfs diff=lfs merge=lfs -text
liveportrait/base_models/motion_extractor.pth filter=lfs diff=lfs merge=lfs -text
liveportrait/base_models/spade_generator.pth filter=lfs diff=lfs merge=lfs -text
liveportrait/base_models/warping_module.pth filter=lfs diff=lfs merge=lfs -text
insightface/models/buffalo_l/2d106det.onnx filter=lfs diff=lfs merge=lfs -text
insightface/models/buffalo_l/det_10g.onnx filter=lfs diff=lfs merge=lfs -text
liveportrait/landmark.onnx filter=lfs diff=lfs merge=lfs -text
docs/inference.gif filter=lfs diff=lfs merge=lfs -text
docs/showcase2.gif filter=lfs diff=lfs merge=lfs -text
liveportrait_animals/base_models/motion_extractor.pth filter=lfs diff=lfs merge=lfs -text
liveportrait_animals/base_models/spade_generator.pth filter=lfs diff=lfs merge=lfs -text
liveportrait_animals/base_models/warping_module.pth filter=lfs diff=lfs merge=lfs -text
liveportrait_animals/retargeting_models/stitching_retargeting_module.pth filter=lfs diff=lfs merge=lfs -text
liveportrait_animals/xpose.pth filter=lfs diff=lfs merge=lfs -text
liveportrait_animals/base_models/appearance_feature_extractor.pth filter=lfs diff=lfs merge=lfs -text
LivePortrait/gitignore
ADDED
@@ -0,0 +1,18 @@
# Byte-compiled / optimized / DLL files
__pycache__/
**/__pycache__/
*.py[cod]
**/*.py[cod]
*$py.class

# Model weights
#**/*.pth
#**/*.onnx

# Ipython notebook
*.ipynb

# Temporary files or benchmark resources
animations/*
tmp/*
gradio_cached_examples/
LivePortrait/gitkeep
ADDED
File without changes
LivePortrait/insightface/models/buffalo_l/2d106det.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f001b856447c413801ef5c42091ed0cd516fcd21f2d6b79635b1e733a7109dbf
size 5030888
LivePortrait/insightface/models/buffalo_l/det_10g.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5838f7fe053675b1c7a08b633df49e7af5495cee0493c7dcf6697200b85b5b91
size 16923827
LivePortrait/liveportrait/base_models/appearance_feature_extractor.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5279bb8654293dbdf327030b397f107237dd9212fb11dd75b83dfb635211ceb5
size 3387959
LivePortrait/liveportrait/base_models/motion_extractor.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:251e6a94ad667a1d0c69526d292677165110ef7f0cf0f6d199f0e414e8aa0ca5
size 112545506
LivePortrait/liveportrait/base_models/spade_generator.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4780afc7909a9f84e24c01d73b31a555ef651521a1fe3b2429bd04534d992aee
size 221813590
LivePortrait/liveportrait/base_models/warping_module.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f61a6f265fe344f14132364859a78bdbbc2068577170693da57fb96d636e282
size 182180086
LivePortrait/liveportrait/landmark.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31d22a5041326c31f19b78886939a634a5aedcaa5ab8b9b951a1167595d147db
size 114666491
LivePortrait/liveportrait/retargeting_models/stitching_retargeting_module.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3652d5a3f95099141a56986aaddec92fadf0a73c87a20fac9a2c07c32b28b611
size 2393098
LivePortrait/liveportrait_animals/base_models/appearance_feature_extractor.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e2cd1d5d67c0457229e9736d401d39225e096895b869f34234978082561af6de
size 3387959
LivePortrait/liveportrait_animals/base_models/motion_extractor.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63c0d450099ef6ebece788ab711cb012509712e23fd1200b79fb65ef980adbb9
size 112545506
LivePortrait/liveportrait_animals/base_models/spade_generator.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7fafa1e31c7c72c9384310d679e32af3fbf214e241fb657df8c3b18ad826f336
size 221813590
LivePortrait/liveportrait_animals/base_models/warping_module.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9719ea184ca9da059f4eee8a8c8c7c6bd46a2b1e40a241ea5490cc42ce6b79b
size 182180086
LivePortrait/liveportrait_animals/retargeting_models/stitching_retargeting_module.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3652d5a3f95099141a56986aaddec92fadf0a73c87a20fac9a2c07c32b28b611
size 2393098
LivePortrait/liveportrait_animals/xpose.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bf58e5a3c4a3a017198edc69e33f89c9a37adc856fe6b1776059b2d4a524a7dd
size 435089171