Spaces (status: Runtime error)

Fully specified Python version #2
opened by chris-rannou (HF staff)
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- .gitattributes +0 -6
- .gitignore +2 -3
- Dockerfile +0 -107
- README.md +7 -37
- app.py +0 -148
- apps/ICON.py +6 -9
- apps/__pycache__/app.cpython-38.pyc +0 -0
- apps/app.py +21 -0
- apps/infer.py +268 -144
- assets/garment_teaser.png +0 -0
- assets/intermediate_results.png +0 -0
- assets/teaser.gif +0 -0
- assets/thumbnail.png +0 -3
- configs/icon-filter.yaml +2 -2
- configs/icon-nofilter.yaml +2 -2
- configs/pamir.yaml +2 -2
- configs/pifu.yaml +2 -2
- environment.yaml +16 -0
- examples/22097467bffc92d4a5c4246f7d4edb75.png +0 -0
- examples/44c0f84c957b6b9bdf77662af5bb7078.png +0 -0
- examples/5a6a25963db2f667441d5076972c207c.png +0 -0
- examples/8da7ceb94669c2f65cbd28022e1f9876.png +0 -0
- examples/923d65f767c85a42212cae13fba3750b.png +0 -0
- examples/959c4c726a69901ce71b93a9242ed900.png +0 -0
- examples/c9856a2bc31846d684cbb965457fad59.png +0 -0
- examples/e1e7622af7074a022f5d96dc16672517.png +0 -0
- examples/fb9d20fdb93750584390599478ecf86e.png +0 -0
- examples/segmentation/003883.jpg +0 -0
- examples/segmentation/003883.json +136 -0
- examples/segmentation/028009.jpg +0 -0
- examples/segmentation/028009.json +191 -0
- examples/slack_trial2-000150.png +0 -0
- fetch_data.sh +60 -0
- install.sh +16 -0
- lib/common/render.py +4 -9
- lib/common/train_util.py +0 -2
- lib/dataloader_demo.py +58 -0
- lib/dataset/Evaluator.py +1 -1
- lib/dataset/PIFuDataset.py +80 -7
- lib/dataset/TestDataset.py +106 -18
- lib/dataset/mesh_util.py +70 -67
- lib/net/FBNet.py +1 -2
- lib/net/HGPIFuNet.py +8 -1
- lib/net/net_util.py +1 -1
- lib/pymaf/core/path_config.py +27 -12
- lib/pymaf/models/maf_extractor.py +5 -2
- lib/pymaf/models/res_module.py +1 -1
- lib/pymaf/models/smpl.py +3 -3
- lib/pymaf/utils/imutils.py +26 -20
- lib/renderer/mesh.py +1 -1
.gitattributes
CHANGED
@@ -29,9 +29,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-*.obj filter=lfs diff=lfs merge=lfs -text
-*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.glb filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -4,15 +4,14 @@ data/thuman*
 __pycache__
 debug/
 log/
+results/*
 .vscode
 !.gitignore
 force_push.sh
 .idea
+smplx/
 human_det/
 kaolin/
 neural_voxelization_layer/
 pytorch3d/
 force_push.sh
-results/
-gradio_cached_examples/
-gradio_queue.db
Dockerfile
DELETED
@@ -1,107 +0,0 @@
-FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
-
-ARG DEBIAN_FRONTEND=noninteractive
-
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
-    curl \
-    git \
-    wget \
-    freeglut3-dev \
-    unzip \
-    ffmpeg \
-    libsm6 \
-    libxext6 \
-    libgomp1 \
-    libfontconfig1 \
-    libgl1-mesa-glx \
-    libgl1-mesa-dev \
-    libglfw3 \
-    libglfw3-dev \
-    libglew2.1 \
-    libglew-dev \
-    mesa-utils \
-    libc6 \
-    libxdamage1 \
-    libxfixes3 \
-    libxcb-glx0 \
-    libxcb-dri2-0 \
-    libxcb-dri3-0 \
-    libxcb-present0 \
-    libxcb-sync1 \
-    libxshmfence1 \
-    libxxf86vm1 \
-    libxrender1 \
-    libgbm1 \
-    build-essential \
-    libeigen3-dev \
-    python3.8 \
-    python3-pip \
-    python-is-python3 \
-    nvidia-cuda-toolkit \
-    && rm -rf /var/lib/apt/lists/*
-
-
-# Set up a new user named "user" with user ID 1000
-RUN useradd -m -u 1000 user
-
-# Switch to the "user" user
-USER user
-
-FROM python:3.8
-
-ENV PYTHONUNBUFFERED=1
-
-# ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6"
-# ENV TCNN_CUDA_ARCHITECTURES=86;80;75;70;61;60
-ENV FORCE_CUDA=1
-
-# Set the environment variable to specify the GPU device
-ENV CUDA_HOME=/usr/local/cuda
-# ENV CUDA_DEVICE_ORDER=PCI_BUS_ID
-# ENV CUDA_VISIBLE_DEVICES=0
-
-ENV PATH=${CUDA_HOME}/bin:/home/${USER_NAME}/.local/bin:/usr/bin:${PATH}
-ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:/usr/lib:/usr/lib64:${LD_LIBRARY_PATH}
-
-# Set home to the user's home directory
-ENV HOME=/home/user \
-    PATH=/home/user/.local/bin:$PATH \
-    PYTHONPATH=$HOME/app:$PYTHONPATH \
-    PYTHONUNBUFFERED=1 \
-    GRADIO_ALLOW_FLAGGING=never \
-    GRADIO_NUM_PORTS=1 \
-    GRADIO_SERVER_NAME=0.0.0.0 \
-    GRADIO_THEME=huggingface \
-    SYSTEM=spaces
-
-RUN pip install --upgrade pip ninja
-RUN pip install setuptools==69.5.1
-RUN pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
-
-RUN python -c "import torch, os; print(torch.version.cuda); print(os.environ.get('CUDA_HOME'))"
-COPY requirements.txt /tmp
-RUN cd /tmp && pip install -r requirements.txt
-
-RUN pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl
-RUN pip install https://download.is.tue.mpg.de/icon/HF/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl
-RUN pip install pyembree
-
-ENV TRANSFORMERS_CACHE=/tmp
-ENV MPLCONFIGDIR=/tmp
-
-# cannot cache function '_make_tree': no locator available for file '/usr/local/lib/python3.8/site-packages/pymatting/util/kdtree.py'
-ENV NUMBA_CACHE_DIR=/tmp/numba_cache
-ENV HF_HOME=$HOME/.cache/huggingface
-RUN mkdir -p $HF_HOME
-
-RUN chmod 777 -R $HOME
-
-# Copy the current directory contents into the container at $HOME/app setting the owner to the user
-COPY --chown=user . $HOME/app
-
-# Set the working directory to the user's home directory
-WORKDIR $HOME/app
-
-ENV DISPLAY=:0
-
-CMD ["python", "app.py"]
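The deleted image ended with a one-line sanity check that torch can see CUDA. For reference, the same probe as a standalone script; the `is_available()` line is our addition, not part of the original check:

```python
# Standalone version of the Dockerfile's torch/CUDA sanity check.
import os
import torch

print(torch.version.cuda)            # CUDA version this torch build targets
print(os.environ.get("CUDA_HOME"))   # toolkit location the image exported
print(torch.cuda.is_available())     # extra check: is a GPU actually visible?
```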
README.md
CHANGED
@@ -1,42 +1,12 @@
 ---
-title: ICON
-metaTitle:
+title: ICON
+metaTitle: "Image2Human by Yuliang Xiu"
 emoji: 🤼
 colorFrom: indigo
 colorTo: yellow
-sdk:
+sdk: gradio
+sdk_version: 3.1.1
+app_file: ./apps/app.py
 pinned: true
-
-
-# ICON Clothed Human Digitization
-### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)
-
-<table>
-<th>
-<ul>
-<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
-<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a></li>
-<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a></li>
-<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
-<li><strong>Colab Notebook</strong> <a href="https://colab.research.google.com/drive/1-AWeWhPvCTBX0KfMtgtMk10uPU05ihoA?usp=sharing">Google Colab</a></li>
-</ul>
-<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
-<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
-<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
-</th>
-<th>
-<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-</th>
-</table>
-
-#### Citation
-```
-@inproceedings{xiu2022icon,
-  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
-  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
-  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-  month     = {June},
-  year      = {2022},
-  pages     = {13296-13306}
-}
-```
+python_version: 3.8.13
+---
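The front matter above is what gives this PR its title: the Space now fully pins its runtime (Gradio SDK, SDK version, entry file, and Python version) instead of relying on a custom Dockerfile. A small illustrative check that the pinned keys parse as expected, assuming PyYAML is installed and the README starts with a `---`-delimited block:

```python
# Parse the Space README front matter and print the runtime keys this PR pins.
import yaml

with open("README.md") as f:
    front_matter = yaml.safe_load(f.read().split("---")[1])

for key in ("sdk", "sdk_version", "app_file", "python_version"):
    print(f"{key}: {front_matter.get(key)}")
```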
app.py
DELETED
@@ -1,148 +0,0 @@
-# install
-
-
-import glob
-import gradio as gr
-import numpy as np
-import os
-import subprocess
-
-from apps.infer import generate_model
-
-# running
-
-description = '''
-# ICON Clothed Human Digitization
-### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)
-
-<table>
-<th>
-<ul>
-<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
-<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a></li>
-<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a></li>
-<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
-<li><strong>Colab Notebook</strong> <a href="https://colab.research.google.com/drive/1-AWeWhPvCTBX0KfMtgtMk10uPU05ihoA?usp=sharing">Google Colab</a></li>
-</ul>
-<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
-<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
-<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
-</th>
-<th>
-<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-</th>
-</table>
-
-<center>
-<a href="https://huggingface.co/spaces/Yuliang/ICON?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-lg-dark.svg"/></a>
-<h2> The reconstruction + refinement + video takes about 200 seconds for a single image. </h2>
-<h2><span style="color:red"> ICON is only suitable for humanoid images and will not work well on cartoons with non-human shapes.</span></h2>
-</center>
-
-<details>
-
-<summary>More</summary>
-
-#### Citation
-```
-@inproceedings{xiu2022icon,
-  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
-  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
-  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-  month     = {June},
-  year      = {2022},
-  pages     = {13296-13306}
-}
-```
-
-#### Acknowledgments:
-
-- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
-- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
-- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)
-
-#### Image Credits
-
-* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)
-
-#### Related works
-
-* [ICON @ MPI](https://icon.is.tue.mpg.de/)
-* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
-* [Phorhum @ Google](https://phorhum.github.io/)
-* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
-* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)
-
-</details>
-'''
-
-
-def generate_image(seed, psi):
-    iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
-    img = iface(seed, psi)
-    return img
-
-
-model_types = ['ICON', 'PIFu']
-examples_names = glob.glob('examples/*.png')
-examples_types = np.random.choice(
-    model_types, len(examples_names), p=[0.6, 0.2, 0.2])
-
-examples = [list(item) for item in zip(examples_names, examples_types)]
-
-with gr.Blocks() as demo:
-    gr.Markdown(description)
-
-    out_lst = []
-    with gr.Row():
-        with gr.Column():
-            with gr.Row():
-                with gr.Column():
-                    seed = gr.inputs.Slider(
-                        0, 1000, step=1, default=0, label='Seed (For Image Generation)')
-                    psi = gr.inputs.Slider(
-                        0, 2, step=0.05, default=0.7, label='Truncation psi (For Image Generation)')
-                    radio_choice = gr.Radio(
-                        model_types, label='Method (For Reconstruction)', value='icon-filter')
-            inp = gr.Image(type="filepath", label="Input Image")
-            with gr.Row():
-                btn_sample = gr.Button("Generate Image")
-                btn_submit = gr.Button("Submit Image")
-
-            gr.Examples(examples=examples,
-                        inputs=[inp, radio_choice],
-                        cache_examples=False,
-                        fn=generate_model,
-                        outputs=out_lst)
-
-            out_vid = gr.Video(
-                label="Image + Normal + SMPL Body + Clothed Human")
-            out_vid_download = gr.File(
-                label="Download Video, welcome share on Twitter with #ICON")
-
-        with gr.Column():
-            overlap_inp = gr.Image(
-                type="filepath", label="Image Normal Overlap")
-            out_final = gr.Model3D(
-                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human")
-            out_final_download = gr.File(
-                label="Download clothed human mesh")
-            out_smpl = gr.Model3D(
-                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL body")
-            out_smpl_download = gr.File(label="Download SMPL body mesh")
-            out_smpl_npy_download = gr.File(label="Download SMPL params")
-
-    out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download,
-               out_final, out_final_download, out_vid, out_vid_download, overlap_inp]
-
-    btn_submit.click(fn=generate_model, inputs=[
-                     inp, radio_choice], outputs=out_lst)
-    btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)
-
-if __name__ == "__main__":
-
-    # demo.launch(debug=False, enable_queue=False,
-    #             auth=(os.environ['USER'], os.environ['PASSWORD']),
-    #             auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
-
-    demo.launch(debug=True, enable_queue=True)
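One detail worth flagging in the deleted file: `np.random.choice` is called with two `model_types` but a three-entry probability vector, which raises `ValueError: a and p must have same size` as soon as the module is imported. A consistent call, for illustration:

```python
# Illustrative fix for the mismatched probability vector in the deleted
# app.py: p must have exactly one entry per element of model_types.
import numpy as np

model_types = ['ICON', 'PIFu']
examples_names = ['a.png', 'b.png', 'c.png']   # stand-ins for the glob results
examples_types = np.random.choice(model_types, len(examples_names), p=[0.6, 0.4])
print([list(item) for item in zip(examples_names, examples_types)])
```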
apps/ICON.py
CHANGED
@@ -14,26 +14,21 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-
-import os
-
 from lib.common.seg3d_lossless import Seg3dLossless
 from lib.dataset.Evaluator import Evaluator
 from lib.net import HGPIFuNet
 from lib.common.train_util import *
+from lib.renderer.gl.init_gl import initialize_GL_context
 from lib.common.render import Render
 from lib.dataset.mesh_util import SMPLX, update_mesh_shape_prior_losses, get_visibility
 import warnings
 import logging
 import torch
-import
+import smplx
 import numpy as np
 from torch import nn
-import os.path as osp
-
 from skimage.transform import resize
 import pytorch_lightning as pl
-from huggingface_hub import cached_download
 
 torch.backends.cudnn.benchmark = True
 
@@ -102,8 +97,10 @@ class ICON(pl.LightningModule):
 
         self.get_smpl_model = lambda smpl_type, gender, age, v_template: smplx.create(
             self.smpl_data.model_dir,
-            kid_template_path=
-
+            kid_template_path=osp.join(
+                osp.realpath(self.smpl_data.model_dir),
+                f"{smpl_type}/{smpl_type}_kid_template.npy",
+            ),
             model_type=smpl_type,
             gender=gender,
             age=age,
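For context, the restored `kid_template_path` argument points `smplx.create` at the kid shape template shipped alongside the body-model files. A minimal sketch of the call as wired above; the `model_dir` path and the gender/age values are illustrative, not taken from the repo config:

```python
# Minimal sketch of the smplx.create call restored in the diff above.
import os.path as osp
import smplx

model_dir = "./data/smpl_related/models"   # hypothetical location of model files
smpl_type = "smplx"

smpl_model = smplx.create(
    model_dir,
    kid_template_path=osp.join(
        osp.realpath(model_dir), f"{smpl_type}/{smpl_type}_kid_template.npy"
    ),
    model_type=smpl_type,
    gender="neutral",   # illustrative
    age="adult",        # illustrative
)
```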
apps/__pycache__/app.cpython-38.pyc
ADDED
Binary file (555 Bytes).
apps/app.py
ADDED
@@ -0,0 +1,21 @@
+# install
+
+import os
+os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
+os.environ["CUDA_VISIBLE_DEVICES"]="0"
+try:
+    os.system("bash install.sh")
+except Exception as e:
+    print(e)
+
+
+# running
+
+import gradio as gr
+
+def image_classifier(inp):
+    return {'cat': 0.3, 'dog': 0.7}
+
+demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label")
+demo.launch(auth=("icon@tue.mpg.de", "icon_2022"),
+            auth_message="Register at icon.is.tue.mpg.de/download to get the username and password.")
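Note that `os.system` reports failure only through its integer return code rather than by raising, so the `try/except` around the install step never fires when `install.sh` itself fails; it only catches Python-level errors. A stricter variant of that step, for illustration:

```python
# Illustrative alternative to the os.system call above: subprocess.run with
# check=True raises CalledProcessError when install.sh exits non-zero.
import subprocess

subprocess.run(["bash", "install.sh"], check=True)
```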
apps/infer.py
CHANGED
@@ -14,33 +14,34 @@
 #
 # Contact: ps-license@tuebingen.mpg.de
 
-import os
-import gc
-
 import logging
+from lib.common.render import query_color, image2vid
 from lib.common.config import cfg
+from lib.common.cloth_extraction import extract_cloth
 from lib.dataset.mesh_util import (
     load_checkpoint,
     update_mesh_shape_prior_losses,
+    get_optim_grid_image,
     blend_rgb_norm,
     unwrap,
     remesh,
     tensor2variable,
-
+    normal_loss
 )
 
 from lib.dataset.TestDataset import TestDataset
-from lib.common.render import query_color
 from lib.net.local_affine import LocalAffine
 from pytorch3d.structures import Meshes
 from apps.ICON import ICON
 
+import os
 from termcolor import colored
+import argparse
 import numpy as np
 from PIL import Image
 import trimesh
+import pickle
 import numpy as np
-from tqdm import tqdm
 
 import torch
 torch.backends.cudnn.benchmark = True
@@ -48,31 +49,36 @@ torch.backends.cudnn.benchmark = True
 logging.getLogger("trimesh").setLevel(logging.ERROR)
 
 
-        'loop_cloth': 200,
-        'patience': 5,
-        'out_dir': './results',
-        'hps_type': 'pymaf',
-        'config': f"./configs/{model_type}.yaml"}
+if __name__ == "__main__":
+
+    # loading cfg file
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("-gpu", "--gpu_device", type=int, default=0)
+    parser.add_argument("-colab", action="store_true")
+    parser.add_argument("-loop_smpl", "--loop_smpl", type=int, default=100)
+    parser.add_argument("-patience", "--patience", type=int, default=5)
+    parser.add_argument("-vis_freq", "--vis_freq", type=int, default=10)
+    parser.add_argument("-loop_cloth", "--loop_cloth", type=int, default=200)
+    parser.add_argument("-hps_type", "--hps_type", type=str, default="pymaf")
+    parser.add_argument("-export_video", action="store_true")
+    parser.add_argument("-in_dir", "--in_dir", type=str, default="./examples")
+    parser.add_argument("-out_dir", "--out_dir",
+                        type=str, default="./results")
+    parser.add_argument('-seg_dir', '--seg_dir', type=str, default=None)
+    parser.add_argument(
+        "-cfg", "--config", type=str, default="./configs/icon-filter.yaml"
+    )
+
+    args = parser.parse_args()
 
     # cfg read and merge
-    cfg.merge_from_file(
+    cfg.merge_from_file(args.config)
     cfg.merge_from_file("./lib/pymaf/configs/pymaf_config.yaml")
 
-    os.makedirs(config_dict['out_dir'], exist_ok=True)
-
     cfg_show_list = [
         "test_gpus",
-        [
+        [args.gpu_device],
         "mcube_res",
         256,
         "clean_mesh",
@@ -82,21 +88,28 @@ def generate_model(in_path, model_type):
     cfg.merge_from_list(cfg_show_list)
     cfg.freeze()
 
-    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-    device = torch.device(f"cuda:
+    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
+    device = torch.device(f"cuda:{args.gpu_device}")
+
+    if args.colab:
+        print(colored("colab environment...", "red"))
+        from tqdm.notebook import tqdm
+    else:
+        print(colored("normal environment...", "red"))
+        from tqdm import tqdm
 
     # load model and dataloader
     model = ICON(cfg)
     model = load_checkpoint(model, cfg)
 
     dataset_param = {
-        '
-        'seg_dir':
+        'image_dir': args.in_dir,
+        'seg_dir': args.seg_dir,
         'has_det': True,  # w/ or w/o detection
-        'hps_type':
+        'hps_type': args.hps_type  # pymaf/pare/pixie
     }
 
-    if
+    if args.hps_type == "pixie" and "pamir" in args.config:
         print(colored("PIXIE isn't compatible with PaMIR, thus switch to PyMAF", "red"))
         dataset_param["hps_type"] = "pymaf"
 
@@ -126,10 +139,10 @@ def generate_model(in_path, model_type):
             data["global_orient"], device=device, requires_grad=True
         )  # [1,1,3,3]
 
-        optimizer_smpl = torch.optim.
+        optimizer_smpl = torch.optim.SGD(
            [optimed_pose, optimed_trans, optimed_betas, optimed_orient],
            lr=1e-3,
-
+           momentum=0.9,
        )
        scheduler_smpl = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer_smpl,
@@ -137,45 +150,38 @@ def generate_model(in_path, model_type):
            factor=0.5,
            verbose=0,
            min_lr=1e-5,
-           patience=
+           patience=args.patience,
        )
 
        losses = {
-           # Cloth: Normal_recon - Normal_pred
-           "
-
-           "
-           # Cloth:
-           "
-           #
-           "
-           # Cloth: normal consistency
-           "nc": {"weight": 0, "value": 0.0},
-           # Cloth: laplacian smoonth
-           "laplacian": {"weight": 1e2, "value": 0.0},
-           # Body: Normal_pred - Normal_smpl
-           "normal": {"weight": 1e0, "value": 0.0},
-           # Body: Silhouette_pred - Silhouette_smpl
-           "silhouette": {"weight": 1e0, "value": 0.0},
+           "cloth": {"weight": 1e1, "value": 0.0},       # Cloth: Normal_recon - Normal_pred
+           "stiffness": {"weight": 1e5, "value": 0.0},   # Cloth: [RT]_v1 - [RT]_v2 (v1-edge-v2)
+           "rigid": {"weight": 1e5, "value": 0.0},       # Cloth: det(R) = 1
+           "edge": {"weight": 0, "value": 0.0},          # Cloth: edge length
+           "nc": {"weight": 0, "value": 0.0},            # Cloth: normal consistency
+           "laplacian": {"weight": 1e2, "value": 0.0},   # Cloth: laplacian smoonth
+           "normal": {"weight": 1e0, "value": 0.0},      # Body: Normal_pred - Normal_smpl
+           "silhouette": {"weight": 1e1, "value": 0.0},  # Body: Silhouette_pred - Silhouette_smpl
        }
 
        # smpl optimization
 
-       loop_smpl = tqdm(
+       loop_smpl = tqdm(
+           range(args.loop_smpl if cfg.net.prior_type != "pifu" else 1))
+
+       per_data_lst = []
 
-       for
+       for i in loop_smpl:
+
+           per_loop_lst = []
 
            optimizer_smpl.zero_grad()
-
-           # 6d_rot to rot_mat
-           optimed_orient_mat = rot6d_to_rotmat(optimed_orient.view(-1,6)).unsqueeze(0)
-           optimed_pose_mat = rot6d_to_rotmat(optimed_pose.view(-1,6)).unsqueeze(0)
 
            if dataset_param["hps_type"] != "pixie":
                smpl_out = dataset.smpl_model(
                    betas=optimed_betas,
-                   body_pose=
-                   global_orient=
+                   body_pose=optimed_pose,
+                   global_orient=optimed_orient,
                    pose2rot=False,
                )
@@ -185,8 +191,8 @@ def generate_model(in_path, model_type):
                smpl_verts, _, _ = dataset.smpl_model(
                    shape_params=optimed_betas,
                    expression_params=tensor2variable(data["exp"], device),
-                   body_pose=
-                   global_pose=
+                   body_pose=optimed_pose,
+                   global_pose=optimed_orient,
                    jaw_pose=tensor2variable(data["jaw_pose"], device),
                    left_hand_pose=tensor2variable(
                        data["left_hand_pose"], device),
@@ -214,7 +220,12 @@ def generate_model(in_path, model_type):
            diff_B_smpl = torch.abs(
                in_tensor["T_normal_B"] - in_tensor["normal_B"])
 
-
+           loss_F_smpl = normal_loss(
+               in_tensor["T_normal_F"], in_tensor["normal_F"])
+           loss_B_smpl = normal_loss(
+               in_tensor["T_normal_B"], in_tensor["normal_B"])
+
+           losses["normal"]["value"] = (loss_F_smpl + loss_B_smpl).mean()
 
            # silhouette loss
            smpl_arr = torch.cat([T_mask_F, T_mask_B], dim=-1)[0]
@@ -239,6 +250,33 @@ def generate_model(in_path, model_type):
            pbar_desc += f"Total: {smpl_loss:.3f}"
            loop_smpl.set_description(pbar_desc)
 
+           if i % args.vis_freq == 0:
+
+               per_loop_lst.extend(
+                   [
+                       in_tensor["image"],
+                       in_tensor["T_normal_F"],
+                       in_tensor["normal_F"],
+                       diff_F_smpl / 2.0,
+                       diff_S[:, :512].unsqueeze(
+                           0).unsqueeze(0).repeat(1, 3, 1, 1),
+                   ]
+               )
+               per_loop_lst.extend(
+                   [
+                       in_tensor["image"],
+                       in_tensor["T_normal_B"],
+                       in_tensor["normal_B"],
+                       diff_B_smpl / 2.0,
+                       diff_S[:, 512:].unsqueeze(
+                           0).unsqueeze(0).repeat(1, 3, 1, 1),
+                   ]
+               )
+               per_data_lst.append(
+                   get_optim_grid_image(
+                       per_loop_lst, None, nrow=5, type="smpl")
+               )
+
            smpl_loss.backward()
            optimizer_smpl.step()
            scheduler_smpl.step(smpl_loss)
@@ -249,20 +287,18 @@ def generate_model(in_path, model_type):
        # 1. SMPL Fitting
        # 2. Clothes Refinement
 
-       os.makedirs(os.path.join(
+       os.makedirs(os.path.join(args.out_dir, cfg.name,
                    "refinement"), exist_ok=True)
 
        # visualize the final results in self-rotation mode
-       os.makedirs(os.path.join(
-                   cfg.name, "vid"), exist_ok=True)
+       os.makedirs(os.path.join(args.out_dir, cfg.name, "vid"), exist_ok=True)
 
        # final results rendered as image
        # 1. Render the final fitted SMPL (xxx_smpl.png)
        # 2. Render the final reconstructed clothed human (xxx_cloth.png)
        # 3. Blend the original image with predicted cloth normal (xxx_overlap.png)
 
-       os.makedirs(os.path.join(
-                   cfg.name, "png"), exist_ok=True)
+       os.makedirs(os.path.join(args.out_dir, cfg.name, "png"), exist_ok=True)
 
        # final reconstruction meshes
        # 1. SMPL mesh (xxx_smpl.obj)
@@ -271,41 +307,54 @@ def generate_model(in_path, model_type):
        # 4. remeshed clothed mesh (xxx_remesh.obj)
        # 5. refined clothed mesh (xxx_refine.obj)
 
-       os.makedirs(os.path.join(
+       os.makedirs(os.path.join(args.out_dir, cfg.name, "obj"), exist_ok=True)
+
+       if cfg.net.prior_type != "pifu":
+
+           per_data_lst[0].save(
+               os.path.join(
+                   args.out_dir, cfg.name, f"refinement/{data['name']}_smpl.gif"
+               ),
+               save_all=True,
+               append_images=per_data_lst[1:],
+               duration=500,
+               loop=0,
+           )
+
+           if args.vis_freq == 1:
+               image2vid(
+                   per_data_lst,
+                   os.path.join(
+                       args.out_dir, cfg.name, f"refinement/{data['name']}_smpl.avi"
+                   ),
+               )
+
+           per_data_lst[-1].save(
+               os.path.join(args.out_dir, cfg.name,
+                            f"png/{data['name']}_smpl.png")
+           )
 
-
+       norm_pred = (
            ((in_tensor["normal_F"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0)
            .detach()
            .cpu()
            .numpy()
            .astype(np.uint8)
        )
-
-       norm_pred_B = (
-           ((in_tensor["normal_B"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0)
-           .detach()
-           .cpu()
-           .numpy()
-           .astype(np.uint8)
-       )
 
-
-       norm_orig_B = unwrap(norm_pred_B, data)
-
+       norm_orig = unwrap(norm_pred, data)
        mask_orig = unwrap(
            np.repeat(
                data["mask"].permute(1, 2, 0).detach().cpu().numpy(), 3, axis=2
            ).astype(np.uint8),
            data,
        )
-
-       rgb_norm_B = blend_rgb_norm(data["ori_image"], norm_orig_B, mask_orig)
+       rgb_norm = blend_rgb_norm(data["ori_image"], norm_orig, mask_orig)
 
        Image.fromarray(
            np.concatenate(
-               [data["ori_image"].astype(np.uint8),
-       ).save(os.path.join(
+               [data["ori_image"].astype(np.uint8), rgb_norm], axis=1)
+       ).save(os.path.join(args.out_dir, cfg.name, f"png/{data['name']}_overlap.png"))
 
        smpl_obj = trimesh.Trimesh(
            in_tensor["smpl_verts"].detach().cpu()[0] *
@@ -314,24 +363,23 @@ def generate_model(in_path, model_type):
            process=False,
            maintains_order=True
        )
-       smpl_obj.visual.vertex_colors = (smpl_obj.vertex_normals+1.0)*255.0*0.5
-       smpl_obj.export(
-           f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.obj")
        smpl_obj.export(
-           f"{
+           f"{args.out_dir}/{cfg.name}/obj/{data['name']}_smpl.obj")
 
        smpl_info = {'betas': optimed_betas,
-                    'pose':
-                    'orient':
+                    'pose': optimed_pose,
+                    'orient': optimed_orient,
                     'trans': optimed_trans}
 
        np.save(
-           f"{
+           f"{args.out_dir}/{cfg.name}/obj/{data['name']}_smpl.npy", smpl_info, allow_pickle=True)
 
        # ------------------------------------------------------------------------------------------------------------------
 
        # cloth optimization
 
+       per_data_lst = []
+
        # cloth recon
        in_tensor.update(
            dataset.compute_vis_cmap(
@@ -356,15 +404,13 @@ def generate_model(in_path, model_type):
        recon_obj = trimesh.Trimesh(
            verts_pr, faces_pr, process=False, maintains_order=True
        )
-       recon_obj.visual.vertex_colors = (
-           recon_obj.vertex_normals+1.0)*255.0*0.5
        recon_obj.export(
-           os.path.join(
+           os.path.join(args.out_dir, cfg.name,
                         f"obj/{data['name']}_recon.obj")
        )
 
        # Isotropic Explicit Remeshing for better geometry topology
-       verts_refine, faces_refine = remesh(os.path.join(
+       verts_refine, faces_refine = remesh(os.path.join(args.out_dir, cfg.name,
                                            f"obj/{data['name']}_recon.obj"), 0.5, device)
 
        # define local_affine deform verts
@@ -380,16 +426,26 @@ def generate_model(in_path, model_type):
            factor=0.1,
            verbose=0,
            min_lr=1e-5,
-           patience=
+           patience=args.patience,
        )
 
+       with torch.no_grad():
+           per_loop_lst = []
+           rotate_recon_lst = dataset.render.get_rgb_image(cam_ids=[
+               0, 1, 2, 3])
+           per_loop_lst.extend(rotate_recon_lst)
+           per_data_lst.append(get_optim_grid_image(
+               per_loop_lst, None, type="cloth"))
+
        final = None
 
-       if
+       if args.loop_cloth > 0:
 
-           loop_cloth = tqdm(range(
+           loop_cloth = tqdm(range(args.loop_cloth))
 
-           for
+           for i in loop_cloth:
+
+               per_loop_lst = []
 
               optimizer_cloth.zero_grad()
@@ -426,67 +482,135 @@ def generate_model(in_path, model_type):
               loop_cloth.set_description(pbar_desc)
 
              # update params
-             cloth_loss.backward()
+             cloth_loss.backward(retain_graph=True)
             optimizer_cloth.step()
             scheduler_cloth.step(cloth_loss)
 
+            # for vis
+            with torch.no_grad():
+                if i % args.vis_freq == 0:
+
+                    rotate_recon_lst = dataset.render.get_rgb_image(cam_ids=[
+                        0, 1, 2, 3])
+
+                    per_loop_lst.extend(
+                        [
+                            in_tensor["image"],
+                            in_tensor["P_normal_F"],
+                            in_tensor["normal_F"],
+                            diff_F_cloth / 2.0,
+                        ]
+                    )
+                    per_loop_lst.extend(
+                        [
+                            in_tensor["image"],
+                            in_tensor["P_normal_B"],
+                            in_tensor["normal_B"],
+                            diff_B_cloth / 2.0,
+                        ]
+                    )
+                    per_loop_lst.extend(rotate_recon_lst)
+                    per_data_lst.append(
+                        get_optim_grid_image(
+                            per_loop_lst, None, type="cloth")
+                    )
+
+        # gif for optimization
+        per_data_lst[1].save(
+            os.path.join(
+                args.out_dir, cfg.name, f"refinement/{data['name']}_cloth.gif"
+            ),
+            save_all=True,
+            append_images=per_data_lst[2:],
+            duration=500,
+            loop=0,
+        )
+
+        if args.vis_freq == 1:
+            image2vid(
+                per_data_lst,
+                os.path.join(
+                    args.out_dir, cfg.name, f"refinement/{data['name']}_cloth.avi"
+                ),
+            )
+
        final = trimesh.Trimesh(
            mesh_pr.verts_packed().detach().squeeze(0).cpu(),
            mesh_pr.faces_packed().detach().squeeze(0).cpu(),
            process=False, maintains_order=True
        )
-
-       # only with front texture
-       tex_colors = query_color(
+       final_colors = query_color(
            mesh_pr.verts_packed().detach().squeeze(0).cpu(),
            mesh_pr.faces_packed().detach().squeeze(0).cpu(),
            in_tensor["image"],
            device=device,
        )
-
-       # full normal textures
-       norm_colors = (mesh_pr.verts_normals_padded().squeeze(
-           0).detach().cpu() + 1.0) * 0.5 * 255.0
-
-       final.visual.vertex_colors = tex_colors
-       final.export(
-           f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj")
-
-       final.visual.vertex_colors = norm_colors
+       final.visual.vertex_colors = final_colors
        final.export(
-           f"{
+           f"{args.out_dir}/{cfg.name}/obj/{data['name']}_refine.obj")
 
-       # always export visualized
-
-
-
-       # self-rotated video
-       dataset.render.load_meshes(
-           verts_lst, faces_lst)
-       dataset.render.get_rendered_video(
-           [data["ori_image"], rgb_norm_F, rgb_norm_B],
-           os.path.join(config_dict['out_dir'], cfg.name,
-                        f"vid/{data['name']}_cloth.mp4"),
-       )
+       # always export visualized png regardless of the cloth refinment
+       per_data_lst[-1].save(
+           os.path.join(args.out_dir, cfg.name,
+                        f"png/{data['name']}_cloth.png")
+       )
 
+       # always export visualized video regardless of the cloth refinment
+       if args.export_video:
+           if final is not None:
+               verts_lst = [verts_pr, final.vertices]
+               faces_lst = [faces_pr, final.faces]
+           else:
+               verts_lst = [verts_pr]
+               faces_lst = [faces_pr]
+
+           # self-rotated video
+           dataset.render.load_meshes(
+               verts_lst, faces_lst)
+           dataset.render.get_rendered_video(
+               [data["ori_image"], rgb_norm],
+               os.path.join(args.out_dir, cfg.name,
+                            f"vid/{data['name']}_cloth.mp4"),
+           )
+
+       # garment extraction from deepfashion images
+       if not (args.seg_dir is None):
+           if final is not None:
+               recon_obj = final.copy()
+
+           os.makedirs(os.path.join(
+               args.out_dir, cfg.name, "clothes"), exist_ok=True)
+           os.makedirs(os.path.join(args.out_dir, cfg.name,
+                                    "clothes", "info"), exist_ok=True)
+           for seg in data['segmentations']:
+               # These matrices work for PyMaf, not sure about the other hps type
+               K = np.array([[1.0000, 0.0000, 0.0000, 0.0000],
+                             [0.0000, 1.0000, 0.0000, 0.0000],
+                             [0.0000, 0.0000, -0.5000, 0.0000],
+                             [-0.0000, -0.0000, 0.5000, 1.0000]]).T
+
+               R = np.array([[-1., 0., 0.],
+                             [0., 1., 0.],
+                             [0., 0., -1.]])
+
+               t = np.array([[-0., -0., 100.]])
+               clothing_obj = extract_cloth(recon_obj, seg, K, R, t, smpl_obj)
+               if clothing_obj is not None:
+                   cloth_type = seg['type'].replace(' ', '_')
+                   cloth_info = {
+                       'betas': optimed_betas,
+                       'body_pose': optimed_pose,
+                       'global_orient': optimed_orient,
+                       'pose2rot': False,
+                       'clothing_type': cloth_type,
+                   }
+
+                   file_id = f"{data['name']}_{cloth_type}"
+                   with open(os.path.join(args.out_dir, cfg.name, "clothes", "info", f"{file_id}_info.pkl"), 'wb') as fp:
+                       pickle.dump(cloth_info, fp)
+
+                   clothing_obj.export(os.path.join(
+                       args.out_dir, cfg.name, "clothes", f"{file_id}.obj"))
+               else:
+                   print(
+                       f"Unable to extract clothing of type {seg['type']} from image {data['name']}")
assets/garment_teaser.png CHANGED (Git LFS)
assets/intermediate_results.png CHANGED (Git LFS)
assets/teaser.gif CHANGED (Git LFS)
assets/thumbnail.png DELETED (Git LFS)
configs/icon-filter.yaml
CHANGED
@@ -1,7 +1,7 @@
 name: icon-filter
 ckpt_dir: "./data/ckpt/"
-resume_path: "
-normal_path: "
+resume_path: "./data/ckpt/icon-filter.ckpt"
+normal_path: "./data/ckpt/normal.ckpt"
 
 test_mode: True
 batch_size: 1
configs/icon-nofilter.yaml
CHANGED
@@ -1,7 +1,7 @@
 name: icon-nofilter
 ckpt_dir: "./data/ckpt/"
-resume_path: "
-normal_path: "
+resume_path: "./data/ckpt/icon-nofilter.ckpt"
+normal_path: "./data/ckpt/normal.ckpt"
 
 test_mode: True
 batch_size: 1
configs/pamir.yaml
CHANGED
@@ -1,7 +1,7 @@
 name: pamir
 ckpt_dir: "./data/ckpt/"
-resume_path: "
-normal_path: "
+resume_path: "./data/ckpt/pamir.ckpt"
+normal_path: "./data/ckpt/normal.ckpt"
 
 test_mode: True
 batch_size: 1
configs/pifu.yaml
CHANGED
@@ -1,7 +1,7 @@
 name: pifu
 ckpt_dir: "./data/ckpt/"
-resume_path: "
-normal_path: "
+resume_path: "./data/ckpt/pifu.ckpt"
+normal_path: "./data/ckpt/normal.ckpt"
 
 test_mode: True
 batch_size: 1
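All four configs now resolve their checkpoints locally under `./data/ckpt/` (populated by the new `fetch_data.sh`). A quick illustrative pre-flight check before running inference:

```python
# Illustrative pre-flight check that the checkpoints referenced by the
# updated configs are present under ./data/ckpt/.
from pathlib import Path

for name in ["icon-filter.ckpt", "icon-nofilter.ckpt",
             "pamir.ckpt", "pifu.ckpt", "normal.ckpt"]:
    path = Path("./data/ckpt") / name
    print(f"{path}: {'ok' if path.exists() else 'MISSING'}")
```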
environment.yaml
ADDED
@@ -0,0 +1,16 @@
+name: icon
+channels:
+  - pytorch-lts
+  - nvidia
+  - conda-forge
+  - fvcore
+  - iopath
+  - bottler
+  - defaults
+dependencies:
+  - pytorch
+  - torchvision
+  - fvcore
+  - iopath
+  - nvidiacub
+  - pyembree
examples/22097467bffc92d4a5c4246f7d4edb75.png CHANGED (Git LFS)
examples/44c0f84c957b6b9bdf77662af5bb7078.png CHANGED (Git LFS)
examples/5a6a25963db2f667441d5076972c207c.png CHANGED (Git LFS)
examples/8da7ceb94669c2f65cbd28022e1f9876.png CHANGED (Git LFS)
examples/923d65f767c85a42212cae13fba3750b.png CHANGED (Git LFS)
examples/959c4c726a69901ce71b93a9242ed900.png ADDED
examples/c9856a2bc31846d684cbb965457fad59.png CHANGED (Git LFS)
examples/e1e7622af7074a022f5d96dc16672517.png CHANGED (Git LFS)
examples/fb9d20fdb93750584390599478ecf86e.png CHANGED (Git LFS)
examples/segmentation/003883.jpg
ADDED
examples/segmentation/003883.json
ADDED
@@ -0,0 +1,136 @@
+{
+  "item2": {
+    "segmentation": [
+      [
+        232.29572649572654, 34.447388414055126, 237.0364672364673,
+        40.57084520417861, 244.9377018043686, 47.089363722697165,
+        252.04881291547974, 49.65726495726508, 262.5179487179489,
+        51.43504273504287, 269.233998100665, 50.447388414055204,
+        277.5446343779678, 49.12725546058881, 285.64339981006657,
+        46.16429249762584, 294.9273504273506, 41.22602089268754,
+        299.9377967711301, 36.514245014245084, 304.67853751187084,
+        30.588319088319132, 306.0612535612536, 25.65004748338083,
+        307.64150047483383, 23.477207977207982, 311.19705603038943,
+        24.859924026590704, 317.12298195631536, 28.020417853751216,
+        323.04890788224134, 29.008072174738874, 331.34520417853764,
+        30.193257359924065, 339.4439696106365, 34.7364672364673,
+        346.75261158594515, 39.279677113010536, 350.11063627730323,
+        44.61301044634389, 355.00541310541314, 61.422317188983875,
+        358.9560303893638, 77.6198480531815, 362.1165242165243,
+        90.26182336182353, 364.88195631528976, 103.29886039886063,
+        367.6473884140552, 118.11367521367552, 369.42516619183294,
+        129.37293447293484, 369.2324786324788, 132.60550807217476,
+        365.6769230769232, 134.77834757834762, 359.15840455840464,
+        138.3339031339032, 353.43000949667623, 140.70427350427357,
+        351.4547008547009, 141.4943969610637, 351.25716999050337,
+        138.5314339981007, 351.05963912630585, 136.75365622032294,
+        345.7263057929725, 137.34624881291552, 337.8250712250712,
+        139.51908831908838, 331.5040835707502, 141.09933523266864,
+        324.7880341880341, 143.66723646723653, 322.2201329534662,
+        146.43266856600198, 322.2201329534662, 151.5684710351378,
+        323.0102564102563, 160.6548907882243, 324.95185185185176,
+        173.44615384615395, 325.34691358024685, 190.23627730294416,
+        325.93950617283946, 205.64368471035164, 325.93950617283946,
+        215.71775878442577, 325.93950617283946, 220.06343779677147,
+        322.7790123456789, 223.22393162393197, 315.0753086419752,
+        228.55726495726532, 309.34691358024673, 230.53257359924066,
+        290.1866096866098, 230.87929724596398, 263.91500474833805,
+        229.6941120607788, 236.45821462488112, 229.29905033238373,
+        218.48290598290572, 226.73114909781583, 202.65650522317188,
+        224.82811016144353, 197.71823361823357, 221.07502374169044,
+        195.15033238366567, 214.55650522317188, 195.74292497625825,
+        200.53181386514711, 197.125641025641, 180.5811965811964,
+        197.33285849952523, 164.68736942070285, 198.51804368471042,
+        154.21823361823365, 198.51804368471042, 138.61329534662863,
+        193.5797720797721, 136.4404558404558, 185.08594491927823,
+        133.08243114909774, 177.77730294396957, 128.73675213675205,
+        174.41927825261152, 128.53922127255453, 173.82668566001894,
+        133.2799620132953, 174.02421652421646, 136.24292497625825,
+        172.83903133903127, 137.03304843304838, 167.11063627730283,
+        134.86020892687554, 159.9995251661917, 130.51452991452985,
+        159.01187084520404, 129.1318138651471, 159.60446343779662,
+        123.60094966761622, 162.6012345679013, 111.57578347578357,
+        165.95925925925934, 98.53874643874646, 170.30493827160504,
+        82.7362773029439, 173.92307692307693, 70.05584045584048,
+        177.08357075023744, 54.84596391263053, 180.58129154795822,
+        41.73190883190885, 183.14919278252614, 34.423266856600165,
+        188.51623931623936, 30.279962013295354, 195.6273504273505,
+        25.539221272554588, 201.75080721747398, 22.971320037986676,
+        211.23228869895553, 22.37872744539408, 221.10883190883212,
+        20.996011396011355, 224.8619183285852, 20.996011396011355,
+        226.04710351377042, 23.56391263057927, 229.01006647673339,
+        30.279962013295354
+      ]
+    ],
+    "category_id": 1,
+    "category_name": "short sleeve top"
+  },
+  "item1": {
+    "segmentation": [
+      [
+        201.51804815682925, 224.7401022799914, 218.41555508203712,
+        227.23317707223518, 236.42109524824218, 228.89522693373104,
+        256.91971020669104, 229.44924355422967, 280.188408267633,
+        230.2802684849776, 296.53189857234224, 230.2802684849776,
+        313.7064138077994, 229.72625186447897, 315.32667803111013,
+        236.8076070743661, 317.8197528233539, 240.96273172810572,
+        318.65077775410185, 246.2258896228426, 321.4208608565949,
+        253.15109737907534, 322.8059024078415, 265.0624547197956,
+        324.74496057958663, 273.6497123375242, 325.9612827615598,
+        284.4076070743661, 325.40726614106114, 299.9200724483274,
+        324.29923290006394, 316.8175793735353, 322.0831664180694,
+        325.9588536117625, 320.16803750266354, 336.5366716386107,
+        316.0129128489239, 344.01589601534204, 315.18188791817596,
+        357.86631152780745, 312.4118048156829, 368.1156190070319,
+        308.5336884721926, 378.64193479650567, 306.31762199019806,
+        385.29013424248905, 305.76360536969946, 398.3095248242066,
+        305.48659705945016, 409.6668655444283, 304.94393777967184,
+        419.3418708715109, 302.7278712976774, 427.0981035584915,
+        301.3428297464308, 433.74630300447495, 301.3428297464308,
+        445.3806520349459, 300.5118048156829, 461.72414233965515,
+        299.89735776688684, 467.352311953974, 297.9582995951417,
+        477.60161943319844, 295.1882164926486, 491.7290432559132,
+        293.52616663115276, 497.2692094608994, 291.8641167696569,
+        503.36339228638417, 291.3101001491583, 510.8426166631155,
+        289.37104197741314, 513.8897080758579, 287.4433411463882,
+        519.2043682079693, 283.0112081823993, 519.7583848284679,
+        275.5319838056679, 519.4813765182186, 270.26882591093107,
+        518.096334966972, 265.8366929469421, 513.6642020029831,
+        263.62062646494763, 509.78608565949276, 264.7286597059449,
+        498.9827615597697, 265.2826763264435, 478.76115491157015,
+        266.1137012571914, 467.1268058810992, 266.1137012571914,
+        454.6614319198803, 264.17464308544623, 441.64204133816276,
+        263.06660984444903, 424.19051779245626, 261.5834221180482,
+        407.2581504368212, 259.92137225655233, 396.45482633709815,
+        257.1512891540592, 380.1113360323889, 257.42829746430857,
+        359.05870445344146, 256.8742808438099, 338.56008949499255,
+        256.8742808438099, 321.3855742595354, 254.10419774131685,
+        320.5545493287875, 251.05710632857443, 326.6487321542723,
+        249.39505646707858, 339.1141061154912, 249.11804815682927,
+        356.28862135094835, 248.28702322608135, 372.3551033454083,
+        245.23993181333896, 387.59056040912026, 243.5766673769444,
+        409.1404219049649, 241.91461751544855, 424.92989558917554,
+        240.52957596420202, 440.4423609631369, 238.86752610270617,
+        455.40080971659955, 238.86752610270617, 470.91327509056083,
+        238.31350948220754, 486.42574046452216, 238.81966759002768,
+        501.19639889196685, 239.6506925207756, 511.168698060942,
+        236.0495844875346, 515.6008310249309, 229.40138504155118,
+        519.4789473684212, 221.6451523545705, 520.3099722991692,
+        216.65900277008296, 517.2628808864267, 213.33490304709125,
+        509.50664819944615, 208.3487534626037, 491.50110803324105,
+        205.8556786703599, 475.1576177285318, 203.63961218836545,
+        460.75318559556774, 203.63961218836545, 443.3016620498613,
+        203.63961218836545, 421.9720221606645, 200.59252077562303,
+        415.60083102493036, 197.5052844662264, 406.9847858512679,
+        195.28921798423193, 392.0263370978052, 193.35015981248677,
+        370.97370551885774, 190.857085020243, 343.82689111442545,
+        187.8099936075006, 322.77425953547794, 187.0028979330919,
+        309.89237161730256, 186.17187300234397, 291.33281483059886,
+        188.11093117408916, 266.67907521841033, 191.15802258683155,
+        250.3355849137011, 196.69818879181773, 234.82311953973982
+      ]
+    ],
+    "category_id": 8,
+    "category_name": "trousers"
+  }
+}
examples/segmentation/028009.jpg
ADDED
examples/segmentation/028009.json
ADDED
@@ -0,0 +1,191 @@
+{
+  "item2": {
+    "segmentation": [
+      [
+        314.7474747474744, 204.84848484848482, 328.9696969696967,
+        209.7373737373737, 342.74747474747454, 211.95959595959593,
+        360.0808080808079, 211.07070707070704, 375.19191919191906,
+        210.18181818181816, 384.5252525252524, 207.07070707070704,
+        390.30303030303025, 204.84848484848482, 396.080808080808,
+        201.29292929292924, 402.3030303030303, 204.40404040404036,
+        412.969696969697, 203.9595959595959, 425.8585858585859,
+        206.18181818181813, 434.3030303030304, 211.95959595959593,
+        439.63636363636374, 223.0707070707071, 444.0808080808082,
+        234.18181818181824, 448.52525252525265, 250.62626262626276,
+        449.41414141414157, 260.848484848485, 452.08080808080825,
+        279.0707070707073, 456.08080808080825, 300.84848484848516,
+        457.858585858586, 308.40404040404076, 460.5252525252526,
+        315.7575757575756, 460.96969696969705, 329.97979797979787,
+        460.5252525252526, 345.9797979797979, 456.969696969697,
+        363.75757575757575, 453.41414141414145, 373.5353535353536,
+        450.3030303030303, 385.97979797979804, 447.1919191919192,
+        393.9797979797981, 443.6363636363636, 401.9797979797981,
+        438.3030303030303, 403.7575757575759, 433.85858585858585,
+        401.09090909090924, 430.7474747474747, 393.0909090909092,
+        426.7474747474747, 383.3131313131314, 424.9696969696969,
+        374.8686868686869, 424.9696969696969, 369.0909090909091,
+        423.63636363636357, 363.3131313131313, 423.63636363636357,
+        359.3131313131313, 423.63636363636357, 352.6464646464646,
+        420.9696969696969, 350.86868686868684, 422.74747474747466,
+        345.53535353535347, 422.74747474747466, 340.64646464646455,
+        422.74747474747466, 332.2020202020201, 421.8585858585858,
+        321.53535353535335, 418.74747474747466, 313.0909090909089,
+        416.5252525252524, 306.4242424242422, 412.9696969696969,
+        314.8686868686867, 410.3030303030302, 320.20202020202004,
+        411.6363636363635, 327.3131313131312, 414.74747474747466,
+        336.2020202020201, 418.74747474747466, 351.7575757575757,
+        420.9696969696969, 365.0909090909091, 423.1919191919191,
+        377.0909090909091, 423.1919191919191, 385.0909090909092,
+        424.5252525252525, 398.42424242424255, 396.0808080808079,
+        398.42424242424255, 374.7474747474745, 400.6464646464648,
+        354.7474747474744, 400.6464646464648, 331.6363636363632,
+        400.6464646464648, 313.41414141414094, 400.6464646464648,
+        305.4141414141409, 399.3131313131314, 297.4141414141409,
+        396.6464646464648, 284.525252525252, 396.2020202020203,
+        282.8686868686866, 391.59595959595964, 282.42424242424215,
+        373.81818181818176, 282.42424242424215, 358.26262626262616,
+        281.09090909090884, 334.70707070707056, 281.5353535353533,
+        313.37373737373713, 283.31313131313107, 297.3737373737371,
+        282.8686868686866, 283.1515151515148, 280.6464646464644,
+        266.7070707070703, 271.313131313131, 253.3737373737369,
+        264.6464646464643, 246.70707070707022, 257.5353535353532,
+        239.59595959595907, 249.9797979797976, 228.9292929292924,
+        242.42424242424204, 220.92929292929236, 233.17171717171723,
+        209.01010101010093, 225.1717171717172, 194.78787878787867,
+        222.06060606060606, 185.4545454545453, 224.2828282828283,
+        179.6767676767675, 230.0606060606061, 171.67676767676747,
+        232.72727272727278, 169.89898989898967, 243.83838383838392,
+        167.67676767676744, 256.2828282828284, 165.4545454545452,
+        274.06060606060623, 165.4545454545452, 291.8383838383841,
+        167.67676767676744, 302.5050505050508, 168.1212121212119,
+        310.94949494949526, 177.0101010101008, 314.0606060606064,
+        181.45454545454527, 314.94949494949526, 187.2323232323231,
+        312.7272727272731, 193.01010101010087, 307.8383838383842,
+        191.2323232323231, 302.94949494949526, 193.45454545454533,
+        292.727272727273, 193.45454545454533, 290.50505050505075,
+        195.67676767676755, 287.39393939393966, 197.45454545454533,
+        285.61616161616183, 197.45454545454533, 283.3939393939396,
+        193.89898989898978, 278.94949494949515, 197.45454545454533,
+        274.94949494949515, 199.67676767676755, 279.83838383838406,
+        201.45454545454535, 286.50505050505075, 201.45454545454535,
+        291.8383838383841, 201.8989898989898, 296.2828282828286,
+        202.7878787878787, 303.3939393939397, 202.34343434343424
+      ]
+    ],
+    "category_id": 2,
+    "category_name": "long sleeve top"
+  },
+  "item1": {
+    "segmentation": [
+      [
+        346.9494949494949, 660.6868686868687, 397.6161616161618,
+        661.5757575757576, 398.06060606060623, 674.0202020202021,
+        398.94949494949515, 691.3535353535356, 397.6161616161618,
+        710.0202020202022, 395.838383838384, 726.0202020202023,
+        393.1717171717173, 742.0202020202023, 346.9494949494949,
+        738.9090909090912, 346.50505050505046, 724.2424242424245,
+        347.3939393939394, 713.5757575757578, 348.72727272727275,
+        706.0202020202022, 349.17171717171715, 686.0202020202022,
+        348.72727272727275, 675.7979797979799, 347.3939393939394,
+        667.7979797979799
+      ],
+      [
+        283.71717171717165, 396.68686868686876, 289.9393939393939,
+        396.68686868686876, 303.27272727272725, 397.1313131313132,
+        312.16161616161617, 399.7979797979799, 334.3838383838385,
+        400.68686868686876, 351.7171717171719, 400.68686868686876,
+        361.93939393939417, 401.5757575757577, 376.60606060606085,
+        401.5757575757577, 390.82828282828314, 398.46464646464653,
+        410.3838383838388, 397.5757575757577, 425.0505050505055,
+        394.46464646464653, 431.71717171717216, 422.9090909090911,
+        434.38383838383885, 447.79797979798, 430.38383838383885,
+        478.0202020202024, 423.2727272727277, 507.79797979798025,
+        418.3838383838388, 530.0202020202025, 411.8787878787878,
+        557.3333333333333, 403.43434343434336, 590.6666666666666,
+        400.7676767676767, 611.5555555555557, 399.8787878787878,
+        619.1111111111112, 399.8787878787878, 630.6666666666669,
+        398.10101010101, 635.1111111111113, 399.43434343434336,
+        641.7777777777779, 399.43434343434336, 656.4444444444447,
+        398.10101010101, 662.666666666667, 347.4343434343432, 662.666666666667,
+        346.1010101010098, 637.7777777777779, 347.4343434343432,
+        610.6666666666667, 349.21212121212096, 576.4444444444445,
+        350.98989898989873, 556.4444444444443, 349.6565656565654,
+        541.3333333333331, 348.32323232323205, 535.9999999999998,
+        348.32323232323205, 523.5555555555553, 349.21212121212096,
+        505.33333333333303, 342.5454545454543, 511.5555555555553,
+        338.9898989898987, 516.8888888888887, 334.5454545454542,
+        523.5555555555553, 325.6565656565653, 543.111111111111,
+        319.87878787878753, 556.4444444444443, 314.1010101010097,
+        568.4444444444443, 307.8787878787875, 583.1111111111111,
+        300.3232323232319, 608.0000000000001, 298.10101010100965,
+        617.7777777777778, 298.5454545454541, 624.0000000000001,
+        295.43434343434296, 628.0000000000001, 293.2121212121208,
+        628.0000000000001, 293.6565656565652, 632.4444444444446,
+        291.43434343434296, 638.6666666666669, 290.54545454545405,
+        644.4444444444447, 292.3232323232319, 648.8888888888891,
+        303.8787878787875, 667.1111111111114, 313.65656565656525,
+        684.0000000000003, 319.87878787878753, 700.8888888888893,
+        322.54545454545416, 712.8888888888894, 324.323232323232,
+        720.0000000000005, 327.87878787878753, 731.5555555555561,
+        330.9898989898987, 738.6666666666672, 331.87878787878753,
+        743.1111111111117, 334.5454545454542, 745.7777777777783,
+        336.3232323232325, 749.1313131313133, 338.54545454545473,
+        754.0202020202022, 338.54545454545473, 757.5757575757577,
+        341.6565656565658, 760.6868686868688, 344.76767676767696,
+        767.3535353535356, 345.2121212121214, 770.9090909090911,
+        346.9898989898992, 754.0202020202022, 347.43434343434365,
+        738.909090909091, 393.2121212121216, 740.6868686868687,
+        389.65656565656604, 764.6868686868688, 386.5454545454549,
+        784.2424242424245, 384.3232323232327, 806.9090909090912,
+        382.54545454545485, 812.686868686869, 381.13131313131316,
+        818.7070707070708, 378.020202020202, 828.4848484848485,
+        375.35353535353534, 839.5959595959597, 374.9090909090909,
+        854.2626262626264, 373.1313131313131, 856.9292929292931,
+        376.24242424242425, 864.9292929292931, 372.24242424242425,
+        874.2626262626264, 366.4646464646464, 880.9292929292932,
+        357.13131313131305, 872.9292929292932, 345.13131313131305,
+        868.0404040404043, 337.131313131313, 867.1515151515154,
+        337.131313131313, 856.0404040404042, 338.4646464646463,
+        850.7070707070709, 336.2424242424241, 846.2626262626264,
+        335.3535353535352, 841.3737373737375, 338.4646464646463,
+        827.5959595959597, 342.0202020202019, 815.5959595959596,
+        344.6868686868686, 809.3737373737374, 344.6868686868686,
+        796.4848484848484, 344.6868686868686, 786.7070707070707,
+        346.0202020202019, 779.151515151515, 344.24242424242414,
+        776.0404040404039, 343.3535353535352, 786.2626262626262,
+        342.0202020202019, 796.0404040404039, 338.90909090909076,
+        801.8181818181818, 333.57575757575745, 809.3737373737374,
+        326.02020202020185, 813.8181818181819, 320.242424242424,
+        812.4848484848485, 318.02020202020185, 810.7070707070707,
+        317.13131313131294, 807.1515151515151, 315.79797979797956,
+        803.5959595959596, 313.57575757575734, 799.5959595959596,
+        311.3535353535351, 793.8181818181818, 306.90909090909065,
+        791.1515151515151, 305.57575757575734, 787.5959595959595,
+        304.242424242424, 782.7070707070706, 302.02020202020174,
+        776.4848484848484, 298.90909090909065, 773.8181818181816,
+        294.90909090909065, 771.151515151515, 290.34343434343435,
+        758.909090909091, 284.5656565656566, 742.020202020202,
+        278.78787878787875, 729.5757575757575, 270.3434343434343,
+        713.131313131313, 257.8989898989898, 689.1313131313129,
+        247.2323232323231, 669.1313131313128, 239.23232323232307,
+        657.5757575757573, 233.89898989898973, 642.9090909090905,
+        233.0101010101008, 634.0202020202016, 233.45454545454527,
+        630.0202020202016, 235.23232323232304, 611.7979797979793,
+        241.93939393939402, 583.0707070707073, 245.93939393939405,
+        567.5151515151516, 251.2727272727274, 540.4040404040404,
+        256.1616161616163, 518.6262626262626, 260.60606060606074,
+        501.2929292929292, 263.7171717171719, 493.7373737373736,
+        268.16161616161634, 481.73737373737356, 270.38383838383857,
+        469.73737373737356, 272.6060606060608, 462.18181818181796,
+        276.1616161616164, 457.7373737373735, 276.1616161616164,
+        454.1818181818179, 277.05050505050525, 450.1818181818179,
+        278.828282828283, 433.292929292929, 278.3838383838386,
+        419.0707070707067, 278.828282828283, 417.29292929292893,
+        281.0505050505053, 414.1818181818178, 281.93939393939417,
+        404.8484848484844, 283.71717171717194, 401.2929292929289
+      ]
+    ],
+    "category_id": 8,
+    "category_name": "trousers"
+  }
+}
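Note: each item in these annotation files stores its mask as one or more flat [x0, y0, x1, y1, ...] polygon lists plus a category id/name (the labels appear to follow DeepFashion2's taxonomy). A minimal sketch for reading them back into (N, 2) polygon arrays; pairing the flat values into x/y coordinates is assumed from the layout above:

    import json
    import numpy as np

    with open("examples/segmentation/028009.json") as f:
        anno = json.load(f)

    for item_key, item in anno.items():
        # each "segmentation" entry is a flat x/y list -> reshape to (N, 2)
        polygons = [np.array(seg).reshape(-1, 2) for seg in item["segmentation"]]
        print(item_key, item["category_id"], item["category_name"],
              [p.shape for p in polygons])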
examples/slack_trial2-000150.png
ADDED
fetch_data.sh
ADDED
@@ -0,0 +1,60 @@
+#!/bin/bash
+urle () { [[ "${1}" ]] || return 1; local LANG=C i x; for (( i = 0; i < ${#1}; i++ )); do x="${1:i:1}"; [[ "${x}" == [a-zA-Z0-9.~-] ]] && echo -n "${x}" || printf '%%%02X' "'${x}"; done; echo; }
+
+mkdir -p data/smpl_related/models
+
+# username and password input
+echo -e "\nYou need to register at https://icon.is.tue.mpg.de/, according to Installation Instruction."
+read -p "Username (ICON):" username
+read -p "Password (ICON):" password
+username=$(urle $username)
+password=$(urle $password)
+
+# SMPL (Male, Female)
+echo -e "\nDownloading SMPL..."
+wget --post-data "username=$username&password=$password" 'https://download.is.tue.mpg.de/download.php?domain=smpl&sfile=SMPL_python_v.1.0.0.zip&resume=1' -O './data/smpl_related/models/SMPL_python_v.1.0.0.zip' --no-check-certificate --continue
+unzip data/smpl_related/models/SMPL_python_v.1.0.0.zip -d data/smpl_related/models
+mv data/smpl_related/models/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl data/smpl_related/models/smpl/SMPL_FEMALE.pkl
+mv data/smpl_related/models/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl data/smpl_related/models/smpl/SMPL_MALE.pkl
+cd data/smpl_related/models
+rm -rf *.zip __MACOSX smpl/models smpl/smpl_webuser
+cd ../../..
+
+# SMPL (Neutral, from SMPLIFY)
+echo -e "\nDownloading SMPLify..."
+wget --post-data "username=$username&password=$password" 'https://download.is.tue.mpg.de/download.php?domain=smplify&sfile=mpips_smplify_public_v2.zip&resume=1' -O './data/smpl_related/models/mpips_smplify_public_v2.zip' --no-check-certificate --continue
+unzip data/smpl_related/models/mpips_smplify_public_v2.zip -d data/smpl_related/models
+mv data/smpl_related/models/smplify_public/code/models/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl data/smpl_related/models/smpl/SMPL_NEUTRAL.pkl
+cd data/smpl_related/models
+rm -rf *.zip smplify_public
+cd ../../..
+
+# ICON
+echo -e "\nDownloading ICON..."
+wget --post-data "username=$username&password=$password" 'https://download.is.tue.mpg.de/download.php?domain=icon&sfile=icon_data.zip&resume=1' -O './data/icon_data.zip' --no-check-certificate --continue
+cd data && unzip icon_data.zip
+mv smpl_data smpl_related/
+rm -f icon_data.zip
+cd ..
+
+function download_for_training () {
+
+	# SMPL-X (optional)
+	echo -e "\nDownloading SMPL-X..."
+	wget --post-data "username=$1&password=$2" 'https://download.is.tue.mpg.de/download.php?domain=smplx&sfile=models_smplx_v1_1.zip&resume=1' -O './data/smpl_related/models/models_smplx_v1_1.zip' --no-check-certificate --continue
+	unzip data/smpl_related/models/models_smplx_v1_1.zip -d data/smpl_related
+	rm -f data/smpl_related/models/models_smplx_v1_1.zip
+
+	# SMIL (optional)
+	echo -e "\nDownloading SMIL..."
+	wget --post-data "username=$1&password=$2" 'https://download.is.tue.mpg.de/download.php?domain=agora&sfile=smpl_kid_template.npy&resume=1' -O './data/smpl_related/models/smpl/smpl_kid_template.npy' --no-check-certificate --continue
+	wget --post-data "username=$1&password=$2" 'https://download.is.tue.mpg.de/download.php?domain=agora&sfile=smplx_kid_template.npy&resume=1' -O './data/smpl_related/models/smplx/smplx_kid_template.npy' --no-check-certificate --continue
+}
+
+
+read -p "(optional) Download models used for training (y/n)?" choice
+case "$choice" in
+	y|Y ) download_for_training $username $password;;
+	n|N ) echo "Great job! Try the demo for now!";;
+	* ) echo "Invalid input! Please use y|Y or n|N";;
+esac
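Note: the `urle` helper above percent-encodes the credentials before they are POSTed to the download endpoints. A Python sketch of the same encoding (urllib's quote additionally leaves `_` unescaped, a minor difference from the shell whitelist):

    from urllib.parse import quote

    def urle(s: str) -> str:
        # keep only the unreserved characters the shell version whitelists
        return quote(s, safe=".~-")

    print(urle("user@example.com"))  # -> user%40example.com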
install.sh
ADDED
@@ -0,0 +1,16 @@
+# # conda installation
+# wget https://repo.anaconda.com/miniconda/Miniconda3-py38_4.10.3-Linux-x86_64.sh
+# chmod +x Miniconda3-py38_4.10.3-Linux-x86_64.sh
+# bash Miniconda3-py38_4.10.3-Linux-x86_64.sh -b -f -p /home/user/.local
+# rm Miniconda3-py38_4.10.3-Linux-x86_64.sh
+# conda config --env --set always_yes true
+# conda update -n base -c defaults conda -y
+
+# # conda environment setup
+# conda env create -f environment.yaml
+# conda init bash
+# source /home/user/.bashrc
+# source activate icon
+nvidia-smi
+pip install torch==1.8.2 torchvision==0.9.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111
+pip install -r requirement.txt
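Note: a quick way to confirm that the pinned LTS wheels installed correctly is to check the reported versions and CUDA visibility (a sanity check, not part of the repo):

    import torch
    import torchvision

    # expect 1.8.2+cu111 / 0.9.2+cu111 on a CUDA 11.1 machine
    print(torch.__version__, torchvision.__version__)
    print("CUDA available:", torch.cuda.is_available())
    if torch.cuda.is_available():
        print("Device:", torch.cuda.get_device_name(0))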
lib/common/render.py
CHANGED
@@ -33,15 +33,14 @@ from pytorch3d.renderer import (
 )
 from pytorch3d.renderer.mesh import TexturesVertex
 from pytorch3d.structures import Meshes
-
-import os, subprocess
-
 from lib.dataset.mesh_util import SMPLX, get_visibility
+
 import lib.common.render_utils as util
 import torch
 import numpy as np
 from PIL import Image
 from tqdm import tqdm
+import os
 import cv2
 import math
 from termcolor import colored
@@ -327,10 +326,8 @@ class Render:

     def get_rendered_video(self, images, save_path):

-        tmp_path = save_path.replace('cloth', 'tmp')
-
         self.cam_pos = []
-        for angle in range(
+        for angle in range(360):
             self.cam_pos.append(
                 (
                     100.0 * math.cos(np.pi / 180 * angle),
@@ -345,7 +342,7 @@

         fourcc = cv2.VideoWriter_fourcc(*"mp4v")
         video = cv2.VideoWriter(
-
+            save_path, fourcc, 30, (self.size * len(self.meshes) +
                                     new_shape[1] * len(images), self.size)
         )
@@ -375,8 +372,6 @@
             video.write(final_img)

         video.release()
-
-        os.system(f'ffmpeg -y -loglevel quiet -stats -i {tmp_path} -c:v libx264 {save_path}')

     def get_silhouette_image(self, cam_ids=[0, 2]):
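Note: the rewritten get_rendered_video writes frames straight to the target mp4 with cv2.VideoWriter instead of going through a tmp file plus an ffmpeg re-encode. A self-contained sketch of that pattern with synthetic frames (size and fps here are illustrative):

    import cv2
    import numpy as np

    size = 512
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    # frame size is given as (width, height); fps=30 as in the diff
    video = cv2.VideoWriter("turntable.mp4", fourcc, 30, (size, size))
    for angle in range(360):
        frame = np.zeros((size, size, 3), dtype=np.uint8)
        cv2.putText(frame, f"angle={angle}", (20, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
        video.write(frame)
    video.release()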
lib/common/train_util.py
CHANGED
@@ -32,8 +32,6 @@ import os
 from termcolor import colored


-
-
 def reshape_sample_tensor(sample_tensor, num_views):
     if num_views == 1:
         return sample_tensor
lib/dataloader_demo.py
ADDED
@@ -0,0 +1,58 @@
+import argparse
+from lib.common.config import get_cfg_defaults
+from lib.dataset.PIFuDataset import PIFuDataset
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-v',
+                        '--show',
+                        action='store_true',
+                        help='visualize 3D sampling')
+    parser.add_argument('-s',
+                        '--speed',
+                        action='store_true',
+                        help='benchmark the dataloading speed')
+    parser.add_argument('-l',
+                        '--list',
+                        action='store_true',
+                        help='list the keys and shapes of one sample')
+    parser.add_argument('-c',
+                        '--config',
+                        default='./configs/train/icon-filter.yaml',
+                        help='path to the training config file')
+    args_c = parser.parse_args()
+
+    args = get_cfg_defaults()
+    args.merge_from_file(args_c.config)
+
+    dataset = PIFuDataset(args, split='train', vis=args_c.show)
+    print(f"Number of subjects: {len(dataset.subject_list)}")
+    data_dict = dataset[0]
+
+    if args_c.list:
+        for k in data_dict.keys():
+            if not hasattr(data_dict[k], "shape"):
+                print(f"{k}: {data_dict[k]}")
+            else:
+                print(f"{k}: {data_dict[k].shape}")
+
+    if args_c.show:
+        # for item in dataset:
+        item = dataset[0]
+        dataset.visualize_sampling3D(item, mode='occ')
+
+    if args_c.speed:
+        # original: 2 it/s
+        # smpl online compute: 2 it/s
+        # normal online compute: 1.5 it/s
+        from tqdm import tqdm
+        for item in tqdm(dataset):
+            for k in item.keys():
+                if 'voxel' in k:
+                    if not hasattr(item[k], "shape"):
+                        print(f"{k}: {item[k]}")
+                    else:
+                        print(f"{k}: {item[k].shape}")
+            print("--------------------")
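Note: the script relies on the defaults/file/list override chain of a yacs-style config (get_cfg_defaults in lib.common.config presumably returns a yacs CfgNode). A minimal sketch of that pattern with made-up keys:

    from yacs.config import CfgNode as CN

    _C = CN()
    _C.mcube_res = 256      # illustrative keys, not the repo's actual schema
    _C.clean_mesh = True

    cfg = _C.clone()
    # cfg.merge_from_file("some_config.yaml")  # yaml keys must exist in _C
    cfg.merge_from_list(["mcube_res", 512, "clean_mesh", False])
    print(cfg.mcube_res, cfg.clean_mesh)  # 512 False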
lib/dataset/Evaluator.py
CHANGED
@@ -15,11 +15,11 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-
 from lib.renderer.gl.normal_render import NormalRender
 from lib.dataset.mesh_util import projection
 from lib.common.render import Render
 from PIL import Image
+import os
 import numpy as np
 import torch
 from torch import nn
lib/dataset/PIFuDataset.py
CHANGED
@@ -9,12 +9,12 @@ import os.path as osp
 import numpy as np
 from PIL import Image
 import random
-import os
 import trimesh
 import torch
+import vedo
 from kaolin.ops.mesh import check_sign
 import torchvision.transforms as transforms
-from
+from ipdb import set_trace


 class PIFuDataset():
@@ -343,9 +343,9 @@ class PIFuDataset():
             torch.as_tensor(smpl_param['full_pose'][0])).numpy()
         smpl_betas = smpl_param["betas"]

-        smpl_path =
-        tetra_path =
-                              "tetra_male_adult_smpl.npz")
+        smpl_path = osp.join(self.smplx.model_dir, "smpl/SMPL_MALE.pkl")
+        tetra_path = osp.join(self.smplx.tedra_dir,
+                              "tetra_male_adult_smpl.npz")

         smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult')

@@ -365,7 +365,7 @@ class PIFuDataset():
         verts = (np.concatenate([smpl_model.verts, smpl_model.verts_added],
                                 axis=0) * smplx_param["scale"] + smplx_param["translation"]
                  ) * self.datasets_dict[data_dict['dataset']]['scale']
-        faces = np.loadtxt(
+        faces = np.loadtxt(osp.join(self.smplx.tedra_dir, "tetrahedrons_male_adult.txt"),
                            dtype=np.int32) - 1

         pad_v_num = int(8000 - verts.shape[0])
@@ -586,4 +586,77 @@ class PIFuDataset():
         labels = torch.from_numpy(labels).float()
         normals = torch.from_numpy(normals).float()

         return {'samples_geo': samples, 'labels_geo': labels}
+
+    def visualize_sampling3D(self, data_dict, mode='vis'):
+
+        # create plot
+        vp = vedo.Plotter(title="", size=(1500, 1500), axes=0, bg='white')
+        vis_list = []
+
+        assert mode in ['vis', 'sdf', 'normal', 'cmap', 'occ']
+
+        # sdf-1 cmap-3 norm-3 vis-1
+        if mode == 'vis':
+            labels = data_dict[f'smpl_feat'][:, [-1]]  # visibility
+            colors = np.concatenate([labels, labels, labels], axis=1)
+        elif mode == 'occ':
+            labels = data_dict[f'labels_geo'][..., None]  # occupancy
+            colors = np.concatenate([labels, labels, labels], axis=1)
+        elif mode == 'sdf':
+            labels = data_dict[f'smpl_feat'][:, [0]]  # sdf
+            labels -= labels.min()
+            labels /= labels.max()
+            colors = np.concatenate([labels, labels, labels], axis=1)
+        elif mode == 'normal':
+            labels = data_dict[f'smpl_feat'][:, -4:-1]  # normal
+            colors = (labels + 1.0) * 0.5
+        elif mode == 'cmap':
+            labels = data_dict[f'smpl_feat'][:, -7:-4]  # colormap
+            colors = np.array(labels)
+
+        points = projection(data_dict['samples_geo'], data_dict['calib'])
+        verts = projection(data_dict['verts'], data_dict['calib'])
+        points[:, 1] *= -1
+        verts[:, 1] *= -1
+
+        # create a mesh
+        mesh = trimesh.Trimesh(verts, data_dict['faces'], process=True)
+        mesh.visual.vertex_colors = [128.0, 128.0, 128.0, 255.0]
+        vis_list.append(mesh)
+
+        if 'voxel_verts' in data_dict.keys():
+            print(colored("voxel verts", "green"))
+            voxel_verts = data_dict['voxel_verts'] * 2.0
+            voxel_faces = data_dict['voxel_faces']
+            voxel_verts[:, 1] *= -1
+            voxel = trimesh.Trimesh(
+                voxel_verts, voxel_faces[:, [0, 2, 1]], process=False, maintain_order=True)
+            voxel.visual.vertex_colors = [0.0, 128.0, 0.0, 255.0]
+            vis_list.append(voxel)
+
+        if 'smpl_verts' in data_dict.keys():
+            print(colored("smpl verts", "green"))
+            smplx_verts = data_dict['smpl_verts']
+            smplx_faces = data_dict['smpl_faces']
+            smplx_verts[:, 1] *= -1
+            smplx = trimesh.Trimesh(
+                smplx_verts, smplx_faces[:, [0, 2, 1]], process=False, maintain_order=True)
+            smplx.visual.vertex_colors = [128.0, 128.0, 0.0, 255.0]
+            vis_list.append(smplx)
+
+        # create a picture
+        img_pos = [1.0, 0.0, -1.0]
+        for img_id, img_key in enumerate(['normal_F', 'image', 'T_normal_B']):
+            image_arr = (data_dict[img_key].detach().cpu().permute(
+                1, 2, 0).numpy() + 1.0) * 0.5 * 255.0
+            image_dim = image_arr.shape[0]
+            image = vedo.Picture(image_arr).scale(
+                2.0 / image_dim).pos(-1.0, -1.0, img_pos[img_id])
+            vis_list.append(image)
+
+        # create a pointcloud
+        pc = vedo.Points(points, r=15, c=np.float32(colors))
+        vis_list.append(pc)
+
+        vp.show(*vis_list, bg="white", axes=1.0, interactive=True)
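Note: visualize_sampling3D drives vedo with per-point RGB colors built from the chosen label channel. A stripped-down sketch of that vedo usage with random stand-in data:

    import numpy as np
    import vedo

    points = np.random.uniform(-1.0, 1.0, (5000, 3))
    labels = np.random.uniform(0.0, 1.0, (5000, 1))   # e.g. occupancy in [0, 1]
    colors = np.concatenate([labels, labels, labels], axis=1)  # grey-scale RGB

    vp = vedo.Plotter(title="", size=(800, 800), axes=1, bg="white")
    pc = vedo.Points(points, r=10, c=np.float32(colors))
    vp.show(pc, interactive=True)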
lib/dataset/TestDataset.py
CHANGED
@@ -15,9 +15,7 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-import
-
-import lib.smplx as smplx
+import smplx
 from lib.pymaf.utils.geometry import rotation_matrix_to_angle_axis, batch_rodrigues
 from lib.pymaf.utils.imutils import process_image
 from lib.pymaf.core import path_config
@@ -27,12 +25,14 @@ from lib.common.render import Render
 from lib.dataset.body_model import TetraSMPLModel
 from lib.dataset.mesh_util import get_visibility, SMPLX
 import os.path as osp
+import os
 import torch
+import glob
 import numpy as np
 import random
+import human_det
 from termcolor import colored
 from PIL import ImageFile
-from huggingface_hub import cached_download

 ImageFile.LOAD_TRUNCATED_IMAGES = True

@@ -42,7 +42,7 @@ class TestDataset():

         random.seed(1993)

-        self.
+        self.image_dir = cfg['image_dir']
         self.seg_dir = cfg['seg_dir']
         self.has_det = cfg['has_det']
         self.hps_type = cfg['hps_type']
@@ -51,7 +51,19 @@ class TestDataset():

         self.device = device

-
+        if self.has_det:
+            self.det = human_det.Detection()
+        else:
+            self.det = None
+
+        keep_lst = sorted(glob.glob(f"{self.image_dir}/*"))
+        img_fmts = ['jpg', 'png', 'jpeg', "JPG", 'bmp']
+        keep_lst = [
+            item for item in keep_lst if item.split(".")[-1] in img_fmts
+        ]
+
+        self.subject_list = sorted(
+            [item for item in keep_lst if item.split(".")[-1] in img_fmts])

         # smpl related
         self.smpl_data = SMPLX()
@@ -100,9 +112,9 @@ class TestDataset():
     def compute_voxel_verts(self, body_pose, global_orient, betas, trans,
                             scale):

-        smpl_path =
-        tetra_path =
-                              'tetra_neutral_adult_smpl.npz')
+        smpl_path = osp.join(self.smpl_data.model_dir, "smpl/SMPL_NEUTRAL.pkl")
+        tetra_path = osp.join(self.smpl_data.tedra_dir,
+                              'tetra_neutral_adult_smpl.npz')
         smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult')

         pose = torch.cat([global_orient[0], body_pose[0]], dim=0)
@@ -112,8 +124,8 @@ class TestDataset():
         verts = np.concatenate(
             [smpl_model.verts, smpl_model.verts_added],
             axis=0) * scale.item() + trans.detach().cpu().numpy()
-        faces = np.loadtxt(
-                           'tetrahedrons_neutral_adult.txt'),
+        faces = np.loadtxt(osp.join(self.smpl_data.tedra_dir,
+                                    'tetrahedrons_neutral_adult.txt'),
                            dtype=np.int32) - 1

         pad_v_num = int(8000 - verts.shape[0])
@@ -148,7 +160,7 @@ class TestDataset():

         if self.seg_dir is None:
             img_icon, img_hps, img_ori, img_mask, uncrop_param = process_image(
-                img_path, self.hps_type, 512, self.device)
+                img_path, self.det, self.hps_type, 512, self.device)

             data_dict = {
                 'name': img_name,
@@ -160,7 +172,7 @@ class TestDataset():

         else:
             img_icon, img_hps, img_ori, img_mask, uncrop_param, segmentations = process_image(
-                img_path, self.hps_type, 512, self.device,
+                img_path, self.det, self.hps_type, 512, self.device,
                 seg_path=os.path.join(self.seg_dir, f'{img_name}.json'))
             data_dict = {
                 'name': img_name,
@@ -233,11 +245,6 @@ class TestDataset():
         # body_pose - [1, 23, 3, 3] / [1, 21, 3, 3]
         # global_orient - [1, 1, 3, 3]
         # smpl_verts - [1, 6890, 3] / [1, 10475, 3]
-
-        # from rot_mat to rot_6d for better optimization
-        N_body = data_dict["body_pose"].shape[1]
-        data_dict["body_pose"] = data_dict["body_pose"][:, :, :, :2].reshape(1, N_body,-1)
-        data_dict["global_orient"] = data_dict["global_orient"][:, :, :, :2].reshape(1, 1,-1)

         return data_dict

@@ -252,3 +259,84 @@ class TestDataset():
         # render optimized mesh (normal, T_normal, image [-1,1])
         self.render.load_meshes(verts, faces)
         return self.render.get_depth_map(cam_ids=[0, 2])
+
+    def visualize_alignment(self, data):
+
+        import vedo
+        import trimesh
+
+        if self.hps_type != 'pixie':
+            smpl_out = self.smpl_model(betas=data['betas'],
+                                       body_pose=data['body_pose'],
+                                       global_orient=data['global_orient'],
+                                       pose2rot=False)
+            smpl_verts = (
+                (smpl_out.vertices + data['trans']) * data['scale']).detach().cpu().numpy()[0]
+        else:
+            smpl_verts, _, _ = self.smpl_model(shape_params=data['betas'],
+                                               expression_params=data['exp'],
+                                               body_pose=data['body_pose'],
+                                               global_pose=data['global_orient'],
+                                               jaw_pose=data['jaw_pose'],
+                                               left_hand_pose=data['left_hand_pose'],
+                                               right_hand_pose=data['right_hand_pose'])
+
+            smpl_verts = (
+                (smpl_verts + data['trans']) * data['scale']).detach().cpu().numpy()[0]
+
+        smpl_verts *= np.array([1.0, -1.0, -1.0])
+        faces = data['smpl_faces'][0].detach().cpu().numpy()
+
+        image_P = data['image']
+        image_F, image_B = self.render_normal(smpl_verts, faces)
+
+        # create plot
+        vp = vedo.Plotter(title="", size=(1500, 1500))
+        vis_list = []
+
+        image_F = (
+            0.5 * (1.0 + image_F[0].permute(1, 2, 0).detach().cpu().numpy()) * 255.0)
+        image_B = (
+            0.5 * (1.0 + image_B[0].permute(1, 2, 0).detach().cpu().numpy()) * 255.0)
+        image_P = (
+            0.5 * (1.0 + image_P[0].permute(1, 2, 0).detach().cpu().numpy()) * 255.0)
+
+        vis_list.append(vedo.Picture(image_P*0.5+image_F *
+                                     0.5).scale(2.0/image_P.shape[0]).pos(-1.0, -1.0, 1.0))
+        vis_list.append(vedo.Picture(image_F).scale(
+            2.0/image_F.shape[0]).pos(-1.0, -1.0, -0.5))
+        vis_list.append(vedo.Picture(image_B).scale(
+            2.0/image_B.shape[0]).pos(-1.0, -1.0, -1.0))
+
+        # create a mesh
+        mesh = trimesh.Trimesh(smpl_verts, faces, process=False)
+        mesh.visual.vertex_colors = [200, 200, 0]
+        vis_list.append(mesh)
+
+        vp.show(*vis_list, bg="white", axes=1, interactive=True)
+
+
+if __name__ == '__main__':
+
+    cfg.merge_from_file("./configs/icon-filter.yaml")
+    cfg.merge_from_file('./lib/pymaf/configs/pymaf_config.yaml')
+
+    cfg_show_list = [
+        'test_gpus', ['0'], 'mcube_res', 512, 'clean_mesh', False
+    ]
+
+    cfg.merge_from_list(cfg_show_list)
+    cfg.freeze()
+
+    os.environ['CUDA_VISIBLE_DEVICES'] = "0"
+    device = torch.device('cuda:0')
+
+    dataset = TestDataset(
+        {
+            'image_dir': "./examples",
+            'has_det': True,  # w/ or w/o detection
+            'hps_type': 'bev'  # pymaf/pare/pixie/hybrik/bev
+        }, device)
+
+    for i in range(len(dataset)):
+        dataset.visualize_alignment(dataset[i])
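Note: in the new constructor, keep_lst is filtered by extension and then the identical filter is re-applied inside sorted(); one pass suffices. An equivalent sketch (image_dir assumed to be the demo folder):

    import glob

    image_dir = "./examples"
    img_fmts = ["jpg", "png", "jpeg", "JPG", "bmp"]
    subject_list = sorted(
        item for item in glob.glob(f"{image_dir}/*")
        if item.split(".")[-1] in img_fmts
    )
    print(len(subject_list), "images found")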
lib/dataset/mesh_util.py
CHANGED
@@ -17,17 +17,18 @@

 import numpy as np
 import cv2
+import pymeshlab
 import torch
 import torchvision
 import trimesh
 from pytorch3d.io import load_obj
+import os
 from termcolor import colored
+import os.path as osp
 from scipy.spatial import cKDTree

 from pytorch3d.structures import Meshes
 import torch.nn.functional as F
-
-import os
 from lib.pymaf.utils.imutils import uncrop
 from lib.common.render_utils import Pytorch3dRasterizer, face_vertices

@@ -41,24 +42,6 @@ from pytorch3d.loss import (
     mesh_normal_consistency
 )

-from huggingface_hub import hf_hub_download, hf_hub_url, cached_download
-
-def rot6d_to_rotmat(x):
-    """Convert 6D rotation representation to 3x3 rotation matrix.
-    Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
-    Input:
-        (B,6) Batch of 6-D rotation representations
-    Output:
-        (B,3,3) Batch of corresponding rotation matrices
-    """
-    x = x.view(-1, 3, 2)
-    a1 = x[:, :, 0]
-    a2 = x[:, :, 1]
-    b1 = F.normalize(a1)
-    b2 = F.normalize(a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1)
-    b3 = torch.cross(b1, b2)
-    return torch.stack((b1, b2, b3), dim=-1)
-

 def tensor2variable(tensor, device):
     # [1,23,3,3]
@@ -137,18 +120,32 @@ def mesh_edge_loss(meshes, target_length: float = 0.0):


 def remesh(obj_path, perc, device):
-
-
-
-
-
-
-
-
+
+    ms = pymeshlab.MeshSet()
+    ms.load_new_mesh(obj_path)
+    ms.laplacian_smooth()
+    ms.remeshing_isotropic_explicit_remeshing(
+        targetlen=pymeshlab.Percentage(perc), adaptive=True)
+    ms.save_current_mesh(obj_path.replace("recon", "remesh"))
+    polished_mesh = trimesh.load_mesh(obj_path.replace("recon", "remesh"))
+    verts_pr = torch.tensor(polished_mesh.vertices).float().unsqueeze(0).to(device)
+    faces_pr = torch.tensor(polished_mesh.faces).long().unsqueeze(0).to(device)

     return verts_pr, faces_pr


+def possion(mesh, obj_path):
+
+    mesh.export(obj_path)
+    ms = pymeshlab.MeshSet()
+    ms.load_new_mesh(obj_path)
+    ms.surface_reconstruction_screened_poisson(depth=10)
+    ms.set_current_mesh(1)
+    ms.save_current_mesh(obj_path)
+
+    return trimesh.load(obj_path)
+
+
 def get_mask(tensor, dim):

     mask = torch.abs(tensor).sum(dim=dim, keepdims=True) > 0.0
@@ -208,31 +205,33 @@ def load_checkpoint(model, cfg):

     device = torch.device(f"cuda:{cfg['test_gpus'][0]}")

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if os.path.exists(cfg.resume_path) and cfg.resume_path.endswith("ckpt"):
+        main_dict = torch.load(cfg.resume_path,
+                               map_location=device)['state_dict']
+
+        main_dict = {
+            k: v
+            for k, v in main_dict.items()
+            if k in model_dict and v.shape == model_dict[k].shape and (
+                'reconEngine' not in k) and ("normal_filter" not in k) and (
+                    'voxelization' not in k)
+        }
+        print(colored(f"Resume MLP weights from {cfg.resume_path}", 'green'))
+
+    if os.path.exists(cfg.normal_path) and cfg.normal_path.endswith("ckpt"):
+        normal_dict = torch.load(cfg.normal_path,
+                                 map_location=device)['state_dict']
+
+        for key in normal_dict.keys():
+            normal_dict = rename(normal_dict, key,
+                                 key.replace("netG", "netG.normal_filter"))
+
+        normal_dict = {
+            k: v
+            for k, v in normal_dict.items()
+            if k in model_dict and v.shape == model_dict[k].shape
+        }
+        print(colored(f"Resume normal model from {cfg.normal_path}", 'green'))

     model_dict.update(main_dict)
     model_dict.update(normal_dict)
@@ -253,7 +252,7 @@ def load_checkpoint(model, cfg):

 def read_smpl_constants(folder):
     """Load smpl vertex code"""
-    smpl_vtx_std = np.loadtxt(
+    smpl_vtx_std = np.loadtxt(os.path.join(folder, 'vertices.txt'))
     min_x = np.min(smpl_vtx_std[:, 0])
     max_x = np.max(smpl_vtx_std[:, 0])
     min_y = np.min(smpl_vtx_std[:, 1])
@@ -266,12 +265,12 @@ def read_smpl_constants(folder):
     smpl_vtx_std[:, 2] = (smpl_vtx_std[:, 2] - min_z) / (max_z - min_z)
     smpl_vertex_code = np.float32(np.copy(smpl_vtx_std))
     """Load smpl faces & tetrahedrons"""
-    smpl_faces = np.loadtxt(
+    smpl_faces = np.loadtxt(os.path.join(folder, 'faces.txt'),
                             dtype=np.int32) - 1
     smpl_face_code = (smpl_vertex_code[smpl_faces[:, 0]] +
                       smpl_vertex_code[smpl_faces[:, 1]] +
                       smpl_vertex_code[smpl_faces[:, 2]]) / 3.0
-    smpl_tetras = np.loadtxt(
+    smpl_tetras = np.loadtxt(os.path.join(folder, 'tetrahedrons.txt'),
                              dtype=np.int32) - 1

     return smpl_vertex_code, smpl_face_code, smpl_faces, smpl_tetras
@@ -397,12 +396,11 @@ def cal_sdf_batch(verts, faces, cmaps, vis, points):
     bary_weights = barycentric_coordinates_of_projection(
         points.view(-1, 3), closest_triangles)

     pts_cmap = (closest_cmaps*bary_weights[:, :, None]).sum(1).unsqueeze(0)
     pts_vis = (closest_vis*bary_weights[:,
                                         :, None]).sum(1).unsqueeze(0).ge(1e-1)
     pts_norm = (closest_normals*bary_weights[:, :, None]).sum(
         1).unsqueeze(0) * torch.tensor([-1.0, 1.0, -1.0]).type_as(normals)
-    pts_norm = F.normalize(pts_norm, dim=2)
     pts_dist = torch.sqrt(residues) / torch.sqrt(torch.tensor(3))

     pts_signs = 2.0 * (check_sign(verts, faces[0], points).float() - 0.5)
@@ -844,21 +842,26 @@ def mesh_move(mesh_lst, step, scale=1.0):

 class SMPLX():
     def __init__(self):
-
-        REPO_ID = "Yuliang/SMPL"

-        self.
-
-
-        self.
+        self.current_dir = osp.join(osp.dirname(__file__),
+                                    "../../data/smpl_related")
+
+        self.smpl_verts_path = osp.join(self.current_dir,
+                                        "smpl_data/smpl_verts.npy")
+        self.smplx_verts_path = osp.join(self.current_dir,
+                                         "smpl_data/smplx_verts.npy")
+        self.faces_path = osp.join(self.current_dir,
+                                   "smpl_data/smplx_faces.npy")
+        self.cmap_vert_path = osp.join(self.current_dir,
+                                       "smpl_data/smplx_cmap.npy")

         self.faces = np.load(self.faces_path)
         self.verts = np.load(self.smplx_verts_path)
         self.smpl_verts = np.load(self.smpl_verts_path)

-        self.model_dir =
-        self.tedra_dir =
-
+        self.model_dir = osp.join(self.current_dir, "models")
+        self.tedra_dir = osp.join(self.current_dir, "../tedra_data")
+
 def get_smpl_mat(self, vert_ids):

     mat = torch.as_tensor(np.load(self.cmap_vert_path)).float()
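Note: this diff also removes the rot6d_to_rotmat helper (together with the matching rot-mat -> rot-6d flattening in TestDataset). For reference, the removed function is the Gram-Schmidt construction from Zhou et al., CVPR 2019; a round-trip check:

    import torch
    import torch.nn.functional as F

    def rot6d_to_rotmat(x):
        x = x.view(-1, 3, 2)
        a1 = x[:, :, 0]
        a2 = x[:, :, 1]
        b1 = F.normalize(a1)
        b2 = F.normalize(a2 - torch.einsum("bi,bi->b", b1, a2).unsqueeze(-1) * b1)
        b3 = torch.cross(b1, b2)
        return torch.stack((b1, b2, b3), dim=-1)

    # the first two columns of a rotation matrix are its 6D representation
    R = torch.eye(3).unsqueeze(0)
    six_d = R[:, :, :2].reshape(1, 6)
    assert torch.allclose(rot6d_to_rotmat(six_d), R, atol=1e-6)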
lib/net/FBNet.py
CHANGED
@@ -81,8 +81,7 @@ def define_G(input_nc,
|
|
81 |
# print(netG)
|
82 |
if len(gpu_ids) > 0:
|
83 |
assert (torch.cuda.is_available())
|
84 |
-
|
85 |
-
netG = netG.to(device)
|
86 |
netG.apply(weights_init)
|
87 |
return netG
|
88 |
|
|
|
81 |
# print(netG)
|
82 |
if len(gpu_ids) > 0:
|
83 |
assert (torch.cuda.is_available())
|
84 |
+
netG.cuda(gpu_ids[0])
|
|
|
85 |
netG.apply(weights_init)
|
86 |
return netG
|
87 |
|
lib/net/HGPIFuNet.py
CHANGED
@@ -15,7 +15,7 @@
 #
 # Contact: ps-license@tuebingen.mpg.de

-
+from lib.net.voxelize import Voxelization
 from lib.dataset.mesh_util import cal_sdf_batch, feat_select, read_smpl_constants
 from lib.net.NormalNet import NormalNet
 from lib.net.MLP import MLP
@@ -26,6 +26,7 @@ from termcolor import colored
 from lib.net.BasePIFuNet import BasePIFuNet
 import torch.nn as nn
 import torch
+import os


 maskout = False
@@ -293,8 +294,14 @@ class HGPIFuNet(BasePIFuNet):
         # smpl_cmap [B, N, 3]
         # smpl_vis [B, N, 1]

+        # set outlier point features as uniform values
+        smpl_outlier = torch.abs(smpl_sdf).ge(self.sdf_clip)
+        smpl_sdf[smpl_outlier] = torch.sign(smpl_sdf[smpl_outlier])
+
         feat_lst = [smpl_sdf]
         if 'cmap' in self.smpl_feats:
+            smpl_cmap[smpl_outlier.repeat(
+                1, 1, 3)] = smpl_sdf[smpl_outlier].repeat(1, 1, 3)
             feat_lst.append(smpl_cmap)
         if 'norm' in self.smpl_feats:
             feat_lst.append(smpl_norm)
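Note: the added outlier handling clamps the SMPL SDF feature: any query point farther than sdf_clip keeps only the sign of its signed distance, so distant points carry a uniform +/-1 instead of an uninformative large raw distance. A tiny numeric sketch (the threshold value is illustrative, not necessarily the repo's):

    import torch

    sdf_clip = 0.05
    smpl_sdf = torch.tensor([[[0.01], [-0.02], [0.30], [-0.40]]])  # [B, N, 1]
    outlier = torch.abs(smpl_sdf).ge(sdf_clip)
    smpl_sdf[outlier] = torch.sign(smpl_sdf[outlier])
    print(smpl_sdf.squeeze().tolist())  # [0.01, -0.02, 1.0, -1.0]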
lib/net/net_util.py
CHANGED
@@ -316,7 +316,7 @@ class Vgg19(torch.nn.Module):
 class VGGLoss(nn.Module):
     def __init__(self):
         super(VGGLoss, self).__init__()
-        self.vgg = Vgg19()
+        self.vgg = Vgg19().cuda()
         self.criterion = nn.L1Loss()
         self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
lib/pymaf/core/path_config.py
CHANGED
@@ -6,18 +6,33 @@ for the datasets and data files necessary to run the code.
 Things you need to change: *_ROOT that indicate the path to each dataset
 """
 import os
-from huggingface_hub import hf_hub_url, cached_download

 # pymaf
-pymaf_data_dir =
-
-SMPL_MODEL_DIR = os.path.join(smpl_data_dir, 'models/smpl')

-SMPL_MEAN_PARAMS =
-
-
-
-
-
-
-
+pymaf_data_dir = os.path.join(os.path.dirname(__file__),
+                              "../../../data/pymaf_data")
+
+SMPL_MEAN_PARAMS = os.path.join(pymaf_data_dir, 'smpl_mean_params.npz')
+SMPL_MODEL_DIR = os.path.join(pymaf_data_dir, '../smpl_related/models/smpl')
+
+CUBE_PARTS_FILE = os.path.join(pymaf_data_dir, 'cube_parts.npy')
+JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(pymaf_data_dir,
+                                           'J_regressor_extra.npy')
+JOINT_REGRESSOR_H36M = os.path.join(pymaf_data_dir, 'J_regressor_h36m.npy')
+VERTEX_TEXTURE_FILE = os.path.join(pymaf_data_dir, 'vertex_texture.npy')
+SMPL_MEAN_PARAMS = os.path.join(pymaf_data_dir, 'smpl_mean_params.npz')
+SMPL_MODEL_DIR = os.path.join(pymaf_data_dir, '../smpl_related/models/smpl')
+CHECKPOINT_FILE = os.path.join(pymaf_data_dir,
+                               'pretrained_model/PyMAF_model_checkpoint.pt')
+
+# pare
+pare_data_dir = os.path.join(os.path.dirname(__file__),
+                             "../../../data/pare_data")
+CFG = os.path.join(pare_data_dir, 'pare/checkpoints/pare_w_3dpw_config.yaml')
+CKPT = os.path.join(pare_data_dir,
+                    'pare/checkpoints/pare_w_3dpw_checkpoint.ckpt')
+
+# hybrik
+hybrik_data_dir = os.path.join(os.path.dirname(__file__),
+                               "../../../data/hybrik_data")
+HYBRIK_CFG = os.path.join(hybrik_data_dir, 'hybrik_config.yaml')
+HYBRIK_CKPT = os.path.join(hybrik_data_dir, 'pretrained_w_cam.pth')
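
With the hub download replaced by hard-coded repo-relative paths, every asset must already sit under data/ before anything imports this module. A small hypothetical sanity check (the helper name is mine, not part of the repo) that fails fast instead of raising a FileNotFoundError deep inside model construction:

    import os
    from lib.pymaf.core import path_config

    def check_pymaf_assets():
        # names mirror the constants defined in path_config above
        names = ["SMPL_MEAN_PARAMS", "SMPL_MODEL_DIR", "CUBE_PARTS_FILE",
                 "JOINT_REGRESSOR_TRAIN_EXTRA", "JOINT_REGRESSOR_H36M",
                 "VERTEX_TEXTURE_FILE", "CHECKPOINT_FILE"]
        missing = [n for n in names
                   if not os.path.exists(getattr(path_config, n))]
        if missing:
            raise FileNotFoundError(f"missing pymaf assets: {missing}")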
lib/pymaf/models/maf_extractor.py
CHANGED
@@ -3,13 +3,13 @@
 from packaging import version
 import torch
 import scipy
+import os
 import numpy as np
 import torch.nn as nn
 import torch.nn.functional as F

 from lib.common.config import cfg
 from lib.pymaf.utils.geometry import projection
-from lib.pymaf.core.path_config import MESH_DOWNSAMPLEING

 import logging
@@ -48,7 +48,10 @@ class MAF_Extractor(nn.Module):

         # downsample SMPL mesh and assign part labels
         # from https://github.com/nkolot/GraphCMR/blob/master/data/mesh_downsampling.npz
-        smpl_mesh_graph = np.load(MESH_DOWNSAMPLEING,
+        mesh_downsampling_path = os.path.join(
+            os.path.dirname(__file__),
+            "../../../data/pymaf_data/mesh_downsampling.npz")
+        smpl_mesh_graph = np.load(mesh_downsampling_path,
                                   allow_pickle=True,
                                   encoding='latin1')
lib/pymaf/models/res_module.py
CHANGED
@@ -4,11 +4,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

+import os
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from collections import OrderedDict
-import os
 from lib.pymaf.core.cfgs import cfg

 import logging
lib/pymaf/models/smpl.py
CHANGED
@@ -2,9 +2,9 @@

 import torch
 import numpy as np
-from
-from
-from
+from smplx import SMPL as _SMPL
+from smplx.body_models import ModelOutput
+from smplx.lbs import vertices2joints
 from collections import namedtuple

 from lib.pymaf.core import path_config, constants
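
These imports switch the model to the pip-installed smplx package. The standard pattern behind them, sketched here under stated assumptions (the subclass name and constructor argument are hypothetical), is to subclass smplx's SMPL and regress extra joints from the output vertices with a custom regressor such as the J_regressor_extra.npy defined in path_config:

    import numpy as np
    import torch
    from smplx import SMPL as _SMPL
    from smplx.lbs import vertices2joints

    class SMPLWithExtraJoints(_SMPL):
        def __init__(self, *args, j_regressor_extra_path, **kwargs):
            super().__init__(*args, **kwargs)
            # [J_extra, 6890] regressor mapping SMPL vertices to extra joints
            extra = np.load(j_regressor_extra_path)
            self.register_buffer(
                'J_regressor_extra', torch.tensor(extra, dtype=torch.float32))

        def forward(self, *args, **kwargs):
            output = super().forward(*args, **kwargs)
            # vertices2joints(regressor [J, V], vertices [B, V, 3]) -> [B, J, 3]
            extra_joints = vertices2joints(self.J_regressor_extra,
                                           output.vertices)
            return output, extra_joints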
lib/pymaf/utils/imutils.py
CHANGED
@@ -1,14 +1,15 @@
 """
 This file contains functions that are used to perform data augmentation.
 """
+from turtle import reset
 import cv2
 import io
 import torch
 import numpy as np
+import scipy.misc
 from PIL import Image
-from rembg import remove
-
-from torchvision.models import detection
+from rembg.bg import remove
+import human_det

 from lib.pymaf.core import constants
 from lib.pymaf.utils.streamer import aug_matrix
@@ -44,7 +45,6 @@ def get_bbox(img, det):
     bbox = bboxes[0, 0, 0].cpu().numpy()

     return bbox
-    # Michael Black is


 def get_transformer(input_res):
@@ -86,7 +86,7 @@ def get_transformer(input_res):
     return [image_to_tensor, mask_to_tensor, image_to_pymaf_tensor, image_to_pixie_tensor, image_to_hybrik_tensor]


-def process_image(img_file, hps_type, input_res=512, device=None, seg_path=None):
+def process_image(img_file, det, hps_type, input_res=512, device=None, seg_path=None):
     """Read image, do preprocessing and possibly crop it according to the bounding box.
     If there are bounding box annotations, use them to crop the image.
     If no bounding box is specified but openpose detections are available, use them to get the bounding box.
@@ -104,19 +104,21 @@ def process_image(img_file, hps_type, input_res=512, device=None, seg_path=None)
     img_for_crop = cv2.warpAffine(img_ori, M[0:2, :],
                                   (input_res*2, input_res*2), flags=cv2.INTER_CUBIC)

-
-
-
-
-
-
-
-
-
-
-
-
-
+    if det is not None:
+
+        # detection for bbox
+        bbox = get_bbox(img_for_crop, det)
+
+        width = bbox[2] - bbox[0]
+        height = bbox[3] - bbox[1]
+        center = np.array([(bbox[0] + bbox[2]) / 2.0,
+                           (bbox[1] + bbox[3]) / 2.0])
+
+    else:
+        # Assume that the person is centered in the image
+        height = img_for_crop.shape[0]
+        width = img_for_crop.shape[1]
+        center = np.array([width // 2, height // 2])

     scale = max(height, width) / 180

@@ -127,8 +129,12 @@ def process_image(img_file, hps_type, input_res=512, device=None, seg_path=None)
     img_np, cropping_parameters = crop(
         img_for_crop, center, scale, (input_res, input_res))

-
-
+    with torch.no_grad():
+        buf = io.BytesIO()
+        Image.fromarray(img_np).save(buf, format='png')
+        img_pil = Image.open(
+            io.BytesIO(remove(buf.getvalue()))).convert("RGBA")
+
     # for icon
     img_rgb = image_to_tensor(img_pil.convert("RGB"))
     img_mask = torch.tensor(1.0) - (mask_to_tensor(img_pil.split()[-1]) <
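
The new det branch derives the crop center and scale from a person detector when one is supplied, and otherwise assumes the subject fills the frame; the following block then mattes the crop in memory. rembg's remove() consumes and returns encoded image bytes, which is why the numpy crop is round-tripped through a PNG buffer and decoded back as RGBA, whose alpha channel becomes the person mask a few lines later. A minimal standalone sketch of that round-trip (the function name is mine):

    import io
    import numpy as np
    from PIL import Image
    from rembg.bg import remove

    def matte_person(img_np: np.ndarray) -> Image.Image:
        # encode the crop as PNG bytes, since remove() works on encoded images
        buf = io.BytesIO()
        Image.fromarray(img_np).save(buf, format='png')
        # decode the matted result; the alpha channel holds the foreground mask
        return Image.open(io.BytesIO(remove(buf.getvalue()))).convert("RGBA")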
lib/renderer/mesh.py
CHANGED
@@ -18,7 +18,7 @@
 from lib.dataset.mesh_util import SMPLX
 from lib.common.render_utils import face_vertices
 import numpy as np
-import
+import smplx
 import trimesh
 import torch
 import torch.nn.functional as F