# install
import glob
import gradio as gr
import os
import numpy as np
import subprocess
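# When running on Hugging Face Spaces (SYSTEM == 'spaces'), install the CUDA 11.3
# wheels and repo-specific dependencies at startup; outside Spaces they are assumed
# to be installed already.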
if os.getenv('SYSTEM') == 'spaces':
    subprocess.run('pip install pyembree'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/rembg.git@hf'.split())
    subprocess.run(
        'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
    subprocess.run(
        'pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split())
    subprocess.run(
        'pip install https://download.is.tue.mpg.de/icon/HF/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl'.split())
    subprocess.run(
        'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split())
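# apps.infer is imported only after the runtime installs above, since it relies on
# packages such as pytorch3d and kaolin. generate_model(image_path, method) runs the
# full reconstruction and produces the outputs collected in out_lst below.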
from apps.infer import generate_model
# running
description = '''
# ICON Clothed Human Digitization
### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)
<table>
<th>
<ul>
<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a></li>
<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a></li>
<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
<li><strong>Colab Notebook</strong> <a href="https://colab.research.google.com/drive/1-AWeWhPvCTBX0KfMtgtMk10uPU05ihoA?usp=sharing">Google Colab</a></li>
</ul>
<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
</th>
<th>
<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</th>
</table>
<h4> Reconstruction + refinement + video rendering take about 200 seconds for a single image. <span style="color:red"> If an ERROR occurs, try "Submit Image" again.</span></h4>
<details>
<summary>More</summary>
#### Citation
```
@inproceedings{xiu2022icon,
title = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
author = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2022},
pages = {13296-13306}
}
```
#### Acknowledgments
- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)
#### Image Credits
* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)
#### Related works
* [ICON @ MPI](https://icon.is.tue.mpg.de/)
* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
* [Phorhum @ Google](https://phorhum.github.io/)
* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)
</details>
'''
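# Query the hosted StyleGAN-Human Space to synthesize a full-body person image from a
# random seed and truncation psi; the result is used as the reconstruction input.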
def generate_image(seed, psi):
    iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
    img = iface(seed, psi)
    return img
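# Pair every example image with a randomly chosen reconstruction method,
# weighted towards ICON (p = [0.6, 0.2, 0.2]).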
model_types = ['ICON', 'PIFu', 'PaMIR']
examples_names = glob.glob('examples/*.png')
examples_types = np.random.choice(
    model_types, len(examples_names), p=[0.6, 0.2, 0.2])
examples = [list(item) for item in zip(examples_names, examples_types)]
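# Gradio UI: generation controls, input image, example gallery and the result video
# on the left; the normal-overlap image, reconstructed meshes and downloads on the right.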
with gr.Blocks() as demo:
    gr.Markdown(description)

    out_lst = []

    with gr.Row():
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    seed = gr.inputs.Slider(
                        0, 1000, step=1, default=0, label='Seed (For Image Generation)')
                    psi = gr.inputs.Slider(
                        0, 2, step=0.05, default=0.7, label='Truncation psi (For Image Generation)')
                    # The default value must be one of model_types, otherwise the
                    # radio starts without a valid selection.
                    radio_choice = gr.Radio(
                        model_types, label='Method (For Reconstruction)', value='ICON')
                inp = gr.Image(type="filepath", label="Input Image")
            with gr.Row():
                btn_sample = gr.Button("Generate Image")
                btn_submit = gr.Button("Submit Image")

            gr.Examples(examples=examples,
                        inputs=[inp, radio_choice],
                        cache_examples=False,
                        fn=generate_model,
                        outputs=out_lst)

            out_vid = gr.Video(
                label="Image + Normal + SMPL Body + Clothed Human")
            out_vid_download = gr.File(
                label="Download the video and share it on Twitter with #ICON")

        with gr.Column():
            overlap_inp = gr.Image(
                type="filepath", label="Image Normal Overlap")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human")
            out_final_download = gr.File(
                label="Download clothed human mesh")
            out_smpl = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL body")
            out_smpl_download = gr.File(label="Download SMPL body mesh")
            out_smpl_npy_download = gr.File(label="Download SMPL params")

    out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download,
               out_final, out_final_download, out_vid, out_vid_download, overlap_inp]

    btn_submit.click(fn=generate_model, inputs=[inp, radio_choice],
                     outputs=out_lst)
    btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)
if __name__ == "__main__":
    # demo.launch(debug=False, enable_queue=False,
    #             auth=(os.environ['USER'], os.environ['PASSWORD']),
    #             auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
    demo.launch(debug=True, enable_queue=True)