|
import shlex  # kept for compatibility; no longer needed now that argv is built as a list
import subprocess
import sys

# Local wheels that must be (re)installed before the app imports
# onnxruntime / nvdiffrast further down.
# NOTE(review): this runs on every execution of the module; consider a
# marker file or moving it into the deployment setup script.
_WHEELS = [
    "package/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl",
    "package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl",
]

for _wheel in _WHEELS:
    # Use the *current* interpreter's pip (`python -m pip`) rather than
    # whatever `pip` happens to be on PATH, and fail loudly on error
    # instead of silently continuing with a broken environment.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", _wheel,
         "--force-reinstall", "--no-deps"],
        check=True,
    )
|
|
|
# Runtime configuration performed only when this file is executed as a
# script (not when imported as a module).
if __name__ == "__main__":
    import os
    import sys

    # Make repository-local packages (e.g. `gradio_app`) importable when
    # the script is launched from the repo root.  Must happen before the
    # `gradio_app` imports below.
    sys.path.append(os.curdir)

    import torch

    # Allow TF32 / reduced-precision matmuls for faster inference on
    # recent NVIDIA GPUs; this app is inference-only, so autograd is
    # disabled globally as well.
    torch.set_float32_matmul_precision('medium')
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.set_grad_enabled(False)

import fire
import gradio as gr
from gradio_app.gradio_3dgen import create_ui as create_3d_ui
from gradio_app.all_models import model_zoo
|
|
|
|
|
# App title, shown in the browser tab and rendered as the page heading.
_TITLE = '''Unique3D: High-Quality and Efficient 3D Mesh Generation from a Single Image'''

# Markdown blurb rendered under the title: repo badges, paper/demo links,
# and usage notes for the hosted demos.
_DESCRIPTION = '''
<div>
<a style="display:inline-block" href='https://github.com/AiuniAI/Unique3D'><img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/AiuniAI/Unique3D?style=social">
</a>
<img alt="GitHub License" src="https://img.shields.io/github/license/AiuniAI/Unique3D">
</div>

# [Paper](https://arxiv.org/abs/2405.20343) | [Project page](https://wukailu.github.io/Unique3D/) | [Huggingface Demo](https://huggingface.co/spaces/Wuvin/Unique3D) | [Gradio Demo](https://u45213-bcf9-ef67553e.westx.seetacloud.com:8443/) | [Online Demo](https://www.aiuni.ai/)

* High-fidelity and diverse textured meshes generated by Unique3D from single-view images.

* The demo is still under construction, and more features are expected to be implemented soon.

* The demo takes around 50 seconds on L4, and about 60 seconds on Huggingface ZeroGPU.

* If the Huggingface Demo unfortunately hangs or is very crowded, you can use the Gradio Demo or Online Demo. The Online Demo is free to try, and the registration invitation code is `aiuni24`. However, the Online Demo is slightly different from the Gradio Demo, in that the inference speed is slower, and the generation results is less stable, but the quality of the texture is better.
'''
|
|
|
def launch(share: bool = True):
    """Build the Gradio UI and start the web server.

    Args:
        share: When True (the default, matching the previous hard-coded
            behavior), Gradio additionally creates a public share link.
            Exposed as a ``--share``/``--noshare`` CLI flag via
            ``fire.Fire``.
    """
    # Load all models up front so the first user request does not pay the
    # initialization cost.
    model_zoo.init_models()

    with gr.Blocks(
        title=_TITLE,
    ) as demo:
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown('# ' + _TITLE)
                gr.Markdown(_DESCRIPTION)
        create_3d_ui("wkl")

    # Enable request queuing (required for long-running generation jobs),
    # then serve.
    demo.queue().launch(share=share)
|
|
|
# Script entry point: `fire` exposes `launch` (and its keyword arguments)
# as a command-line interface.
if __name__ == "__main__":
    fire.Fire(launch)
|
|