# ZoeDepth Hugging Face Space (app.py) — running on L4 GPU; file size 1,257 bytes; commit 99b3515.
import gradio as gr
import torch
from gradio_depth_pred import create_demo as create_depth_pred_demo
from gradio_im_to_3d import create_demo as create_im_to_3d_demo
from gradio_pano_to_3d import create_demo as create_pano_to_3d_demo
css = """
#img-display-container {
max-height: 50vh;
}
#img-display-input {
max-height: 40vh;
}
#img-display-output {
max-height: 40vh;
}
"""
# Use the GPU when one is available, otherwise fall back to CPU.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load the pretrained ZoeD_N metric-depth model from torch hub (downloads weights
# on first run) and put it in inference mode. Shared by all three demo tabs below.
model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()
# Markdown strings rendered at the top of the demo page.
title = "# ZoeDepth"
description = """Official demo for **ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth**.
ZoeDepth is a deep learning model for metric depth estimation from a single image.
Please refer to our [paper](https://arxiv.org/abs/2302.12288) or [github](https://github.com/isl-org/ZoeDepth) for more details."""
# Assemble the Gradio UI: a header plus three tabs, each built by a
# project-local helper that shares the single loaded model instance.
# NOTE: the scraped source had lost its indentation and carried a stray
# trailing "|" after launch(); both are restored/removed here.
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Tab("Depth Prediction"):
        create_depth_pred_demo(model)
    with gr.Tab("Image to 3D"):
        create_im_to_3d_demo(model)
    with gr.Tab("360 Panorama to 3D"):
        create_pano_to_3d_demo(model)

if __name__ == '__main__':
    # queue() enables request queuing so long-running depth inference
    # does not time out concurrent users.
    demo.queue().launch()