import gradio as gr
from transformers import pipeline
import spaces
# Load the three depth-estimation pipelines to compare.
dpt_beit = pipeline(task="depth-estimation", model="Intel/dpt-beit-base-384")
depth_anything = pipeline(task="depth-estimation", model="nielsr/depth-anything-small")
dpt_large = pipeline(task="depth-estimation", model="Intel/dpt-large")
# Each wrapper runs one model and returns its predicted depth map (a PIL image).
@spaces.GPU
def depth_anything_inference(image):
    return depth_anything(image)["depth"]


@spaces.GPU
def dpt_beit_inference(image):
    return dpt_beit(image)["depth"]


@spaces.GPU
def dpt_large_inference(image):
    return dpt_large(image)["depth"]


def infer(image):
    # Run all three models on the same input so their outputs can be compared side by side.
    return dpt_large_inference(image), dpt_beit_inference(image), depth_anything_inference(image)
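
# Custom CSS for the demo; the #mkd rule applies to a component created with elem_id="mkd".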
css = """
#mkd {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>Compare Depth Estimation Models</center></h1>")
    with gr.Column():
        with gr.Row():
            # Use PIL images so the transformers pipelines can consume the input directly.
            input_img = gr.Image(label="Input Image", type="pil")
        with gr.Row():
            output_1 = gr.Image(type="pil", label="DPT-Large")
            output_2 = gr.Image(type="pil", label="DPT with BeiT Backbone")
            output_3 = gr.Image(type="pil", label="Depth Anything")

    # Run all three models whenever the input image changes.
    input_img.change(infer, [input_img], [output_1, output_2, output_3])

demo.launch(debug=True)