modify app
README.md CHANGED
@@ -7,5 +7,3 @@ sdk: gradio
 app_file: app.py
 pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py CHANGED
@@ -16,14 +16,12 @@ from focusondepth.model_definition import FocusOnDepth
 AutoConfig.register("focusondepth", FocusOnDepthConfig)
 AutoModel.register(FocusOnDepthConfig, FocusOnDepth)
 
-original_image_cache = {}
 transform = transforms.Compose([
     transforms.Resize((384, 384)),
     transforms.ToTensor(),
     transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
 ])
 model = AutoModel.from_pretrained('ybelkada/focusondepth', trust_remote_code=True)
-# model.load_state_dict(torch.load('./focusondepth/FocusOnDepth_vit_base_patch16_384.p', map_location=torch.device('cpu'))['model_state_dict'])
 
 @torch.no_grad()
 def inference(input_image):
@@ -42,6 +40,20 @@ def inference(input_image):
 
     return [depth.resize(original_size, resample=Image.BICUBIC), segmentation.resize(original_size, resample=Image.NEAREST)]
 
+description = """
+<center>
+Can a single model predict both segmentation and depth estimation? At least, if the segmentation is constrained to a single class, the answer is yes! <br>
+In this project, we use a DPT model to predict the depth map and the segmentation mask of the class "human" for an image. This model could potentially be used for an autofocus application, where you would need the segmentation mask of the humans in the picture as well as the depth estimation of the scene.<br>
+</center>
+"""
+title = """
+FocusOnDepth - A single DPT encoder for Dense Prediction Tasks
+"""
+css = """
+"""
+article = "<div style='text-align: center;'><a href='https://github.com/isl-org/DPT' target='_blank'>Original Paper</a> | <a href='https://github.com/antocad/FocusOnDepth' target='_blank'>Extended Version</a></div>"
+
+
 iface = gr.Interface(
     fn=inference,
     inputs=gr.inputs.Image(label="Input Image"),
@@ -49,5 +61,9 @@ iface = gr.Interface(
         gr.outputs.Image(label="Depth Map:"),
         gr.outputs.Image(label="Segmentation Map:"),
     ],
+    description=description,
+    title=title,
+    css=css,
+    article=article,
 )
 iface.launch()
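For context on the loading path this app relies on: the AutoConfig.register / AutoModel.register calls kept at the top of the diff tell transformers how to map the custom "focusondepth" model type onto the classes shipped in the focusondepth package, which is what lets AutoModel.from_pretrained('ybelkada/focusondepth', trust_remote_code=True) resolve the right architecture. A minimal, self-contained sketch of that registration pattern follows; ToyConfig and ToyModel are made-up stand-ins, not the real FocusOnDepth code.

import torch
from torch import nn
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel

class ToyConfig(PretrainedConfig):
    # model_type is the key AutoConfig uses to find this class
    model_type = "toy"

    def __init__(self, hidden_size=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

class ToyModel(PreTrainedModel):
    config_class = ToyConfig

    def __init__(self, config):
        super().__init__(config)
        self.proj = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, x):
        return self.proj(x)

# The same two calls the Space makes for FocusOnDepth:
AutoConfig.register("toy", ToyConfig)
AutoModel.register(ToyConfig, ToyModel)

# The Auto* API can now build the custom class from its config or a checkpoint.
model = AutoModel.from_config(ToyConfig())
print(type(model).__name__)  # ToyModel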
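The new description pitches an autofocus use case. Since inference returns the depth map and the human segmentation mask as two aligned PIL images (see the return statement in the diff), a downstream step could, for example, average the depth over the masked pixels to decide where to focus. A rough, hypothetical illustration of that idea: the helper name, the grayscale conversion, and the 127 threshold are assumptions made for this sketch, not part of the Space.

import numpy as np
from PIL import Image

def mean_subject_depth(depth_img: Image.Image, mask_img: Image.Image) -> float:
    """Average depth value over the pixels the segmentation marks as 'human'.

    Assumes the two PIL images returned by inference are the same size and the
    mask is roughly binary; both are illustrative assumptions.
    """
    depth = np.asarray(depth_img.convert("L"), dtype=np.float32)
    mask = np.asarray(mask_img.convert("L"), dtype=np.float32) > 127
    if not mask.any():
        return float("nan")  # no person detected in the frame
    return float(depth[mask].mean())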