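"""Gradio demo: monocular depth estimation with depth-based image filtering.

Loads the keras-io/monocular-depth-estimation model from the Hugging Face Hub,
predicts a depth map for an uploaded image, and keeps only the image regions
whose estimated depth falls between two user-chosen thresholds.
"""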
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from einops import repeat
from huggingface_hub import from_pretrained_keras

from layers import BilinearUpSampling2D

# The saved model uses a custom upsampling layer that must be registered for
# deserialisation; the training loss is not needed at inference time.
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}
print('Loading model...')
model = from_pretrained_keras("keras-io/monocular-depth-estimation", custom_objects=custom_objects, compile=False)
print('Successfully loaded model...')

# Helper functions (load_images, predict) that preprocess the input batch and
# run the depth model.
import utils

def layer_over_image(raw_image, mask):
    """Keep only the pixels of raw_image where mask is truthy; zero out the rest."""
    out_image = raw_image.copy()  # work on a copy so the caller's array is not mutated
    out_image[:, :, 0] = raw_image[:, :, 0] * mask
    out_image[:, :, 1] = raw_image[:, :, 1] * mask
    out_image[:, :, 2] = raw_image[:, :, 2] * mask
    return out_image

def infer(image, min_th, max_th):
    """Estimate a depth map for `image` and highlight the regions whose
    normalised depth lies between `min_th` and `max_th` (given in percent)."""
    inputs = utils.load_images([image])
    outputs = utils.predict(model, inputs)

    # Normalise the single-channel depth prediction to [0, 1].
    plasma = plt.get_cmap('plasma')
    rescaled = outputs[0][:, :, 0]
    rescaled = rescaled - np.min(rescaled)
    rescaled = rescaled / np.max(rescaled)

    # Colour-mapped depth heat map (drop the colormap's alpha channel).
    im_heat = plasma(rescaled)[:, :, :3]

    # The depth map is predicted at half the input resolution, so repeat each
    # value into a 2x2 block to bring the mask back to the image size.
    filt_base = repeat(rescaled, "h w -> (h 2) (w 2)")
    filt_arr_min = filt_base > min_th / 100   # True where depth is above the minimum threshold
    filt_arr_max = filt_base < max_th / 100   # True where depth is below the maximum threshold

    # Heat map restricted to the selected depth band (for the disabled
    # "Filtered Image" output below).
    filt_arr = filt_arr_min * filt_base * filt_arr_max
    im_heat_filt = plasma(filt_arr)[:, :, :3]

    # Keep only the pixels whose depth lies inside [min_th, max_th]; with the
    # default thresholds (0 and 100) the image is returned unchanged.
    if min_th > 0 or max_th < 100:
        image_emph = layer_over_image(image, filt_arr_min * filt_arr_max)
    else:
        image_emph = image

    return im_heat, image_emph

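# Gradio wiring: one image input plus two sliders giving the depth thresholds
# as a percentage of the normalised depth range.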
gr_input = [
    gr.inputs.Image(label="Image", type="numpy", shape=(640, 480)),
    gr.inputs.Slider(minimum=0, maximum=100, step=0.5, default=0, label="Minimum Threshold"),
    gr.inputs.Slider(minimum=0, maximum=100, step=0.5, default=100, label="Maximum Threshold"),
]

gr_output = [
    gr.outputs.Image(type="pil", label="Depth HeatMap"),
    # gr.outputs.Image(type="pil", label="Filtered Image"),  # would show im_heat_filt; currently disabled
    gr.outputs.Image(type="pil", label="Important Areas"),
]

iface = gr.Interface(
    fn=infer,
    title="Monocular Depth Filter",
    description="Uses the Keras monocular depth estimation model to estimate the depth of each region of an image. The user sets minimum and maximum depth thresholds, and the app keeps only the parts of the image that fall inside that range.",
    inputs=gr_input,
    outputs=gr_output,
    examples=[
        ["examples/00015_colors.png", 7, 85],
        ["examples/car.JPG", 0, 30],
        ["examples/dash.jpg", 10, 57.5],
    ],
)
iface.launch()