Armandoliv committed on
Commit
65f93b2
1 Parent(s): d3ba75d

Create new file

Browse files
Files changed (1) hide show
  1. app.py +183 -0
app.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Imports -----------------------------------------------------------------
# segmentation_models provides the U-Net builder; cv2/PIL handle images;
# gradio serves the web UI. (Deduplicated the repeated `import os`.)
import os

import cv2
import keras
import matplotlib.colors as colorsHTML
import numpy as np
import segmentation_models as sm
from PIL import Image
import gradio as gr

# Download the pretrained weights at startup. wget has no -O flag, so the
# file lands in the CURRENT working directory as ./best_model.h5.
# NOTE(review): assumes network access at launch time.
os.system('wget https://huggingface.co/Armandoliv/cars-parts-segmentation-unet-resnet18/resolve/main/best_model.h5')

# Class names for the 19 segmentation classes (background + 18 car parts),
# index-aligned with `colors` below.
c = ['_background_', 'back_bumper', 'back_glass', 'back_left_door', 'back_left_light',
     'back_right_door', 'back_right_light', 'front_bumper', 'front_glass',
     'front_left_door', 'front_left_light', 'front_right_door', 'front_right_light', 'hood', 'left_mirror',
     'right_mirror', 'tailgate', 'trunk', 'wheel']

# One RGB color per class id.
colors = [(245, 255, 250), (75, 0, 130), (0, 255, 0), (32, 178, 170), (0, 0, 255), (0, 255, 255), (255, 0, 255), (128, 0, 128), (255, 140, 0),
          (85, 107, 47), (102, 205, 170), (0, 191, 255), (255, 0, 0), (255, 228, 196), (205, 133, 63),
          (220, 20, 60), (255, 69, 0), (143, 188, 143), (255, 255, 0)]

# Use the tf.keras backend for segmentation_models.
sm.set_framework('tf.keras')
sm.framework()

BACKBONE = 'resnet18'
n_classes = 19
activation = 'softmax'

# Create the U-Net model with a ResNet18 encoder.
model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)

# Load the best weights.
# FIX: wget saved the file to the current working directory, not to
# /content (a Google-Colab-only path); loading '/content/best_model.h5'
# fails everywhere except Colab.
model.load_weights('best_model.h5')
37
def get_colored_segmentation_image(seg_arr, n_classes, colors=colors):
    """Turn a 2-D array of class ids into an RGB image scaled to [0, 1].

    seg_arr: (H, W) array of integer class labels.
    n_classes: number of class ids to colorize (0 .. n_classes-1).
    colors: per-class RGB tuples, index-aligned with the class ids.
    Returns an (H, W, 3) float array with values in [0, 1].
    """
    height = seg_arr.shape[0]
    width = seg_arr.shape[1]
    rgb = np.zeros((height, width, 3))

    for label in range(n_classes):
        mask = seg_arr[:, :] == label
        # Paint each channel wherever this class id appears.
        rgb[:, :, 0] += (mask * colors[label][0]).astype('uint8')
        rgb[:, :, 1] += (mask * colors[label][1]).astype('uint8')
        rgb[:, :, 2] += (mask * colors[label][2]).astype('uint8')

    return rgb / 255
51
+
52
def get_legends(class_names, colors, tags):
    """Render a legend image: one name + color swatch row per detected class.

    class_names: all class names, index-aligned with `colors`.
    colors: per-class RGB tuples.
    tags: collection of class indices that should appear in the legend.
    Returns a white uint8 canvas of shape ((len(class_names)+1)*25, 125, 3)
    with the detected classes packed into the top rows.
    """
    # White canvas: one 25-px row per class plus one spare row.
    legend = np.zeros((25 * len(class_names) + 25, 125, 3),
                      dtype="uint8") + 255

    row = 0
    for idx, (name, color) in enumerate(zip(class_names, colors)):
        if idx not in tags:
            continue
        swatch = [int(channel) for channel in color]
        top = row * 25
        # Class name on the left, filled color swatch on the right.
        cv2.putText(legend, name, (5, top + 17),
                    cv2.FONT_HERSHEY_COMPLEX, 0.35, (0, 0, 0), 1)
        cv2.rectangle(legend, (100, top), (125, top + 25),
                      tuple(swatch), -1)
        row += 1
    return legend
70
+
71
+
72
+
73
def preprocess_image(path_img):
    """Load an image, shrink it to fit 512x512, and pad it to exactly 512x512.

    path_img: path to the input image file.
    Returns (padded_array, ht, wd) where padded_array is a (512, 512, 3)
    uint8 array and ht/wd are the dimensions of the resized (unpadded)
    image placed in its top-left corner.
    """
    side = 512
    # FIX: force 3-channel RGB. Without this, a grayscale input yields a
    # 2-D array (breaking the 3-value shape unpack below) and an RGBA
    # input yields 4 channels, which a 3-channel model cannot consume.
    img = Image.open(path_img).convert("RGB")

    # In-place, aspect-preserving resize so both sides fit within 512.
    img.thumbnail((side, side))
    arr = np.array(img)
    ht, wd, cc = arr.shape

    # Black padding canvas; the resized image goes in the TOP-LEFT corner
    # (not centered), so callers can crop the real content with [:ht, :wd].
    result = np.full((side, side, cc), (0, 0, 0), dtype=np.uint8)
    result[:ht, :wd] = arr
    return result, ht, wd
88
+
89
def concat_lengends(seg_img, legend_img):
    """Place the legend to the left of the segmented image on one canvas.

    seg_img, legend_img: (H, W, 3) uint8 images; heights may differ.
    Returns a uint8 canvas of height max(H_seg, H_legend) and width
    W_seg + W_legend, pre-filled with the legend's top-left pixel value
    (its background) so unused rows blend in.
    """
    leg_h, leg_w = legend_img.shape[0], legend_img.shape[1]
    seg_h = seg_img.shape[0]

    canvas_h = np.maximum(seg_h, leg_h)
    canvas_w = seg_img.shape[1] + leg_w

    # Background fill taken from the legend's corner pixel.
    canvas = np.zeros((canvas_h, canvas_w, 3)).astype('uint8') + legend_img[0, 0, 0]

    canvas[:leg_h, :leg_w] = np.copy(legend_img)
    canvas[:seg_h, leg_w:] = np.copy(seg_img)

    return canvas
100
+
101
def main_convert(filename):
    """Run the full car-parts segmentation pipeline on one image file.

    filename: path to the input image (supplied by the gradio Image input).
    Returns (final_img, seg): the 50/50 image+mask blend with the legend
    attached (numpy array), and the raw colored segmentation (PIL Image).
    """
    print(filename)
    #load the image
    img_path = filename
    img = Image.open(img_path).convert("RGB")
    tags = []  # class ids that survive the noise filter below

    #preprocess the image
    # Returns (padded 512x512 array, unpadded height, unpadded width).
    img_scaled_arr = preprocess_image(img_path)
    image = np.expand_dims(img_scaled_arr[0], axis=0)  # add batch dimension

    #make the predictions
    pr_mask = model.predict(image).squeeze()  # (512, 512, 19) class scores
    pr_mask_int = np.zeros((pr_mask.shape[0],pr_mask.shape[1]))

    #filter the smallest noisy segments
    # Morphological opening removes specks; a class is kept only if more
    # than 100 pixels survive.
    kernel = np.ones((5, 5), 'uint8')

    for i in range(1,19):  # skip class 0 (background)
        array_one = np.round(pr_mask[:,:,i])
        op = cv2.morphologyEx(array_one, cv2.MORPH_OPEN, kernel)
        if sum(sum(op ==1)) > 100:
            tags.append(i)
            pr_mask_int[op ==1] = i

    # Crop away the padding (img_scaled_arr[1:] are the unpadded h, w) and
    # resize the label map back to the original image size.
    img_segmented = np.array(Image.fromarray(pr_mask_int[:img_scaled_arr[1], :img_scaled_arr[2]]).resize(img.size))

    seg = get_colored_segmentation_image(img_segmented,19, colors=colors)

    # 50/50 blend of the original image and the colored segmentation.
    fused_img = ((np.array(img)/255)/2 + seg/2).astype('float32')

    seg = Image.fromarray((seg*255).astype(np.uint8))
    fused_img = Image.fromarray((fused_img *255).astype(np.uint8))

    #get the legends
    # Only the classes that were actually detected (`tags`) are listed.
    legend_predicted = get_legends(c, colors, tags)

    final_img = concat_lengends(np.array(fused_img), np.array(legend_predicted))

    return final_img, seg
142
+
143
+
144
+
145
# --- Gradio UI wiring: one image input, two image outputs. ---
# Input arrives as a file path (matching main_convert's signature).
inputs = [gr.Image(type="filepath", label="Car Image")]
# NOTE(review): gradio's Image `type` normally expects "pil"/"numpy"/
# "filepath" — confirm "PIL.Image" is accepted by the pinned gradio version.
outputs = [gr.Image(type="PIL.Image", label="Detected Segments Image"),gr.Image(type="PIL.Image", label="Segment Image")]


title = "Car Parts Segmentation APP"
# Description shown under the title; enumerates the model's class ids.
description = """This demo uses AI Models to detect 18 parts of cars: \n
1: background,
2: back bumper,
3: back glass,
4: back left door,
5: back left light,
6: back right door,
7: back right light,
8: front bumper,
9: front glass,
10: front left door,
11: front left light,
12: front right door,
13: front right light,
14: hood,
15: left mirror,
16: right mirror,
17: tailgate,
18: trunk,
19: wheel"""

# Example image shown in the UI; the file must exist alongside app.py.
examples = [['test_image.jpeg']]
# Custom CSS restyles the primary button with a green gradient.
io = gr.Interface(fn=main_convert, inputs=inputs, outputs=outputs, title=title, description=description, examples=examples,
    css= """.gr-button-primary { background: -webkit-linear-gradient(
90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764;
background: linear-gradient(
90deg, #355764 0%, #55a8a1 100% ) !important;
background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important;
background: -webkit-linear-gradient(
90deg, #355764 0%, #55a8a1 100% ) !important;
color:white !important}"""
    )

io.launch()