File size: 6,029 Bytes
65f93b2 7986d52 65f93b2 09f48a8 65f93b2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 |
import segmentation_models as sm
import numpy as np
import os
import cv2
import keras
import matplotlib.colors as colorsHTML
from PIL import Image
import gradio as gr
import os
# Fetch the pretrained U-Net weights from the Hugging Face Hub.
os.system('wget https://huggingface.co/Armandoliv/cars-parts-segmentation-unet-resnet18/resolve/main/best_model.h5')
# Install pycocotools from the pinned fork. A PEP 508 direct reference
# ("name @ url") must reach pip as ONE argument; unquoted, the shell splits
# it into three tokens and pip rejects the bare "@".
os.system('pip -qq install "pycocotools @ git+https://github.com/philferriere/cocoapi.git@2929bd2ef6b451054755dfd7ceb09278f935f7ad#subdirectory=PythonAPI"')
# Class names for the 19 segmentation channels (index 0 is background);
# order must match the model's output channels and the `colors` list below.
c= ['_background_', 'back_bumper', 'back_glass', 'back_left_door','back_left_light',
    'back_right_door', 'back_right_light', 'front_bumper','front_glass',
    'front_left_door', 'front_left_light', 'front_right_door', 'front_right_light', 'hood', 'left_mirror',
    'right_mirror', 'tailgate', 'trunk', 'wheel']
# One RGB tuple per class, aligned with `c` by index.
colors = [ (245,255,250), (75,0,130), (0,255,0), (32,178,170),(0,0,255), (0,255,255), (255,0,255), (128,0,128), (255,140,0),
          (85,107,47), (102,205,170), (0,191,255), (255,0,0), (255,228,196), (205,133,63),
          (220,20,60), (255,69,0), (143,188,143), (255,255,0)]
# Make segmentation_models build on tf.keras rather than standalone keras.
sm.set_framework('tf.keras')
sm.framework()
BACKBONE = 'resnet18'
n_classes = 19
activation = 'softmax'  # per-pixel multi-class probabilities
#create model
model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
# load best weights (downloaded above from the Hugging Face Hub)
model.load_weights('best_model.h5')
def get_colored_segmentation_image(seg_arr, n_classes, colors=colors):
    """Render a 2-D class-id map as an RGB float image with values in [0, 1].

    Pixels whose value is not exactly equal to any class id (e.g. produced
    by interpolated resizing) stay black.
    """
    height, width = seg_arr.shape[0], seg_arr.shape[1]
    seg_img = np.zeros((height, width, 3))
    for class_id in range(n_classes):
        mask = seg_arr[:, :] == class_id
        for channel in range(3):
            seg_img[:, :, channel] += (mask * colors[class_id][channel]).astype('uint8')
    return seg_img/255
def get_legends(class_names, colors, tags):
    """Build a white legend strip: class name + colour swatch, one 25px row
    per class id listed in *tags*, drawn top-down in class-id order.

    The strip is sized for all classes, so rows below the drawn entries
    remain blank (white).
    """
    n_classes = len(class_names)
    legend = np.full(((len(class_names) * 25) + 25, 125, 3), 255, dtype="uint8")
    row = 0
    for idx, (name, colour) in enumerate(zip(class_names[:n_classes],
                                             colors[:n_classes])):
        if idx not in tags:
            continue
        swatch = [int(channel) for channel in colour]
        cv2.putText(legend, name, (5, (row * 25) + 17),
                    cv2.FONT_HERSHEY_COMPLEX, 0.35, (0, 0, 0), 1)
        cv2.rectangle(legend, (100, (row * 25)), (125, (row * 25) + 25),
                      tuple(swatch), -1)
        row += 1
    return legend
def preprocess_image(path_img):
    """Load an image, shrink it to fit inside 512x512 keeping aspect ratio,
    and pad it with black to an exact 512x512x3 array.

    Returns (padded uint8 array, scaled height, scaled width); the height
    and width let callers crop the padding back off.
    """
    ww = 512
    hh = 512
    # Force RGB so the array is always 3-channel (matches main_convert's
    # handling; grayscale/RGBA files would otherwise break the unpack below
    # and feed the model the wrong channel count).
    img = Image.open(path_img).convert("RGB")
    img.thumbnail((hh, ww))  # in-place resize, aspect ratio preserved
    i = np.array(img)
    ht, wd, cc = i.shape
    # black canvas of the target size for padding
    color = (0, 0, 0)
    result = np.full((hh, ww, cc), color, dtype=np.uint8)
    # paste the scaled image into the top-left corner of the canvas
    result[:ht, :wd] = i
    return result, ht, wd
def concat_lengends(seg_img, legend_img):
    """Place the legend (left) and the segmented image (right) side by side.

    The canvas is filled with the legend's top-left pixel value (its
    background colour), so any unused area blends with the legend.
    """
    leg_h, leg_w = legend_img.shape[0], legend_img.shape[1]
    seg_h, seg_w = seg_img.shape[0], seg_img.shape[1]
    fill = legend_img[0, 0, 0]
    out_img = np.full((max(leg_h, seg_h), leg_w + seg_w, 3), fill, dtype='uint8')
    out_img[:leg_h, :leg_w] = legend_img
    out_img[:seg_h, leg_w:] = seg_img
    return out_img
def main_convert(filename):
    """Segment the car parts in the image at *filename*.

    Returns a tuple: (numpy array of legend + photo/mask overlay,
    PIL image of the coloured segmentation mask alone).
    """
    print(filename)
    #load the image
    img_path = filename
    img = Image.open(img_path).convert("RGB")
    tags = []  # class ids that survive the noise filter; drives the legend
    #preprocess the image -> (padded 512x512 array, scaled height, scaled width)
    img_scaled_arr = preprocess_image(img_path)
    image = np.expand_dims(img_scaled_arr[0], axis=0)  # add batch axis
    #make the predictions; squeeze drops the batch axis -> (H, W, classes)
    pr_mask = model.predict(image).squeeze()
    pr_mask_int = np.zeros((pr_mask.shape[0],pr_mask.shape[1]))  # class-id map
    #filter the smallest noisy segments
    kernel = np.ones((5, 5), 'uint8')
    for i in range(1,19):  # channel 0 (background) is skipped
        array_one = np.round(pr_mask[:,:,i])  # binarise the class probability map
        # morphological opening removes speckle smaller than the 5x5 kernel
        op = cv2.morphologyEx(array_one, cv2.MORPH_OPEN, kernel)
        if sum(sum(op ==1)) > 100:  # keep only segments covering > 100 px
            tags.append(i)
            pr_mask_int[op ==1] = i
    # crop the padding off the mask, then resize it back to the photo's size
    img_segmented = np.array(Image.fromarray(pr_mask_int[:img_scaled_arr[1], :img_scaled_arr[2]]).resize(img.size))
    seg = get_colored_segmentation_image(img_segmented,19, colors=colors)
    # 50/50 blend of the original photo and the coloured mask
    fused_img = ((np.array(img)/255)/2 + seg/2).astype('float32')
    seg = Image.fromarray((seg*255).astype(np.uint8))
    fused_img = Image.fromarray((fused_img *255).astype(np.uint8))
    #get the legends (only for the classes actually detected)
    legend_predicted = get_legends(c, colors, tags)
    final_img = concat_lengends(np.array(fused_img), np.array(legend_predicted))
    return final_img, seg
# Gradio wiring: one filepath input, two image outputs.
inputs = [gr.Image(type="filepath", label="Car Image")]
# NOTE(review): gr.Image accepts type "numpy", "pil" or "filepath" only;
# the previous "PIL.Image" value is rejected by gradio at startup.
outputs = [gr.Image(type="pil", label="Detected Segments Image"),
           gr.Image(type="pil", label="Segment Image")]
title = "Car Parts Segmentation APP"
description = """This demo uses AI Models to detect 18 parts of cars: \n
1: background,
2: back bumper,
3: back glass,
4: back left door,
5: back left light,
6: back right door,
7: back right light,
8: front bumper,
9: front glass,
10: front left door,
11: front left light,
12: front right door,
13: front right light,
14: hood,
15: left mirror,
16: right mirror,
17: tailgate,
18: trunk,
19: wheel"""
examples = [['test_image.jpeg']]
io = gr.Interface(fn=main_convert, inputs=inputs, outputs=outputs, title=title, description=description, examples=examples,
 css= """.gr-button-primary { background: -webkit-linear-gradient(
 90deg, #355764 0%, #55a8a1 100% ) !important; background: #355764;
 background: linear-gradient(
 90deg, #355764 0%, #55a8a1 100% ) !important;
 background: -moz-linear-gradient( 90deg, #355764 0%, #55a8a1 100% ) !important;
 background: -webkit-linear-gradient(
 90deg, #355764 0%, #55a8a1 100% ) !important;
 color:white !important}"""
 )
io.launch()