Update app.py
app.py
CHANGED
@@ -1,3 +1,4 @@
+
 import gradio as gr
 from keras.models import load_model
 from patchify import patchify, unpatchify
@@ -120,6 +121,8 @@ def process_input_image(input_image):
 my_app = gr.Blocks()
 with my_app:
     gr.Markdown("Satellite Image Segmentation Application UI with Gradio")
+    gr.Markdown("Building: #3C1098, Land (unpaved area): #8429F6, Road: #6EC1E4, Vegetation: #FEDD3A, Water: #E2A929, Unlabeled: #9B9B9B")
+    gr.Markdown("Building: Purple, Land (unpaved area): Violet, Road: Blue, Vegetation: Gold/yellow, Water: Copper, Unlabeled: Gray")
     with gr.Tabs():
         with gr.TabItem("Select your image"):
             with gr.Row():
@@ -127,7 +130,7 @@ with my_app:
                 img_source = gr.Image(label="Please select source Image")
                 source_image_loader = gr.Button("Load above Image")
             with gr.Column():
-                output_label = gr.Label(label="Image Info")
+                output_label = gr.Label(label="Prediction Image Info")
                 img_output = gr.Image(label="Image Output")
     source_image_loader.click(
         process_input_image,
@@ -136,7 +139,171 @@ with my_app:
     )
 
 # Launch the app
-my_app.launch()
+my_app.launch(share=True)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# import gradio as gr
+# from keras.models import load_model
+# from patchify import patchify, unpatchify
+# import numpy as np
+# import cv2
+# from sklearn.preprocessing import MinMaxScaler
+# import matplotlib.pyplot as plt
+
+# # Define colors for classes
+# class_building = np.array([60, 16, 152])
+# class_land = np.array([132, 41, 246])
+# class_road = np.array([110, 193, 228])
+# class_vegetation = np.array([254, 221, 58])
+# class_water = np.array([226, 169, 41])
+# class_unlabeled = np.array([155, 155, 155])
+
+# # Number of classes in your segmentation task
+# total_classes = 6  # Update this with your total number of classes
+
+# # Define custom loss functions
+# def jaccard_coef(y_true, y_pred):
+#     smooth = 1e-12
+#     intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])
+#     union = K.sum(y_true, [1, 2, 3]) + K.sum(y_pred, [1, 2, 3]) - intersection
+#     jac = K.mean((intersection + smooth) / (union + smooth), axis=0)
+#     return jac
+
+# def dice_loss(y_true, y_pred):
+#     smooth = 1e-12
+#     intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
+#     union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
+#     dice = K.mean((2.0 * intersection + smooth) / (union + smooth), axis=0)
+#     return 1.0 - dice
+
+# def focal_loss(y_true, y_pred, alpha=0.25, gamma=2.0):
+#     y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
+#     ce_loss = -y_true * K.log(y_pred)
+#     weight = alpha * y_true * K.pow((1 - y_pred), gamma)
+#     fl_loss = ce_loss * weight
+#     return K.mean(K.sum(fl_loss, axis=-1))

+# def total_loss(y_true, y_pred):
+#     return dice_loss(y_true, y_pred) + (1 * focal_loss(y_true, y_pred))
+
+# # Load the pre-trained model
+# model_path = 'satmodel.h5'  # Replace with your model path
+# model = load_model(model_path, custom_objects={'total_loss': total_loss, 'jaccard_coef': jaccard_coef, 'dice_loss': dice_loss, 'focal_loss': focal_loss})
+
+# # MinMaxScaler for normalization
+# minmaxscaler = MinMaxScaler()
+
+# # Function to predict the full image
+# def predict_full_image(image, patch_size, model):
+#     original_shape = image.shape
+#     print(f"Original image shape: {original_shape}")
+
+#     # Pad image to make its dimensions divisible by the patch size
+#     pad_height = (patch_size - image.shape[0] % patch_size) % patch_size
+#     pad_width = (patch_size - image.shape[1] % patch_size) % patch_size
+#     image = np.pad(image, ((0, pad_height), (0, pad_width), (0, 0)), mode='constant', constant_values=0)
+#     padded_shape = image.shape
+#     print(f"Padded image shape: {padded_shape}")
+
+#     # Normalize the image
+#     image = minmaxscaler.fit_transform(image.reshape(-1, image.shape[-1])).reshape(image.shape)
+
+#     # Create patches
+#     patched_images = patchify(image, (patch_size, patch_size, 3), step=patch_size)
+#     print(f"Patched image shape: {patched_images.shape}")
+
+#     predicted_patches = []
+
+#     # Predict on each patch
+#     for i in range(patched_images.shape[0]):
+#         for j in range(patched_images.shape[1]):
+#             single_patch = patched_images[i, j, 0]
+#             single_patch = np.expand_dims(single_patch, axis=0)
+#             prediction = model.predict(single_patch)
+#             predicted_patches.append(prediction[0])
+
+#     # Reshape predicted patches
+#     predicted_patches = np.array(predicted_patches)
+#     print(f"Predicted patches shape: {predicted_patches.shape}")
+
+#     predicted_patches = predicted_patches.reshape(patched_images.shape[0], patched_images.shape[1], patch_size, patch_size, total_classes)
+#     print(f"Reshaped predicted patches shape: {predicted_patches.shape}")
+
+#     # Unpatchify the image
+#     reconstructed_image = np.zeros((padded_shape[0], padded_shape[1], total_classes))
+#     for i in range(patched_images.shape[0]):
+#         for j in range(patched_images.shape[1]):
+#             reconstructed_image[i * patch_size:(i + 1) * patch_size, j * patch_size:(j + 1) * patch_size, :] = predicted_patches[i, j]
+#     print(f"Reconstructed image shape (with padding): {reconstructed_image.shape}")
+
+#     # Remove padding
+#     reconstructed_image = reconstructed_image[:original_shape[0], :original_shape[1]]
+#     print(f"Final reconstructed image shape: {reconstructed_image.shape}")
+
+#     return reconstructed_image
+
+# # Function to process the input image
+# def process_input_image(input_image):
+#     image_patch_size = 256
+#     predicted_full_image = predict_full_image(input_image, image_patch_size, model)
+
+#     # Convert the predictions to RGB
+#     predicted_full_image_rgb = np.zeros_like(input_image)
+
+#     # Map the predicted class labels to RGB colors
+#     predicted_full_image_rgb[predicted_full_image.argmax(axis=-1) == 0] = class_water
+#     predicted_full_image_rgb[predicted_full_image.argmax(axis=-1) == 1] = class_land
+#     predicted_full_image_rgb[predicted_full_image.argmax(axis=-1) == 2] = class_road
+#     predicted_full_image_rgb[predicted_full_image.argmax(axis=-1) == 3] = class_building
+#     predicted_full_image_rgb[predicted_full_image.argmax(axis=-1) == 4] = class_vegetation
+#     predicted_full_image_rgb[predicted_full_image.argmax(axis=-1) == 5] = class_unlabeled
+
+#     return "Image processed", predicted_full_image_rgb
+
+# # Gradio application
+# my_app = gr.Blocks()
+# with my_app:
+#     gr.Markdown("Satellite Image Segmentation Application UI with Gradio")
+#     with gr.Tabs():
+#         with gr.TabItem("Select your image"):
+#             with gr.Row():
+#                 with gr.Column():
+#                     img_source = gr.Image(label="Please select source Image")
+#                     source_image_loader = gr.Button("Load above Image")
+#                 with gr.Column():
+#                     output_label = gr.Label(label="Image Info")
+#                     img_output = gr.Image(label="Image Output")
+#     source_image_loader.click(
+#         process_input_image,
+#         inputs=[img_source],
+#         outputs=[output_label, img_output]
+#     )
+
+# # Launch the app
+# my_app.launch()
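
A note for anyone reviving the commented-out reference code appended above: its import list never binds the Keras backend, yet the loss functions call K.sum, K.clip, K.epsilon, and K.pow, so uncommenting it as-is would raise a NameError. The sketch below shows the import the author presumably intended (an assumption), plus a plain-NumPy sanity check of the same smoothed Jaccard formula on made-up masks:

# Assumed missing import if the reference code is ever uncommented:
# from keras import backend as K

# NumPy sanity check of the smoothed Jaccard coefficient on toy data.
import numpy as np

smooth = 1e-12
y_true = np.array([[1.0, 0.0], [1.0, 1.0]])  # toy ground-truth mask
y_pred = np.array([[0.9, 0.1], [0.8, 1.0]])  # toy soft prediction

intersection = np.sum(np.abs(y_true * y_pred))          # 2.7
union = np.sum(y_true) + np.sum(y_pred) - intersection  # 3.0 + 2.8 - 2.7 = 3.1
jac = (intersection + smooth) / (union + smooth)
print(round(jac, 3))  # 0.871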
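The patched_images[i, j, 0] indexing in the prediction loop falls out of how patchify shapes its output: for a 3-channel image with a 3D patch size and an integer step, it returns a six-dimensional grid whose channel axis has length 1. A quick check (the 512x768 size here is hypothetical; the app pads real inputs so both dimensions divide the 256-pixel patch size first):

# Minimal sketch of the patchify output shape the prediction loop relies on.
import numpy as np
from patchify import patchify

image = np.zeros((512, 768, 3), dtype=np.float32)  # already divisible by 256
patches = patchify(image, (256, 256, 3), step=256)

print(patches.shape)           # (2, 3, 1, 256, 256, 3): a 2x3 grid of patches
print(patches[0, 0, 0].shape)  # (256, 256, 3), hence the [i, j, 0] in the loop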
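Likewise, the six boolean-mask assignments in process_input_image can be collapsed into a single palette lookup. This is an alternative formulation, not what the commit does; the palette rows copy the class_* colors from the script in its label order, and the scores are random stand-ins for model output:

# Sketch: mapping an (H, W, classes) score volume to RGB with one lookup table.
import numpy as np

palette = np.array([
    [226, 169, 41],   # 0: water
    [132, 41, 246],   # 1: land
    [110, 193, 228],  # 2: road
    [60, 16, 152],    # 3: building
    [254, 221, 58],   # 4: vegetation
    [155, 155, 155],  # 5: unlabeled
], dtype=np.uint8)

scores = np.random.rand(4, 4, 6)  # stand-in for a (H, W, classes) prediction
labels = scores.argmax(axis=-1)   # (H, W) class index per pixel
rgb = palette[labels]             # one fancy-index replaces six boolean masks
assert rgb.shape == (4, 4, 3)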