basab1142 committed
Commit d1aedac · verified · Parent: defb017

Update app.py

Files changed (1): app.py (+418 −412)
app.py CHANGED
import streamlit as st
import cv2
import numpy as np
from PIL import Image
import time
import ast
from streamlit_drawable_canvas import st_canvas
import matplotlib.pyplot as plt
from estimate_homography import calculate_homography, fit_image_in_target_space

stitched_image_rgb, stitched_result = None, None
# Function to load an image from uploaded file
def load_image(uploaded_file):
    img = cv2.imdecode(np.frombuffer(uploaded_file.read(), np.uint8), cv2.IMREAD_GRAYSCALE)
    return img

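# Note: cv2.imdecode is used (rather than cv2.imread) because Streamlit's
# uploader yields an in-memory file-like object, not a filesystem path.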
# Function to compute stereo vision and disparity map
def compute_stereo_vision(img1, img2):
    # Feature detection and matching using ORB (a good alternative to SIFT for uncalibrated cameras)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    # BFMatcher with the Hamming norm (ORB descriptors are binary)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)

    # Sort matches by distance
    matches = sorted(matches, key=lambda x: x.distance)

    # Collect the matched point coordinates
    pts1 = np.array([kp1[m.queryIdx].pt for m in matches])
    pts2 = np.array([kp2[m.trainIdx].pt for m in matches])

    # Fundamental matrix using RANSAC to reject outliers
    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)

    # Keep only the RANSAC inliers for pose estimation
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    # Estimate the camera pose (rotation and translation)
    K = np.eye(3)  # No calibration available, so assume identity intrinsics
    E = K.T @ F @ K  # Essential matrix (equals F when K is the identity)
    _, R, T, _ = cv2.recoverPose(E, pts1, pts2)

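    # Caveat: with identity intrinsics the "essential" matrix above is really
    # just the fundamental matrix, so R and T are only qualitatively meaningful.
    # For metric results, K should come from a real calibration
    # (e.g. cv2.calibrateCamera on checkerboard views).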
    # Stereo rectification: R1/R2 are the rectification rotations and P1/P2
    # the rectified projection matrices for the two cameras
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(K, None, K, None, img1.shape[::-1], R, T, alpha=0)
    left_map_x, left_map_y = cv2.initUndistortRectifyMap(K, None, R1, P1, img1.shape[::-1], cv2.CV_32F)
    right_map_x, right_map_y = cv2.initUndistortRectifyMap(K, None, R2, P2, img2.shape[::-1], cv2.CV_32F)

    # Apply the rectification transformations to the images
    img1_rectified = cv2.remap(img1, left_map_x, left_map_y, interpolation=cv2.INTER_LINEAR)
    img2_rectified = cv2.remap(img2, right_map_x, right_map_y, interpolation=cv2.INTER_LINEAR)

    # Resize img2_rectified to match img1_rectified size (if necessary)
    if img1_rectified.shape != img2_rectified.shape:
        img2_rectified = cv2.resize(img2_rectified, (img1_rectified.shape[1], img1_rectified.shape[0]))

    # Disparity map computation using StereoBM
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(img1_rectified, img2_rectified)

    return disparity, img1_rectified, img2_rectified

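# StereoBM notes: both inputs must be 8-bit single-channel images (hence the
# grayscale decode in load_image), numDisparities must be a positive multiple
# of 16, blockSize must be odd, and compute() returns fixed-point disparities
# scaled by 16 (divide by 16.0 for real values).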

def run_point_est(world_pts, img_pts, img):
    if isinstance(img_pts, list):
        img_pts = np.array(img_pts)

    if isinstance(world_pts, list):
        world_pts = np.array(world_pts)

    # Plot the original image with marked points
    st.write("Original Image with Points")
    plt.figure()
    plt.imshow(img)
    plt.scatter(img_pts[:, 0], img_pts[:, 1], color='red')
    plt.axis("off")
    plt.title("Original image with image points marked in red")
    st.pyplot(plt)

    H = calculate_homography(img_pts, world_pts)  # img_pts = H * world_pts

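    # Cross-check the estimate: append a homogeneous 1 to each world point,
    # push it through H, then divide by the last row to de-homogenize. The
    # result should reproduce the selected image points up to noise.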
    #### Cross check ####
    t_one = np.ones((img_pts.shape[0], 1))
    t_out_pts = np.concatenate((world_pts, t_one), axis=1)
    x = np.matmul(H, t_out_pts.T)
    x = x / x[-1, :]

    st.write("Given Image Points:", img_pts)
    st.write("Calculated Image Points:", x.T)
    st.write("Homography Matrix (OpenCV):", cv2.findHomography(world_pts.astype(np.float64), img_pts.astype(np.float64))[0])
    st.write("Calculated Homography Matrix:", H)

    #####################
    h, w, _ = img.shape
    corners_img = np.array([[0, 0], [w, 0], [w, h], [0, h]])
    H_inv = np.linalg.inv(H)
    t_one = np.ones((corners_img.shape[0], 1))  # fresh ones column: there are always 4 corners
    t_out_pts = np.concatenate((corners_img, t_one), axis=1)
    world_crd_corners = np.matmul(H_inv, t_out_pts.T)
    world_crd_corners = world_crd_corners / world_crd_corners[-1, :]  # Normalize

    min_crd = np.amin(world_crd_corners.T, axis=0)
    max_crd = np.amax(world_crd_corners.T, axis=0)

    offset = min_crd.astype(np.int64)
    offset[2] = 0

    width_world = np.ceil(max_crd - min_crd)[0] + 1
    height_world = np.ceil(max_crd - min_crd)[1] + 1

    world_img = np.zeros((int(height_world), int(width_world), 3), dtype=np.uint8)
    mask = np.ones((int(height_world), int(width_world)))

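    # fit_image_in_target_space (from the local estimate_homography module)
    # presumably inverse-maps each pixel of this world-frame canvas through H
    # (shifted by offset) and samples the source image; its exact behaviour
    # lives in that module.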
    out = fit_image_in_target_space(img, world_img, mask, H, offset)

    st.write("Corrected Image")
    plt.figure()
    plt.imshow(out)
    plt.axis("off")
    plt.title("Corrected image from point-to-point correspondence")
    st.pyplot(plt)


# Function to stitch images
def stitch_images(images):
    stitcher = cv2.Stitcher_create() if cv2.__version__.startswith('4') else cv2.createStitcher()
    status, stitched_image = stitcher.stitch(images)
    if status == cv2.Stitcher_OK:
        return stitched_image, status
    else:
        return None, status

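# A non-OK stitcher status signals failure: cv2.Stitcher_ERR_NEED_MORE_IMGS (1)
# usually means too little overlap between the input images; statuses 2 and 3
# indicate homography estimation and camera-parameter adjustment failures.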
# Function to match features
def match_features(images):
    if len(images) < 2:
        return None, "At least two images are required for feature matching."

    gray1 = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(images[1], cv2.COLOR_BGR2GRAY)

    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)

    bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    matches = bf.match(descriptors1, descriptors2)
    matches = sorted(matches, key=lambda x: x.distance)

    matched_image = cv2.drawMatches(images[0], keypoints1, images[1], keypoints2, matches[:50], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    return matched_image, None

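# Note: SIFT descriptors are floating-point vectors, hence cv2.NORM_L2 here;
# the binary ORB descriptors in compute_stereo_vision() use cv2.NORM_HAMMING.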
# Function to cartoonify an image
def cartoonify_image(image):
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    gray_blur = cv2.medianBlur(gray, 7)

    edges = cv2.adaptiveThreshold(
        gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 10
    )

    color = cv2.bilateralFilter(image, 9, 250, 250)

    cartoon = cv2.bitwise_and(color, color, mask=edges)

    return cartoon

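# Cartoon effect recipe: median blur plus adaptive threshold extract bold
# edges, the bilateral filter flattens colour regions while keeping edges
# sharp, and bitwise_and overlays the edge mask on the smoothed colour image.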
# Streamlit layout and UI
st.set_page_config(page_title="Image Stitching and Feature Matching", layout="wide")
st.title("Image Stitching and Feature Matching Application")

# State to store captured images
if "captured_images" not in st.session_state:
    st.session_state["captured_images"] = []

if "stitched_image" not in st.session_state:
    st.session_state["stitched_image"] = None

# Sidebar for displaying captured images
st.sidebar.header("Captured Images")
if st.session_state["captured_images"]:
    placeholder = st.sidebar.empty()
    with placeholder.container():
        for i, img in enumerate(st.session_state["captured_images"]):
            img_thumbnail = cv2.resize(img, (100, 100))
            st.image(cv2.cvtColor(img_thumbnail, cv2.COLOR_BGR2RGB), caption=f"Image {i+1}", use_container_width=False)
            if st.button(f"Delete Image {i+1}", key=f"delete_{i}"):
                st.session_state["captured_images"].pop(i)
                placeholder.empty()  # Clear and refresh the sidebar
                break

# Capture an image from the camera input
st.header("Upload or Capture Images")
uploaded_files = st.file_uploader("Upload images", type=["jpg", "jpeg", "png"], accept_multiple_files=True)
captured_image = st.camera_input("Take a picture using your camera")

if st.button("Add Captured Image"):
    if captured_image:
        captured_image_array = cv2.cvtColor(np.array(Image.open(captured_image)), cv2.COLOR_RGB2BGR)
        st.session_state["captured_images"].append(captured_image_array)
        st.success(f"Captured image {len(st.session_state['captured_images'])} added!")

# Combine uploaded and captured images (converting uploads to RGB first
# guards against RGBA or palette-mode PNGs)
images = [cv2.cvtColor(np.array(Image.open(file).convert("RGB")), cv2.COLOR_RGB2BGR) for file in uploaded_files]
images.extend(st.session_state["captured_images"])

st.write(f"Total images: {len(images)}")

# Placeholder for dynamic updates
loading_placeholder = st.empty()

# Function to show a brief loading message
def show_loading_bar(placeholder):
    with placeholder:
        st.write("Processing images... Please wait.")
        time.sleep(2)

if st.button("Stitch Images"):
    if len(images) < 2:
        st.error("Please provide at least two images for stitching.")
    else:
        show_loading_bar(loading_placeholder)
        stitched_result, status = stitch_images(images)
        loading_placeholder.empty()
        if stitched_result is not None:
            stitched_image_rgb = cv2.cvtColor(stitched_result, cv2.COLOR_BGR2RGB)
            st.image(stitched_image_rgb, caption="Stitched Image", use_container_width=True)
            st.session_state["stitched_image"] = stitched_image_rgb
            st.success("Stitching completed successfully!")
        else:
            st.error(f"Stitching failed with status: {status}.")

# Always display the stitched image if it exists in the session state
if "stitched_image" in st.session_state and st.session_state["stitched_image"] is not None:
    st.header("Stitched Image")
    st.image(st.session_state["stitched_image"], caption="Stitched Image", use_container_width=True)

if st.button("Show Matching Features"):
    if len(images) < 2:
        st.error("Please provide at least two images for feature matching.")
    else:
        show_loading_bar(loading_placeholder)
        matched_image, error = match_features(images)
        loading_placeholder.empty()
        if matched_image is not None:
            matched_image_rgb = cv2.cvtColor(matched_image, cv2.COLOR_BGR2RGB)
            st.image(matched_image_rgb, caption="Feature Matching Visualization", use_container_width=True)
            st.success("Feature matching completed successfully!")
        else:
            st.error(error)

if st.session_state["stitched_image"] is not None:
    st.header("Homography Transformation on Stitched Image")

    st.write("### Select Points on Stitched Image")
    stitched_image = st.session_state["stitched_image"]
    image = Image.fromarray(stitched_image)  # session state already holds an RGB array

    canvas_result = st_canvas(
        fill_color="rgba(255, 0, 0, 0.3)",
        stroke_width=3,
        background_image=image,
        update_streamlit=True,
        drawing_mode="point",
        height=image.height,
        width=image.width,
        key="canvas",
    )

    img_pts = []

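    # streamlit-drawable-canvas stores each click in "point" mode as a small
    # circle object; its centre is recovered from the bounding-box fields below.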
    if canvas_result.json_data is not None:
        for obj in canvas_result.json_data["objects"]:
            if obj["type"] == "circle":
                x = obj["left"] + obj["width"] / 2
                y = obj["top"] + obj["height"] / 2
                img_pts.append([int(x), int(y)])

    if img_pts:
        st.write("### Selected Image Points")
        st.write(img_pts)

    st.write("### Enter Corresponding World Points")
    world_pts = st.text_area(
        "Enter world points as a list of tuples (e.g., [(0, 0), (300, 0), (0, 400), (300, 400)])",
        value="[(0, 0), (300, 0), (0, 400), (300, 400)]",
    )
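
    # ast.literal_eval (used below) parses the tuple list safely: unlike
    # eval(), it accepts only Python literals, so no arbitrary code can run.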

    if st.button("Run Homography Transformation"):
        try:
            world_pts = ast.literal_eval(world_pts)
            if len(world_pts) != len(img_pts):
                st.error("The number of world points must match the number of image points.")
            else:
                run_point_est(world_pts, img_pts, stitched_image)
        except Exception as e:
            st.error(f"Error: {e}")


if st.session_state["stitched_image"] is not None:
    st.header("Cartoonify & Do Homography on Your Stitched Image")
    if st.button("Cartoonify Stitched Image"):
        cartoon = cartoonify_image(cv2.cvtColor(st.session_state["stitched_image"], cv2.COLOR_RGB2BGR))
        st.image(cv2.cvtColor(cartoon, cv2.COLOR_BGR2RGB), caption="Cartoonified Image", use_container_width=True)
        st.success("Cartoonification completed successfully!")

# Upload images
st.subheader("Upload Left and Right Images")
left_image_file = st.file_uploader("Choose the Left Image", type=["jpg", "png", "jpeg"])
right_image_file = st.file_uploader("Choose the Right Image", type=["jpg", "png", "jpeg"])

# Check that both images are uploaded
if left_image_file and right_image_file:
    # Load the uploaded images
    img1 = load_image(left_image_file)
    img2 = load_image(right_image_file)

    # Display the uploaded images
    st.image(img1, caption="Left Image", use_container_width=True)
    st.image(img2, caption="Right Image", use_container_width=True)

    # Compute the stereo vision and disparity map
    disparity, img1_rectified, img2_rectified = compute_stereo_vision(img1, img2)

    # Display the rectified images
    # st.subheader("Rectified Left Image")
    # st.image(img1_rectified, caption="Rectified Left Image", use_container_width=True)

    # st.subheader("Rectified Right Image")
    # st.image(img2_rectified, caption="Rectified Right Image", use_container_width=True)

    # Show the disparity map
    st.subheader("Disparity Map")
    fig, ax = plt.subplots()
    im = ax.imshow(disparity, cmap='gray')
    ax.set_title("Disparity Map")
    fig.colorbar(im, ax=ax)
    st.pyplot(fig)

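    # matplotlib rescales the raw fixed-point disparities for display; for
    # metric work convert first: disparity.astype(np.float32) / 16.0.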
    # Optionally: display an anaglyph (combined) view of the rectified images
    # anaglyph = cv2.merge([img1_rectified, np.zeros_like(img1_rectified), img2_rectified])
    # st.subheader("Anaglyph Stereo View")
    # st.image(anaglyph, caption="Anaglyph Stereo View", use_container_width=True)


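# ------------------------------------------------------------------
# Legacy session-state-driven homography flow, commented out in the
# committed file; kept below unchanged for reference.
# ------------------------------------------------------------------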
# if "img_pts" not in st.session_state:
#     st.session_state["img_pts"] = []

# if "world_pts" not in st.session_state:
#     st.session_state["world_pts"] = []

# if "homography_ready" not in st.session_state:
#     st.session_state["homography_ready"] = False

# if st.button('Homography Transformation'):
#     if st.session_state["stitched_image"] is not None:
#         st.write("### Select Points on Stitched Image")
#         stitched_image = st.session_state["stitched_image"]
#         image = Image.fromarray(cv2.cvtColor(stitched_image, cv2.COLOR_BGR2RGB))

#         # Display canvas for selecting points
#         canvas_result = st_canvas(
#             fill_color="rgba(255, 0, 0, 0.3)",
#             stroke_width=3,
#             background_image=image,
#             update_streamlit=True,
#             drawing_mode="point",
#             height=image.height,
#             width=image.width,
#             key="canvas",
#         )

#         # Collect selected points
#         if canvas_result.json_data is not None:
#             img_pts_temp = []
#             for obj in canvas_result.json_data["objects"]:
#                 if obj["type"] == "circle":
#                     x = obj["left"] + obj["width"] / 2
#                     y = obj["top"] + obj["height"] / 2
#                     img_pts_temp.append([int(x), int(y)])

#             # Only update points if there are new ones
#             if img_pts_temp:
#                 st.session_state["img_pts"] = img_pts_temp

#         # Display the selected points
#         if st.session_state["img_pts"]:
#             st.write("### Selected Image Points")
#             st.write(st.session_state["img_pts"])

#         # Input world points
#         world_pts_input = st.text_area(
#             "Enter world points as a list of tuples (e.g., [(0, 0), (300, 0), (0, 400), (300, 400)])",
#             value="[(0, 0), (300, 0), (0, 400), (300, 400)]",
#         )

#         if st.button("Confirm Points and Run Homography"):
#             try:
#                 st.session_state["world_pts"] = eval(world_pts_input)
#                 if len(st.session_state["world_pts"]) != len(st.session_state["img_pts"]):
#                     st.error("The number of world points must match the number of image points.")
#                 else:
#                     st.session_state["homography_ready"] = True
#                     st.success("Points confirmed! Ready for homography transformation.")
#             except Exception as e:
#                 st.error(f"Error parsing world points: {e}")

#         # Perform homography transformation
#         if st.session_state.get("homography_ready"):
#             st.write("### Running Homography Transformation...")
#             try:
#                 run_point_est(
#                     st.session_state["world_pts"],
#                     st.session_state["img_pts"],
#                     st.session_state["stitched_image"],
#                 )
#                 st.session_state["homography_ready"] = False  # Reset the flag after execution
#             except Exception as e:
#                 st.error(f"Error during homography transformation: {e}")