Daniel Cerda Escobar committed on
Commit
8f7c813
β€’
1 Parent(s): f7aa9c6

Update app

Browse files
Files changed (1) hide show
  1. app.py +214 -214
app.py CHANGED
@@ -14,89 +14,89 @@ from streamlit_image_comparison import image_comparison
14
  #MMDET_YOLOX_TINY_CONFIG_URL = "https://huggingface.co/fcakyon/mmdet-yolox-tiny/raw/main/yolox_tiny_8x8_300e_coco.py"
15
  #MMDET_YOLOX_TINY_CONFIG_PATH = "config.py"
16
 
17
- #YOLOV8M_MODEL_URL = "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt"
18
- #YOLOV8M_MODEL_PATH = "tests/data/models/yolov8/yolov8m.pt"
19
 
20
 
21
  #YOLOV8M_MODEL_PATH = 'models/yolov8m.pt'
22
 
23
 
24
- # IMAGE_TO_URL = {
25
- # "apple_tree.jpg": "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
26
- # "highway.jpg": "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
27
- # "highway2.jpg": "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
28
- # "highway3.jpg": "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
29
- # "highway2-yolov8m.jpg": "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
30
- # "highway2-sahi.jpg": "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
31
- # }
32
 
33
 
34
- # @st.cache_data(show_spinner=False)
35
- # def download_comparison_images():
36
- # sahi.utils.file.download_from_url(
37
- # "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
38
- # "highway2-yolov8m.jpg",
39
- # )
40
- # sahi.utils.file.download_from_url(
41
- # "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
42
- # "highway2-sahi.jpg",
43
- # )
44
 
45
 
46
- # @st.cache_data(show_spinner=False)
47
- # def get_model():
48
 
49
- # sahi.utils.file.download_from_url(
50
- # YOLOV8M_MODEL_URL,
51
- # YOLOV8M_MODEL_PATH,
52
- # )
53
- # #sahi.utils.file.download_from_url(
54
- # # MMDET_YOLOX_TINY_MODEL_URL,
55
- # # MMDET_YOLOX_TINY_MODEL_PATH,
56
- # #)
57
- # #sahi.utils.file.download_from_url(
58
- # # MMDET_YOLOX_TINY_CONFIG_URL,
59
- # # MMDET_YOLOX_TINY_CONFIG_PATH,
60
- # #)
61
 
62
- # #sahi.utils.yolov8.download_yolov8m_model(destination_path = YOLOV8M_MODEL_PATH)
63
-
64
- # detection_model = AutoDetectionModel.from_pretrained(
65
- # model_type='yolov8',
66
- # model_path=YOLOV8M_MODEL_PATH,
67
- # #config_path=MMDET_YOLOX_TINY_CONFIG_PATH,
68
- # confidence_threshold=0.5,
69
- # device="cpu",
70
- # )
71
- # return detection_model
72
-
73
-
74
- # class SpinnerTexts:
75
- # def __init__(self):
76
- # self.ind_history_list = []
77
- # self.text_list = [
78
- # "Meanwhile check out [MMDetection Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_mmdetection.ipynb)!",
79
- # "Meanwhile check out [YOLOv5 Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_yolov5.ipynb)!",
80
- # "Meanwhile check out [aerial object detection with SAHI](https://blog.ml6.eu/how-to-detect-small-objects-in-very-large-images-70234bab0f98?gi=b434299595d4)!",
81
- # "Meanwhile check out [COCO Utilities of SAHI](https://github.com/obss/sahi/blob/main/docs/COCO.md)!",
82
- # "Meanwhile check out [FiftyOne utilities of SAHI](https://github.com/obss/sahi#fiftyone-utilities)!",
83
- # "Meanwhile [give a Github star to SAHI](https://github.com/obss/sahi/stargazers)!",
84
- # "Meanwhile see [how easy is to install SAHI](https://github.com/obss/sahi#getting-started)!",
85
- # "Meanwhile check out [Medium blogpost of SAHI](https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80)!",
86
- # "Meanwhile try out [YOLOv5 HF Spaces demo of SAHI](https://huggingface.co/spaces/fcakyon/sahi-yolov5)!",
87
- # ]
88
-
89
- # def _store(self, ind):
90
- # if len(self.ind_history_list) == 6:
91
- # self.ind_history_list.pop(0)
92
- # self.ind_history_list.append(ind)
93
-
94
- # def get(self):
95
- # ind = 0
96
- # while ind in self.ind_history_list:
97
- # ind = random.randint(0, len(self.text_list) - 1)
98
- # self._store(ind)
99
- # return self.text_list[ind]
100
 
101
 
102
  st.set_page_config(
@@ -106,16 +106,16 @@ st.set_page_config(
106
  initial_sidebar_state="auto",
107
  )
108
 
109
- # download_comparison_images()
110
 
111
- # if "last_spinner_texts" not in st.session_state:
112
- # st.session_state["last_spinner_texts"] = SpinnerTexts()
113
 
114
- # if "output_1" not in st.session_state:
115
- # st.session_state["output_1"] = Image.open("highway2-yolov8m.jpg")
116
 
117
- # if "output_2" not in st.session_state:
118
- # st.session_state["output_2"] = Image.open("highway2-sahi.jpg")
119
 
120
  st.markdown(
121
  """
@@ -126,139 +126,139 @@ st.markdown(
126
  """,
127
  unsafe_allow_html=True,
128
  )
129
- # # st.markdown(
130
- # # """
131
- # # <p style='text-align: center'>
132
- # # <a href='https://github.com/obss/sahi' target='_blank'>SAHI Github</a> | <a href='https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox' target='_blank'>YOLOX Github</a> | <a href='https://huggingface.co/spaces/fcakyon/sahi-yolov5' target='_blank'>SAHI+YOLOv5 Demo</a>
133
- # # <br />
134
- # # Follow me for more! <a href='https://twitter.com/fcakyon' target='_blank'> <img src="https://img.icons8.com/color/48/000000/twitter--v1.png" height="30"></a><a href='https://github.com/fcakyon' target='_blank'><img src="https://img.icons8.com/fluency/48/000000/github.png" height="27"></a><a href='https://www.linkedin.com/in/fcakyon/' target='_blank'><img src="https://img.icons8.com/fluency/48/000000/linkedin.png" height="30"></a> <a href='https://fcakyon.medium.com/' target='_blank'><img src="https://img.icons8.com/ios-filled/48/000000/medium-monogram.png" height="26"></a>
135
- # # </p>
136
- # # """,
137
- # # unsafe_allow_html=True,
138
- # # )
139
-
140
- # st.write("##")
141
-
142
- # with st.expander("Usage"):
143
- # st.markdown(
144
- # """
145
- # <p>
146
- # 1. Upload or select the input image πŸ–ΌοΈ
147
- # <br />
148
- # 2. (Optional) Set SAHI parameters βœ”οΈ
149
- # <br />
150
- # 3. Press to "πŸš€ Perform Prediction"
151
- # <br />
152
- # 4. Enjoy sliding image comparison πŸ”₯
153
- # </p>
154
- # """,
155
- # unsafe_allow_html=True,
156
- # )
157
-
158
- # st.write("##")
159
-
160
- # col1, col2, col3 = st.columns([6, 1, 6])
161
- # with col1:
162
- # st.markdown(f"##### Set input image:")
163
-
164
- # # set input image by upload
165
- # image_file = st.file_uploader(
166
- # "Upload an image to test:", type=["jpg", "jpeg", "png"]
167
- # )
168
-
169
- # # set input image from exapmles
170
- # def slider_func(option):
171
- # option_to_id = {
172
- # "apple_tree.jpg": str(1),
173
- # "highway.jpg": str(2),
174
- # "highway2.jpg": str(3),
175
- # "highway3.jpg": str(4),
176
- # }
177
- # return option_to_id[option]
178
-
179
- # slider = st.select_slider(
180
- # "Or select from example images:",
181
- # options=["apple_tree.jpg", "highway.jpg", "highway2.jpg", "highway3.jpg"],
182
- # format_func=slider_func,
183
- # value="highway2.jpg",
184
- # )
185
-
186
- # # visualize input image
187
- # if image_file is not None:
188
- # image = Image.open(image_file)
189
- # else:
190
- # image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[slider])
191
- # st.image(image, width=300)
192
-
193
- # with col3:
194
- # st.markdown(f"##### Set SAHI parameters:")
195
-
196
- # slice_size = st.number_input("slice_size", min_value=256, value=512, step=256)
197
- # overlap_ratio = st.number_input(
198
- # "overlap_ratio", min_value=0.0, max_value=0.6, value=0.2, step=0.2
199
- # )
200
- # #postprocess_type = st.selectbox(
201
- # # "postprocess_type", options=["NMS", "GREEDYNMM"], index=0
202
- # #)
203
- # #postprocess_match_metric = st.selectbox(
204
- # # "postprocess_match_metric", options=["IOU", "IOS"], index=0
205
- # #)
206
- # postprocess_match_threshold = st.number_input(
207
- # "postprocess_match_threshold", value=0.5, step=0.1
208
- # )
209
- # #postprocess_class_agnostic = st.checkbox("postprocess_class_agnostic", value=True)
210
-
211
- # col1, col2, col3 = st.columns([4, 3, 4])
212
- # with col2:
213
- # submit = st.button("πŸš€ Perform Prediction")
214
-
215
- # if submit:
216
- # # perform prediction
217
- # with st.spinner(
218
- # text="Downloading model weight.. "
219
- # + st.session_state["last_spinner_texts"].get()
220
- # ):
221
- # detection_model = get_model()
222
-
223
- # image_size = 1280
224
-
225
- # with st.spinner(
226
- # text="Performing prediction.. " + st.session_state["last_spinner_texts"].get()
227
- # ):
228
- # output_1, output_2 = sahi_yolov8m_inference(
229
- # image,
230
- # detection_model,
231
- # image_size=image_size,
232
- # slice_height=slice_size,
233
- # slice_width=slice_size,
234
- # overlap_height_ratio=overlap_ratio,
235
- # overlap_width_ratio=overlap_ratio,
236
- # #postprocess_type=postprocess_type,
237
- # #postprocess_match_metric=postprocess_match_metric,
238
- # postprocess_match_threshold=postprocess_match_threshold,
239
- # #postprocess_class_agnostic=postprocess_class_agnostic,
240
- # )
241
-
242
- # st.session_state["output_1"] = output_1
243
- # st.session_state["output_2"] = output_2
244
-
245
- # st.markdown(f"##### YOLOv8 Standard vs SAHI Prediction:")
246
- # static_component = image_comparison(
247
- # img1=st.session_state["output_1"],
248
- # img2=st.session_state["output_2"],
249
- # label1="YOLOX",
250
- # label2="SAHI+YOLOX",
251
- # width=700,
252
- # starting_position=50,
253
- # show_labels=True,
254
- # make_responsive=True,
255
- # in_memory=True,
256
  # )
257
- # # st.markdown(
258
- # # """
259
- # # <p style='text-align: center'>
260
- # # prepared with <a href='https://github.com/fcakyon/streamlit-image-comparison' target='_blank'>streamlit-image-comparison</a>
261
- # # </p>
262
- # # """,
263
- # # unsafe_allow_html=True,
264
- # # )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
  #MMDET_YOLOX_TINY_CONFIG_URL = "https://huggingface.co/fcakyon/mmdet-yolox-tiny/raw/main/yolox_tiny_8x8_300e_coco.py"
15
  #MMDET_YOLOX_TINY_CONFIG_PATH = "config.py"
16
 
17
# Pretrained YOLOv8-medium checkpoint published under the ultralytics assets release.
YOLOV8M_MODEL_URL = "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt"
# Local path the weight file is downloaded to before building the detection model.
YOLOV8M_MODEL_PATH = "tests/data/models/yolov8/yolov8m.pt"
19
 
20
 
21
  #YOLOV8M_MODEL_PATH = 'models/yolov8m.pt'
22
 
23
 
24
# All demo images live under the same GitHub user-images bucket; factor the
# shared prefix so each entry only carries its unique blob name.
_GH_USER_IMAGES = "https://user-images.githubusercontent.com/34196005"

# Example-image filename -> downloadable URL.
IMAGE_TO_URL = {
    name: f"{_GH_USER_IMAGES}/{blob}"
    for name, blob in (
        ("apple_tree.jpg", "142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg"),
        ("highway.jpg", "142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg"),
        ("highway2.jpg", "142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg"),
        ("highway3.jpg", "142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg"),
        ("highway2-yolov8m.jpg", "143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg"),
        ("highway2-sahi.jpg", "143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg"),
    )
}
32
 
33
 
34
@st.cache_data(show_spinner=False)
def download_comparison_images():
    """Fetch the two static demo result images shown in the default comparison widget.

    Cached by Streamlit so the files are downloaded only once per session/data hash.
    """
    for url, filename in (
        (
            "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
            "highway2-yolov8m.jpg",
        ),
        (
            "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
            "highway2-sahi.jpg",
        ),
    ):
        sahi.utils.file.download_from_url(url, filename)
44
 
45
 
46
@st.cache_data(show_spinner=False)
def get_model():
    """Download the YOLOv8m weights (if needed) and build a SAHI detection model.

    Returns an ``AutoDetectionModel`` configured for CPU inference with a 0.5
    confidence threshold. Cached by Streamlit so the download and model
    construction happen only once.
    """
    # Ensure the checkpoint exists locally before wrapping it.
    sahi.utils.file.download_from_url(
        YOLOV8M_MODEL_URL,
        YOLOV8M_MODEL_PATH,
    )

    return AutoDetectionModel.from_pretrained(
        model_type='yolov8',
        model_path=YOLOV8M_MODEL_PATH,
        confidence_threshold=0.5,
        device="cpu",
    )
72
+
73
+
74
class SpinnerTexts:
    """Rotating pool of markdown tips shown while a spinner is active.

    Remembers the last six indices handed out so the same tip is not repeated
    in quick succession. NOTE: the very first ``get()`` always returns index 0
    (the candidate starts at 0 and the history is empty).
    """

    def __init__(self):
        # Most-recent indices served, oldest first; capped at six entries.
        self.ind_history_list = []
        self.text_list = [
            "Meanwhile check out [MMDetection Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_mmdetection.ipynb)!",
            "Meanwhile check out [YOLOv5 Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_yolov5.ipynb)!",
            "Meanwhile check out [aerial object detection with SAHI](https://blog.ml6.eu/how-to-detect-small-objects-in-very-large-images-70234bab0f98?gi=b434299595d4)!",
            "Meanwhile check out [COCO Utilities of SAHI](https://github.com/obss/sahi/blob/main/docs/COCO.md)!",
            "Meanwhile check out [FiftyOne utilities of SAHI](https://github.com/obss/sahi#fiftyone-utilities)!",
            "Meanwhile [give a Github star to SAHI](https://github.com/obss/sahi/stargazers)!",
            "Meanwhile see [how easy is to install SAHI](https://github.com/obss/sahi#getting-started)!",
            "Meanwhile check out [Medium blogpost of SAHI](https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80)!",
            "Meanwhile try out [YOLOv5 HF Spaces demo of SAHI](https://huggingface.co/spaces/fcakyon/sahi-yolov5)!",
        ]

    def _store(self, ind):
        """Record *ind* in the history, dropping the oldest entry past six."""
        if len(self.ind_history_list) == 6:
            self.ind_history_list = self.ind_history_list[1:]
        self.ind_history_list.append(ind)

    def get(self):
        """Return a tip not among the six most recently served ones."""
        candidate = 0
        # Re-roll until we land on an index outside the recent history.
        while candidate in self.ind_history_list:
            candidate = random.randint(0, len(self.text_list) - 1)
        self._store(candidate)
        return self.text_list[candidate]
100
 
101
 
102
  st.set_page_config(
 
106
  initial_sidebar_state="auto",
107
  )
108
 
109
# One-time startup work: fetch the bundled demo images, then seed session
# defaults so the page renders a comparison before the first prediction.
download_comparison_images()

if "last_spinner_texts" not in st.session_state:
    st.session_state["last_spinner_texts"] = SpinnerTexts()

# Pre-populate both comparison slots with the example YOLOv8 / SAHI outputs.
for _key, _path in (
    ("output_1", "highway2-yolov8m.jpg"),
    ("output_2", "highway2-sahi.jpg"),
):
    if _key not in st.session_state:
        st.session_state[_key] = Image.open(_path)
119
 
120
  st.markdown(
121
  """
 
126
  """,
127
  unsafe_allow_html=True,
128
  )
129
+ # st.markdown(
130
+ # """
131
+ # <p style='text-align: center'>
132
+ # <a href='https://github.com/obss/sahi' target='_blank'>SAHI Github</a> | <a href='https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox' target='_blank'>YOLOX Github</a> | <a href='https://huggingface.co/spaces/fcakyon/sahi-yolov5' target='_blank'>SAHI+YOLOv5 Demo</a>
133
+ # <br />
134
+ # Follow me for more! <a href='https://twitter.com/fcakyon' target='_blank'> <img src="https://img.icons8.com/color/48/000000/twitter--v1.png" height="30"></a><a href='https://github.com/fcakyon' target='_blank'><img src="https://img.icons8.com/fluency/48/000000/github.png" height="27"></a><a href='https://www.linkedin.com/in/fcakyon/' target='_blank'><img src="https://img.icons8.com/fluency/48/000000/linkedin.png" height="30"></a> <a href='https://fcakyon.medium.com/' target='_blank'><img src="https://img.icons8.com/ios-filled/48/000000/medium-monogram.png" height="26"></a>
135
+ # </p>
136
+ # """,
137
+ # unsafe_allow_html=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
  # )
139
+
140
+ st.write("##")
141
+
142
+ with st.expander("Usage"):
143
+ st.markdown(
144
+ """
145
+ <p>
146
+ 1. Upload or select the input image πŸ–ΌοΈ
147
+ <br />
148
+ 2. (Optional) Set SAHI parameters βœ”οΈ
149
+ <br />
150
+ 3. Press to "πŸš€ Perform Prediction"
151
+ <br />
152
+ 4. Enjoy sliding image comparison πŸ”₯
153
+ </p>
154
+ """,
155
+ unsafe_allow_html=True,
156
+ )
157
+
158
+ st.write("##")
159
+
160
+ col1, col2, col3 = st.columns([6, 1, 6])
161
+ with col1:
162
+ st.markdown(f"##### Set input image:")
163
+
164
+ # set input image by upload
165
+ image_file = st.file_uploader(
166
+ "Upload an image to test:", type=["jpg", "jpeg", "png"]
167
+ )
168
+
169
# Set input image from examples.
def slider_func(option):
    """Map an example-image filename to its display label ("1"–"4")."""
    return {
        "apple_tree.jpg": "1",
        "highway.jpg": "2",
        "highway2.jpg": "3",
        "highway3.jpg": "4",
    }[option]
178
+
179
+ slider = st.select_slider(
180
+ "Or select from example images:",
181
+ options=["apple_tree.jpg", "highway.jpg", "highway2.jpg", "highway3.jpg"],
182
+ format_func=slider_func,
183
+ value="highway2.jpg",
184
+ )
185
+
186
+ # visualize input image
187
+ if image_file is not None:
188
+ image = Image.open(image_file)
189
+ else:
190
+ image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[slider])
191
+ st.image(image, width=300)
192
+
193
+ with col3:
194
+ st.markdown(f"##### Set SAHI parameters:")
195
+
196
+ slice_size = st.number_input("slice_size", min_value=256, value=512, step=256)
197
+ overlap_ratio = st.number_input(
198
+ "overlap_ratio", min_value=0.0, max_value=0.6, value=0.2, step=0.2
199
+ )
200
+ #postprocess_type = st.selectbox(
201
+ # "postprocess_type", options=["NMS", "GREEDYNMM"], index=0
202
+ #)
203
+ #postprocess_match_metric = st.selectbox(
204
+ # "postprocess_match_metric", options=["IOU", "IOS"], index=0
205
+ #)
206
+ postprocess_match_threshold = st.number_input(
207
+ "postprocess_match_threshold", value=0.5, step=0.1
208
+ )
209
+ #postprocess_class_agnostic = st.checkbox("postprocess_class_agnostic", value=True)
210
+
211
+ col1, col2, col3 = st.columns([4, 3, 4])
212
+ with col2:
213
+ submit = st.button("πŸš€ Perform Prediction")
214
+
215
# Run model download + both inference flavours when the user presses the button.
if submit:
    tips = st.session_state["last_spinner_texts"]

    # Stage 1: make sure the weights are present (no-op after the cached first run).
    with st.spinner(
        text="Downloading model weight.. " + tips.get()
    ):
        detection_model = get_model()

    image_size = 1280  # inference resolution used for both predictions

    # Stage 2: standard YOLOv8 prediction vs sliced (SAHI) prediction.
    with st.spinner(
        text="Performing prediction.. " + tips.get()
    ):
        output_1, output_2 = sahi_yolov8m_inference(
            image,
            detection_model,
            image_size=image_size,
            slice_height=slice_size,
            slice_width=slice_size,
            overlap_height_ratio=overlap_ratio,
            overlap_width_ratio=overlap_ratio,
            postprocess_match_threshold=postprocess_match_threshold,
        )

    st.session_state["output_1"] = output_1
    st.session_state["output_2"] = output_2

# Always render the slider comparison, using the latest (or seeded) outputs.
st.markdown("##### YOLOv8 Standard vs SAHI Prediction:")
static_component = image_comparison(
    img1=st.session_state["output_1"],
    img2=st.session_state["output_2"],
    # Fix: labels previously read "YOLOX"/"SAHI+YOLOX" — stale strings from the
    # YOLOX demo this app was forked from; the model here is YOLOv8.
    label1="YOLOv8",
    label2="SAHI+YOLOv8",
    width=700,
    starting_position=50,
    show_labels=True,
    make_responsive=True,
    in_memory=True,
)
257
+ # st.markdown(
258
+ # """
259
+ # <p style='text-align: center'>
260
+ # prepared with <a href='https://github.com/fcakyon/streamlit-image-comparison' target='_blank'>streamlit-image-comparison</a>
261
+ # </p>
262
+ # """,
263
+ # unsafe_allow_html=True,
264
+ # )