import streamlit as st
from PIL import Image
import random

# from sahi.utils.yolov8
from sahi import AutoDetectionModel
from utils import sahi_yolov8m_inference
import sahi.utils.file
import sahi.utils.cv  # needed for read_image_as_pil below
from streamlit_image_comparison import image_comparison

# import sahi.utils.mmdet

# MMDET_YOLOX_TINY_MODEL_URL = "https://huggingface.co/fcakyon/mmdet-yolox-tiny/resolve/main/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth"
# MMDET_YOLOX_TINY_MODEL_PATH = "yolox.pt"
# MMDET_YOLOX_TINY_CONFIG_URL = "https://huggingface.co/fcakyon/mmdet-yolox-tiny/raw/main/yolox_tiny_8x8_300e_coco.py"
# MMDET_YOLOX_TINY_CONFIG_PATH = "config.py"

YOLOV8M_MODEL_URL = "https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt"
YOLOV8M_MODEL_PATH = "tests/data/models/yolov8/yolov8m.pt"
# YOLOV8M_MODEL_PATH = "models/yolov8m.pt"

IMAGE_TO_URL = {
    "apple_tree.jpg": "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
    "highway.jpg": "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
    "highway2.jpg": "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
    "highway3.jpg": "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
    "highway2-yolov8m.jpg": "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
    "highway2-sahi.jpg": "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
}


@st.cache_data(show_spinner=False)
def download_comparison_images():
    sahi.utils.file.download_from_url(
        "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
        "highway2-yolov8m.jpg",
    )
    sahi.utils.file.download_from_url(
        "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
        "highway2-sahi.jpg",
    )


# cache_resource (rather than cache_data): the detection model is a live object
# that should not be pickled between reruns
@st.cache_resource(show_spinner=False)
def get_model():
    sahi.utils.file.download_from_url(
        YOLOV8M_MODEL_URL,
        YOLOV8M_MODEL_PATH,
    )
    # sahi.utils.file.download_from_url(
    #     MMDET_YOLOX_TINY_MODEL_URL,
    #     MMDET_YOLOX_TINY_MODEL_PATH,
    # )
    # sahi.utils.file.download_from_url(
    #     MMDET_YOLOX_TINY_CONFIG_URL,
    #     MMDET_YOLOX_TINY_CONFIG_PATH,
    # )
    # sahi.utils.yolov8.download_yolov8m_model(destination_path=YOLOV8M_MODEL_PATH)

    detection_model = AutoDetectionModel.from_pretrained(
        model_type="yolov8",
        model_path=YOLOV8M_MODEL_PATH,
        # config_path=MMDET_YOLOX_TINY_CONFIG_PATH,
        confidence_threshold=0.5,
        device="cpu",
    )
    return detection_model


class SpinnerTexts:
    def __init__(self):
        self.ind_history_list = []
        self.text_list = [
            "Meanwhile check out [MMDetection Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_mmdetection.ipynb)!",
            "Meanwhile check out [YOLOv5 Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_yolov5.ipynb)!",
            "Meanwhile check out [aerial object detection with SAHI](https://blog.ml6.eu/how-to-detect-small-objects-in-very-large-images-70234bab0f98?gi=b434299595d4)!",
            "Meanwhile check out [COCO Utilities of SAHI](https://github.com/obss/sahi/blob/main/docs/COCO.md)!",
            "Meanwhile check out [FiftyOne utilities of SAHI](https://github.com/obss/sahi#fiftyone-utilities)!",
            "Meanwhile [give a Github star to SAHI](https://github.com/obss/sahi/stargazers)!",
            "Meanwhile see [how easy it is to install SAHI](https://github.com/obss/sahi#getting-started)!",
            "Meanwhile check out [Medium blogpost of SAHI](https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80)!",
            "Meanwhile try out [YOLOv5 HF Spaces demo of SAHI](https://huggingface.co/spaces/fcakyon/sahi-yolov5)!",
        ]

    def _store(self, ind):
        # keep at most the six most recently shown indices
        if len(self.ind_history_list) == 6:
            self.ind_history_list.pop(0)
        self.ind_history_list.append(ind)

    def get(self):
        # pick a spinner text that has not been shown recently
        ind = 0
        while ind in self.ind_history_list:
            ind = random.randint(0, len(self.text_list) - 1)
        self._store(ind)
        return self.text_list[ind]


st.set_page_config(
    page_title="Small Object Detection with SAHI + YOLOv8",
    page_icon="🚀",
    layout="centered",
    initial_sidebar_state="auto",
)

download_comparison_images()

if "last_spinner_texts" not in st.session_state:
    st.session_state["last_spinner_texts"] = SpinnerTexts()

if "output_1" not in st.session_state:
    st.session_state["output_1"] = Image.open("highway2-yolov8m.jpg")

if "output_2" not in st.session_state:
    st.session_state["output_2"] = Image.open("highway2-sahi.jpg")

st.markdown(
    """
    <h2 style='text-align: center'>
    Small Object Detection <br />
    with SAHI + YOLOv8
    </h2>
    """,
    unsafe_allow_html=True,
)

# st.markdown(
#     """
#     SAHI Github | YOLOX Github | SAHI+YOLOv5 Demo
#
#     Follow me for more!
#     """,
#     unsafe_allow_html=True,
# )

st.write("##")

with st.expander("Usage"):
    st.markdown(
        """

        1. Upload or select the input image 🖼️
        2. (Optional) Set SAHI parameters ✔️
        3. Press the "🚀 Perform Prediction" button
        4. Enjoy the sliding image comparison 🔥

""", unsafe_allow_html=True, ) st.write("##") col1, col2, col3 = st.columns([6, 1, 6]) with col1: st.markdown(f"##### Set input image:") # set input image by upload image_file = st.file_uploader( "Upload an image to test:", type=["jpg", "jpeg", "png"] ) # set input image from exapmles def slider_func(option): option_to_id = { "apple_tree.jpg": str(1), "highway.jpg": str(2), "highway2.jpg": str(3), "highway3.jpg": str(4), } return option_to_id[option] slider = st.select_slider( "Or select from example images:", options=["apple_tree.jpg", "highway.jpg", "highway2.jpg", "highway3.jpg"], format_func=slider_func, value="highway2.jpg", ) # visualize input image if image_file is not None: image = Image.open(image_file) else: image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[slider]) st.image(image, width=300) with col3: st.markdown(f"##### Set SAHI parameters:") slice_size = st.number_input("slice_size", min_value=256, value=512, step=256) overlap_ratio = st.number_input( "overlap_ratio", min_value=0.0, max_value=0.6, value=0.2, step=0.2 ) #postprocess_type = st.selectbox( # "postprocess_type", options=["NMS", "GREEDYNMM"], index=0 #) #postprocess_match_metric = st.selectbox( # "postprocess_match_metric", options=["IOU", "IOS"], index=0 #) postprocess_match_threshold = st.number_input( "postprocess_match_threshold", value=0.5, step=0.1 ) #postprocess_class_agnostic = st.checkbox("postprocess_class_agnostic", value=True) col1, col2, col3 = st.columns([4, 3, 4]) with col2: submit = st.button("🚀 Perform Prediction") if submit: # perform prediction with st.spinner( text="Downloading model weight.. " + st.session_state["last_spinner_texts"].get() ): detection_model = get_model() image_size = 1280 with st.spinner( text="Performing prediction.. " + st.session_state["last_spinner_texts"].get() ): output_1, output_2 = sahi_yolov8m_inference( image, detection_model, image_size=image_size, slice_height=slice_size, slice_width=slice_size, overlap_height_ratio=overlap_ratio, overlap_width_ratio=overlap_ratio, #postprocess_type=postprocess_type, #postprocess_match_metric=postprocess_match_metric, postprocess_match_threshold=postprocess_match_threshold, #postprocess_class_agnostic=postprocess_class_agnostic, ) st.session_state["output_1"] = output_1 st.session_state["output_2"] = output_2 st.markdown(f"##### YOLOv8 Standard vs SAHI Prediction:") static_component = image_comparison( img1=st.session_state["output_1"], img2=st.session_state["output_2"], label1="YOLOX", label2="SAHI+YOLOX", width=700, starting_position=50, show_labels=True, make_responsive=True, in_memory=True, ) # st.markdown( # """ #

# prepared with streamlit-image-comparison #

# """, # unsafe_allow_html=True, # )