mbar0075 committed on
Commit
3559658
•
1 Parent(s): 614a4fc

Updated with YOLO11

Browse files
Files changed (3) hide show
  1. README.md +1 -1
  2. app.py +57 -25
  3. requirements.txt +6 -3
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: Segment Something
3
  emoji: 🖼️
4
  colorFrom: green
5
  colorTo: red
 
1
  ---
2
+ title: Segmentation-Playground
3
  emoji: 🖼️
4
  colorFrom: green
5
  colorTo: red
app.py CHANGED
@@ -5,34 +5,66 @@ import numpy as np
5
  import supervision as sv
6
  from inference import get_model
7
  import warnings
 
8
 
9
  warnings.filterwarnings("ignore")
10
 
11
  MARKDOWN = """
12
- <h1 style='text-align: center'>Segment Something πŸ–ΌοΈ</h1>
13
- Welcome to Segment Something! Just a simple demo to showcase the instance segmentation capabilities of various YOLOv8 segmentation models. 🚀🔍👀
14
 
15
  A simple project just for fun for on-the-go instance segmentation. 🎉
16
 
17
  Inspired from YOLO-ARENA by SkalskiP. 🙏
18
 
19
- Powered by Roboflow [Inference](https://github.com/roboflow/inference) and
20
- [Supervision](https://github.com/roboflow/supervision). 🔥
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
  """
22
 
23
  IMAGE_EXAMPLES = [
24
- ['https://media.roboflow.com/supervision/image-examples/people-walking.png', 0.3, 0.3, 0.3],
25
- ['https://media.roboflow.com/supervision/image-examples/vehicles.png', 0.3, 0.3, 0.3],
26
- ['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 0.3, 0.3, 0.3],
27
  ]
28
 
29
- YOLO_V8N_MODEL = get_model(model_id="yolov8n-seg-640")
30
- YOLO_V8S_MODEL = get_model(model_id="yolov8s-seg-640")
31
- YOLO_V8M_MODEL = get_model(model_id="yolov8m-seg-640")
32
 
33
- LABEL_ANNOTATORS = sv.LabelAnnotator(text_color=sv.Color.black())
34
  MASK_ANNOTATORS = sv.MaskAnnotator()
35
- BOUNDING_BOX_ANNOTATORS = sv.BoundingBoxAnnotator()
36
 
37
 
38
  def detect_and_annotate(
@@ -42,12 +74,12 @@ def detect_and_annotate(
42
  iou_threshold: float,
43
  class_id_mapping: dict = None
44
  ) -> np.ndarray:
45
- result = model.infer(
46
  input_image,
47
- confidence=confidence_threshold,
48
- iou_threshold=iou_threshold
49
  )[0]
50
- detections = sv.Detections.from_inference(result)
51
 
52
  if class_id_mapping:
53
  detections.class_id = np.array([
@@ -84,11 +116,11 @@ def process_image(
84
  iou_threshold = 0.3 # Default value, adjust as necessary
85
 
86
  yolo_v8n_annotated_image = detect_and_annotate(
87
- YOLO_V8N_MODEL, input_image, yolo_v8_confidence_threshold, iou_threshold)
88
  yolo_v8s_annotated_image = detect_and_annotate(
89
- YOLO_V8S_MODEL, input_image, yolo_v9_confidence_threshold, iou_threshold)
90
  yolo_8m_annotated_image = detect_and_annotate(
91
- YOLO_V8M_MODEL, input_image, yolo_v10_confidence_threshold, iou_threshold)
92
 
93
  return (
94
  yolo_v8n_annotated_image,
@@ -102,7 +134,7 @@ yolo_v8N_confidence_threshold_component = gr.Slider(
102
  maximum=1.0,
103
  value=0.3,
104
  step=0.01,
105
- label="YOLOv8N Confidence Threshold",
106
  info=(
107
  "The confidence threshold for the YOLO model. Lower the threshold to "
108
  "reduce false negatives, enhancing the model's sensitivity to detect "
@@ -115,7 +147,7 @@ yolo_v8S_confidence_threshold_component = gr.Slider(
115
  maximum=1.0,
116
  value=0.3,
117
  step=0.01,
118
- label="YOLOv8S Confidence Threshold",
119
  info=(
120
  "The confidence threshold for the YOLO model. Lower the threshold to "
121
  "reduce false negatives, enhancing the model's sensitivity to detect "
@@ -128,7 +160,7 @@ yolo_v8M_confidence_threshold_component = gr.Slider(
128
  maximum=1.0,
129
  value=0.3,
130
  step=0.01,
131
- label="YOLOv8M Confidence Threshold",
132
  info=(
133
  "The confidence threshold for the YOLO model. Lower the threshold to "
134
  "reduce false negatives, enhancing the model's sensitivity to detect "
@@ -166,16 +198,16 @@ with gr.Blocks() as demo:
166
  )
167
  yolo_v8n_output_image_component = gr.Image(
168
  type='pil',
169
- label='YOLOv8N'
170
  )
171
  with gr.Row():
172
  yolo_v8s_output_image_component = gr.Image(
173
  type='pil',
174
- label='YOLOv8S'
175
  )
176
  yolo_v8m_output_image_component = gr.Image(
177
  type='pil',
178
- label='YOLOv8M'
179
  )
180
  submit_button_component = gr.Button(
181
  value='Submit',
 
5
  import supervision as sv
6
  from inference import get_model
7
  import warnings
8
+ from ultralytics import YOLO
9
 
10
  warnings.filterwarnings("ignore")
11
 
12
  MARKDOWN = """
13
+ <h1 style='text-align: left'>Segmentation-Playground πŸ–ΌοΈ</h1>
14
+ Welcome to Segmentation-Playground! This demo showcases the segmentation capabilities of various YOLO models pre-trained on the COCO Dataset. 🚀🔍👀
15
 
16
  A simple project just for fun for on-the-go instance segmentation. 🎉
17
 
18
  Inspired from YOLO-ARENA by SkalskiP. 🙏
19
 
20
+ - **YOLOv8**
21
+ <div style="display: flex; align-items: center;">
22
+ <a href="https://docs.ultralytics.com/models/yolov8/" style="margin-right: 10px;">
23
+ <img src="https://badges.aleen42.com/src/github.svg">
24
+ </a>
25
+ <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
26
+ <img src="https://colab.research.google.com/assets/colab-badge.svg">
27
+ </a>
28
+ </div>
29
+ - **YOLOv9**
30
+ <div style="display: flex; align-items: center;">
31
+ <a href="https://github.com/WongKinYiu/yolov9" style="margin-right: 10px;">
32
+ <img src="https://badges.aleen42.com/src/github.svg">
33
+ </a>
34
+ <a href="https://arxiv.org/abs/2402.13616" style="margin-right: 10px;">
35
+ <img src="https://img.shields.io/badge/arXiv-2402.13616-b31b1b.svg">
36
+ </a>
37
+ <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov9-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
38
+ <img src="https://colab.research.google.com/assets/colab-badge.svg">
39
+ </a>
40
+ </div>
41
+ - **YOLO11**
42
+ <div style="display: flex; align-items: center;">
43
+ <a href="https://docs.ultralytics.com/models/yolo11/" style="margin-right: 10px;">
44
+ <img src="https://badges.aleen42.com/src/github.svg">
45
+ </a>
46
+ <a href="https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-object-detection-on-custom-dataset.ipynb" style="margin-right: 10px;">
47
+ <img src="https://colab.research.google.com/assets/colab-badge.svg">
48
+ </a>
49
+ </div>
50
+
51
+ Powered by Roboflow [Inference](https://github.com/roboflow/inference),
52
+ [Supervision](https://github.com/roboflow/supervision) and [Ultralytics](https://github.com/ultralytics/ultralytics). 🔥
53
  """
54
 
55
  IMAGE_EXAMPLES = [
56
+ ['https://media.roboflow.com/supervision/image-examples/people-walking.png', 0.3, 0.3, 0.3, 0.5],
57
+ ['https://media.roboflow.com/supervision/image-examples/vehicles.png', 0.3, 0.3, 0.3, 0.5],
58
+ ['https://media.roboflow.com/supervision/image-examples/basketball-1.png', 0.3, 0.3, 0.3, 0.5],
59
  ]
60
 
61
+ YOLO_V8S_MODEL = YOLO("yolov8m-seg.pt")
62
+ YOLO_V9S_MODEL = YOLO("yolov9e-seg.pt")
63
+ YOLO_11S_MODEL = YOLO("yolo11m-seg.pt")
64
 
65
+ LABEL_ANNOTATORS = sv.LabelAnnotator()
66
  MASK_ANNOTATORS = sv.MaskAnnotator()
67
+ BOUNDING_BOX_ANNOTATORS = sv.BoxAnnotator()
68
 
69
 
70
  def detect_and_annotate(
 
74
  iou_threshold: float,
75
  class_id_mapping: dict = None
76
  ) -> np.ndarray:
77
+ result = model(
78
  input_image,
79
+ conf=confidence_threshold,
80
+ iou=iou_threshold
81
  )[0]
82
+ detections = sv.Detections.from_ultralytics(result)
83
 
84
  if class_id_mapping:
85
  detections.class_id = np.array([
 
116
  iou_threshold = 0.3 # Default value, adjust as necessary
117
 
118
  yolo_v8n_annotated_image = detect_and_annotate(
119
+ YOLO_V8S_MODEL, input_image, yolo_v8_confidence_threshold, iou_threshold)
120
  yolo_v8s_annotated_image = detect_and_annotate(
121
+ YOLO_V9S_MODEL, input_image, yolo_v9_confidence_threshold, iou_threshold)
122
  yolo_8m_annotated_image = detect_and_annotate(
123
+ YOLO_11S_MODEL, input_image, yolo_v10_confidence_threshold, iou_threshold)
124
 
125
  return (
126
  yolo_v8n_annotated_image,
 
134
  maximum=1.0,
135
  value=0.3,
136
  step=0.01,
137
+ label="YOLOv8m Confidence Threshold",
138
  info=(
139
  "The confidence threshold for the YOLO model. Lower the threshold to "
140
  "reduce false negatives, enhancing the model's sensitivity to detect "
 
147
  maximum=1.0,
148
  value=0.3,
149
  step=0.01,
150
+ label="YOLOv9e Confidence Threshold",
151
  info=(
152
  "The confidence threshold for the YOLO model. Lower the threshold to "
153
  "reduce false negatives, enhancing the model's sensitivity to detect "
 
160
  maximum=1.0,
161
  value=0.3,
162
  step=0.01,
163
+ label="YOLO11m Confidence Threshold",
164
  info=(
165
  "The confidence threshold for the YOLO model. Lower the threshold to "
166
  "reduce false negatives, enhancing the model's sensitivity to detect "
 
198
  )
199
  yolo_v8n_output_image_component = gr.Image(
200
  type='pil',
201
+ label='YOLOv8m'
202
  )
203
  with gr.Row():
204
  yolo_v8s_output_image_component = gr.Image(
205
  type='pil',
206
+ label='YOLOv9e'
207
  )
208
  yolo_v8m_output_image_component = gr.Image(
209
  type='pil',
210
+ label='YOLO11m'
211
  )
212
  submit_button_component = gr.Button(
213
  value='Submit',
requirements.txt CHANGED
@@ -1,5 +1,8 @@
1
  setuptools<70.0.0
2
  awscli==1.29.54
3
- gradio==4.19.2
4
- inference==0.13.0
5
- supervision==0.20.0
 
 
 
 
1
  setuptools<70.0.0
2
  awscli==1.29.54
3
+ gradio
4
+ inference
5
+ supervision
6
+ ultralytics
7
+ dill
8
+ timm