Martin Tomov committed on
Commit e7e717e · verified · 1 Parent(s): 5bd1d98

Update app.py

Files changed (1)
  1. app.py +17 -21
app.py CHANGED
@@ -49,35 +49,31 @@ class DetectionResult:
 def annotate(image: Union[Image.Image, np.ndarray], detection_results: List[DetectionResult]) -> np.ndarray:
     image_cv2 = np.array(image) if isinstance(image, Image.Image) else image
     image_cv2 = cv2.cvtColor(image_cv2, cv2.COLOR_RGB2BGR)
-
-    # Create a completely yellow background
-    yellow_background = np.full(image_cv2.shape, (0, 255, 255), dtype=np.uint8)
-
+
     for detection in detection_results:
+        label = detection.label
+        score = detection.score
         box = detection.box
         mask = detection.mask
-
-        # Drawing bounding box with yellow fill
-        cv2.rectangle(yellow_background, (box.xmin, box.ymin), (box.xmax, box.ymax), (0, 255, 255), cv2.FILLED)
-
+        color = np.random.randint(0, 256, size=3).tolist()
+
+        cv2.rectangle(image_cv2, (box.xmin, box.ymin), (box.xmax, box.ymax), color, 2)
+        cv2.putText(image_cv2, f'{label}: {score:.2f}', (box.xmin, box.ymin - 10),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
+
         if mask is not None:
-            mask = mask.astype(bool)
-            mask_3_channel = np.stack([mask, mask, mask], axis=-1)
-
-            # Extract insect region using mask
-            insect_region = image_cv2 * mask_3_channel
-
-            # Overlay insect region within the bounding box on the yellow background
-            yellow_background[mask_3_channel] = insect_region[mask_3_channel]
+            mask_uint8 = (mask * 255).astype(np.uint8)
+            contours, _ = cv2.findContours(mask_uint8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+            cv2.drawContours(image_cv2, contours, -1, color, 2)

-    return cv2.cvtColor(yellow_background, cv2.COLOR_BGR2RGB)
+    return cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB)

 def plot_detections(image: Union[Image.Image, np.ndarray], detections: List[DetectionResult]) -> np.ndarray:
     annotated_image = annotate(image, detections)
     return annotated_image

 def load_image(image: Union[str, Image.Image]) -> Image.Image:
-    if isinstance(image, str) and image.startsWith("http"):
+    if isinstance(image, str) and image.startswith("http"):
         image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
     elif isinstance(image, str):
         image = Image.open(image).convert("RGB")
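
The rewritten annotate() draws a random-colored bounding box, a label/score caption, and the mask outline directly onto the image, instead of compositing the masked region onto a yellow canvas. A minimal standalone sketch of those drawing steps, using a toy image and mask rather than the app's DetectionResult objects (the sizes and values below are illustrative only):

import cv2
import numpy as np

# Toy BGR canvas and binary mask standing in for a real frame and segmentation mask.
image_bgr = np.zeros((300, 400, 3), dtype=np.uint8)
mask = np.zeros((300, 400), dtype=np.uint8)
mask[100:200, 150:250] = 1
xmin, ymin, xmax, ymax = 150, 100, 250, 200
label, score = "insect", 0.91

# Same sequence of OpenCV calls as the new annotate(): box, caption, contour.
color = np.random.randint(0, 256, size=3).tolist()
cv2.rectangle(image_bgr, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(image_bgr, f"{label}: {score:.2f}", (xmin, ymin - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
contours, _ = cv2.findContours((mask * 255).astype(np.uint8),
                               cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image_bgr, contours, -1, color, 2)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)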
@@ -110,7 +106,7 @@ def refine_masks(masks: torch.BoolTensor, polygon_refinement: bool = False) -> L
     return list(masks)

 @spaces.GPU
-def detect(image: Image.Image, labels: List[str], threshold: float = 0.3, detector_id: Optional[str] = None) -> List[DetectionResult]:
+def detect(image: Image.Image, labels: List[str], threshold: float = 0.3, detector_id: Optional[str] = None) -> List[Dict[str, Any]]:
     detector_id = detector_id if detector_id else "IDEA-Research/grounding-dino-base"
     object_detector = pipeline(model=detector_id, task="zero-shot-object-detection", device="cuda")
     labels = [label if label.endswith(".") else label+"." for label in labels]
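
detect() now advertises a plain List[Dict[str, Any]] instead of List[DetectionResult]. For context, a hedged sketch of how the wrapped zero-shot-object-detection pipeline is typically called; the image path and label are illustrative, and device="cpu" is used here only so the snippet runs outside the @spaces.GPU context:

from PIL import Image
from transformers import pipeline

# Same model and task as app.py; "cpu" here instead of the Space's "cuda".
detector = pipeline(model="IDEA-Research/grounding-dino-base",
                    task="zero-shot-object-detection", device="cpu")
image = Image.open("sample_insect.jpg").convert("RGB")  # hypothetical input image
labels = ["insect."]  # Grounding DINO prompts work best with a trailing "."
results = detector(image, candidate_labels=labels, threshold=0.3)
# Each entry looks like:
# {"score": 0.87, "label": "insect.", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}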
@@ -163,7 +159,7 @@ def create_yellow_background_with_insects(image: np.ndarray, detections: List[De
     yellow_background = cv2.cvtColor(yellow_background, cv2.COLOR_BGR2RGB)
     return yellow_background

-def run_length_encoding(mask: np.ndarray) -> List[int]:
+def run_length_encoding(mask):
     pixels = mask.flatten()
     rle = []
     last_val = 0
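
run_length_encoding() loses its annotations here; the diff only shows the top of the body, which flattens the mask and starts counting runs from a value of 0, as COCO-style RLE also does. A self-contained sketch of that convention, not necessarily line-for-line the app's loop:

import numpy as np

def rle_sketch(mask):
    # Run lengths of 0s and 1s over the flattened mask, count starting at 0,
    # mirroring the last_val = 0 initialisation visible in the diff.
    pixels = mask.flatten()
    rle, last_val, count = [], 0, 0
    for pixel in pixels:
        if pixel == last_val:
            count += 1
        else:
            rle.append(count)
            last_val = pixel
            count = 1
    rle.append(count)
    return rle

print(rle_sketch(np.array([0, 0, 1, 1, 1, 0])))  # -> [2, 3, 1]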
@@ -180,7 +176,7 @@ def run_length_encoding(mask: np.ndarray) -> List[int]:
     rle.append(count)
     return rle

-def detections_to_json(detections: List[DetectionResult]) -> List[Dict[str, Any]]:
+def detections_to_json(detections):
     detections_list = []
     for detection in detections:
         detection_dict = {
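
detections_to_json() also drops its type hints, and the hunk cuts off before detection_dict is populated, so the exact keys are not visible here. A hedged sketch of what such a serializer typically emits; the key names (label, score, box, mask_rle) are assumptions, not the app's confirmed schema:

def detections_to_json_sketch(detections):
    # Assumed field names; the label/score/box/mask attributes match those used in annotate().
    detections_list = []
    for detection in detections:
        detection_dict = {
            "label": detection.label,
            "score": float(detection.score),
            "box": {
                "xmin": int(detection.box.xmin),
                "ymin": int(detection.box.ymin),
                "xmax": int(detection.box.xmax),
                "ymax": int(detection.box.ymax),
            },
            # Masks are kept JSON-serializable via the run_length_encoding() helper above.
            "mask_rle": run_length_encoding(detection.mask) if detection.mask is not None else None,
        }
        detections_list.append(detection_dict)
    return detections_list

Passing the result through json.dumps(...) would then give a JSON payload suitable for the app's output.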
 