import cv2
import numpy as np
from dora import DoraStatus
CAMERA_WIDTH = 960
CAMERA_HEIGHT = 540
FONT = cv2.FONT_HERSHEY_SIMPLEX
writer = cv2.VideoWriter(
"output01.avi",
cv2.VideoWriter_fourcc(*"MJPG"),
60,
(CAMERA_WIDTH, CAMERA_HEIGHT),
)
GOAL_OBJECTIVES = [10, 0]
def find_largest_gap_midpoint(bboxes, image_width, goal_x):
    """
    Find the x-coordinate of the midpoint of the largest gap along the x-axis
    where no bounding boxes overlap.

    Parameters:
    - bboxes (np.array): A numpy array where each row represents a bounding box with
      the format [min_x, min_y, max_x, max_y, confidence, label].
    - image_width (int): The width of the image in pixels.
    - goal_x (int): The preferred x-coordinate; it is returned unchanged when it
      already lies inside the gap that is found.

    Returns:
    - int: The x-coordinate of the midpoint of the largest gap where no bounding
      boxes overlap (or goal_x if it lies within that gap).
    """
    if bboxes.size == 0:
        # No bounding boxes, return the midpoint of the image as the largest gap
        return image_width // 2

    events = []
    for bbox in bboxes:
        min_x, max_x = bbox[0], bbox[2]
        events.append((min_x, "enter"))
        events.append((max_x, "exit"))

    # Include image boundaries as part of the events
    events.append(
        (0, "exit")
    )  # Start of the image, considered an 'exit' point for logic simplicity
    events.append(
        (image_width, "enter")
    )  # End of the image, considered an 'enter' point

    # Sort events, with exits before enters at the same position to ensure gap
    # calculation correctness
    events.sort(key=lambda x: (x[0], x[1] == "enter"))

    # Sweep line algorithm to find the largest gap
    current_boxes = 1
    largest_gap = 0
    gap_start_x = None
    largest_gap_mid = None  # Midpoint of the largest gap

    for x, event_type in events:
        if current_boxes == 0 and gap_start_x is not None:
            # A gap just ended at x; measure it
            gap = x - gap_start_x
            if gap > largest_gap:
                largest_gap = gap
                gap_end_x = x
                largest_gap_mid = (gap_start_x + x) // 2
                # Prefer the goal position when it already lies inside this gap
                if gap_start_x < goal_x < gap_end_x:
                    return goal_x
                return largest_gap_mid
                # elif goal_x > gap_end_x:
                #     return max(gap_end_x - 50, largest_gap_mid)
                # elif goal_x < gap_start_x:
                #     return min(gap_start_x + 50, largest_gap_mid)
        if event_type == "enter":
            current_boxes += 1
            if current_boxes == 1:
                gap_start_x = None  # No longer in a gap
        elif event_type == "exit":
            current_boxes -= 1
            if current_boxes == 0:
                gap_start_x = x  # Start of a potential gap

    return largest_gap_mid
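
# A minimal, illustrative sanity check of the sweep-line gap search above; the
# sample boxes and sizes here are made up for demonstration only and are not
# part of the dora dataflow. Run this file directly to try it.
if __name__ == "__main__":
    sample_bboxes = np.array(
        [
            # [min_x, min_y, max_x, max_y, confidence, label]
            [0, 50, 300, 400, 0.9, 0],
            [600, 80, 800, 450, 0.8, 1],
        ]
    )
    # The free corridor between the two boxes spans x = 300..600 and goal_x = 10
    # lies outside it, so the midpoint 450.0 is printed.
    print(find_largest_gap_midpoint(sample_bboxes, image_width=960, goal_x=10))
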
class Operator:
    """
    Plot image and bounding box
    """

    def __init__(self):
        self.bboxs = []
        self.buffer = ""
        self.submitted = []
        self.lines = []
        self.gap_x = CAMERA_WIDTH // 2
        self.position = [0, 0, 0]
    def on_event(
        self,
        dora_event,
        send_output,
    ):
        if dora_event["type"] == "INPUT":
            id = dora_event["id"]
            value = dora_event["value"]
            if id == "position":
                value = dora_event["value"].to_numpy()
                [x, y, z] = value
                self.position = [x, y, z]
            if id == "image":
                image = (
                    value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy()
                )
                # Note: cv2.resize returns a new array, so without reassigning
                # the result this call has no effect on the frame below.
                cv2.resize(image, (CAMERA_HEIGHT * 2, CAMERA_WIDTH * 2))
                # Overlay the current keyboard buffer near the bottom of the frame
                cv2.putText(
                    image, self.buffer, (20, 14 + 15 * 25), FONT, 0.5, (190, 250, 0), 2
                )
                # Draw the submitted messages, most recent at the bottom,
                # with user and assistant messages in different colors
                i = 0
                for text in self.submitted[::-1]:
                    color = (
                        (0, 255, 190)
                        if text["role"] == "user_message"
                        else (0, 190, 255)
                    )
                    cv2.putText(
                        image,
                        text["content"],
                        (
                            20,
                            14 + (13 - i) * 25,
                        ),
                        FONT,
                        0.5,
                        color,
                        2,
                    )
                    i += 1
                writer.write(image)
                # As above, the result of this resize is discarded, so the
                # preview window shows the frame at its original resolution.
                cv2.resize(image, (CAMERA_HEIGHT * 3, CAMERA_WIDTH * 3))
                cv2.imshow("frame", image)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    return DoraStatus.STOP
            elif id == "keyboard_buffer":
                self.buffer = value[0].as_py()
            elif id == "bbox":
                self.bboxs = value.to_numpy().reshape((-1, 6))
                # Steer towards the midpoint of the largest obstacle-free gap
                self.gap_x = find_largest_gap_midpoint(
                    self.bboxs, image_width=CAMERA_WIDTH, goal_x=10
                )
            elif "message" in id:
                self.submitted += [
                    {
                        "role": id,
                        "content": value[0]
                        .as_py()
                        .replace("\n", " ")
                        .replace("- ", ""),
                    }
                ]
        return DoraStatus.CONTINUE
## Angle = arctan(projected object y / x)
## Linear relation: 0 - 60 ; 0 - CAMERA_WIDTH
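
# A minimal sketch of the linear relation noted above, mapping a pixel column in
# [0, CAMERA_WIDTH] onto an angle in [0, 60] degrees. The function name and the
# 60-degree field of view are illustrative assumptions, not part of the dataflow.
def pixel_to_angle(x, image_width=CAMERA_WIDTH, fov_degrees=60):
    # 0 maps to 0 degrees, image_width maps to fov_degrees
    return x / image_width * fov_degrees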