|
import cv2
import numpy as np

from dora import DoraStatus

CAMERA_WIDTH = 960
CAMERA_HEIGHT = 540

FONT = cv2.FONT_HERSHEY_SIMPLEX

# Record every annotated frame to disk. cv2.VideoWriter expects the frame size
# as (width, height), and each frame passed to `write` must match it exactly.
writer = cv2.VideoWriter(
    "output01.avi",
    cv2.VideoWriter_fourcc(*"MJPG"),
    60,
    (CAMERA_WIDTH, CAMERA_HEIGHT),
)

GOAL_OBJECTIVES = [10, 0]

|
def find_largest_gap_midpoint(bboxes, image_width, goal_x):
    """
    Find the x-coordinate of the midpoint of the largest gap along the x-axis
    where no bounding boxes overlap.

    Parameters:
    - bboxes (np.array): A numpy array where each row represents a bounding box with
      the format [min_x, min_y, max_x, max_y, confidence, label].
    - image_width (int): The width of the image in pixels.
    - goal_x (int): The x-coordinate of the goal.

    Returns:
    - int: goal_x if it lies inside a gap free of bounding boxes, otherwise the
      x-coordinate of the midpoint of the largest such gap.
    """
    if bboxes.size == 0:
        # No detections: default to the image center.
        return image_width // 2

    # Sweep line over the x-axis: every box contributes an "enter" event at its
    # left edge and an "exit" event at its right edge.
    events = []
    for bbox in bboxes:
        min_x, max_x = bbox[0], bbox[2]
        events.append((min_x, "enter"))
        events.append((max_x, "exit"))

    # Virtual boxes covering everything left of x = 0 and right of x = image_width,
    # so that gaps touching the image borders are taken into account.
    events.append((0, "exit"))
    events.append((image_width, "enter"))

    # Sort by x; at equal x, process "exit" events before "enter" events.
    events.sort(key=lambda x: (x[0], x[1] == "enter"))

    # Start at 1 to account for the virtual box that exits at x = 0.
    current_boxes = 1
    largest_gap = 0
    gap_start_x = None
    largest_gap_mid = None

    for x, event_type in events:
        if current_boxes == 0 and gap_start_x is not None:
            # A gap that opened at gap_start_x closes at x.
            gap = x - gap_start_x
            if gap_start_x < goal_x < x:
                # The goal is directly reachable through this gap.
                return goal_x
            if gap > largest_gap:
                largest_gap = gap
                largest_gap_mid = (gap_start_x + x) // 2

        if event_type == "enter":
            current_boxes += 1
            if current_boxes == 1:
                gap_start_x = None
        elif event_type == "exit":
            current_boxes -= 1
            if current_boxes == 0:
                gap_start_x = x

    return largest_gap_mid
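

# Illustrative sketch (not part of the dataflow): expected behaviour of the gap
# finder on made-up boxes. The coordinates below are hypothetical.
#
#   boxes = np.array([[0, 0, 300, 540, 0.9, 0], [700, 0, 960, 540, 0.8, 0]])
#   find_largest_gap_midpoint(boxes, image_width=960, goal_x=480)  # -> 480, goal lies in the 300-700 gap
#   find_largest_gap_midpoint(boxes, image_width=960, goal_x=100)  # -> 500, goal blocked, midpoint of widest gap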
|
|
class Operator:
    """
    Plot the camera image with the keyboard buffer, position, and chat history
    overlaid, record the stream to disk, and track a steering point derived
    from the bounding boxes.
    """

    def __init__(self):
        self.bboxs = []
        self.buffer = ""
        self.submitted = []
        self.lines = []
        self.gap_x = CAMERA_WIDTH // 2
        self.position = [0, 0, 0]

    def on_event(
        self,
        dora_event,
        send_output,
    ):
        if dora_event["type"] == "INPUT":
            id = dora_event["id"]
            value = dora_event["value"]

            if id == "position":
                [x, y, z] = value.to_numpy()
                self.position = [x, y, z]

            if id == "image":
                image = (
                    value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy()
                )

                # Current keyboard buffer, drawn near the bottom of the frame.
                cv2.putText(
                    image, self.buffer, (20, 14 + 15 * 25), FONT, 0.5, (190, 250, 0), 2
                )
                # Latest position estimate, drawn at the top.
                cv2.putText(
                    image,
                    f"pos: {self.position}",
                    (20, 20),
                    FONT,
                    0.5,
                    (190, 250, 100),
                    2,
                )

                # Chat history, with the most recent message at the bottom.
                i = 0
                for text in self.submitted[::-1]:
                    color = (
                        (0, 255, 190)
                        if text["role"] == "user_message"
                        else (0, 190, 255)
                    )
                    cv2.putText(
                        image,
                        text["content"],
                        (
                            20,
                            14 + (13 - i) * 25,
                        ),
                        FONT,
                        0.5,
                        color,
                        2,
                    )
                    i += 1

                writer.write(image)
                cv2.imshow("frame", image)
                if cv2.waitKey(1) & 0xFF == ord("q"):
                    return DoraStatus.STOP
            elif id == "keyboard_buffer":
                self.buffer = value[0].as_py()
            elif id == "bbox":
                self.bboxs = value.to_numpy().reshape((-1, 6))

                # Recompute the steering point from the latest detections.
                self.gap_x = find_largest_gap_midpoint(
                    self.bboxs, image_width=CAMERA_WIDTH, goal_x=10
                )
            elif "message" in id:
                self.submitted += [
                    {
                        "role": id,
                        "content": value[0]
                        .as_py()
                        .replace("\n", " ")
                        .replace("- ", ""),
                    }
                ]

        return DoraStatus.CONTINUE
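

# Minimal local smoke test (illustrative only, not part of the dora dataflow).
# It assumes `pyarrow` is installed, since dora delivers input values as Arrow
# arrays; the values below are made up.
if __name__ == "__main__":
    import pyarrow as pa

    op = Operator()
    op.on_event(
        {"type": "INPUT", "id": "position", "value": pa.array([1.0, 2.0, 0.0])},
        send_output=lambda *args, **kwargs: None,
    )
    op.on_event(
        {
            "type": "INPUT",
            "id": "bbox",
            "value": pa.array([0.0, 0.0, 300.0, 540.0, 0.9, 0.0]),
        },
        send_output=lambda *args, **kwargs: None,
    )
    print("position:", op.position, "gap_x:", op.gap_x)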
|