import time
import numpy as np
import pyarrow as pa
from dora import DoraStatus
GOAL = np.array([10, 20])
HOME_TO_KITCHEN = np.array([[0.5, 0], [0.5, -5.0], [1.0, 7.0]])
KITCHEN_TO_HOME = np.array([[2.0, 0.0], [0.0, 0.0]])
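# Camera frame dimensions used to reshape incoming image frames.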
CAMERA_WIDTH = 960
CAMERA_HEIGHT = 540
def check_clear_road(bboxes, image_width, goal_x):
    """
    Check whether the goal's x-coordinate lies in a gap along the x-axis where
    no bounding boxes overlap.

    Parameters:
    - bboxes (np.array): A numpy array where each row represents a bounding box with
      the format [min_x, min_y, max_x, max_y, confidence, label].
    - image_width (int): The width of the image in pixels.
    - goal_x (float): The x-coordinate of the goal projected into the image.

    Returns:
    - bool: True if the goal falls inside a gap free of bounding boxes, False otherwise.
    """
    if bboxes.size == 0:
        # No bounding boxes: the whole image width is clear
        return True
    events = []
    for bbox in bboxes:
        min_x, max_x = bbox[0], bbox[2]
        events.append((min_x, "enter"))
        events.append((max_x, "exit"))
    # Include image boundaries as part of the events
    events.append(
        (0, "exit")
    )  # Start of the image, considered an 'exit' point for logic simplicity
    events.append(
        (image_width, "enter")
    )  # End of the image, considered an 'enter' point
    # Sort events, with exits before enters at the same position to ensure gap calculation correctness
    events.sort(key=lambda event: (event[0], event[1] == "enter"))
    # Sweep line over the events, tracking how many boxes cover the current x
    current_boxes = 1
    gap_start_x = None
    for x, event_type in events:
        if current_boxes == 0 and gap_start_x is not None:
            # A gap free of boxes spans [gap_start_x, x]
            if gap_start_x < goal_x < x:
                return True
            elif goal_x < gap_start_x:
                return False
        if event_type == "enter":
            current_boxes += 1
            if current_boxes == 1:
                gap_start_x = None  # No longer in a gap
        elif event_type == "exit":
            current_boxes -= 1
            if current_boxes == 0:
                gap_start_x = x  # Start of a potential gap
    return False
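
# Example (sketch, assumed values): a single detection spanning x = 200..400 in a
# 960 px wide frame leaves [400, 960] free of boxes, so a goal projected at
# x = 600 is clear while a goal at x = 300 is blocked:
#
#     boxes = np.array([[200.0, 0.0, 400.0, 100.0, 0.9, 0.0]])
#     check_clear_road(boxes, CAMERA_WIDTH, 600)  # True
#     check_clear_road(boxes, CAMERA_WIDTH, 300)  # False
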
class Operator:
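    """Waypoint-following operator.

    Inputs: `tick`, `image`, `control_reply`, `set_goal`, and `position` events.
    Outputs: `control` commands toward the next waypoint, and a `goal_reached`
    message (carrying the latest camera frame) once all waypoints are consumed.
    """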
    def __init__(self):
        self.bboxs = None  # latest obstacle bounding boxes (currently unused)
        self.time = time.time()  # time of the last tick
        self.position = [0, 0, 0]  # last received [x, y, heading (deg)]
        self.waypoints = None  # remaining (x, y) waypoints to reach
        self.tf = np.array([[1, 0], [0, 1]])  # rotation matrix for the current heading
        self.count = 0  # id of the last control command sent
        self.completed = True  # True once the controller replied with that id
        self.image = None  # latest camera frame (HEIGHT x WIDTH x 3)
    def on_event(
        self,
        dora_event: dict,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            id = dora_event["id"]
            if id == "tick":
                self.time = time.time()
            elif id == "image":
                value = dora_event["value"].to_numpy()
                self.image = value.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
            elif id == "control_reply":
                # The controller echoes back the id of the command it finished
                value = dora_event["value"].to_numpy()[0]
                if value == self.count:
                    self.completed = True
            elif id == "set_goal":
                print("got goal:", dora_event["value"], flush=True)
                if len(dora_event["value"]) > 0:
                    self.waypoints = dora_event["value"].to_numpy().reshape((-1, 2))
            elif id == "position":
                # No waypoints received yet
                if self.waypoints is None:
                    print("no waypoint", flush=True)
                    return DoraStatus.CONTINUE
                # Previous command not yet acknowledged
                if not self.completed:
                    print("not completed", flush=True)
                    return DoraStatus.CONTINUE
                value = dora_event["value"].to_numpy()
                [x, y, z] = value
                self.position = [x, y, z]
                # Drop the current waypoint once the robot is within 0.2 m of it
                if (
                    len(self.waypoints) > 0
                    and np.linalg.norm(self.waypoints[0] - [x, y]) < 0.2
                ):
                    self.waypoints = self.waypoints[1:]
                    print("removing waypoints", flush=True)
                if len(self.waypoints) == 0:
                    print("goal reached", flush=True)
                    # Forward the latest camera frame along with the goal-reached signal
                    send_output("goal_reached", pa.array(self.image.ravel()))
                    self.waypoints = None
                    return DoraStatus.CONTINUE
                # Rotation matrix for the current heading (z is given in degrees)
                z = np.deg2rad(z)
                self.tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]])
                goal = self.tf.dot(self.waypoints[0])
                # Project the goal bearing onto the image x-axis
                # (this mapping places bearings of +/-90 degrees at the image edges)
                goal_camera_x = (
                    CAMERA_WIDTH * np.arctan2(goal[1], goal[0]) / np.pi
                ) + CAMERA_WIDTH / 2
                goal_angle = np.arctan2(goal[1], goal[0]) * 180 / np.pi
                print(
                    "position",
                    [x, y],
                    "goal:",
                    goal,
                    "Goal angle: ",
                    goal_angle,
                    "z: ",
                    np.rad2deg(z),
                    "x: ",
                    goal_camera_x,
                    "count: ",
                    self.count,
                    flush=True,
                )
                if True:  # check_clear_road(self.bboxs, CAMERA_WIDTH, goal_camera_x):
                    self.count += 1
                    self.completed = False
                    send_output(
                        "control",
                        pa.array(
                            [
                                {
                                    "action": "gimbal",
                                    "value": [0.0, goal_angle],
                                    "count": self.count,
                                },
                                # {
                                #     "value": [
                                #         0.0,
                                #         0.0,
                                #         -goal_angle,
                                #         0.0,
                                #         50,
                                #     ],
                                #     "action": "control",
                                # },
                                {
                                    "value": [
                                        self.waypoints[0][0],
                                        self.waypoints[0][1],
                                        0.0,  # -goal_angle,
                                        0.6,
                                        0.0,  # 50,
                                    ],
                                    "action": "control",
                                },
                            ]
                        ),
                        dora_event["metadata"],
                    )
        return DoraStatus.CONTINUE