import time
import numpy as np
import pyarrow as pa
from dora import DoraStatus
from constants import LOCATION
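# LOCATION (defined in constants.py) is assumed to map a location name to a dict of
# goal names, each holding an (N, 2) array of world-frame waypoints, e.g.
# LOCATION["HOME"]["KITCHEN"] -> np.array([[x0, y0], [x1, y1], ...]).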
# Resolution of the incoming camera stream, used to reshape the raw image buffer
CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720

def check_clear_road(bboxes, image_width, goal_x):
"""
Find the x-coordinate of the midpoint of the largest gap along the x-axis where no bounding boxes overlap.
Parameters:
- bboxes (np.array): A numpy array where each row represents a bounding box with
the format [min_x, min_y, max_x, max_y, confidence, label].
- image_width (int): The width of the image in pixels.
Returns:
- int: The x-coordinate of the midpoint of the largest gap where no bounding boxes overlap.
"""
    if bboxes.size == 0:
        # No bounding boxes detected: the road is clear
        return True
events = []
for bbox in bboxes:
min_x, max_x = bbox[0], bbox[2]
events.append((min_x, "enter"))
events.append((max_x, "exit"))
    # Include the image boundaries as events: the left edge acts as an 'exit' and
    # the right edge as an 'enter', so the sweep starts and ends outside of any gap.
    events.append((0, "exit"))
    events.append((image_width, "enter"))
# Sort events, with exits before enters at the same position to ensure gap calculation correctness
events.sort(key=lambda x: (x[0], x[1] == "enter"))
    # Sweep line over the sorted events. current_boxes starts at 1 so that the
    # synthetic left-boundary 'exit' event brings it to 0 and opens the first gap.
    current_boxes = 1
    gap_start_x = None
for x, event_type in events:
if current_boxes == 0 and gap_start_x is not None:
            # A gap free of bounding boxes spans [gap_start_x, x)
            if gap_start_x < goal_x < x:
                # The goal lies inside this clear gap
                return True
            elif goal_x < gap_start_x:
                # Every remaining gap starts beyond the goal, so the road is blocked
                return False
if event_type == "enter":
current_boxes += 1
if current_boxes == 1:
gap_start_x = None # No longer in a gap
elif event_type == "exit":
current_boxes -= 1
if current_boxes == 0:
gap_start_x = x # Start of a potential gap
return False
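
# Illustrative example (hypothetical detections, not part of the dataflow): two
# boxes leave the centre of a 1280-pixel-wide image free, so a goal projected to
# x = 640 is reported as clear while a goal at x = 100 sits behind the first box.
#
#     boxes = np.array(
#         [
#             [0, 0, 300, 720, 0.9, 0],  # obstacle covering x in [0, 300]
#             [1000, 0, 1280, 720, 0.8, 0],  # obstacle covering x in [1000, 1280]
#         ]
#     )
#     check_clear_road(boxes, CAMERA_WIDTH, 640)  # -> True (clear gap 300..1000)
#     check_clear_road(boxes, CAMERA_WIDTH, 100)  # -> False (covered by the first box)
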
class Operator:
    def __init__(self):
        self.bboxs = None
        self.time = time.time()
        self.position = [0, 0, 0]  # last received robot pose [x, y, heading]
        self.waypoints = None  # (N, 2) array of remaining waypoints towards the goal
        self.tf = np.array([[1, 0], [0, 1]])  # rotation matrix built from the heading
        self.count = 0  # id attached to each control command
        self.completed = True  # True once the last control command was acknowledged
        self.image = None  # latest camera frame
        self.goal = ""  # name of the current navigation goal
        self.current_location = "HOME"  # last location that was reached
def on_event(
self,
dora_event: dict,
send_output,
) -> DoraStatus:
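        """
        Handle a single dora event: cache the latest camera frame, track control
        acknowledgements, update the goal and waypoints, and send the next control
        command once the previous one has completed.
        """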
if dora_event["type"] == "INPUT":
id = dora_event["id"]
if id == "image":
value = dora_event["value"].to_numpy()
self.image = value.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
elif id == "control_reply":
value = dora_event["value"].to_numpy()[0]
if value == self.count:
self.completed = True
elif id == "set_goal":
self.goal = dora_event["value"][0].as_py()
print("got goal:", self.goal, flush=True)
if len(dora_event["value"]) > 0:
if self.goal != "":
self.waypoints = LOCATION[self.current_location][self.goal]
elif id == "position":
print("got position:", dora_event["value"], flush=True)
value = dora_event["value"].to_numpy()
[x, y, z] = value
self.position = [x, y, z]
if self.image is None:
print("no image", flush=True)
return DoraStatus.CONTINUE
                # Wait until the previous control command has been acknowledged
                if not self.completed:
                    print("not completed", flush=True)
                    return DoraStatus.CONTINUE
if self.waypoints is None:
print("no waypoint", flush=True)
return DoraStatus.CONTINUE
                # Final waypoint reached: report arrival and clear the waypoints
elif (
self.waypoints.shape[0] == 1
and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.2
):
print(f"goal {self.goal} reached", flush=True)
self.current_location = self.goal
send_output(
f"reached_{self.goal.lower()}", pa.array(self.image.ravel())
)
self.waypoints = None
return DoraStatus.CONTINUE
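                # Close enough to the current waypoint: drop it and aim for the next one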
elif (
self.waypoints.size > 0
and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.1
):
self.waypoints = self.waypoints[1:]
print("removing waypoints", flush=True)
z = np.deg2rad(z)
self.tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]])
goal = self.tf.dot(self.waypoints[0] - np.array([x, y]))
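                # Project the goal bearing onto an image column; the scaling below is a
                # heuristic that spreads +/- 90 degrees across the image width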
goal_camera_x = (
CAMERA_WIDTH * np.arctan2(goal[1], goal[0]) / np.pi
) + CAMERA_WIDTH / 2
goal_angle = np.arctan2(goal[1], goal[0]) * 180 / np.pi
print(
"position",
[x, y],
"goal:",
goal,
"Goal angle: ",
                    goal_angle,
"z: ",
np.rad2deg(z),
"x: ",
goal_camera_x,
"count: ",
self.count,
flush=True,
)
self.count += 1
self.completed = False
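                # Control message consumed by the downstream control node; the exact
                # semantics of the numeric fields are defined there. The last two
                # entries are the target heading in degrees and the command id that is
                # echoed back on `control_reply`.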
message = pa.array(
[
self.waypoints[0][0] - x,
self.waypoints[0][1] - y,
0.0, # -goal_angle,
0.8,
0.0, # 50,
10.0,
float(int(goal_angle)),
self.count,
]
)
print("sending:", message, flush=True)
send_output(
"control",
message,
dora_event["metadata"],
)
return DoraStatus.CONTINUE