haixuantao committed
Commit fe79d42
1 Parent(s): ffc2aa4

Adding policy node for controlling the robot

README.md CHANGED
@@ -51,7 +51,7 @@ dora up
 dora start graphs/dataflow_robot_vlm.yml --attach --hot-reload
 ```
 
-Current way to interact is by typing a question to the VLM
+The current way to interact is by pressing the up arrow key on the laptop to record a message and send it to the VLM
 
 ## Running the demo without robot
 
@@ -64,4 +64,12 @@ dora up
 dora start graphs/dataflow_vlm_basic.yml --attach --hot-reload
 ```
 
-Current way to interact is by typing a question to the VLM
+The current way to interact is by pressing the up arrow key on the laptop to record a message and send it to the VLM
+
+## Kill process in case of failure
+
+Due to a Python GIL issue, we currently need to kill processes manually. You can use the following command to do so:
+
+```bash
+pkill -f 'import dora;'
+```
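If you want to see what would be killed first, you can list the matching processes (assuming, as the pattern above implies, that the operators run as Python processes whose command line contains `import dora;`):

```bash
# -a prints the full command line next to each PID, -f matches against it
pgrep -af 'import dora;'
```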
graphs/dataflow_robot_vlm.yml CHANGED
@@ -5,20 +5,8 @@ nodes:
       python: ../operators/plot.py
       inputs:
         image: webcam/image
-        assistant_message: vlm/assistant_message
         user_message: whisper/text
-
-  - id: vlm
-    operator:
-      python: ../operators/idefics2_op.py
-      inputs:
-        image:
-          source: webcam/image
-          queue_size: 1
-        instruction: whisper/text
-        control_reply: robot/control_reply
-      outputs:
-        - assistant_message
+        position: robot/position
 
   - id: robot
     operator:
@@ -27,10 +15,10 @@ nodes:
       conda_env: robomaster
       inputs:
         tick: dora/timer/millis/750
-        control: whisper/text
-        assistant_message: vlm/assistant_message
+        planning_control: planning/control
       outputs:
         - control_reply
+        - position
 
   - id: webcam
     custom:
@@ -42,6 +30,40 @@ nodes:
     operator:
       python: ../operators/whisper_op.py
       inputs:
-        audio: dora/timer/millis/500
+        audio: dora/timer/millis/1000
       outputs:
         - text
+
+  - id: llm
+    operator:
+      python: ../operators/llm_op.py
+      inputs:
+        text: whisper/text
+        reloaded: policy/reloaded
+      outputs:
+        - init
+
+  - id: policy
+    operator:
+      python: ../operators/policy.py
+      inputs:
+        init: llm/init
+        goal_reached: planning/goal_reached
+      outputs:
+        - set_goal
+        - reloaded
+
+  - id: planning
+    operator:
+      python: ../operators/planning_op.py
+      inputs:
+        position: robot/position
+        control_reply: robot/control_reply
+        set_goal: policy/set_goal
+        image: webcam/image
+      outputs:
+        - control
+        - goal_reached
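All of the new nodes above (llm, policy, planning) are dora operators. As a point of reference for reading the files added below, a minimal operator has this shape (a sketch only; the output id `my_output` is a placeholder, not part of this graph):

```python
from dora import DoraStatus
import pyarrow as pa


class Operator:
    def on_event(self, dora_event: dict, send_output) -> DoraStatus:
        # Events arrive as dicts carrying "type", "id", "value" and "metadata"
        if dora_event["type"] == "INPUT" and dora_event["id"] == "tick":
            # Outputs are Arrow arrays; the id must be declared in the graph
            send_output("my_output", pa.array([0]))
        return DoraStatus.CONTINUE
```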
graphs/dataflow_vlm_basic.yml CHANGED
@@ -5,9 +5,9 @@ nodes:
       python: ../operators/plot.py
       inputs:
         image: webcam/image
-        keyboard_buffer: keyboard/buffer
-        user_message: keyboard/submitted
+        user_message: whisper/text
         assistant_message: vlm/assistant_message
+        bbox: object_detection/bbox
 
   - id: vlm
     operator:
@@ -16,7 +16,7 @@ nodes:
         image:
           source: webcam/image
           queue_size: 1
-        instruction: keyboard/submitted
+        instruction: whisper/text
       outputs:
         - assistant_message
 
@@ -28,9 +28,18 @@ nodes:
     outputs:
       - image
 
-  - id: keyboard
-    custom:
-      source: ../operators/keyboard_op.py
+  - id: whisper
+    operator:
+      python: ../operators/whisper_op.py
+      inputs:
+        audio: dora/timer/millis/1000
+      outputs:
+        - text
+
+  - id: object_detection
+    operator:
+      python: ../operators/object_detection.py
+      inputs:
+        image: webcam/image
       outputs:
-      - buffer
-      - submitted
+        - bbox
operators/llm_op.py ADDED
@@ -0,0 +1,228 @@
+from dora import DoraStatus
+import pylcs
+import os
+import pyarrow as pa
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+
+import re
+import time
+
+CHATGPT = False
+MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"
+
+CODE_MODIFIER_TEMPLATE = """
+### Instruction
+Respond with one block of modified code only in ```python block. No explanation.
+
+```python
+{code}
+```
+
+{user_message}
+
+### Response:
+"""
+
+
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_NAME_OR_PATH,
+    device_map="auto",
+    trust_remote_code=True,
+    revision="main",
+).to("cuda:0")
+
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
+
+
+def extract_python_code_blocks(text):
+    """
+    Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier.
+
+    Parameters:
+    - text: A string that may contain one or more Python code blocks.
+
+    Returns:
+    - A list of strings, where each string is a block of Python code extracted from the text.
+    """
+    pattern = r"```python\n(.*?)\n```"
+    matches = re.findall(pattern, text, re.DOTALL)
+    if len(matches) == 0:
+        pattern = r"```python\n(.*?)(?:\n```|$)"
+        matches = re.findall(pattern, text, re.DOTALL)
+        if len(matches) == 0:
+            return [text]
+        else:
+            matches = [remove_last_line(matches[0])]
+
+    return matches
+
+
+def remove_last_line(python_code):
+    """
+    Removes the last line from a given string of Python code.
+
+    Parameters:
+    - python_code: A string representing Python source code.
+
+    Returns:
+    - A string with the last line removed.
+    """
+    lines = python_code.split("\n")  # Split the string into lines
+    if lines:  # Check if there are any lines to remove
+        lines.pop()  # Remove the last line
+    return "\n".join(lines)  # Join the remaining lines back into a string
+
+
+def calculate_similarity(source, target):
+    """
+    Calculate a similarity score between the source and target strings.
+    This uses the edit distance relative to the length of the strings.
+    """
+    edit_distance = pylcs.edit_distance(source, target)
+    max_length = max(len(source), len(target))
+    # Normalize the score by the maximum possible edit distance (the length of the longer string)
+    similarity = 1 - (edit_distance / max_length)
+    return similarity
+
+
+def find_best_match_location(source_code, target_block):
+    """
+    Find the best match for the target_block within the source_code by searching line by line,
+    considering blocks of varying lengths.
+    """
+    source_lines = source_code.split("\n")
+    target_lines = target_block.split("\n")
+
+    best_similarity = 0
+    best_start_index = 0
+    best_end_index = -1
+
+    # Iterate over the source lines to find the best matching range for all lines in target_block
+    for start_index in range(len(source_lines) - len(target_lines) + 1):
+        for end_index in range(start_index + len(target_lines), len(source_lines) + 1):
+            current_window = "\n".join(source_lines[start_index:end_index])
+            current_similarity = calculate_similarity(current_window, target_block)
+            if current_similarity > best_similarity:
+                best_similarity = current_similarity
+                best_start_index = start_index
+                best_end_index = end_index
+
+    # Convert line indices back to character indices for replacement
+    char_start_index = len("\n".join(source_lines[:best_start_index])) + (
+        1 if best_start_index > 0 else 0
+    )
+    char_end_index = len("\n".join(source_lines[:best_end_index]))
+
+    return char_start_index, char_end_index
+
+
+def replace_code_in_source(source_code, replacement_block: str):
+    """
+    Replace the best matching block in the source_code with the replacement_block, considering variable block lengths.
+    """
+    replacement_block = extract_python_code_blocks(replacement_block)[0]
+    start_index, end_index = find_best_match_location(source_code, replacement_block)
+    if start_index != -1 and end_index != -1:
+        # Replace the best matching part with the replacement block
+        new_source = (
+            source_code[:start_index] + replacement_block + source_code[end_index:]
+        )
+        return new_source
+    else:
+        return source_code
+
+
+class Operator:
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT" and dora_event["id"] == "text":
+            input = dora_event["value"][0].as_py()
+            # Path to the current file
+            current_file_path = __file__
+
+            # Directory of the current file
+            current_directory = os.path.dirname(current_file_path)
+            path = current_directory + "/policy.py"
+
+            with open(path, "r", encoding="utf8") as f:
+                code = f.read()
+
+            user_message = input
+            start_llm = time.time()
+
+            output = self.ask_llm(
+                CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message)
+            )
+
+            source_code = replace_code_in_source(code, output)
+            print("response time:", time.time() - start_llm, flush=True)
+
+            print("response: ", output, flush=True)
+            with open(path, "w") as file:
+                file.write(source_code)
+            time.sleep(10)
+            send_output("init", pa.array([]))
+
+        return DoraStatus.CONTINUE
+
+    def ask_llm(self, prompt):
+
+        # Generate output
+        # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt))
+        input = tokenizer(prompt, return_tensors="pt")
+        input_ids = input.input_ids.cuda()
+
+        # add attention mask here
+        attention_mask = input.attention_mask.cuda()
+
+        output = model.generate(
+            inputs=input_ids,
+            temperature=0.7,
+            do_sample=True,
+            top_p=0.95,
+            top_k=40,
+            max_new_tokens=512,
+            attention_mask=attention_mask,
+            eos_token_id=tokenizer.eos_token_id,
+        )
+        # Get the tokens from the output, decode them, print them
+
+        # Get text between im_start and im_end
+        return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :]
+
+
+if __name__ == "__main__":
+    op = Operator()
+
+    # Path to the current file
+    current_file_path = __file__
+
+    # Directory of the current file
+    current_directory = os.path.dirname(current_file_path)
+
+    path = current_directory + "/policy.py"
+    with open(path, "r", encoding="utf8") as f:
+        raw = f.read()
+
+    op.on_event(
+        {
+            "type": "INPUT",
+            "id": "text",
+            "value": pa.array(
+                [
+                    {
+                        "path": path,
+                        "user_message": "set the goal to kitchen. When you are in the kitchen ask the model if there is someone with blue shirt, if there is speak and say can I have coffee, if there is no one set the goal to home ",
+                    },
+                ]
+            ),
+            "metadata": [],
+        },
+        print,
+    )
operators/planning_op.py ADDED
@@ -0,0 +1,194 @@
+import time
+import numpy as np
+import pyarrow as pa
+from dora import DoraStatus
+
+GOAL = np.array([10, 20])
+
+HOME_TO_KITCHEN = np.array([[0.5, 0], [0.5, -5.0], [1.0, 7.0]])
+KITCHEN_TO_HOME = np.array([[2.0, 0.0], [0.0, 0.0]])
+
+CAMERA_WIDTH = 960
+CAMERA_HEIGHT = 540
+
+
+def check_clear_road(bboxes, image_width, goal_x):
+    """
+    Check whether goal_x falls inside a gap along the x-axis where no bounding boxes overlap.
+
+    Parameters:
+    - bboxes (np.array): A numpy array where each row represents a bounding box with
+      the format [min_x, min_y, max_x, max_y, confidence, label].
+    - image_width (int): The width of the image in pixels.
+    - goal_x (float): The x-coordinate of the goal projected onto the image.
+
+    Returns:
+    - bool: True if goal_x lies in a box-free gap, False otherwise.
+    """
+    if bboxes.size == 0:
+        # No bounding boxes: the whole road is clear
+        return True
+
+    events = []
+    for bbox in bboxes:
+        min_x, max_x = bbox[0], bbox[2]
+        events.append((min_x, "enter"))
+        events.append((max_x, "exit"))
+
+    # Include image boundaries as part of the events
+    events.append(
+        (0, "exit")
+    )  # Start of the image, considered an 'exit' point for logic simplicity
+    events.append(
+        (image_width, "enter")
+    )  # End of the image, considered an 'enter' point
+
+    # Sort events, with exits before enters at the same position to ensure gap calculation correctness
+    events.sort(key=lambda x: (x[0], x[1] == "enter"))
+
+    # Sweep line over the box edges, tracking gaps where no boxes are active
+    current_boxes = 1
+    gap_start_x = None
+
+    for x, event_type in events:
+        if current_boxes == 0 and gap_start_x is not None:
+            # The current event closes the gap that started at gap_start_x
+            gap_end_x = x
+            if goal_x < gap_end_x and goal_x > gap_start_x:
+                return True
+            elif goal_x < gap_start_x:
+                return False
+        if event_type == "enter":
+            current_boxes += 1
+            if current_boxes == 1:
+                gap_start_x = None  # No longer in a gap
+        elif event_type == "exit":
+            current_boxes -= 1
+            if current_boxes == 0:
+                gap_start_x = x  # Start of a potential gap
+
+    return False
+
+
+class Operator:
+    def __init__(self):
+        self.bboxs = None
+        self.time = time.time()
+        self.position = [0, 0, 0]
+        self.waypoints = None
+        self.tf = np.array([[1, 0], [0, 1]])
+        self.count = 0
+        self.completed = True
+        self.image = None
+
+    def on_event(
+        self,
+        dora_event: dict,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            id = dora_event["id"]
+            if id == "tick":
+                self.time = time.time()
+            elif id == "image":
+                value = dora_event["value"].to_numpy()
+
+                self.image = value.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+            elif id == "control_reply":
+                value = dora_event["value"].to_numpy()[0]
+                if value == self.count:
+                    self.completed = True
+            elif id == "set_goal":
+                print("got goal:", dora_event["value"], flush=True)
+
+                if len(dora_event["value"]) > 0:
+                    self.waypoints = dora_event["value"].to_numpy().reshape((-1, 2))
+            elif id == "position":
+                # No waypoint yet
+                if self.waypoints is None or len(self.waypoints) == 0:
+                    print("no waypoint", flush=True)
+                    return DoraStatus.CONTINUE
+                if not self.completed:
+                    print("not completed", flush=True)
+                    return DoraStatus.CONTINUE
+                value = dora_event["value"].to_numpy()
+                [x, y, z] = value
+                self.position = [x, y, z]
+
+                # Remove the current waypoint once the robot is within 0.2 m of it
+                if (
+                    len(self.waypoints) > 0
+                    and np.linalg.norm(self.waypoints[0] - [x, y]) < 0.2
+                ):
+                    self.waypoints = self.waypoints[1:]
+                    print("removing waypoints", flush=True)
+                if len(self.waypoints) == 0:
+                    print("no waypoint", flush=True)
+                    send_output("goal_reached", pa.array(self.image.ravel()))
+                    return DoraStatus.CONTINUE
+
+                z = np.deg2rad(z)
+                self.tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]])
+                goal = self.tf.dot(self.waypoints[0])
+                goal_camera_x = (
+                    CAMERA_WIDTH * np.arctan2(goal[1], goal[0]) / np.pi
+                ) + CAMERA_WIDTH / 2
+                goal_angle = np.arctan2(goal[1], goal[0]) * 180 / np.pi
+                print(
+                    "position",
+                    [x, y],
+                    "goal:",
+                    goal,
+                    "Goal angle: ",
+                    np.arctan2(goal[1], goal[0]) * 180 / np.pi,
+                    "z: ",
+                    np.rad2deg(z),
+                    "x: ",
+                    goal_camera_x,
+                    "count: ",
+                    self.count,
+                    flush=True,
+                )
+
+                if True:  # check_clear_road(self.bboxs, CAMERA_WIDTH, goal_camera_x):
+                    self.count += 1
+                    self.completed = False
+                    send_output(
+                        "control",
+                        pa.array(
+                            [
+                                {
+                                    "action": "gimbal",
+                                    "value": [0.0, goal_angle],
+                                    "count": self.count,
+                                },
+                                # {
+                                #     "value": [
+                                #         0.0,
+                                #         0.0,
+                                #         -goal_angle,
+                                #         0.0,
+                                #         50,
+                                #     ],
+                                #     "action": "control",
+                                # },
+                                {
+                                    "value": [
+                                        goal[0],
+                                        goal[1],
+                                        0.0,  # -goal_angle,
+                                        0.6,
+                                        0.0,  # 50,
+                                    ],
+                                    "action": "control",
+                                },
+                            ]
+                        ),
+                        dora_event["metadata"],
+                    )
+
+        return DoraStatus.CONTINUE
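A quick standalone check of the geometry in the position branch (illustrative numbers, not from the commit): the next waypoint is rotated by the yaw z, and goal_angle and goal_camera_x are derived from the rotated vector.

```python
import numpy as np

CAMERA_WIDTH = 960

z = np.deg2rad(90)  # robot yaw of 90 degrees
tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]])
goal = tf.dot(np.array([1.0, 0.0]))  # waypoint [1, 0] -> roughly [0, 1]

goal_angle = np.arctan2(goal[1], goal[0]) * 180 / np.pi  # 90.0 degrees
goal_camera_x = (
    CAMERA_WIDTH * np.arctan2(goal[1], goal[0]) / np.pi
) + CAMERA_WIDTH / 2  # 960.0, i.e. the right edge of the image
print(goal, goal_angle, goal_camera_x)
```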
operators/policy.py ADDED
@@ -0,0 +1,44 @@
+from dora import DoraStatus
+import numpy as np
+import pyarrow as pa
+from idefics2_utils import ask_vlm
+import pyttsx3
+
+
+KITCHEN = np.array([[0.5, 0], [0.5, -0.5], [1.0, -1.0]]).ravel()
+HOME = np.array([[0.5, -0.5], [0, 0]]).ravel()
+
+
+## Policy Operator
+class Operator:
+    def __init__(self):
+        engine = pyttsx3.init("espeak")
+        voices = engine.getProperty("voices")
+        engine.setProperty("voice", voices[3].id)
+        self.engine = engine
+
+    def speak(self, text: str):
+        self.engine.say(text)
+        self.engine.runAndWait()  # block until the queued utterance has played
+
+    # Ask vision model for information
+    def ask_model(self, image: np.ndarray, text: str) -> bool:
+        text = ask_vlm(image, text)
+        return "Yes, " in text
+
+    def on_event(
+        self,
+        dora_event: dict,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            id = dora_event["id"]
+            # On initialization
+            if id == "init":
+                send_output("set_goal", pa.array([]))
+
+            # On destination goal reached; this scaffold is rewritten by llm_op.py
+            elif id == "goal_reached":
+                image = dora_event["value"].to_numpy().reshape((540, 960, 3))
+                pass
+
+        return DoraStatus.CONTINUE
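The `__main__` block of llm_op.py above exercises this scaffold with the instruction "set the goal to kitchen… ask the model if there is someone with blue shirt…". One plausible on_event body the LLM could write back into this file (a hand-written sketch, not actual model output) is:

```python
# Hypothetical LLM-generated replacement for the on_event body above
if dora_event["type"] == "INPUT":
    id = dora_event["id"]
    if id == "init":
        send_output("set_goal", pa.array(KITCHEN))  # drive to the kitchen first
    elif id == "goal_reached":
        image = dora_event["value"].to_numpy().reshape((540, 960, 3))
        if self.ask_model(image, "Is there someone with a blue shirt?"):
            self.speak("Can I have coffee?")
        else:
            send_output("set_goal", pa.array(HOME))  # nobody there: head home
return DoraStatus.CONTINUE
```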
operators/robot.py CHANGED
@@ -3,6 +3,7 @@ from typing import Callable, Optional, Union
 from enum import Enum
 from dora import DoraStatus
 
+import numpy as np
 import pyarrow as pa
 
 
@@ -86,6 +87,8 @@ class Operator:
         )
         self.backlog = []
         self.last_control = ""
+        self.position = np.array([0, 0, 0])
+        self.count = -1
 
     def execute_backlog(self):
         if len(self.backlog) > 0:
@@ -96,6 +99,8 @@ class Operator:
             self.event = self.ep_robot.chassis.move(
                 x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
             )
+            self.position = np.array([x, y, z])
+
         elif command["action"] == "gimbal":
             [pitch, yaw] = command["value"]
             print(command, flush=True)
@@ -111,6 +116,7 @@ class Operator:
         event_type = dora_event["type"]
         if event_type == "INPUT":
             if dora_event["id"] == "tick":
+
                 if not (
                     self.event is not None
                     and not (self.event._event.isSet() and self.event.is_completed)
@@ -119,7 +125,11 @@ class Operator:
                     self.execute_backlog()
                 else:
                     print(f"sending control reply: {self.last_control}", flush=True)
-                    send_output("control_reply", pa.array([self.last_control]))
+                    send_output("position", pa.array(self.position))
+                    send_output("control_reply", pa.array([self.count]))
+            elif self.event is None:
+                send_output("position", pa.array(self.position))
+
             elif dora_event["id"] == "control":
                 raw_command = dora_event["value"][0].as_py()
                 print(raw_command, flush=True)
@@ -151,8 +161,13 @@ class Operator:
                     cmd = Command.NOD_YES
                 else:
                     cmd = Command.UNKNOWN
-                if len(self.backlog) == 0:
                 self.backlog += cmd.value
                 self.execute_backlog()
+            elif dora_event["id"] == "planning_control":
+                command = dora_event["value"].to_pylist()
+                self.count = command[0]["count"]
+                if len(self.backlog) == 0:
+                    self.backlog += command
+                    self.execute_backlog()
 
         return DoraStatus.CONTINUE
operators/whisper_op.py CHANGED
@@ -11,7 +11,7 @@ import sounddevice as sd
 model = whisper.load_model("base")
 
 SAMPLE_RATE = 16000
-MAX_DURATION = 5
+MAX_DURATION = 15
 
 
 class Operator: