haixuantao committed
Commit 10e96ef
1 Parent(s): d4a3c67

Simplifying robomaster codebase
operators/deepseek_op.py ADDED
@@ -0,0 +1,301 @@
+ from dora import DoraStatus
+ import pylcs
+ import textwrap
+ import os
+ import pyarrow as pa
+ import numpy as np
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ import json
+
+ import re
+
+
+ def extract_python_code_blocks(text):
+     """
+     Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier.
+
+     Parameters:
+     - text: A string that may contain one or more Python code blocks.
+
+     Returns:
+     - A list of strings, where each string is a block of Python code extracted from the text.
+     """
+     pattern = r"```python\n(.*?)\n```"
+     matches = re.findall(pattern, text, re.DOTALL)
+     if len(matches) == 0:
+         pattern = r"```python\n(.*?)(?:\n```|$)"
+         matches = re.findall(pattern, text, re.DOTALL)
+         if len(matches) == 0:
+             return [text]
+
+     return matches
+
+
+ def extract_json_code_blocks(text):
+     """
+     Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier.
+
+     Parameters:
+     - text: A string that may contain one or more json code blocks.
+
+     Returns:
+     - A list of strings, where each string is a block of json code extracted from the text.
+     """
+     pattern = r"```json\n(.*?)\n```"
+     matches = re.findall(pattern, text, re.DOTALL)
+     if len(matches) == 0:
+         pattern = r"```json\n(.*?)(?:\n```|$)"
+         matches = re.findall(pattern, text, re.DOTALL)
+         if len(matches) == 0:
+             return [text]
+
+     return matches
+
+
+ def remove_last_line(python_code):
+     """
+     Removes the last line from a given string of Python code.
+
+     Parameters:
+     - python_code: A string representing Python source code.
+
+     Returns:
+     - A string with the last line removed.
+     """
+     lines = python_code.split("\n")  # Split the string into lines
+     if lines:  # Check if there are any lines to remove
+         lines.pop()  # Remove the last line
+     return "\n".join(lines)  # Join the remaining lines back into a string
+
+
+ def calculate_similarity(source, target):
+     """
+     Calculate a similarity score between the source and target strings.
+     This uses the edit distance relative to the length of the strings.
+     """
+     edit_distance = pylcs.edit_distance(source, target)
+     max_length = max(len(source), len(target))
+     # Normalize the score by the maximum possible edit distance (the length of the longer string)
+     similarity = 1 - (edit_distance / max_length)
+     return similarity
+
+
+ def find_best_match_location(source_code, target_block):
+     """
+     Find the best match for the target_block within the source_code by searching line by line,
+     considering blocks of varying lengths.
+     """
+     source_lines = source_code.split("\n")
+     target_lines = target_block.split("\n")
+
+     best_similarity = 0
+     best_start_index = -1
+     best_end_index = -1
+
+     # Iterate over the source lines to find the best matching range for all lines in target_block
+     for start_index in range(len(source_lines) - len(target_lines) + 1):
+         for end_index in range(start_index + len(target_lines), len(source_lines) + 1):
+             current_window = "\n".join(source_lines[start_index:end_index])
+             current_similarity = calculate_similarity(current_window, target_block)
+             if current_similarity > best_similarity:
+                 best_similarity = current_similarity
+                 best_start_index = start_index
+                 best_end_index = end_index
+
+     # Convert line indices back to character indices for replacement
+     char_start_index = len("\n".join(source_lines[:best_start_index])) + (
+         1 if best_start_index > 0 else 0
+     )
+     char_end_index = len("\n".join(source_lines[:best_end_index]))
+
+     return char_start_index, char_end_index
+
+
+ def replace_code_in_source(source_code, replacement_block: str):
+     """
+     Replace the best matching block in the source_code with the replacement_block, considering variable block lengths.
+     """
+     replacement_block = extract_python_code_blocks(replacement_block)[0]
+     print("replacement_block: ", replacement_block)
+     replacement_block = remove_last_line(replacement_block)
+     start_index, end_index = find_best_match_location(source_code, replacement_block)
+
+     if start_index != -1 and end_index != -1:
+         # Replace the best matching part with the replacement block
+         new_source = (
+             source_code[:start_index] + replacement_block + source_code[end_index:]
+         )
+         return new_source
+     else:
+         return source_code
+
+
+ def save_as(content, path):
+     # use at the end of replace_2 as save_as(end_result, "file_path")
+     with open(path, "w") as file:
+         file.write(content)
+
+
+ class Operator:
+     def __init__(self):
+         # Load tokenizer
+         model_name_or_path = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"
+         # To use a different branch, change revision
+         # For example: revision="gptq-4bit-32g-actorder_True"
+         self.model = AutoModelForCausalLM.from_pretrained(
+             model_name_or_path,
+             device_map="auto",
+             trust_remote_code=False,
+             revision="main",
+         )
+
+         self.tokenizer = AutoTokenizer.from_pretrained(
+             model_name_or_path, use_fast=True
+         )
+
+     def on_event(
+         self,
+         dora_event,
+         send_output,
+     ) -> DoraStatus:
+         if dora_event["type"] == "INPUT":
+             input = dora_event["value"][0].as_py()
+
+             if False:
+                 with open(input["path"], "r", encoding="utf8") as f:
+                     raw = f.read()
+                 prompt = f"{raw} \n {input['query']}. "
+                 print("prompt: ", prompt, flush=True)
+                 output = self.ask_mistral(
+                     "You're a python code expert. Respond with the small modified code only. No explanation",
+                     prompt,
+                 )
+                 print("output: {}".format(output))
+
+                 source_code = replace_code_in_source(raw, output)
+                 send_output(
+                     "output_file",
+                     pa.array(
+                         [
+                             {
+                                 "raw": source_code,
+                                 # "path": input["path"],
+                                 # "response": output,
+                                 # "prompt": prompt,
+                             }
+                         ]
+                     ),
+                     dora_event["metadata"],
+                 )
+             else:
+                 output = self.ask_mistral(
+                     """You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explanation needed. No code needed.
+ The schema for those json are:
+ - led: Int[3] (min: 0, max: 255) # RGB values
+ - blaster: Int (min: 0, max: 128)
+ - control: Int[3] (min: -1, max: 1)
+ - rotation: Int[2] (min: -55, max: 55)
+ - message: String
+
+ The response should look like this:
+ ```json
+ {
+     "topic": "led",
+     "data": [255, 0, 0]
+ }
+ ```
+ """,
+                     input["query"],
+                 )
+                 output = extract_json_code_blocks(output)[0]
+                 print("output: {}".format(output), flush=True)
+                 try:
+                     output = json.loads(output)
+                     if not isinstance(output["data"], list):
+                         output["data"] = [output["data"]]
+
+                     if output["topic"] in [
+                         "led",
+                         "blaster",
+                         "control",
+                         "rotation",
+                         "text",
+                     ]:
+                         print("output", output)
+                         send_output(
+                             output["topic"],
+                             pa.array(output["data"]),
+                             dora_event["metadata"],
+                         )
+                 except:
+                     print("Could not parse json")
+                     # if data is not iterable, put data in a list
+
+         return DoraStatus.CONTINUE
+
+     def ask_mistral(self, system_message, prompt):
+         prompt_template = f"""
+ ### Instruction
+ {system_message}
+
+ {prompt}
+
+ ### Response:
+ """
+
+         # Generate output
+
+         input = self.tokenizer(prompt_template, return_tensors="pt")
+         input_ids = input.input_ids.cuda()
+
+         # add attention mask here
+         attention_mask = input["attention_mask"]
+
+         output = self.model.generate(
+             inputs=input_ids,
+             temperature=0.7,
+             do_sample=True,
+             top_p=0.95,
+             top_k=40,
+             max_new_tokens=512,
+             attention_mask=attention_mask,
+             eos_token_id=self.tokenizer.eos_token_id,
+         )
+         # Get the tokens from the output, decode them, print them
+
+         # Get text between im_start and im_end
+         return self.tokenizer.decode(output[0], skip_special_tokens=True)[
+             len(prompt_template) :
+         ]
+
+
+ if __name__ == "__main__":
+     op = Operator()
+
+     # Path to the current file
+     current_file_path = __file__
+
+     # Directory of the current file
+     current_directory = os.path.dirname(current_file_path)
+
+     path = os.path.join(current_directory, "plot.py")
+     with open(path, "r", encoding="utf8") as f:
+         raw = f.read()
+
+     op.on_event(
+         {
+             "type": "INPUT",
+             "id": "tick",
+             "value": pa.array(
+                 [
+                     {
+                         "raw": raw,
+                         "path": path,
+                         "query": "Send message my name is Carlito",
+                     }
+                 ]
+             ),
+             "metadata": [],
+         },
+         print,
+     )
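
Note: the fuzzy-replacement helpers above splice a model's partial rewrite into the original file at the window with the lowest normalized edit distance, dropping the last line of the extracted block as a truncation guard. A minimal usage sketch, assuming this file is importable as deepseek_op and pylcs is installed; the sample strings are hypothetical:

    # Hypothetical usage of the fuzzy-replacement helpers defined above.
    from deepseek_op import replace_code_in_source

    source = "def add(a, b):\n    return a - b\n\n\nprint(add(1, 2))\n"
    # A typical model reply: prose around a fenced block. remove_last_line()
    # drops the block's final line, so the reply ends with a throwaway line.
    reply = (
        "Here you go:\n```python\ndef add(a, b):\n"
        "    return a + b\nprint(add(1, 2))\n```"
    )

    print(replace_code_in_source(source, reply))
    # -> the add() body is corrected; the trailing print() call is untouched.
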
operators/file_saver_op.py CHANGED
@@ -18,7 +18,7 @@ class Operator:
         dora_event,
         send_output,
     ) -> DoraStatus:
-         if dora_event["type"] == "INPUT" and dora_event["id"] == "chatgpt_output_file":
+         if dora_event["type"] == "INPUT":
             input = dora_event["value"][0].as_py()

             with open(input["path"], "r") as file:
@@ -41,28 +41,4 @@ class Operator:
                 ),
                 dora_event["metadata"],
             )
-         if dora_event["type"] == "INPUT" and dora_event["id"] in [
-             "revert",
-             "error",
-             "failed",
-         ]:
-             if self.last_path == "":
-                 return DoraStatus.CONTINUE
-
-             with open(self.last_path, "w") as file:
-                 file.write(self.last_file)
-
-             send_output(
-                 "saved_file",
-                 pa.array(
-                     [
-                         {
-                             "raw": self.last_file,
-                             "path": self.last_path,
-                             "origin": dora_event["id"],
-                         }
-                     ]
-                 ),
-                 self.last_metadata,
-             )
         return DoraStatus.CONTINUE
operators/keybinding_op.py DELETED
@@ -1,30 +0,0 @@
- import pynput
- import pyarrow as pa
-
- from dora import Node
-
- node = Node()
-
-
- def on_key_release(key):
-     try:
-         if key.char == "1":
-             print("Key 'm' pressed up")
-             node.send_output("mic_on", pa.array([]))
-         elif key.char == "2":
-             print("Key '2' pressed up")
-             node.send_output("revert", pa.array([]))
-         elif key.char == "3":
-             print("Key '3' pressed up")
-             node.send_output("failed", pa.array([]))
-         elif key.char == "4":
-             print("Key '4' pressed up")
-             node.send_output("error", pa.array([]))
-         elif key.char == "0":
-             exit()
-
-     except AttributeError:
-         pass
-
-
- pynput.keyboard.Listener(on_release=on_key_release).run()
operators/keyboard_op.py ADDED
@@ -0,0 +1,94 @@
+ from pynput import keyboard
+ from pynput.keyboard import Key, Events
+ import pyarrow as pa
+ from dora import Node
+ from tkinter import Tk
+ import tkinter as tk
+
+
+ node = Node()
+ buffer_text = ""
+ ctrl = False
+ submitted_text = []
+ cursor = 0
+
+ NODE_TOPIC = ["record", "send", "ask", "change"]
+
+ with keyboard.Events() as events:
+     while True:
+         dora_event = node.next(0.01)
+         if (
+             dora_event is not None
+             and dora_event["type"] == "INPUT"
+             and dora_event["id"] == "recording"
+         ):
+             buffer_text += dora_event["value"][0].as_py()
+             node.send_output("buffer", pa.array([buffer_text]))
+             continue
+
+         event = events.get(1.0)
+         if event is not None and isinstance(event, Events.Press):
+             if hasattr(event.key, "char"):
+                 cursor = 0
+                 if ctrl and event.key.char == "v":
+                     r = Tk()
+                     r.update()
+                     try:
+                         selection = r.clipboard_get()
+                         r.withdraw()
+                         r.update()
+                     except tk.TclError:
+                         selection = ""
+                     r.destroy()
+                     buffer_text += selection
+                     node.send_output("buffer", pa.array([buffer_text]))
+                 elif ctrl and event.key.char == "c":
+                     r = Tk()
+                     r.clipboard_clear()
+                     r.clipboard_append(buffer_text)
+                     r.update()
+                     r.destroy()
+                 elif ctrl and event.key.char == "x":
+                     r = Tk()
+                     r.clipboard_clear()
+                     r.clipboard_append(buffer_text)
+                     r.update()
+                     r.destroy()
+                     buffer_text = ""
+                     node.send_output("buffer", pa.array([buffer_text]))
+                 else:
+                     buffer_text += event.key.char
+                     node.send_output("buffer", pa.array([buffer_text]))
+             else:
+                 if event.key == Key.backspace:
+                     buffer_text = buffer_text[:-1]
+                     node.send_output("buffer", pa.array([buffer_text]))
+                 elif event.key == Key.esc:
+                     buffer_text = ""
+                     node.send_output("buffer", pa.array([buffer_text]))
+                 elif event.key == Key.enter:
+                     node.send_output("submitted", pa.array([buffer_text]))
+                     first_word = buffer_text.split(" ")[0]
+                     if first_word in NODE_TOPIC:
+                         node.send_output(first_word, pa.array([buffer_text]))
+                     submitted_text.append(buffer_text)
+                     buffer_text = ""
+                     node.send_output("buffer", pa.array([buffer_text]))
+                 elif event.key == Key.ctrl:
+                     ctrl = True
+                 elif event.key == Key.space:
+                     buffer_text += " "
+                     node.send_output("buffer", pa.array([buffer_text]))
+                 elif event.key == Key.up:
+                     if len(submitted_text) > 0:
+                         cursor = max(cursor - 1, -len(submitted_text))
+                         buffer_text = submitted_text[cursor]
+                         node.send_output("buffer", pa.array([buffer_text]))
+                 elif event.key == Key.down:
+                     if len(submitted_text) > 0:
+                         cursor = min(cursor + 1, 0)
+                         buffer_text = submitted_text[cursor]
+                         node.send_output("buffer", pa.array([buffer_text]))
+         elif event is not None and isinstance(event, Events.Release):
+             if event.key == Key.ctrl:
+                 ctrl = False
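
Note: the Key.up / Key.down handling above recalls submission history by indexing submitted_text with a negative cursor clamped to the list length. A standalone sketch of just that rule, with hypothetical inputs:

    # History navigation via a negative cursor, as in keyboard_op.py above.
    submitted_text = ["record hello", "send hi", "ask why"]
    cursor = 0

    def up():
        global cursor
        cursor = max(cursor - 1, -len(submitted_text))  # step back, clamp at oldest
        return submitted_text[cursor]

    def down():
        global cursor
        cursor = min(cursor + 1, 0)  # step forward, clamp at "empty" position
        return submitted_text[cursor]

    print(up())    # -> 'ask why'  (most recent entry)
    print(up())    # -> 'send hi'
    print(down())  # -> 'ask why'
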
operators/llm_op.py ADDED
@@ -0,0 +1,340 @@
+ from dora import DoraStatus
+ import pylcs
+ import os
+ import pyarrow as pa
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import json
+
+ import re
+ import time
+
+ MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"
+ # MODEL_NAME_OR_PATH = "hanspeterlyngsoeraaschoujensen/deepseek-math-7b-instruct-GPTQ"
+
+ CODE_MODIFIER_TEMPLATE = """
+ ### Instruction
+ Respond with the whole modified code only in ```python block. No explanation.
+
+ ```python
+ {code}
+ ```
+
+ {user_message}
+
+ ### Response:
+ """
+
+
+ MESSAGE_SENDER_TEMPLATE = """
+ ### Instruction
+ You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explanation needed. No code needed.
+ The schema for those json are:
+ - led: Int[3] (min: 0, max: 255)
+ - blaster: Int (min: 0, max: 128)
+ - control: Int[3] (min: -1, max: 1)
+ - rotation: Int[2] (min: -55, max: 55)
+ - line: Int[4]
+
+ The response should look like this:
+ ```json
+
+ [
+     {{ "topic": "line", "data": [10, 10, 90, 10] }},
+ ]
+ ```
+
+ {user_message}
+
+ ### Response:
+ """
+
+ ASSISTANT_TEMPLATE = """
+ ### Instruction
+ You're a helpful assistant named dora.
+ Reply with a short message. No code needed.
+
+ User {user_message}
+
+ ### Response:
+ """
+
+
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL_NAME_OR_PATH,
+     device_map="auto",
+     trust_remote_code=True,
+     revision="main",
+ )
+
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
+
+
+ def extract_python_code_blocks(text):
+     """
+     Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier.
+
+     Parameters:
+     - text: A string that may contain one or more Python code blocks.
+
+     Returns:
+     - A list of strings, where each string is a block of Python code extracted from the text.
+     """
+     pattern = r"```python\n(.*?)\n```"
+     matches = re.findall(pattern, text, re.DOTALL)
+     if len(matches) == 0:
+         pattern = r"```python\n(.*?)(?:\n```|$)"
+         matches = re.findall(pattern, text, re.DOTALL)
+         if len(matches) == 0:
+             return [text]
+         else:
+             matches = [remove_last_line(matches[0])]
+
+     return matches
+
+
+ def extract_json_code_blocks(text):
+     """
+     Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier.
+
+     Parameters:
+     - text: A string that may contain one or more json code blocks.
+
+     Returns:
+     - A list of strings, where each string is a block of json code extracted from the text.
+     """
+     pattern = r"```json\n(.*?)\n```"
+     matches = re.findall(pattern, text, re.DOTALL)
+     if len(matches) == 0:
+         pattern = r"```json\n(.*?)(?:\n```|$)"
+         matches = re.findall(pattern, text, re.DOTALL)
+         if len(matches) == 0:
+             return [text]
+
+     return matches
+
+
+ def remove_last_line(python_code):
+     """
+     Removes the last line from a given string of Python code.
+
+     Parameters:
+     - python_code: A string representing Python source code.
+
+     Returns:
+     - A string with the last line removed.
+     """
+     lines = python_code.split("\n")  # Split the string into lines
+     if lines:  # Check if there are any lines to remove
+         lines.pop()  # Remove the last line
+     return "\n".join(lines)  # Join the remaining lines back into a string
+
+
+ def calculate_similarity(source, target):
+     """
+     Calculate a similarity score between the source and target strings.
+     This uses the edit distance relative to the length of the strings.
+     """
+     edit_distance = pylcs.edit_distance(source, target)
+     max_length = max(len(source), len(target))
+     # Normalize the score by the maximum possible edit distance (the length of the longer string)
+     similarity = 1 - (edit_distance / max_length)
+     return similarity
+
+
+ def find_best_match_location(source_code, target_block):
+     """
+     Find the best match for the target_block within the source_code by searching line by line,
+     considering blocks of varying lengths.
+     """
+     source_lines = source_code.split("\n")
+     target_lines = target_block.split("\n")
+
+     best_similarity = 0
+     best_start_index = 0
+     best_end_index = -1
+
+     # Iterate over the source lines to find the best matching range for all lines in target_block
+     for start_index in range(len(source_lines) - len(target_lines) + 1):
+         for end_index in range(start_index + len(target_lines), len(source_lines) + 1):
+             current_window = "\n".join(source_lines[start_index:end_index])
+             current_similarity = calculate_similarity(current_window, target_block)
+             if current_similarity > best_similarity:
+                 best_similarity = current_similarity
+                 best_start_index = start_index
+                 best_end_index = end_index
+
+     # Convert line indices back to character indices for replacement
+     char_start_index = len("\n".join(source_lines[:best_start_index])) + (
+         1 if best_start_index > 0 else 0
+     )
+     char_end_index = len("\n".join(source_lines[:best_end_index]))
+
+     return char_start_index, char_end_index
+
+
+ def replace_code_in_source(source_code, replacement_block: str):
+     """
+     Replace the best matching block in the source_code with the replacement_block, considering variable block lengths.
+     """
+     replacement_block = extract_python_code_blocks(replacement_block)[0]
+     start_index, end_index = find_best_match_location(source_code, replacement_block)
+     if start_index != -1 and end_index != -1:
+         # Replace the best matching part with the replacement block
+         new_source = (
+             source_code[:start_index] + replacement_block + source_code[end_index:]
+         )
+         return new_source
+     else:
+         return source_code
+
+
+ class Operator:
+
+     def on_event(
+         self,
+         dora_event,
+         send_output,
+     ) -> DoraStatus:
+         if dora_event["type"] == "INPUT" and dora_event["id"] == "code_modifier":
+             input = dora_event["value"][0].as_py()
+
+             with open(input["path"], "r", encoding="utf8") as f:
+                 code = f.read()
+
+             user_message = input["user_message"]
+             start_llm = time.time()
+             output = self.ask_llm(
+                 CODE_MODIFIER_TEMPLATE.format(code=code, user_message=user_message)
+             )
+
+             source_code = replace_code_in_source(code, output)
+             print("response time:", time.time() - start_llm, flush=True)
+             send_output(
+                 "modified_file",
+                 pa.array(
+                     [
+                         {
+                             "raw": source_code,
+                             "path": input["path"],
+                             "response": output,
+                             "prompt": input["user_message"],
+                         }
+                     ]
+                 ),
+                 dora_event["metadata"],
+             )
+             print("response: ", output, flush=True)
+             send_output(
+                 "assistant_message",
+                 pa.array([output]),
+                 dora_event["metadata"],
+             )
+         elif dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender":
+             user_message = dora_event["value"][0].as_py()
+             output = self.ask_llm(
+                 MESSAGE_SENDER_TEMPLATE.format(user_message=user_message)
+             )
+             outputs = extract_json_code_blocks(output)[0]
+             print("response: ", output, flush=True)
+             try:
+                 outputs = json.loads(outputs)
+                 if not isinstance(outputs, list):
+                     outputs = [outputs]
+                 for output in outputs:
+                     if not isinstance(output["data"], list):
+                         output["data"] = [output["data"]]
+
+                     if output["topic"] in ["led", "blaster"]:
+                         send_output(
+                             output["topic"],
+                             pa.array(output["data"]),
+                             dora_event["metadata"],
+                         )
+                     else:
+                         print("Could not find the topic: {}".format(output["topic"]))
+             except:
+                 print("Could not parse json")
+                 # if data is not iterable, put data in a list
+         elif dora_event["type"] == "INPUT" and dora_event["id"] == "assistant":
+             user_message = dora_event["value"][0].as_py()
+             output = self.ask_llm(ASSISTANT_TEMPLATE.format(user_message=user_message))
+             send_output(
+                 "assistant_message",
+                 pa.array([output]),
+                 dora_event["metadata"],
+             )
+         return DoraStatus.CONTINUE
+
+     def ask_llm(self, prompt):
+
+         # Generate output
+         # prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt))
+         input = tokenizer(prompt, return_tensors="pt")
+         input_ids = input.input_ids.cuda()
+
+         # add attention mask here
+         attention_mask = input["attention_mask"]
+
+         output = model.generate(
+             inputs=input_ids,
+             temperature=0.7,
+             do_sample=True,
+             top_p=0.95,
+             top_k=40,
+             max_new_tokens=512,
+             attention_mask=attention_mask,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+         # Get the tokens from the output, decode them, print them
+
+         # Get text between im_start and im_end
+         return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt) :]
+
+     def ask_chatgpt(self, prompt):
+         from openai import OpenAI
+
+         client = OpenAI()
+         print("---asking chatgpt: ", prompt, flush=True)
+         response = client.chat.completions.create(
+             model="gpt-4-turbo-preview",
+             messages=[
+                 {"role": "system", "content": "You are a helpful assistant."},
+                 {"role": "user", "content": prompt},
+             ],
+         )
+         answer = response.choices[0].message.content
+
+         print("Done", flush=True)
+         return answer
+
+
+ if __name__ == "__main__":
+     op = Operator()
+
+     # Path to the current file
+     current_file_path = __file__
+
+     # Directory of the current file
+     current_directory = os.path.dirname(current_file_path)
+
+     path = os.path.join(current_directory, "object_detection.py")
+     with open(path, "r", encoding="utf8") as f:
+         raw = f.read()
+
+     op.on_event(
+         {
+             "type": "INPUT",
+             "id": "message_sender",
+             "value": pa.array(
+                 [
+                     {
+                         "path": path,
+                         "user_message": "send a star ",
+                     },
+                 ]
+             ),
+             "metadata": [],
+         },
+         print,
+     )
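
Note: the message_sender branch above expects the model to emit a ```json block containing {topic, data} objects. A minimal sketch of the parsing contract, with the same regex inlined (importing llm_op would load the GPTQ model at import time); the reply string is hypothetical:

    import json, re

    reply = 'Sure:\n```json\n[{"topic": "led", "data": [255, 0, 0]}]\n```'
    # Same pattern as extract_json_code_blocks() above.
    block = re.findall(r"```json\n(.*?)\n```", reply, re.DOTALL)[0]
    for out in json.loads(block):
        # Normalize scalars to lists, as the operator does before send_output.
        data = out["data"] if isinstance(out["data"], list) else [out["data"]]
        print(out["topic"], data)  # -> led [255, 0, 0]
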
operators/microphone_op.py CHANGED
@@ -1,8 +1,3 @@
- # Run this in the consol first :
-
- # pip install sounddevice numpy
- import time
-
 import numpy as np
 import pyarrow as pa
 import sounddevice as sd
@@ -30,11 +25,12 @@ class Operator:
                samplerate=SAMPLE_RATE,
                channels=1,
                dtype=np.int16,
-                 blocking=False,
+                 blocking=True,
            )
-             time.sleep(MAX_DURATION)

            audio_data = audio_data.ravel().astype(np.float32) / 32768.0
            if len(audio_data) > 0:
                send_output("audio", pa.array(audio_data), dora_event["metadata"])
+         elif dora_event["type"] == "INPUT":
+             print("Microphone is not recording", dora_event["value"][0].as_py())
        return DoraStatus.CONTINUE
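
Note: the change above replaces a fire-and-wait pattern (blocking=False plus time.sleep) with sounddevice's built-in blocking record. A minimal standalone sketch; SAMPLE_RATE and MAX_DURATION are assumed values matching the operator's constants:

    import numpy as np
    import sounddevice as sd

    SAMPLE_RATE = 16000  # assumed; Whisper expects 16 kHz mono
    MAX_DURATION = 5     # seconds, assumed

    audio = sd.rec(
        int(SAMPLE_RATE * MAX_DURATION),
        samplerate=SAMPLE_RATE,
        channels=1,
        dtype=np.int16,
        blocking=True,  # returns only once the buffer is full
    )
    # Same normalization as the operator: int16 -> float32 in [-1, 1).
    audio = audio.ravel().astype(np.float32) / 32768.0
    print(audio.shape)
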
operators/mistral_op.py CHANGED
@@ -81,7 +81,7 @@ class Operator:
        if dora_event["type"] == "INPUT":
            input = dora_event["value"][0].as_py()

-             if not "send" in input["query"]:
+             if False:
                with open(input["path"], "r", encoding="utf8") as f:
                    raw = f.read()
                prompt = f"{raw[:400]} \n\n {input['query']}. "
@@ -114,6 +114,7 @@ The schema for those json are:
 - led: Int[3] (min: 0, max: 255)
 - blaster: Int (min: 0, max: 128)
 - control: Int[3] (min: -1, max: 1)
+ - rotation: Int[2] (min: -55, max: 55)


 """,
@@ -122,19 +123,19 @@ The schema for those json are:
                print("output: {}".format(output), flush=True)
                try:
                    output = json.loads(output)
+                     if not isinstance(output["data"], list):
+                         output["data"] = [output["data"]]
+
+                     if output["topic"] in ["led", "blaster", "control", "rotation"]:
+                         print("output", output)
+                         send_output(
+                             output["topic"],
+                             pa.array(output["data"]),
+                             dora_event["metadata"],
+                         )
                except:
                    print("Could not parse json")
                    # if data is not iterable, put data in a list
-                 if not isinstance(output["data"], list):
-                     output["data"] = [output["data"]]
-
-                 if output["topic"] in ["led", "blaster", "control"]:
-                     print("output", output)
-                     send_output(
-                         output["topic"],
-                         pa.array(output["data"]),
-                         dora_event["metadata"],
-                     )

                return DoraStatus.CONTINUE
@@ -178,7 +179,7 @@ if __name__ == "__main__":
                    {
                        "raw": raw,
                        "path": path,
-                         "query": "Can you send red on topic led",
+                         "query": "le control a 1 0 0",
                    }
                ]
            ),
operators/object_detection.py CHANGED
@@ -9,40 +9,32 @@ pa.array([])
 CAMERA_WIDTH = 960
 CAMERA_HEIGHT = 540

+ model = YOLO("yolov8n.pt")
+

 class Operator:
     """
-     Object Detection: Infering object from images using Deep Learning model YOLOv8.
+     Inferring objects from images
     """

-     def __init__(self):
-         self.model = YOLO("yolov8n.pt")
-
     def on_event(
         self,
         dora_event,
         send_output,
     ) -> DoraStatus:
         if dora_event["type"] == "INPUT":
-             return self.on_input(dora_event, send_output)
-         return DoraStatus.CONTINUE
-
-     def on_input(
-         self,
-         dora_input,
-         send_output,
-     ) -> DoraStatus:
-         """Handle image"""
-
-         frame = dora_input["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
-         frame = frame[:, :, ::-1]  # OpenCV image (BGR to RGB)
-         results = self.model(frame, verbose=False)  # includes NMS
-         # Process results
-         boxes = np.array(results[0].boxes.xyxy.cpu())
-         conf = np.array(results[0].boxes.conf.cpu())
-         # COCO Label
-         label = np.array(results[0].boxes.cls.cpu())
-         # concatenate them together
-         arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1)
-         send_output("bbox", pa.array(arrays.ravel()), dora_input["metadata"])
+             frame = (
+                 dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+             )
+             frame = frame[:, :, ::-1]  # OpenCV image (BGR to RGB)
+             results = model(frame, verbose=False)  # includes NMS
+             # Process results
+             boxes = np.array(results[0].boxes.xyxy.cpu())
+             conf = np.array(results[0].boxes.conf.cpu())
+             label = np.array(results[0].boxes.cls.cpu())
+             # concatenate them together
+             arrays = np.concatenate((boxes, conf[:, None], label[:, None]), axis=1)
+
+             send_output("bbox", pa.array(arrays.ravel()), dora_event["metadata"])
         return DoraStatus.CONTINUE
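
Note: downstream consumers (planning_op.py, plot.py) rebuild the flattened "bbox" array with reshape((-1, 6)). A minimal round-trip sketch of that contract; the detection values are hypothetical:

    import numpy as np
    import pyarrow as pa

    # One detection: [min_x, min_y, max_x, max_y, confidence, label]
    arrays = np.array([[120.0, 80.0, 300.0, 260.0, 0.91, 0.0]])
    flat = pa.array(arrays.ravel())               # what "bbox" carries on the wire
    bboxs = np.reshape(flat.to_numpy(), (-1, 6))  # what consumers reconstruct
    print(bboxs[0].tolist())  # -> [120.0, 80.0, 300.0, 260.0, 0.91, 0.0]
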
operators/planning_op.py CHANGED
@@ -3,6 +3,22 @@ import numpy as np
 import pyarrow as pa
 from dora import DoraStatus

+ # front-back: [-1, 1]
+ X = 0
+ # left-right: [-1, 1]
+ Y = 0
+ SPEED = 0.5
+ Z = 0
+ # pitch-axis angle in degrees (int): [-55, 55]
+ PITCH = 0
+ # yaw-axis angle in degrees (int): [-55, 55]
+ ROTATION = 0
+ # RGB LED (int): [0, 255]
+ RGB = [0, 0, 0]
+ BRIGHTNESS = [0]  # [0, 128]
+
+ GOAL_OBJECTIVES = [X, Y, Z]
+ GIMBAL_POSITION_GOAL = [PITCH, ROTATION]

 CAMERA_WIDTH = 960
 CAMERA_HEIGHT = 540
@@ -19,6 +35,7 @@ class Operator:
        dora_event: dict,
        send_output,
    ) -> DoraStatus:
+         global X, Y, SPEED, PITCH, ROTATION, RGB, BRIGHTNESS, GOAL_OBJECTIVES, GIMBAL_POSITION_GOAL
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "tick":
                self.time = time.time()
@@ -26,4 +43,28 @@ class Operator:
                bboxs = dora_event["value"].to_numpy()
                self.bboxs = np.reshape(bboxs, (-1, 6))

+             elif dora_event["id"] == "position":
+                 [x, y, z, gimbal_pitch, gimbal_yaw] = dora_event["value"].to_numpy()
+                 self.position = [x, y, z]
+                 direction = np.clip(
+                     np.array(GOAL_OBJECTIVES) - np.array(self.position), -1, 1
+                 )
+
+                 if any(abs(direction) > 0.1):
+                     x = direction[0]
+                     y = direction[1]
+                     z = direction[2]
+
+                     send_output(
+                         "control",
+                         pa.array([x, y, z]),
+                         dora_event["metadata"],
+                     )
+
+                 send_output(
+                     "gimbal_control",
+                     pa.array([PITCH, ROTATION]),
+                     dora_event["metadata"],
+                 )
+
        return DoraStatus.CONTINUE
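
Note: the position branch above is a simple proportional step: the commanded velocity is the goal-minus-position vector clipped to [-1, 1], and it is only sent when some axis is off by more than 0.1. A standalone sketch of that rule; goal and position values are hypothetical:

    import numpy as np

    GOAL_OBJECTIVES = [1.0, 0.0, 0.0]
    position = [0.2, -0.05, 0.0]

    # Clipped error vector, exactly as in planning_op.py above.
    direction = np.clip(np.array(GOAL_OBJECTIVES) - np.array(position), -1, 1)
    if any(abs(direction) > 0.1):  # dead band: ignore errors below 0.1
        print("control:", direction.tolist())  # -> control: [0.8, 0.05, 0.0]
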
operators/plot.py CHANGED
@@ -1,118 +1,104 @@
 import os
- from typing import Callable, Optional, Union
-
 import cv2
- import numpy as np
- import pyarrow as pa
- from utils import LABELS
+

 from dora import DoraStatus
+ from utils import LABELS

- pa.array([])

 CI = os.environ.get("CI")
+
 CAMERA_WIDTH = 960
 CAMERA_HEIGHT = 540

- font = cv2.FONT_HERSHEY_SIMPLEX
-
- writer = cv2.VideoWriter(
-     "output01.avi",
-     cv2.VideoWriter_fourcc(*"MJPG"),
-     30,
-     (CAMERA_WIDTH, CAMERA_HEIGHT),
- )
+ FONT = cv2.FONT_HERSHEY_SIMPLEX


 class Operator:
     """
-     Plot image on window and additional context
+     Plot image and bounding box
     """

     def __init__(self):
-         self.image = []
         self.bboxs = []
-         self.bounding_box_messages = 0
-         self.image_messages = 0
-         self.text_whisper = ""
+         self.buffer = ""
+         self.submitted = []
+         self.lines = []

     def on_event(
         self,
-         dora_event: dict,
-         send_output: Callable[[str, Union[bytes, pa.UInt8Array], Optional[dict]], None],
-     ) -> DoraStatus:
+         dora_event,
+         send_output,
+     ):
         if dora_event["type"] == "INPUT":
-             return self.on_input(dora_event, send_output)
-         return DoraStatus.CONTINUE
-
-     def on_input(
-         self,
-         dora_input: dict,
-         send_output: Callable[[str, Union[bytes, pa.UInt8Array], Optional[dict]], None],
-     ) -> DoraStatus:
-         """
-         Plot image on window and additional context
-         """
-         if dora_input["id"] == "image":
-             frame = (
-                 dora_input["value"]
-                 .to_numpy()
-                 .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
-                 .copy()  # copy the image because we want to modify it below
-             )
-             self.image = frame
-
-             self.image_messages += 1
-
-         elif dora_input["id"] == "text" and len(self.image) != 0:
-             self.text_whisper = dora_input["value"][0].as_py()
-         elif dora_input["id"] == "bbox" and len(self.image) != 0:
-             bboxs = dora_input["value"].to_numpy()
-             self.bboxs = np.reshape(bboxs, (-1, 6))
-
-             self.bounding_box_messages += 1
-
-         for bbox in self.bboxs:
-             [
-                 min_x,
-                 min_y,
-                 max_x,
-                 max_y,
-                 confidence,
-                 label,
-             ] = bbox
-             cv2.rectangle(
-                 self.image,
-                 (int(min_x), int(min_y)),
-                 (int(max_x), int(max_y)),
-                 (0, 255, 0),
-                 2,
-             )
-
-             d = ((12 * 22) / (max_y - (CAMERA_HEIGHT / 2))) / 2.77 - 0.08
-             cv2.putText(
-                 self.image,
-                 LABELS[int(label)] + f", d={d:.2f}",
-                 (int(max_x), int(max_y)),
-                 font,
-                 0.75,
-                 (0, 255, 0),
-                 2,
-                 1,
-             )
-
-         if len(self.image) > 0:
-             cv2.putText(
-                 self.image, self.text_whisper, (20, 35), font, 1, (250, 250, 250), 2, 1
-             )
-
-         if CI != "true":
-             writer.write(self.image)
-             cv2.imshow("frame", self.image)
-             if cv2.waitKey(1) & 0xFF == ord("q"):
-                 return DoraStatus.STOP
+             id = dora_event["id"]
+             value = dora_event["value"]
+             if id == "image":
+
+                 image = (
+                     value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy()
+                 )
+
+                 for bbox in self.bboxs:
+                     [
+                         min_x,
+                         min_y,
+                         max_x,
+                         max_y,
+                         confidence,
+                         label,
+                     ] = bbox
+                     cv2.rectangle(
+                         image,
+                         (int(min_x), int(min_y)),
+                         (int(max_x), int(max_y)),
+                         (0, 255, 0),
+                     )
+                     cv2.putText(
+                         image,
+                         f"{LABELS[int(label)]}, {confidence:0.2f}",
+                         (int(max_x), int(max_y)),
+                         FONT,
+                         0.5,
+                         (0, 255, 0),
+                     )
+
+                 cv2.putText(image, self.buffer, (20, 12 * 25), FONT, 1, (190, 250, 0))
+
+                 for i, text in enumerate(self.submitted[::-1]):
+                     cv2.putText(
+                         image,
+                         text["content"],
+                         (20, 25 + (10 - i) * 25),
+                         FONT,
+                         1,
+                         (0, 255, 190),
+                     )
+
+                 for line in self.lines:
+                     cv2.line(
+                         image,
+                         (int(line[0]), int(line[1])),
+                         (int(line[2]), int(line[3])),
+                         (0, 0, 255),
+                     )
+
+                 if CI != "true":
+                     cv2.imshow("frame", image)
+                     if cv2.waitKey(1) & 0xFF == ord("q"):
+                         return DoraStatus.STOP
+             elif id == "bbox":
+                 self.bboxs = value.to_numpy().reshape((-1, 6))
+             elif id == "keyboard_buffer":
+                 self.buffer = value[0].as_py()
+             elif id == "line":
+                 self.lines += [value.to_pylist()]
+             elif "message" in id:
+                 self.submitted += [
+                     {
+                         "role": id,
+                         "content": value[0].as_py(),
+                     }
+                 ]

         return DoraStatus.CONTINUE
-
-     def __del__(self):
-         cv2.destroyAllWindows()
operators/robot.py CHANGED
@@ -51,7 +51,7 @@ class Operator:
        ):
            [x, y, z] = dora_event["value"].to_numpy()
            self.event = self.ep_robot.chassis.move(
-                 x=x, y=y, z=z, xy_speed=0.5, z_speed=0
+                 x=x, y=y, z=z, xy_speed=0.8, z_speed=0.8
            )
            self.position[0] += x
            self.position[1] += y
@@ -69,16 +69,14 @@ class Operator:
            [
                gimbal_pitch,
                gimbal_yaw,
-                 gimbal_pitch_speed,
-                 gimbal_yaw_speed,
            ] = dora_event["value"].to_numpy()

            if self.gimbal_position != [gimbal_pitch, gimbal_yaw]:
                self.event = self.ep_robot.gimbal.moveto(
                    pitch=gimbal_pitch,
                    yaw=gimbal_yaw,
-                     pitch_speed=gimbal_pitch_speed,
-                     yaw_speed=gimbal_yaw_speed,
+                     pitch_speed=20,
+                     yaw_speed=20,
                )
                self.gimbal_position[0] = gimbal_pitch
                self.gimbal_position[1] = gimbal_yaw
operators/sentence_transformers_op.py CHANGED
@@ -12,12 +12,13 @@ SHOULD_NOT_BE_INCLUDED = [
    "utils.py",
    "sentence_transformers_op.py",
    "chatgpt_op.py",
-     "mistral_op.py",
+     "llm_op.py",
 ]

 SHOULD_BE_INCLUDED = [
-     "planning_op.py",
+     "webcam.py",
    "object_detection.py",
+     "planning_op.py",
    "plot.py",
 ]
@@ -43,7 +44,6 @@ def get_all_functions(path):


 def search(query_embedding, corpus_embeddings, paths, raw, k=5, file_extension=None):
-     # TODO: filtering by file extension
    cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
    top_results = torch.topk(cos_scores, k=min(k, len(cos_scores)), sorted=True)
    out = []
@@ -83,15 +83,9 @@ class Operator:
                self.raw,
            )
            [raw, path, score] = output[0:3]
-             print(
-                 (
-                     score,
-                     pa.array([{"raw": raw, "path": path, "query": values[0]}]),
-                 )
-             )
            send_output(
                "raw_file",
-                 pa.array([{"raw": raw, "path": path, "query": values[0]}]),
+                 pa.array([{"raw": raw, "path": path, "user_message": values[0]}]),
                dora_event["metadata"],
            )
        else:
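
Note: the retrieval above is a standard bi-encoder search: embed the query, score it against precomputed corpus embeddings with util.cos_sim, take the top-k. A minimal sketch; the model name and the toy corpus are assumptions, not taken from this file:

    import torch
    from sentence_transformers import SentenceTransformer, util

    model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed model
    corpus = ["def plot(): ...", "def detect(): ...", "def plan(): ..."]
    corpus_embeddings = model.encode(corpus, convert_to_tensor=True)

    query_embedding = model.encode("draw bounding boxes", convert_to_tensor=True)
    cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
    top = torch.topk(cos_scores, k=min(2, len(cos_scores)), sorted=True)
    for score, idx in zip(top.values, top.indices):
        print(round(score.item(), 3), corpus[int(idx)])
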
operators/whisper_op.py CHANGED
@@ -1,10 +1,3 @@
- # Run this in the consol first :
-
- # pip install sounddevice numpy scipy pydub keyboard
-
- # Don't forget to install whisper
-
-
 import pyarrow as pa
 import whisper
@@ -27,14 +20,6 @@ class Operator:
        if dora_event["type"] == "INPUT":
            audio = dora_event["value"].to_numpy()
            audio = whisper.pad_or_trim(audio)
-
-             ## make log-Mel spectrogram and move to the same device as the model
-             # mel = whisper.log_mel_spectrogram(audio).to(model.device)
-
-             ## decode the audio
-             # result = whisper.decode(model, mel, options)
            result = model.transcribe(audio, language="en")
-             text = result["text"]
-             print(text, flush=True)
-             send_output("text", pa.array([text]), dora_event["metadata"])
+             send_output("text", pa.array([result["text"]]), dora_event["metadata"])
        return DoraStatus.CONTINUE
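
Note: the trimmed operator leans on whisper.pad_or_trim plus model.transcribe, which handles the spectrogram and decoding internally, hence the deleted manual log_mel/decode steps. A minimal sketch; the model size and the silent input are assumptions (load_model downloads weights on first use):

    import numpy as np
    import whisper

    model = whisper.load_model("tiny")         # assumed model size
    audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz, stand-in input
    audio = whisper.pad_or_trim(audio)         # pad/crop to 30 s, as in the operator
    result = model.transcribe(audio, language="en")
    print(result["text"])
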