haixuantao committed
Commit 8852f54
Parent: c3afd26

updating clarity of the video

graphs/dataflow_robot_vlm.yml CHANGED
```diff
@@ -16,6 +16,7 @@ nodes:
       inputs:
         tick: dora/timer/millis/750
         planning_control: planning/control
+        led: whisper/led
       outputs:
         - control_reply
         - position
@@ -33,6 +34,7 @@ nodes:
         audio: dora/timer/millis/1000
       outputs:
         - text
+        - led
 
   - id: llm
     operator:
@@ -48,7 +50,8 @@ nodes:
       python: ../operators/policy.py
       inputs:
         init: llm/init
-        goal_reached: planning/goal_reached
+        reached_kitchen: planning/reached_kitchen
+        reached_living_room: planning/reached_living_room
       outputs:
         - go_to
         - reloaded
@@ -65,7 +68,8 @@ nodes:
       queue_size: 1
       outputs:
         - control
-        - goal_reached
+        - reached_kitchen
+        - reached_living_room
```
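Taken together, the graph changes wire the new `whisper/led` output into the robot node and replace the single `goal_reached` edge with room-specific `reached_kitchen` / `reached_living_room` events. For readers new to dora, here is a minimal sketch (not part of this commit) of how an operator observes the inputs declared above: each edge name arrives as `dora_event["id"]`.

```python
# Minimal dora operator sketch (illustrative only): input names from the
# YAML graph show up as dora_event["id"] inside on_event.
import pyarrow as pa
from dora import DoraStatus


class Operator:
    def on_event(self, dora_event, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "led":  # wired from whisper/led above
                r, g, b = dora_event["value"].to_numpy()
            elif dora_event["id"] == "planning_control":  # from planning/control
                pass  # forward motion commands to the hardware here
        return DoraStatus.CONTINUE
```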
operators/llm_op.py CHANGED
```diff
@@ -3,7 +3,7 @@ import pylcs
 import os
 import pyarrow as pa
 from transformers import AutoModelForCausalLM, AutoTokenizer
-
+import torch
 
 import re
 import time
@@ -142,6 +142,7 @@ class Operator:
         dora_event,
         send_output,
     ) -> DoraStatus:
+        global model, tokenizer
         if dora_event["type"] == "INPUT" and dora_event["id"] == "text":
             input = dora_event["value"][0].as_py()
             # Path to the current file
@@ -167,7 +168,14 @@ class Operator:
             print("response: ", output, flush=True)
             with open(path, "w") as file:
                 file.write(source_code)
-            time.sleep(8)
+            del model
+            del tokenizer
+            # the model stays cached on the GPU until its memory is reclaimed, so collect garbage and empty the CUDA cache explicitly
+            import gc
+
+            gc.collect()
+            torch.cuda.empty_cache()
+            time.sleep(9)
             send_output("init", pa.array([]))
 
             ## Stopping to liberate GPU space
@@ -222,7 +230,7 @@ if __name__ == "__main__":
         [
             {
                 "path": path,
-                "user_message": "Ask model if there is someone with a red shirt, if there is, say I'm bringing coffee, and go to the kitchen, if no one go home",
+                "user_message": "go to the living room, ask the model if there is people, if there is, say i'm going to go get coffee for you, then go to the kitchen, when you reach the kitchen, check with the model if there is a person and say can i have a coffee please, then wait 10 sec and go back to the living room",
             },
         ]
     ),
```
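The replaced `time.sleep(8)` only waited out GPU memory pressure; the new code tears the model down explicitly before the rewritten policy reloads. The pattern generalizes to a small helper, sketched here on the assumption that the caller drops its own references first (`free_cuda_cache` is hypothetical, not part of the repository):

```python
# Hypothetical helper capturing the teardown pattern used above.
import gc

import torch


def free_cuda_cache() -> None:
    """Collect garbage and return PyTorch's cached CUDA memory to the driver.

    Tensors that are still referenced stay allocated, so callers must first
    drop their own references, exactly as llm_op.py does with `del model`
    and `del tokenizer`.
    """
    gc.collect()
    torch.cuda.empty_cache()


# usage mirroring the commit:
#   del model, tokenizer
#   free_cuda_cache()
```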
operators/planning_op.py CHANGED
```diff
@@ -2,7 +2,7 @@ import time
 import numpy as np
 import pyarrow as pa
 from dora import DoraStatus
-
+from constants import KITCHEN, LIVING_ROOM
 
 CAMERA_WIDTH = 960
 CAMERA_HEIGHT = 540
@@ -100,31 +100,47 @@ class Operator:
 
             if len(dora_event["value"]) > 0:
                 self.waypoints = dora_event["value"].to_numpy().reshape((-1, 2))
+
         elif id == "position":
             print("got position:", dora_event["value"], flush=True)
-            ## No bounding box yet
-            if self.waypoints is None:
-                print("no waypoint", flush=True)
+            value = dora_event["value"].to_numpy()
+            [x, y, z] = value
+            self.position = [x, y, z]
+            if self.image is None:
+                print("no image", flush=True)
                 return DoraStatus.CONTINUE
+            ## No bounding box yet
             if self.completed == False:
                 print("not completed", flush=True)
                 return DoraStatus.CONTINUE
-            value = dora_event["value"].to_numpy()
-            [x, y, z] = value
-            self.position = [x, y, z]
 
+            if self.waypoints is None:
+                print("no waypoint", flush=True)
+                return DoraStatus.CONTINUE
+            # Set waypoints to None if the goal is reached
             # Remove waypoints if completed
-            if (
-                len(self.waypoints) > 0
-                and np.linalg.norm(self.waypoints[0] - [x, y]) < 0.1
+            elif (
+                self.waypoints.shape[0] == 1
+                and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.2
             ):
-                self.waypoints = self.waypoints[1:]
-                print("removing waypoints", flush=True)
-                if len(self.waypoints) == 0:
                 print("goal reached", flush=True)
-                send_output("goal_reached", pa.array(self.image.ravel()))
+                goal = self.waypoints[0]
+                if np.linalg.norm(KITCHEN[-1] - goal) < 0.2:
+                    send_output("reached_kitchen", pa.array(self.image.ravel()))
+                elif np.linalg.norm(LIVING_ROOM[-1] - goal) < 0.2:
+                    send_output("reached_living_room", pa.array(self.image.ravel()))
+                else:
+                    raise ValueError(
+                        "Could not find goal reached: ", goal, "pos:", self.position
+                    )
                 self.waypoints = None
                 return DoraStatus.CONTINUE
+            elif (
+                self.waypoints.size > 0
+                and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.1
+            ):
+                self.waypoints = self.waypoints[1:]
+                print("removing waypoints", flush=True)
 
             z = np.deg2rad(z)
             self.tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]])
@@ -156,7 +172,7 @@ class Operator:
                 [
                     {
                         "action": "gimbal",
-                        "value": [0.0, goal_angle],
+                        "value": [10.0, goal_angle],
                         "count": self.count,
                     },
                     {
@@ -164,7 +180,7 @@ class Operator:
                             self.waypoints[0][0] - x,
                             self.waypoints[0][1] - y,
                             0.0,  # -goal_angle,
-                            0.6,
+                            0.8,
                             0.0,  # 50,
                         ],
                         "action": "control",
```
operators/plot.py CHANGED
```diff
@@ -127,6 +127,15 @@ class Operator:
         cv2.putText(
             image, self.buffer, (20, 14 + 15 * 25), FONT, 0.5, (190, 250, 0), 2
         )
+        cv2.putText(
+            image,
+            f"pos: {self.position}",
+            (20, 20),
+            FONT,
+            0.5,
+            (190, 250, 100),
+            2,
+        )
 
         i = 0
         for text in self.submitted[::-1]:
```
operators/policy.py CHANGED
```diff
@@ -10,6 +10,9 @@ KITCHEN = np.array([[0.0, -0.2], [-1.0, -0.3], [-2.0, -0.5]]).ravel()
 
 ## Policy Operator
 class Operator:
+    def __init__(self):
+        pass
+
     def speak(self, text: str):
         speak(text)
 
@@ -28,4 +31,5 @@ class Operator:
         elif id == "reached_kitchen":
             image = event["value"].to_numpy().reshape((540, 960, 3))
             pass
+
         return DoraStatus.CONTINUE
```
operators/robot.py CHANGED
```diff
@@ -1,4 +1,4 @@
-from robomaster import robot
+from robomaster import robot, led
 from typing import Callable, Optional
 from dora import DoraStatus
 
@@ -26,6 +26,7 @@ class Operator:
         self.position = np.array([0.0, 0.0, 0.0])
         self.count = -1
         self.event = None
+        self.rgb = [0, 0, 0]
 
     def execute_backlog(self):
         if len(self.backlog) > 0:
@@ -74,5 +75,12 @@ class Operator:
             if len(self.backlog) == 0:
                 self.backlog += command
                 self.execute_backlog()
-
+        elif dora_event["id"] == "led":
+            [r, g, b] = dora_event["value"].to_numpy()
+            rgb = [r, g, b]
+            if rgb != self.rgb:
+                self.ep_robot.led.set_led(
+                    comp=led.COMP_ALL, r=r, g=g, b=b, effect=led.EFFECT_ON
+                )
+                self.rgb = rgb
         return DoraStatus.CONTINUE
```
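The LED colors travel through the graph as a plain 3-element Arrow array, and the handler caches the last color so repeated identical messages do not trigger redundant SDK calls. Both ends of that contract, sketched outside the operators (the `set_led` call is the one from the diff, left commented since it needs a connected robot):

```python
# LED message contract between whisper_op.py and robot.py (sketch).
import pyarrow as pa

# producer side (whisper_op.py): green while recording, blue once text is sent
msg = pa.array([0, 255, 0])

# consumer side (robot.py): unpack, then deduplicate before calling the SDK
r, g, b = msg.to_numpy()
last_rgb = [0, 0, 0]
if [r, g, b] != last_rgb:
    # ep_robot.led.set_led(comp=led.COMP_ALL, r=r, g=g, b=b, effect=led.EFFECT_ON)
    last_rgb = [r, g, b]
```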
operators/utils.py CHANGED
```diff
@@ -82,7 +82,7 @@ def ask_vlm(image, instruction):
     inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
 
     generated_ids = model.generate(
-        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
+        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=50
     )
     generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
 
```
operators/whisper_op.py CHANGED
```diff
@@ -11,7 +11,7 @@ import sounddevice as sd
 model = whisper.load_model("base")
 
 SAMPLE_RATE = 16000
-MAX_DURATION = 15
+MAX_DURATION = 20
 
 
 class Operator:
@@ -24,12 +24,13 @@ class Operator:
         dora_event,
         send_output,
     ) -> DoraStatus:
+        global model
         if dora_event["type"] == "INPUT":
             ## Check for keyboard event
             with keyboard.Events() as events:
                 event = events.get(1.0)
             if event is not None and event.key == Key.up:
-
+                send_output("led", pa.array([0, 255, 0]))
                 ## Microphone
                 audio_data = sd.rec(
                     int(SAMPLE_RATE * MAX_DURATION),
@@ -47,4 +48,11 @@ class Operator:
                 send_output(
                     "text", pa.array([result["text"]]), dora_event["metadata"]
                 )
+                send_output("led", pa.array([0, 0, 255]))
+                del model
+
+                import gc  # garbage collection
+
+                gc.collect()
+
         return DoraStatus.CONTINUE
```
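Bumping `MAX_DURATION` from 15 to 20 grows the preallocated capture buffer from 240,000 to 320,000 frames at 16 kHz, i.e. five more seconds of speech per key press. The record-then-transcribe flow in isolation, as a sketch using the same whisper and sounddevice calls as the operator above:

```python
# Record-and-transcribe sketch under the operator's constants.
import sounddevice as sd
import whisper

SAMPLE_RATE = 16000
MAX_DURATION = 20  # seconds -> 16000 * 20 = 320_000 frames preallocated

model = whisper.load_model("base")

audio = sd.rec(
    int(SAMPLE_RATE * MAX_DURATION),
    samplerate=SAMPLE_RATE,
    channels=1,
)
sd.wait()  # block until the buffer is full
result = model.transcribe(audio.ravel().astype("float32"), language="en")
print(result["text"])
```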