haixuantao committed
Commit 0827444
Parent: 9b97c01

Improving overall components
graphs/dataflow_robot_vlm.yml CHANGED
@@ -1,18 +1,22 @@
 nodes:
   ### Camera
-  - id: plot_bot
-    operator:
-      python: ../operators/plot.py
+  - id: rerun
+    custom:
+      source: dora-rerun
       inputs:
         image: webcam/image
-        user_message: whisper/text
-        position: robot/position
+        textlog_llm: whisper/text_llm
+        textlog_policy: whisper/text_policy
+      envs:
+        IMAGE_WIDTH: 1280
+        IMAGE_HEIGHT: 720
+        IMAGE_DEPTH: 3
+        RERUN_MEMORY_LIMIT: 10%
 
   - id: robot
-    operator:
-      python:
-        source: ../operators/robot.py
-        conda_env: robomaster
+    custom:
+      source: /home/peter/miniconda3/envs/robomaster/bin/python
+      args: ../operators/robot.py
       inputs:
         tick: dora/timer/millis/750
         planning_control: planning/control
@@ -28,34 +32,31 @@ nodes:
       - image
 
   - id: whisper
-    operator:
-      python: ../operators/whisper_op.py
+    custom:
+      source: ../operators/whisper_op.py
       inputs:
         audio: dora/timer/millis/1000
       outputs:
-        - text
+        - text_policy
+        - text_llm
         - led
 
   - id: llm
     operator:
       python: ../operators/llm_op.py
       inputs:
-        text: whisper/text
-        reloaded: policy/reloaded
-      outputs:
-        - init
+        text: whisper/text_llm
 
   - id: policy
     operator:
       python: ../operators/policy.py
       inputs:
-        init: llm/init
+        speech: whisper/text_policy
         reached_kitchen: planning/reached_kitchen
         reached_living_room: planning/reached_living_room
         reached_office: planning/reached_office
       outputs:
         - go_to
-        - reloaded
 
   - id: planning
     operator:
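Both whisper and robot move from the runtime-managed `operator:` form to `custom:` nodes, i.e. standalone scripts that own their own process (robot additionally pins a specific conda interpreter via `source:` plus `args:`). A minimal sketch of the custom-node pattern those scripts now follow, using the `dora.Node` API seen in robot.py below (node id and output name here are illustrative):

# Sketch of a dora custom node: the script drives its own event loop
# instead of exposing an Operator class that the runtime calls into.
import pyarrow as pa
from dora import Node

node = Node()  # attaches to the dataflow under the id given in the YAML

for event in node:
    if event["type"] == "INPUT" and event["id"] == "tick":
        # a real node would do work here; this one just emits a heartbeat
        node.send_output("status", pa.array(["alive"]))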
operators/constants.py CHANGED
@@ -7,14 +7,27 @@ import numpy as np
 
 LOCATION = {
     "HOME": {
-        "OFFICE": np.array([[-1.3, 0.0], [-1.3, 0.5]]),
-        "KITCHEN": np.array([[-1.0, -0.5], [1.5, 0.0], [1.5, 0.5]]),
+        "OFFICE": np.array([[1.0, 1.0]]),
+        "KITCHEN": np.array([[0.0, 1.0]]),
         "LIVING_ROOM": np.array([[0.0, 0.3], [-1.5, 0.7]]),
     },
-    "OFFICE": {
-        "KITCHEN": np.array([[-1.0, -0.5], [2.5, 0.0], [2.5, 0.5]]),
-    },
+    "OFFICE": {"KITCHEN": np.array([[0.0, 1.0]]), "HOME": np.array([[0.0, 0.0]])},
     "KITCHEN": {
-        "OFFICE": np.array([[2.5, 0.0], [-1.5, -0.5], [-1.5, 0.5]]),
+        "OFFICE": np.array([[1.0, 1.0]]),
+        "HOME": np.array([[0.0, 0.0]]),
     },
 }
+
+# LOCATION = {
+#     "HOME": {
+#         "OFFICE": np.array([[0.0, 0.3], [-1.5, 0.7]]),
+#         "KITCHEN": np.array([[-1.0, -0.5], [1.5, 0.0], [1.5, 0.5]]),
+#         "LIVING_ROOM": np.array([[0.0, 0.3], [-1.5, 0.7]]),
+#     },
+#     "OFFICE": {
+#         "KITCHEN": np.array([[0.0, 0.5], [1.5, -1.0]]),
+#     },
+#     "KITCHEN": {
+#         "OFFICE": np.array([[-1.5, -0.5], [-1.5, 1.0]]),
+#     },
+# }
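`LOCATION[current][goal]` is consumed by planning_op.py as an (N, 2) array of [x, y] waypoints (it checks `waypoints.shape[0]` and subtracts the robot position from `waypoints[0]`), which is why even single-waypoint routes stay wrapped as nested lists. A small hypothetical helper to illustrate the lookup:

import numpy as np
from constants import LOCATION

def next_step(current: str, goal: str, position: np.ndarray) -> np.ndarray:
    """Displacement from the robot to the first waypoint of the route."""
    waypoints = LOCATION[current][goal]  # shape (N, 2): [x, y] pairs
    return waypoints[0] - position

# e.g. from KITCHEN toward OFFICE, starting at the map origin:
print(next_step("KITCHEN", "OFFICE", np.array([0.0, 0.0])))  # -> [1. 1.]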
operators/idefics2_op_demo.py CHANGED
@@ -67,6 +67,9 @@ def ask_vlm(image, instruction):
     return generated_texts[0].split("\nAssistant: ")[1]
 
 
+import time
+
+
 class Operator:
     def __init__(self):
         self.image = None
@@ -91,17 +94,25 @@ class Operator:
                 "speak",
                 pa.array([output]),
             )
-            """
-            if "sofa" in output:
+            if "yes" in output:
+                send_output(
+                    "control",
+                    pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 0.0]),
+                )
+                time.sleep(2)
+                send_output(
+                    "control",
+                    pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]),
+                )
+            elif "no" in output:
                 send_output(
                     "control",
-                    pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 50.0]),
+                    pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 50.0]),
                 )
-            elif "back" in self.text:
+                time.sleep(2)
                 send_output(
                     "control",
-                    pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
+                    pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
                 )
-            """
 
         return DoraStatus.CONTINUE
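Reading these seven floats against the eight-value layout robot.py unpacks below ([x, y, z, xy_speed, z_speed, pitch, yaw, count], minus the trailing count) suggests index 5 is gimbal pitch and index 6 gimbal yaw, so "yes" nods the gimbal and "no" shakes it; that layout is an inference, not stated in the commit. A sketch with named indices under that assumption:

import pyarrow as pa

# Assumed layout, mirroring robot.py's unpacking without the trailing count.
X, Y, Z, XY_SPEED, Z_SPEED, PITCH, YAW = range(7)

def gimbal_command(pitch=0.0, yaw=0.0) -> pa.Array:
    values = [0.0] * 7
    values[PITCH] = pitch
    values[YAW] = yaw
    return pa.array(values)

nod = gimbal_command(pitch=50.0)   # matches the "yes" branch above
shake = gimbal_command(yaw=50.0)   # matches the "no" branch above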
operators/llm_op.py CHANGED
@@ -174,10 +174,6 @@ class Operator:
 
         gc.collect()
         torch.cuda.empty_cache()
-        time.sleep(6)
-        if not self.policy_init:
-            send_output("init", pa.array([]))
-            self.policy_init = True
 
         return DoraStatus.CONTINUE
operators/planning_op.py CHANGED
@@ -4,8 +4,8 @@ import pyarrow as pa
 from dora import DoraStatus
 from constants import LOCATION
 
-CAMERA_WIDTH = 960
-CAMERA_HEIGHT = 540
+CAMERA_WIDTH = 1280
+CAMERA_HEIGHT = 720
 
 
 def check_clear_road(bboxes, image_width, goal_x):
@@ -127,7 +127,7 @@ class Operator:
             self.waypoints.shape[0] == 1
             and np.linalg.norm(self.waypoints[0] - np.array([x, y])) < 0.2
         ):
-            print("goal reached", flush=True)
+            print(f"goal {self.goal} reached", flush=True)
             self.current_location = self.goal
             send_output(
                 f"reached_{self.goal.lower()}", pa.array(self.image.ravel())
@@ -169,21 +169,14 @@
 
         message = pa.array(
             [
-                {
-                    "action": "gimbal",
-                    "value": [10.0, float(int(goal_angle))],
-                    "count": self.count,
-                },
-                {
-                    "value": [
-                        self.waypoints[0][0] - x,
-                        self.waypoints[0][1] - y,
-                        0.0,  # -goal_angle,
-                        0.8,
-                        0.0,  # 50,
-                    ],
-                    "action": "control",
-                },
+                self.waypoints[0][0] - x,
+                self.waypoints[0][1] - y,
+                0.0,  # -goal_angle,
+                0.8,
+                0.0,  # 50,
+                10.0,
+                float(int(goal_angle)),
+                self.count,
             ]
         )
         print("sending:", message, flush=True)
operators/policy.py CHANGED
@@ -1,26 +1,26 @@
 import pyarrow as pa
 from dora import DoraStatus
-from utils import speak, play
+from utils import speak
 from time import sleep
 
 
 class Operator:
     def __init__(self):
         self.location = ["KITCHEN", "OFFICE"]
-        self.music = ["office.mp3"]
+        self.current_location = "KITCHEN"
 
     def speak(self, text: str):
         speak(text)
 
-    def play(self, file: str):
-        play(file)
-
-    def on_event(self, event: dict, send_output) -> DoraStatus:
+    def on_event(self, event, send_output):
         if event["type"] == "INPUT":
             id = event["id"]
             # On initialization
-            if id == "init":
-                send_output("go_to", pa.array([""]))
+            if id == "speech":
+                text: str = event["value"][0].as_py().lower()
+                if "stop" in text:
+                    return DoraStatus.STOP
+                # send_output("go_to", pa.array([""]))
             elif id == "reached_office":
                 pass
             elif id == "reached_kitchen":
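policy is now driven directly by transcribed speech (the `speech: whisper/text_policy` input above) and bails out with DoraStatus.STOP on a "stop" keyword. The same pattern extends naturally to destination keywords; a hypothetical sketch (the room keywords are not part of this commit):

import pyarrow as pa
from dora import DoraStatus

def handle_speech(text: str, send_output) -> DoraStatus:
    text = text.lower()
    if "stop" in text:
        return DoraStatus.STOP  # committed behavior: halt the operator
    # Hypothetical extension: map spoken rooms onto go_to goals.
    for room in ("kitchen", "office", "living room"):
        if room in text:
            send_output("go_to", pa.array([room.replace(" ", "_").upper()]))
            break
    return DoraStatus.CONTINUE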
operators/robot.py CHANGED
@@ -1,86 +1,64 @@
 from robomaster import robot, led
-from typing import Callable, Optional
-from dora import DoraStatus
 
+from dora import Node
+from time import sleep
 import numpy as np
 import pyarrow as pa
 
 
 CONN = "ap"
 
-print("Initialization...", flush=True)
 
+ep_robot = robot.Robot()
+print("Initializing robot...", flush=True)
+assert ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot"
+assert ep_robot.camera.start_video_stream(display=False), "Could not start video stream"
 
-class Operator:
-    def __init__(self):
-        self.ep_robot = robot.Robot()
-        print("Initializing robot...", flush=True)
-        assert self.ep_robot.initialize(conn_type=CONN), "Could not initialize ep_robot"
-        assert self.ep_robot.camera.start_video_stream(
-            display=False
-        ), "Could not start video stream"
+node = Node()
+ep_robot.gimbal.recenter().wait_for_completed()
+backlog = []
+last_control = ""
+position = np.array([0.0, 0.0, 0.0])
+count = -1
+event = None
+rgb = [0, 0, 0]
 
-        self.ep_robot.gimbal.recenter().wait_for_completed()
-        self.backlog = []
-        self.last_control = ""
-        self.position = np.array([0.0, 0.0, 0.0])
-        self.count = -1
-        self.event = None
-        self.rgb = [0, 0, 0]
 
-    def execute_backlog(self):
-        if len(self.backlog) > 0:
-            command = self.backlog.pop(0)
-            print(command, flush=True)
-            if command["action"] == "control":
-                [x, y, z, xy_speed, z_speed] = command["value"]
-                self.position += np.array([x, y, z])
-                self.event = self.ep_robot.chassis.move(
-                    x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
-                )
+def wait(event):
+    # Crude settling check: sleep once if the action is still in flight.
+    if event is not None and not (event._event.isSet() and event.is_completed):
+        sleep(1)
 
-            elif command["action"] == "gimbal":
-                [pitch, yaw] = command["value"]
-                self.event = self.ep_robot.gimbal.moveto(
-                    pitch=pitch, yaw=yaw, pitch_speed=50.0, yaw_speed=50.0
-                )
 
-    def on_event(
-        self,
-        dora_event: str,
-        send_output: Callable[[str, pa.Array, Optional[dict]], None],
-    ) -> DoraStatus:
-        event_type = dora_event["type"]
-        if event_type == "INPUT":
-            if dora_event["id"] == "tick":
-                if not (
-                    self.event is not None
-                    and not (self.event._event.isSet() and self.event.is_completed)
-                ):
-                    if len(self.backlog) > 0:
-                        self.execute_backlog()
-                    else:
-                        print(f"sending control reply: {self.count}", flush=True)
-                        send_output("position", pa.array(self.position))
-                        send_output("control_reply", pa.array([self.count]))
-                        return DoraStatus.CONTINUE
-
-                print("sending position", flush=True)
-                send_output("position", pa.array(self.position))
+for dora_event in node:
+    event_type = dora_event["type"]
+    if event_type == "INPUT":
+        if dora_event["id"] == "tick":
+            node.send_output("position", pa.array(position))
+            node.send_output("control_reply", pa.array([count]))
 
-            elif dora_event["id"] == "planning_control":
-                command = dora_event["value"].to_pylist()
-                self.count = command[0]["count"]
-                if len(self.backlog) == 0:
-                    self.backlog += command
-                    self.execute_backlog()
-            elif dora_event["id"] == "led":
-                [r, g, b] = dora_event["value"].to_numpy()
-                rgb = [r, g, b]
-                if rgb != self.rgb:
-                    self.ep_robot.led.set_led(
-                        comp=led.COMP_ALL, r=r, g=g, b=b, effect=led.EFFECT_ON
-                    )
-                    self.rgb = rgb
-        return DoraStatus.CONTINUE
+        elif dora_event["id"] == "planning_control":
+            [x, y, z, xy_speed, z_speed, pitch, yaw, count] = dora_event[
+                "value"
+            ].to_numpy()
+            if any([pitch, yaw]):
+                event = ep_robot.gimbal.moveto(
+                    pitch=pitch, yaw=yaw, pitch_speed=60.0, yaw_speed=50.0
+                )
+                wait(event)
+                sleep(2)
+            if any([x, y, z]):
+                event = ep_robot.chassis.move(
+                    x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
+                )
+                position = position + np.array([x, y, z])
+                wait(event)
+                sleep(6)
 
+        elif dora_event["id"] == "led":
+            [r, g, b] = dora_event["value"].to_numpy()
+            new_rgb = [r, g, b]
+            if new_rgb != rgb:
+                ep_robot.led.set_led(
+                    comp=led.COMP_ALL, r=r, g=g, b=b, effect=led.EFFECT_ON
+                )
+                rgb = new_rgb
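wait() only sleeps for one second when the action is still in flight, so motion timing rests on the fixed sleep(2)/sleep(6) calls that follow it. The robomaster action objects used here also expose wait_for_completed() (see the gimbal.recenter() call above), so a blocking variant would be:

def wait_blocking(action):
    # Block until the robomaster action reports completion,
    # instead of relying on fixed sleeps.
    if action is not None:
        action.wait_for_completed()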
operators/whisper_op.py CHANGED
@@ -16,7 +16,7 @@ SAMPLE_RATE = 16000
 MAX_DURATION = 30
 
 policy_init = True
-
+audio_data = None
 node = Node()
 
 for dora_event in node:
@@ -26,7 +26,7 @@ for dora_event in node:
     event = events.get(1.0)
     if (
         event is not None
-        and event.key == Key.alt_r
+        and (event.key == Key.alt_r or event.key == Key.ctrl_r)
        and isinstance(event, Events.Press)
     ):
 
@@ -45,13 +45,36 @@
         and isinstance(event, Events.Release)
     ):
         sd.stop()
+        if audio_data is None:
+            continue
+        audio = audio_data.ravel().astype(np.float32) / 32768.0
+
+        ## Speech to text
+        audio = whisper.pad_or_trim(audio)
+        result = model.transcribe(audio, language="en")
+        node.send_output(
+            "text_llm", pa.array([result["text"]]), dora_event["metadata"]
+        )
+        # send_output("led", pa.array([0, 0, 255]))
+
+        gc.collect()
+        torch.cuda.empty_cache()
+
+    elif (
+        event is not None
+        and event.key == Key.ctrl_r
+        and isinstance(event, Events.Release)
+    ):
+        sd.stop()
+        if audio_data is None:
+            continue
         audio = audio_data.ravel().astype(np.float32) / 32768.0
 
         ## Speech to text
         audio = whisper.pad_or_trim(audio)
         result = model.transcribe(audio, language="en")
         node.send_output(
-            "text", pa.array([result["text"]]), dora_event["metadata"]
+            "text_policy", pa.array([result["text"]]), dora_event["metadata"]
         )
         # send_output("led", pa.array([0, 0, 255]))
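The alt_r and ctrl_r Release branches are now identical except for the output id (text_llm vs text_policy). A possible refactor sharing that tail, assuming this file's existing sd, model, node, audio_data, gc, and torch globals:

def transcribe_and_send(output_id, metadata):
    # Shared tail of both Release branches: stop recording,
    # transcribe, publish, then release GPU memory.
    sd.stop()
    if audio_data is None:
        return
    audio = audio_data.ravel().astype(np.float32) / 32768.0
    audio = whisper.pad_or_trim(audio)
    result = model.transcribe(audio, language="en")
    node.send_output(output_id, pa.array([result["text"]]), metadata)
    gc.collect()
    torch.cuda.empty_cache()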