haixuantao committed on
Commit 23f48d8
1 Parent(s): fe79d42

Fix demo testing

graphs/dataflow_robot_vlm.yml CHANGED
@@ -50,7 +50,7 @@ nodes:
         init: llm/init
         goal_reached: planning/goal_reached
       outputs:
-        - set_goal
+        - go_to
         - reloaded

   - id: planning
@@ -59,7 +59,7 @@ nodes:
       inputs:
         position: robot/position
         control_reply: robot/control_reply
-        set_goal: policy/set_goal
+        set_goal: policy/go_to
         image: webcam/image
       outputs:
         - control
operators/llm_op.py CHANGED
@@ -30,6 +30,7 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto",
     trust_remote_code=True,
     revision="main",
+    max_length=1024,
 ).to("cuda:0")


@@ -166,9 +167,12 @@ class Operator:
             print("response: ", output, flush=True)
             with open(path, "w") as file:
                 file.write(source_code)
-            time.sleep(10)
+            time.sleep(8)
             send_output("init", pa.array([]))

+            ## Stopping to liberate GPU space
+            return DoraStatus.STOP
+
         return DoraStatus.CONTINUE

     def ask_llm(self, prompt):
@@ -218,7 +222,7 @@ if __name__ == "__main__":
         [
             {
                 "path": path,
-                "user_message": "set the goal to kitchen. When you are in the kitchen ask the model if there is someone with blue shirt, if there is speak and say can I have coffee, if there is no one set the goal to home ",
+                "user_message": "Ask model if there is someone with a red shirt, if there is, say I'm bringing coffee, and go to the kitchen, if no one go home",
             },
         ]
     ),
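
The new `return DoraStatus.STOP` turns the llm node into a one-shot operator: once the regenerated source is written to disk and `init` is emitted, the node exits so its language-model weights stop occupying the GPU that the idefics2 model in operators/utils.py needs. A minimal sketch of that pattern (a hypothetical operator, not this file):

    from dora import DoraStatus
    import pyarrow as pa


    class Operator:
        def on_event(self, dora_event, send_output) -> DoraStatus:
            if dora_event["type"] == "INPUT":
                # Do the one-shot work, hand off, then exit to free GPU memory.
                send_output("init", pa.array([]))
                return DoraStatus.STOP
            return DoraStatus.CONTINUE
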
operators/planning_op.py CHANGED
@@ -109,7 +109,7 @@ class Operator:
             self.waypoints = dora_event["value"].to_numpy().reshape((-1, 2))
         elif id == "position":
             ## No bounding box yet
-            if self.waypoints is None or len(self.waypoints) == 0:
+            if self.waypoints is None:
                 print("no waypoint", flush=True)
                 return DoraStatus.CONTINUE
             if self.completed == False:
@@ -126,10 +126,11 @@ class Operator:
             ):
                 self.waypoints = self.waypoints[1:]
                 print("removing waypoints", flush=True)
-                if len(self.waypoints) == 0:
-                    print("no waypoint", flush=True)
-                    send_output("goal_reached", pa.array(self.image.ravel()))
-                    return DoraStatus.CONTINUE
+                if len(self.waypoints) == 0:
+                    print("goal reached", flush=True)
+                    send_output("goal_reached", pa.array(self.image.ravel()))
+                    self.waypoints = None
+                    return DoraStatus.CONTINUE

             z = np.deg2rad(z)
             self.tf = np.array([[np.cos(z), -np.sin(z)], [np.sin(z), np.cos(z)]])
@@ -178,8 +179,8 @@ class Operator:
                 # },
                 {
                     "value": [
-                        goal[0],
-                        goal[1],
+                        self.waypoints[0][0],
+                        self.waypoints[0][1],
                         0.0,  # -goal_angle,
                         0.6,
                         0.0,  # 50,
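
The planning change resets `self.waypoints` to `None` after the final waypoint is consumed, so `goal_reached` fires exactly once per goal and the `position` handler then idles until the next `set_goal` arrives. A rough standalone sketch of that bookkeeping (a hypothetical helper, assuming waypoints are (x, y) rows as in the hunk above):

    import numpy as np


    def advance(waypoints, reached_current):
        # Mirrors the updated logic: returns (next_waypoints, goal_reached).
        if waypoints is None:
            return None, False  # nothing to do until a new goal is set
        if reached_current:
            waypoints = waypoints[1:]  # drop the waypoint we just reached
        if len(waypoints) == 0:
            return None, True  # signal goal_reached once, then wait for a new goal
        return waypoints, False


    waypoints = np.array([[0.5, 0.0], [1.0, -1.0]])
    waypoints, done = advance(waypoints, reached_current=True)  # one waypoint left, done=False
    waypoints, done = advance(waypoints, reached_current=True)  # waypoints=None, done=True
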
operators/plot.py CHANGED
@@ -1,5 +1,5 @@
 import cv2
-
+import numpy as np

 from dora import DoraStatus

@@ -16,6 +16,78 @@ writer = cv2.VideoWriter(
     (CAMERA_WIDTH, CAMERA_HEIGHT),
 )

+GOAL_OBJECTIVES = [10, 0]
+
+import numpy as np
+
+
+def find_largest_gap_midpoint(bboxes, image_width, goal_x):
+    """
+    Find the x-coordinate of the midpoint of the largest gap along the x-axis where no bounding boxes overlap.
+
+    Parameters:
+    - bboxes (np.array): A numpy array where each row represents a bounding box with
+      the format [min_x, min_y, max_x, max_y, confidence, label].
+    - image_width (int): The width of the image in pixels.
+
+    Returns:
+    - int: The x-coordinate of the midpoint of the largest gap where no bounding boxes overlap.
+    """
+    if bboxes.size == 0:
+        # No bounding boxes, return the midpoint of the image as the largest gap
+        return image_width // 2
+
+    events = []
+    for bbox in bboxes:
+        min_x, max_x = bbox[0], bbox[2]
+        events.append((min_x, "enter"))
+        events.append((max_x, "exit"))
+
+    # Include image boundaries as part of the events
+    events.append(
+        (0, "exit")
+    )  # Start of the image, considered an 'exit' point for logic simplicity
+    events.append(
+        (image_width, "enter")
+    )  # End of the image, considered an 'enter' point
+
+    # Sort events, with exits before enters at the same position to ensure gap calculation correctness
+    events.sort(key=lambda x: (x[0], x[1] == "enter"))
+
+    # Sweep line algorithm to find the largest gap
+    current_boxes = 1
+    last_x = 0
+    largest_gap = 0
+    gap_start_x = None
+    largest_gap_mid = None  # Midpoint of the largest gap
+
+    for x, event_type in events:
+        if current_boxes == 0 and gap_start_x is not None:
+            # Calculate gap
+            gap = x - gap_start_x
+            if gap > largest_gap:
+                largest_gap = gap
+                gap_end_x = x
+                largest_gap_mid = (gap_start_x + x) // 2
+                if goal_x < gap_end_x and goal_x > gap_start_x:
+                    return goal_x
+                return largest_gap_mid
+                # elif goal_x > gap_end_x:
+                #     return max(gap_end_x - 50, largest_gap_mid)
+                # elif goal_x < gap_start_x:
+                #     return min(gap_start_x + 50, largest_gap_mid)
+
+        if event_type == "enter":
+            current_boxes += 1
+            if current_boxes == 1:
+                gap_start_x = None  # No longer in a gap
+        elif event_type == "exit":
+            current_boxes -= 1
+            if current_boxes == 0:
+                gap_start_x = x  # Start of a potential gap
+
+    return largest_gap_mid
+

 class Operator:
     """
@@ -27,6 +99,8 @@ class Operator:
         self.buffer = ""
         self.submitted = []
         self.lines = []
+        self.gap_x = CAMERA_WIDTH // 2
+        self.position = [0, 0, 0]

     def on_event(
         self,
@@ -36,6 +110,13 @@
         if dora_event["type"] == "INPUT":
             id = dora_event["id"]
             value = dora_event["value"]
+
+            if id == "position":
+
+                value = dora_event["value"].to_numpy()
+                [x, y, z] = value
+                self.position = [x, y, z]
+
             if id == "image":

                 image = (
@@ -74,6 +155,12 @@
                 return DoraStatus.STOP
             elif id == "keyboard_buffer":
                 self.buffer = value[0].as_py()
+            elif id == "bbox":
+                self.bboxs = value.to_numpy().reshape((-1, 6))
+
+                self.gap_x = find_largest_gap_midpoint(
+                    self.bboxs, image_width=CAMERA_WIDTH, goal_x=10
+                )
             elif "message" in id:
                 self.submitted += [
                     {
@@ -86,3 +173,8 @@
                 ]

         return DoraStatus.CONTINUE
+
+
+## Angle = arctan(projected object y / x)
+
+## Linear relation: 0 - 60 ; 0 - CAMERA_WIDTH
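
`find_largest_gap_midpoint` sweeps the detected bounding boxes along the x-axis and keeps the goal direction when it falls inside a free gap, otherwise it returns the midpoint of the first free gap it finds. A quick usage sketch (assuming the function from the hunk above is in scope; the box coordinates are made up):

    import numpy as np

    # One detected obstacle spanning x = 200..400 in a 960-pixel-wide frame.
    bboxes = np.array([[200.0, 0.0, 400.0, 100.0, 0.9, 0.0]])

    print(find_largest_gap_midpoint(bboxes, image_width=960, goal_x=10))   # 10: goal lies inside the free gap [0, 200]
    print(find_largest_gap_midpoint(bboxes, image_width=960, goal_x=300))  # 100: goal is blocked, midpoint of the gap [0, 200]
    print(find_largest_gap_midpoint(np.array([]), image_width=960, goal_x=10))  # 480: no detections, image centre
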
operators/policy.py CHANGED
@@ -1,42 +1,27 @@
-from dora import DoraStatus
 import numpy as np
 import pyarrow as pa
-from idefics2_utils import ask_vlm
-import pyttsx3
-
+from dora import DoraStatus
+from utils import ask_vlm, speak

-KITCHEN = np.array([[0.5, 0], [0.5, -0.5], [1.0, -1.0]]).ravel()
-HOME = np.array([[0.5, -0.5], [0, 0]]).ravel()
+COUCH = np.array([[0.5, 0], [0.5, 0.5]]).ravel()
+KITCHEN = np.array([[0.5, 0.0], [1.0, -1.0]]).ravel()
+HOME = np.array([[0.5, 0.0], [0.0, 0.0]]).ravel()


 ## Policy Operator
 class Operator:
-    def __init__(self):
-        engine = pyttsx3.init("espeak")
-        voices = engine.getProperty("voices")
-        engine.setProperty("voice", voices[3].id)
-        self.engine = engine
+    def speak(self, text: str):
+        speak(text)

-    def speak(self, text: str):
-        self.engine.say(text)
-
-    # Ask vision model for information
-    def ask_model(self, image: np.ndarray, text: str) -> str:
+    def ask_model(self, image, text: str) -> bool:
         text = ask_vlm(image, text)
         return "Yes, " in text

-    def on_event(
-        self,
-        dora_event: dict,
-        send_output,
-    ) -> DoraStatus:
+    def on_event(self, dora_event: dict, send_output) -> DoraStatus:
         if dora_event["type"] == "INPUT":
             id = dora_event["id"]
-            # On initialization
             if id == "init":
-                send_output("set_goal", pa.array([]))
-
-            # On destination goal reached
+                send_output("go_to", pa.array([]))
             elif id == "goal_reached":
                 image = dora_event["value"].to_numpy().reshape((540, 960, 3))
                 pass
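
The `goal_reached` branch is left as `pass`; in this demo the llm operator is meant to regenerate this handler at runtime from the `user_message` prompt in operators/llm_op.py. A hypothetical hand-written version of the behaviour that prompt describes (the question string and waypoint routing below are illustrative, not part of the commit):

    def on_event(self, dora_event: dict, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            id = dora_event["id"]
            if id == "init":
                send_output("go_to", pa.array([]))
            elif id == "goal_reached":
                image = dora_event["value"].to_numpy().reshape((540, 960, 3))
                if self.ask_model(image, "Is there someone with a red shirt?"):
                    self.speak("I'm bringing coffee")
                    send_output("go_to", pa.array(KITCHEN))
                else:
                    send_output("go_to", pa.array(HOME))
        return DoraStatus.CONTINUE
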
operators/utils.py ADDED
@@ -0,0 +1,85 @@
+import torch
+
+
+from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
+
+import numpy as np
+import pyttsx3
+
+
+START_TO_COUCH = np.array([[0.5, 0], [0.5, 0.5]]).ravel()
+COUCH_TO_KITCHEN = np.array([[0.5, -0.5], [1.0, -1.0]]).ravel()
+KITCHEN_TO_START = np.array([[0.5, -0.5], [0, 0]]).ravel()
+
+engine = pyttsx3.init("espeak")
+voices = engine.getProperty("voices")
+engine.setProperty("voice", voices[3].id)
+
+
+def speak(text):
+    print(f"said {text}", flush=True)
+    engine.say(text)
+    engine.runAndWait()
+
+
+MODE = "quantized"
+DEVICE = "cuda"
+PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
+BAD_WORDS_IDS = PROCESSOR.tokenizer(
+    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
+).input_ids
+EOS_WORDS_IDS = PROCESSOR.tokenizer(
+    "<end_of_utterance>", add_special_tokens=False
+).input_ids + [PROCESSOR.tokenizer.eos_token_id]
+
+# Load model
+if MODE == "regular":
+    model = AutoModelForVision2Seq.from_pretrained(
+        "HuggingFaceM4/idefics2-tfrm-compatible",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+        _attn_implementation="flash_attention_2",
+        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
+    ).to(DEVICE)
+elif MODE == "quantized":
+    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
+    model = AutoModelForVision2Seq.from_pretrained(
+        quant_path, trust_remote_code=True
+    ).to(DEVICE)
+elif MODE == "fused_quantized":
+    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
+    quantization_config = AwqConfig(
+        bits=4,
+        fuse_max_seq_len=4096,
+        modules_to_fuse={
+            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
+            "mlp": ["gate_proj", "up_proj", "down_proj"],
+            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
+            "use_alibi": False,
+            "num_attention_heads": 32,
+            "num_key_value_heads": 8,
+            "hidden_size": 4096,
+        },
+    )
+    model = AutoModelForVision2Seq.from_pretrained(
+        quant_path, quantization_config=quantization_config, trust_remote_code=True
+    ).to(DEVICE)
+else:
+    raise ValueError("Unknown mode")
+
+
+def ask_vlm(image, instruction):
+    prompts = [
+        "User:",
+        image,
+        f"{instruction}.<end_of_utterance>\n",
+        "Assistant:",
+    ]
+    inputs = PROCESSOR(prompts)
+    inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
+
+    generated_ids = model.generate(
+        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
+    )
+    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
+    return generated_texts[0].split("\nAssistant: ")[1]
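
operators/utils.py centralises the idefics2 vision-language helper and the espeak text-to-speech call so the policy (and any regenerated code) can import them. A minimal usage sketch (assumes the file is importable as `utils`, a CUDA GPU is available, and uses a blank placeholder frame instead of a real camera image):

    import numpy as np
    from utils import ask_vlm, speak

    # Same 540x960x3 frame shape the policy operator receives.
    frame = np.zeros((540, 960, 3), dtype=np.uint8)

    answer = ask_vlm(frame, "Is there someone with a red shirt")
    print(answer)
    if "Yes, " in answer:
        speak("I'm bringing coffee")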