haixuantao committed on
Commit 357c750
1 Parent(s): 034b730

Adding latest `WIP`

.gitignore CHANGED
@@ -3,4 +3,9 @@ graphs/yolov5n.pt
 operators/__pycache__/
 __pycache__/
 *.avi
-*.txt
+*.txt
+
+
+## TODO:
+- [ ] Make human direct using voice
+# - [ ] Make robot talk
README.md ADDED
@@ -0,0 +1,21 @@
+# Getting Started
+
+Create a new conda environment for robomaster:
+
+```bash
+conda create -n robomaster python=3.8
+pip install robomaster dora-rs
+```
+
+Create a new conda environment for idefics2. This requirements file assumes that you are using CUDA 12.2 (cu122).
+
+```bash
+conda create -n idefics2 python=3.10
+pip install -r requirements.txt
+```
+
+```bash
+export HF_TOKEN=<TOKEN>
+dora up
+dora start graphs/dataflow_robot_vlm.yml --attach --hot-reload
+```
graphs/dataflow_robot_vlm.yml CHANGED
@@ -11,25 +11,26 @@ nodes:
 
   - id: vlm
     operator:
-      python: ../operators/chatgpt_op.py
+      python: ../operators/idefics2_op.py
      inputs:
        image:
          source: webcam/image
          queue_size: 1
        instruction: keyboard/submitted
+        control_reply: robot/control_reply
      outputs:
        - assistant_message
 
  - id: robot
    operator:
-      python: ../operators/robot.py
+      python:
+        source: ../operators/robot.py
+        conda_env: robomaster
      inputs:
-        tick:
-          source: dora/timer/millis/2000
-          queue_size: 1
-        control:
-          source: vlm/assistant_message
-          queue_size: 1
+        tick: dora/timer/millis/750
+        control: keyboard/submitted
+      outputs:
+        - control_reply
 
  - id: webcam
    custom:
@@ -43,3 +44,19 @@ nodes:
      outputs:
        - buffer
        - submitted
+
+  - id: whisper
+    operator:
+      python: ../operators/whisper_op.py
+      inputs:
+        audio: microphone/audio
+      outputs:
+        - text
+
+  - id: microphone
+    operator:
+      python: ../operators/microphone_op.py
+      inputs:
+        record: keyboard/submitted
+      outputs:
+        - audio
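
Note that the new `whisper` and `microphone` nodes point at `../operators/whisper_op.py` and `../operators/microphone_op.py`, which are not part of this commit. As a rough sketch only, here is what `whisper_op.py` could look like, assuming the same dora operator API used by the other operators in this diff, the `openai-whisper` package, and mono 16 kHz float32 audio from the microphone node (all assumptions, not code from this repo):

```python
from dora import DoraStatus
import numpy as np
import pyarrow as pa
import whisper  # assumption: openai-whisper installed in the idefics2 environment

# Load a small model once at import time, mirroring how idefics2_utils.py loads its model.
MODEL = whisper.load_model("base")


class Operator:
    def on_event(self, dora_event, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT" and dora_event["id"] == "audio":
            # Assumption: the microphone operator sends mono float32 PCM at 16 kHz.
            audio = dora_event["value"].to_numpy().astype(np.float32)
            result = MODEL.transcribe(audio, language="en")
            send_output("text", pa.array([result["text"]]), dora_event["metadata"])
        return DoraStatus.CONTINUE
```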
operators/chatgpt_op.py DELETED
@@ -1,159 +0,0 @@
-from dora import DoraStatus
-import os
-import pyarrow as pa
-
-
-import requests
-
-import os
-
-import base64
-import requests
-from io import BytesIO
-import numpy as np
-import cv2
-
-
-def encode_numpy_image(np_image):
-    # Convert the NumPy array to a PIL Image
-    cv2.resize(np_image, (512, 512))
-    _, buffer = cv2.imencode(
-        ".png", np_image
-    )  # You can change '.png' to another format if needed
-
-    # Convert the buffer to a byte stream
-    byte_stream = BytesIO(buffer)
-
-    # Encode the byte stream to base64
-    base64_encoded_image = base64.b64encode(byte_stream.getvalue()).decode("utf-8")
-    return base64_encoded_image
-
-
-CAMERA_WIDTH = 640
-CAMERA_HEIGHT = 480
-
-API_KEY = os.getenv("OPENAI_API_KEY")
-
-
-MESSAGE_SENDER_TEMPLATE = """
-You control a robot. Don't get too close to objects.
-
-{user_message}
-
-Respond with only one of the following actions:
-- FORWARD
-- BACKWARD
-- TURN_RIGHT
-- TURN_LEFT
-- NOD_YES
-- NOD_NO
-- STOP
-
-You're last 5 actions where:
-{actions}
-"""
-
-
-import time
-
-
-def understand_image(image, user_message, actions):
-    # Getting the base64 string
-    base64_image = encode_numpy_image(image)
-    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"}
-
-    now = time.time()
-    payload = {
-        "model": "gpt-4-vision-preview",
-        "messages": [
-            {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": MESSAGE_SENDER_TEMPLATE.format(
-                            user_message="\n".join(user_message),
-                            actions="\n".join(actions[:-5]),
-                        ),
-                    },
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"data:image/jpeg;base64,{base64_image}",
-                            "detail": "low",
-                        },
-                    },
-                ],
-            }
-        ],
-        "max_tokens": 50,
-    }
-
-    response = requests.post(
-        "https://api.openai.com/v1/chat/completions", headers=headers, json=payload
-    )
-
-    print("resp:", time.time() - now)
-    return response.json()["choices"][0]["message"]["content"]
-
-
-class Operator:
-    def __init__(self):
-        self.actions = []
-        self.instruction = []
-
-    def on_event(
-        self,
-        dora_event,
-        send_output,
-    ) -> DoraStatus:
-        if dora_event["type"] == "INPUT":
-            if dora_event["id"] == "image":
-                image = (
-                    dora_event["value"]
-                    .to_numpy()
-                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
-                    .copy()
-                )
-                output = understand_image(image, self.instruction, self.actions)
-                self.actions.append(output)
-                print("response: ", output, flush=True)
-
-                send_output(
-                    "assistant_message",
-                    pa.array([f"{output}"]),
-                    dora_event["metadata"],
-                )
-            elif dora_event["id"] == "instruction":
-                self.instruction.append(dora_event["value"][0].as_py())
-                print("instructions: ", self.instruction, flush=True)
-        return DoraStatus.CONTINUE
-
-
-if __name__ == "__main__":
-    op = Operator()
-
-    # Path to the current file
-    current_file_path = __file__
-
-    # Directory of the current file
-    current_directory = os.path.dirname(current_file_path)
-
-    path = current_directory + "/test_image.jpg"
-
-    op.on_event(
-        {
-            "type": "INPUT",
-            "id": "code_modifier",
-            "value": pa.array(
-                [
-                    {
-                        "path": path,
-                        "user_message": "change planning to make gimbal follow bounding box ",
-                    },
-                ]
-            ),
-            "metadata": [],
-        },
-        print,
-    )
operators/idefics2_op.py ADDED
@@ -0,0 +1,97 @@
+from dora import DoraStatus
+import os
+import pyarrow as pa
+
+
+import cv2
+
+from idefics2_utils import ask_vlm
+
+
+from RealtimeTTS import TextToAudioStream, SystemEngine
+
+engine = SystemEngine()
+stream = TextToAudioStream(engine)
+
+CAMERA_WIDTH = 960
+CAMERA_HEIGHT = 540
+
+
+FONT = cv2.FONT_HERSHEY_SIMPLEX
+
+import pyttsx3
+
+engine = pyttsx3.init("espeak")
+voices = engine.getProperty("voices")
+engine.setProperty("voice", voices[11].id)  # English
+
+
+def speak(text):
+    engine.say(text)
+    engine.runAndWait()
+
+
+class Operator:
+    def __init__(self):
+        self.completed = True
+        self.instruction = "What is in the image?"
+        self.last_message = ""
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            if dora_event["id"] == "image":
+                if True:
+                    image = (
+                        dora_event["value"]
+                        .to_numpy()
+                        .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+                        .copy()
+                    )
+                    cv2.imshow("frame2", image)
+                    if cv2.waitKey(1) & 0xFF == ord("q"):
+                        return DoraStatus.CONTINUE
+                    output = ask_vlm(image, self.instruction)
+                    cv2.putText(
+                        image,
+                        output,
+                        (20, 14 + 15 * 25),
+                        FONT,
+                        0.5,
+                        (190, 250, 0),
+                        2,
+                    )
+
+                    if self.last_message != output:
+                        speak(output)
+                        print("response: ", output, flush=True)
+                        send_output(
+                            "assistant_message",
+                            pa.array([output]),
+                            dora_event["metadata"],
+                        )
+
+                        # stream.feed(output)
+
+                        # stream.play()
+                        self.last_message = output
+                        self.completed = False
+                else:
+                    print("Command not complete", flush=True)
+            elif dora_event["id"] == "instruction":
+                self.instruction = dora_event["value"][0].as_py()
+                print("instructions: ", self.instruction, flush=True)
+            elif dora_event["id"] == "control_reply":
+                control_reply = dora_event["value"][0].as_py()
+
+                if self.last_message == control_reply:
+                    self.completed = True
+                else:
+                    print(
+                        f"expected: {self.last_message}, but got: {control_reply}",
+                        flush=True,
+                    )
+        return DoraStatus.CONTINUE
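
The deleted `chatgpt_op.py` kept a small `__main__` harness for exercising the operator outside of dora. A comparable sketch for the new operator, assuming a local 960x540 test frame (the file name is a placeholder) and that the idefics2 model stack from `idefics2_utils.py` is available on this machine:

```python
import cv2
import pyarrow as pa

from idefics2_op import CAMERA_HEIGHT, CAMERA_WIDTH, Operator

op = Operator()

# Any BGR frame works; "test_image.jpg" is a placeholder path.
frame = cv2.imread("test_image.jpg")
frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT))

# Feed one synthetic "image" input and print whatever the operator would send.
op.on_event(
    {
        "type": "INPUT",
        "id": "image",
        "value": pa.array(frame.ravel()),
        "metadata": {},
    },
    lambda output_id, data, metadata=None: print(output_id, data),
)
```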
operators/idefics2_utils.py ADDED
@@ -0,0 +1,69 @@
+import requests
+import torch
+from PIL import Image
+from io import BytesIO
+
+from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
+
+
+MODE = "quantized"
+DEVICE = "cuda"
+PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
+BAD_WORDS_IDS = PROCESSOR.tokenizer(
+    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
+).input_ids
+EOS_WORDS_IDS = PROCESSOR.tokenizer(
+    "<end_of_utterance>", add_special_tokens=False
+).input_ids + [PROCESSOR.tokenizer.eos_token_id]
+
+# Load model
+if MODE == "regular":
+    model = AutoModelForVision2Seq.from_pretrained(
+        "HuggingFaceM4/idefics2-tfrm-compatible",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+        _attn_implementation="flash_attention_2",
+        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
+    ).to(DEVICE)
+elif MODE == "quantized":
+    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
+    model = AutoModelForVision2Seq.from_pretrained(
+        quant_path, trust_remote_code=True
+    ).to(DEVICE)
+elif MODE == "fused_quantized":
+    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
+    quantization_config = AwqConfig(
+        bits=4,
+        fuse_max_seq_len=4096,
+        modules_to_fuse={
+            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
+            "mlp": ["gate_proj", "up_proj", "down_proj"],
+            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
+            "use_alibi": False,
+            "num_attention_heads": 32,
+            "num_key_value_heads": 8,
+            "hidden_size": 4096,
+        },
+    )
+    model = AutoModelForVision2Seq.from_pretrained(
+        quant_path, quantization_config=quantization_config, trust_remote_code=True
+    ).to(DEVICE)
+else:
+    raise ValueError("Unknown mode")
+
+
+def ask_vlm(image, instruction):
+    prompts = [
+        "User:",
+        image,
+        f"{instruction}.<end_of_utterance>\n",
+        "Assistant:",
+    ]
+    inputs = PROCESSOR(prompts)
+    inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
+
+    generated_ids = model.generate(
+        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
+    )
+    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
+    return generated_texts[0].split("\nAssistant: ")[1]
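
For reference, a minimal standalone usage sketch of `ask_vlm`. Converting the OpenCV BGR frame to an RGB PIL image is an assumption about what the idefics2 processor expects; the operator in this commit passes the raw numpy array straight through:

```python
import cv2
from PIL import Image

from idefics2_utils import ask_vlm

# "frame.jpg" is a placeholder; use any locally saved camera frame.
bgr = cv2.imread("frame.jpg")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)

print(ask_vlm(Image.fromarray(rgb), "What is in front of the robot?"))
```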
operators/opencv_stream.py CHANGED
@@ -7,8 +7,9 @@ node = Node()
 TCP_STREAM_URL = "tcp://192.168.2.1:40921"
 # Global variables, change it to adapt your needs
 
-CAMERA_WIDTH = 640
-CAMERA_HEIGHT = 480
+
+CAMERA_WIDTH = 960
+CAMERA_HEIGHT = 540
 
 # Create a VideoCapture object using the TCP stream URL
 cap = cv2.VideoCapture(TCP_STREAM_URL)
operators/plot.py CHANGED
@@ -4,15 +4,15 @@ import cv2
 from dora import DoraStatus
 
 
-CAMERA_WIDTH = 640
-CAMERA_HEIGHT = 480
+CAMERA_WIDTH = 960
+CAMERA_HEIGHT = 540
 
 FONT = cv2.FONT_HERSHEY_SIMPLEX
 
 writer = cv2.VideoWriter(
     "output01.avi",
     cv2.VideoWriter_fourcc(*"MJPG"),
-    30,
+    60,
     (CAMERA_WIDTH, CAMERA_HEIGHT),
 )
 
@@ -41,9 +41,10 @@ class Operator:
                 image = (
                     value.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)).copy()
                 )
+                cv2.resize(image, (CAMERA_HEIGHT * 2, CAMERA_WIDTH * 2))
 
                 cv2.putText(
-                    image, self.buffer, (20, 14 + 15 * 25), FONT, 0.8, (190, 250, 0), 2
+                    image, self.buffer, (20, 14 + 15 * 25), FONT, 0.5, (190, 250, 0), 2
                 )
 
                 i = 0
@@ -61,12 +62,13 @@
                         14 + (13 - i) * 25,
                     ),
                     FONT,
-                    0.8,
+                    0.5,
                     color,
                     2,
                 )
                 i += 1
                 writer.write(image)
+                cv2.resize(image, (CAMERA_HEIGHT * 3, CAMERA_WIDTH * 3))
                 cv2.imshow("frame", image)
                 if cv2.waitKey(1) & 0xFF == ord("q"):
                     return DoraStatus.STOP
@@ -76,7 +78,10 @@
                 self.submitted += [
                     {
                         "role": id,
-                        "content": value[0].as_py(),
+                        "content": value[0]
+                        .as_py()
+                        .replace("\n", " ")
+                        .replace("- ", ""),
                     }
                 ]
 
operators/robot.py CHANGED
@@ -12,12 +12,12 @@ CONN = "ap"
 class Command(Enum):
     NOD_YES = [
         {"action": "gimbal", "value": [20.0, 0.0]},
-        {"action": "gimbal", "value": [0.0, 0.0]},
+        {"action": "gimbal", "value": [-5.0, 0.0]},
     ]
     NOD_NO = [
-        {"action": "gimbal", "value": [0.0, -20.0]},
-        {"action": "gimbal", "value": [0.0, 20.0]},
-        {"action": "gimbal", "value": [0.0, 0.0]},
+        {"action": "gimbal", "value": [-5, -55.0]},
+        {"action": "gimbal", "value": [-5, 55.0]},
+        {"action": "gimbal", "value": [-5.0, 0.0]},
     ]
     FORWARD = [
         {
@@ -28,29 +28,38 @@ class Command(Enum):
     BACKWARD = [
         {
             "action": "control",
-            "value": [-0.5, 0.0, 0.0, 0.6, 0],
-        }
+            "value": [-0.5, 0, 180.0, 0.6, 30],
+        },
     ]
-    TURN_LEFT = [
-        {"action": "gimbal", "value": [0.0, -45.0]},
+    LEFT = [
+        {"action": "gimbal", "value": [-5, -30.0]},
         {
             "action": "control",
-            "value": [0.0, 0.0, 45.0, 0.0, 50],
+            "value": [0.2, -0.2, 30.0, 0.6, 30],
        },
    ]
-    TURN_RIGHT = [
-        {"action": "gimbal", "value": [0.0, 45.0]},
+    SLIGHT_LEFT = [
+        {"action": "gimbal", "value": [-0.0, -15.0]},
        {
-            "value": [0.0, 0.0, -45.0, 0.0, 50],
            "action": "control",
+            "value": [0.3, -0.1, 15.0, 0.6, 50],
        },
    ]
-    UNKNOWN = [
+    RIGHT = [
+        {"action": "gimbal", "value": [-5, 30.0]},
        {
-            "value": [0.0, 0.0, 0.0, 0.0, 0],
+            "value": [0.2, 0.2, -30.0, 0.6, 30],
            "action": "control",
-        }
+        },
    ]
+    SLIGHT_RIGHT = [
+        {"action": "gimbal", "value": [-20.0, 15.0]},
+        {
+            "value": [0.3, 0.1, -15.0, 0.6, 50],
+            "action": "control",
+        },
+    ]
+    UNKNOWN = []
    # STOP = [0, 0, 0, 0]
    # COMPLETED = [0, 0, 0, 0]
 
@@ -72,40 +81,63 @@ class Operator:
         ), "Could not start video stream"
 
         self.ep_robot.gimbal.recenter().wait_for_completed()
+        self.event = self.ep_robot.gimbal.moveto(
+            pitch=-5, yaw=0, pitch_speed=50.0, yaw_speed=50.0
+        )
         self.backlog = []
-        self.event = None
+        self.last_control = ""
+
+    def execute_backlog(self):
+        if len(self.backlog) > 0:
+            command = self.backlog.pop(0)
+            if command["action"] == "control":
+                [x, y, z, xy_speed, z_speed] = command["value"]
+                print(command, flush=True)
+                self.event = self.ep_robot.chassis.move(
+                    x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
+                )
+            elif command["action"] == "gimbal":
+                [pitch, yaw] = command["value"]
+                print(command, flush=True)
+                self.event = self.ep_robot.gimbal.moveto(
+                    pitch=pitch, yaw=yaw, pitch_speed=50.0, yaw_speed=50.0
+                )
 
     def on_event(
         self,
         dora_event: str,
-        send_output: Callable[[str, Union[bytes, pa.UInt8Array], Optional[dict]], None],
+        send_output: Callable[[str, pa.Array, Optional[dict]], None],
     ) -> DoraStatus:
         event_type = dora_event["type"]
         if event_type == "INPUT":
-            if not (
-                self.event is not None
-                and not (self.event._event.isSet() and self.event.is_completed)
-            ):
-                if dora_event["id"] == "tick":
+            if dora_event["id"] == "tick":
+                if not (
+                    self.event is not None
+                    and not (self.event._event.isSet() and self.event.is_completed)
+                ):
                     if len(self.backlog) > 0:
-                        command = self.backlog.pop(0)
-                        print(command, flush=True)
-                        if command["action"] == "control":
-                            [x, y, z, xy_speed, z_speed] = command["value"]
-                            print(command, flush=True)
-                            self.event = self.ep_robot.chassis.move(
-                                x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed
-                            )
-                        elif command["action"] == "gimbal":
-                            [pitch, yaw] = command["value"]
-                            print(command, flush=True)
-                            self.event = self.ep_robot.gimbal.moveto(
-                                pitch=pitch, yaw=yaw, pitch_speed=0.0, yaw_speed=50.0
-                            )
-                elif dora_event["id"] == "control":
-                    raw_command = dora_event["value"][0].as_py()
-                    print(raw_command, flush=True)
-                    cmd = Command.parse(raw_command)
+                        self.execute_backlog()
+                    else:
+                        print(f"sending control reply: {self.last_control}", flush=True)
+                        send_output("control_reply", pa.array([self.last_control]))
+            elif dora_event["id"] == "control":
+                raw_command = dora_event["value"][0].as_py()
+                print(raw_command, flush=True)
+                self.last_control = raw_command
+                if "but" in raw_command:
+                    cmd = Command.NOD_NO
+                elif "right" in raw_command:
+                    cmd = Command.RIGHT
+                elif "left" in raw_command:
+                    cmd = Command.LEFT
+                elif "forward" in raw_command:
+                    cmd = Command.FORWARD
+                elif "behind" in raw_command:
+                    cmd = Command.BACKWARD
+                else:
+                    cmd = Command.UNKNOWN
+                if len(self.backlog) == 0:
                    self.backlog += cmd.value
+                    self.execute_backlog()
 
        return DoraStatus.CONTINUE
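
The keyword matching in the new `on_event` ("but", "right", "left", "forward", "behind") could be pulled out into a small pure function so it can be unit-tested without RoboMaster hardware; a sketch of that refactoring, not code from this commit:

```python
from robot import Command  # the enum shown in the diff above; requires the robomaster SDK

def parse_command(raw_command: str) -> Command:
    # Mirrors the keyword checks in Operator.on_event; order matters,
    # e.g. "but" is checked first so hedged replies map to NOD_NO.
    if "but" in raw_command:
        return Command.NOD_NO
    if "right" in raw_command:
        return Command.RIGHT
    if "left" in raw_command:
        return Command.LEFT
    if "forward" in raw_command:
        return Command.FORWARD
    if "behind" in raw_command:
        return Command.BACKWARD
    return Command.UNKNOWN


assert parse_command("go forward slowly") is Command.FORWARD
assert parse_command("I would, but there is a wall") is Command.NOD_NO
```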
tests/test_idefics2.py ADDED
@@ -0,0 +1,110 @@
+import requests
+import torch
+from PIL import Image
+from io import BytesIO
+
+from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
+
+
+MODE = "quantized"
+DEVICE = "cuda"
+PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
+BAD_WORDS_IDS = PROCESSOR.tokenizer(
+    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
+).input_ids
+EOS_WORDS_IDS = PROCESSOR.tokenizer(
+    "<end_of_utterance>", add_special_tokens=False
+).input_ids + [PROCESSOR.tokenizer.eos_token_id]
+
+# Load model
+if MODE == "regular":
+    model = AutoModelForVision2Seq.from_pretrained(
+        "HuggingFaceM4/idefics2-tfrm-compatible",
+        torch_dtype=torch.float16,
+        trust_remote_code=True,
+        _attn_implementation="flash_attention_2",
+        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
+    ).to(DEVICE)
+elif MODE == "quantized":
+    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
+    model = AutoModelForVision2Seq.from_pretrained(
+        quant_path, trust_remote_code=True
+    ).to(DEVICE)
+elif MODE == "fused_quantized":
+    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
+    quantization_config = AwqConfig(
+        bits=4,
+        fuse_max_seq_len=4096,
+        modules_to_fuse={
+            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
+            "mlp": ["gate_proj", "up_proj", "down_proj"],
+            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
+            "use_alibi": False,
+            "num_attention_heads": 32,
+            "num_key_value_heads": 8,
+            "hidden_size": 4096,
+        },
+    )
+    model = AutoModelForVision2Seq.from_pretrained(
+        quant_path,
+        quantization_config=quantization_config,
+        trust_remote_code=True,
+    ).to(DEVICE)
+else:
+    raise ValueError("Unknown mode")
+
+
+def download_image(url):
+    try:
+        # Send a GET request to the URL to download the image
+        response = requests.get(url)
+        # Check if the request was successful (status code 200)
+        if response.status_code == 200:
+            # Open the image using PIL
+            image = Image.open(BytesIO(response.content))
+            # Return the PIL image object
+            return image
+        else:
+            print(f"Failed to download image. Status code: {response.status_code}")
+            return None
+    except Exception as e:
+        print(f"An error occurred: {e}")
+        return None
+
+
+# Create inputs
+image1 = download_image(
+    "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+)
+
+
+def ask_vlm(image, instruction):
+    prompts = [
+        "User:",
+        image,
+        f"{instruction}.<end_of_utterance>\n",
+        "Assistant:",
+    ]
+    inputs = PROCESSOR(prompts)
+    inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
+    generated_ids = model.generate(
+        **inputs,
+        bad_words_ids=BAD_WORDS_IDS,
+        max_new_tokens=100,
+    )
+    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
+    return generated_texts
+
+
+import time
+
+model.eval()
+now = time.time()
+print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
+
+print("resp:", time.time() - now)
+import time
+
+now = time.time()
+
+print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
tests/test_idefix2.py DELETED
@@ -1,154 +0,0 @@
-import os
-import torch
-import requests
-
-from io import BytesIO
-from PIL import Image
-from transformers import AutoModelForCausalLM, AutoProcessor
-
-from transformers.image_utils import (
-    to_numpy_array,
-    PILImageResampling,
-    ChannelDimension,
-)
-from transformers.image_transforms import resize, to_channel_dimension_format
-
-
-API_TOKEN = os.getenv("HF_TOKEN")
-
-DEVICE = torch.device("cuda")
-PROCESSOR = AutoProcessor.from_pretrained(
-    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
-    token=API_TOKEN,
-)
-MODEL = AutoModelForCausalLM.from_pretrained(
-    "HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
-    token=API_TOKEN,
-    trust_remote_code=True,
-    torch_dtype=torch.bfloat16,
-).to(DEVICE)
-image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
-BOS_TOKEN = PROCESSOR.tokenizer.bos_token
-BAD_WORDS_IDS = PROCESSOR.tokenizer(
-    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
-).input_ids
-
-
-def convert_to_rgb(image):
-    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
-    # for transparent images. The call to `alpha_composite` handles this case
-    if image.mode == "RGB":
-        return image
-
-    image_rgba = image.convert("RGBA")
-    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
-    alpha_composite = Image.alpha_composite(background, image_rgba)
-    alpha_composite = alpha_composite.convert("RGB")
-    return alpha_composite
-
-
-# The processor is the same as the Idefics processor except for the BILINEAR interpolation,
-# so this is a hack in order to redefine ONLY the transform method
-def custom_transform(x):
-    x = convert_to_rgb(x)
-    x = to_numpy_array(x)
-
-    height, width = x.shape[:2]
-    aspect_ratio = width / height
-    if width >= height and width > 980:
-        width = 980
-        height = int(width / aspect_ratio)
-    elif height > width and height > 980:
-        height = 980
-        width = int(height * aspect_ratio)
-    width = max(width, 378)
-    height = max(height, 378)
-
-    x = resize(x, (height, width), resample=PILImageResampling.BILINEAR)
-    x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
-    x = PROCESSOR.image_processor.normalize(
-        x,
-        mean=PROCESSOR.image_processor.image_mean,
-        std=PROCESSOR.image_processor.image_std,
-    )
-    x = to_channel_dimension_format(x, ChannelDimension.FIRST)
-    x = torch.tensor(x)
-    return x
-
-
-def download_image(url):
-    try:
-        # Send a GET request to the URL to download the image
-        response = requests.get(url)
-        # Check if the request was successful (status code 200)
-        if response.status_code == 200:
-            # Open the image using PIL
-            image = Image.open(BytesIO(response.content))
-            # Return the PIL image object
-            return image
-        else:
-            print(f"Failed to download image. Status code: {response.status_code}")
-            return None
-    except Exception as e:
-        print(f"An error occurred: {e}")
-        return None
-
-
-# Create text token inputs
-image_seq = "<image>" * image_seq_len
-
-instruction = "What is this?"
-# Create pixel inputs
-image = download_image(
-    "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-)
-
-
-def ask_vlm(instruction, image):
-
-    inputs = PROCESSOR.tokenizer(
-        [
-            f"{BOS_TOKEN}<fake_token_around_image>{image_seq}<fake_token_around_image>{instruction}",
-        ],
-        return_tensors="pt",
-        add_special_tokens=False,
-        padding=True,
-    )
-
-    raw_images = [
-        [image],
-    ]
-    output_images = [
-        [PROCESSOR.image_processor(img, transform=custom_transform) for img in img_list]
-        for img_list in raw_images
-    ]
-    total_batch_size = len(output_images)
-    max_num_images = max([len(img_l) for img_l in output_images])
-    max_height = max([i.size(2) for img_l in output_images for i in img_l])
-    max_width = max([i.size(3) for img_l in output_images for i in img_l])
-    padded_image_tensor = torch.zeros(
-        total_batch_size, max_num_images, 3, max_height, max_width
-    )
-    padded_pixel_attention_masks = torch.zeros(
-        total_batch_size, max_num_images, max_height, max_width, dtype=torch.bool
-    )
-    for batch_idx, img_l in enumerate(output_images):
-        for img_idx, img in enumerate(img_l):
-            im_height, im_width = img.size()[2:]
-            padded_image_tensor[batch_idx, img_idx, :, :im_height, :im_width] = img
-            padded_pixel_attention_masks[batch_idx, img_idx, :im_height, :im_width] = (
-                True
-            )
-
-    inputs["pixel_values"] = padded_image_tensor
-    inputs["pixel_attention_mask"] = padded_pixel_attention_masks
-    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
-
-    generated_ids = MODEL.generate(
-        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
-    )
-    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
-    return generated_texts
-
-
-print(ask_vlm(instruction, image))