haixuantao committed on
Commit 1646f18
1 Parent(s): be75a6c

fix minimized demo

graphs/dataflow_robot_vlm_minimize.yml ADDED
@@ -0,0 +1,34 @@
+ nodes:
+   - id: webcam
+     custom:
+       source: ../operators/opencv_stream.py
+       outputs:
+         - image
+   - id: idefics2
+     operator:
+       python: ../operators/idefics2_op.py
+       inputs:
+         image:
+           source: webcam/image
+           queue_size: 1
+       outputs:
+         - speak
+         - control
+   - id: robot
+     custom:
+       source: /home/peter/miniconda3/envs/robomaster/bin/python
+       args: ../operators/robot_minimize.py
+       inputs:
+         control: idefics2/control
+   - id: parler
+     operator:
+       python: ../operators/parler_op.py
+       inputs:
+         text:
+           source: idefics2/speak
+           queue_size: 1
+   - id: plot_bot
+     operator:
+       python: ../operators/plot.py
+       inputs:
+         image: webcam/image
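Note: the webcam node's source, ../operators/opencv_stream.py, is not part of this commit. A minimal sketch of what that custom node could look like, assuming it must emit the flattened 960x540 BGR frames that idefics2_op.py reshapes downstream (the capture device index and frame rate here are illustrative, not from the source):

```python
# Hypothetical sketch of ../operators/opencv_stream.py -- not in this commit.
import time

import cv2
import pyarrow as pa
from dora import Node

CAMERA_WIDTH = 960  # must match idefics2_op.py
CAMERA_HEIGHT = 540

node = Node()
cap = cv2.VideoCapture(0)  # capture device index is an assumption

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT))
    # Send the frame flattened to 1-D uint8; receivers reshape it back.
    node.send_output("image", pa.array(frame.ravel()))
    time.sleep(1 / 30)
```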
operators/idefics2_op.py ADDED
@@ -0,0 +1,125 @@
+ from dora import DoraStatus
+ import pyarrow as pa
+ from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
+ import torch
+ import time
+ 
+ CAMERA_WIDTH = 960
+ CAMERA_HEIGHT = 540
+ PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
+ BAD_WORDS_IDS = PROCESSOR.tokenizer(
+     ["<image>", "<fake_token_around_image>"], add_special_tokens=False
+ ).input_ids
+ EOS_WORDS_IDS = PROCESSOR.tokenizer(
+     "<end_of_utterance>", add_special_tokens=False
+ ).input_ids + [PROCESSOR.tokenizer.eos_token_id]
+ model = AutoModelForVision2Seq.from_pretrained(
+     "HuggingFaceM4/idefics2-tfrm-compatible-AWQ",
+     quantization_config=AwqConfig(
+         bits=4,
+         fuse_max_seq_len=4096,
+         modules_to_fuse={
+             "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
+             "mlp": ["gate_proj", "up_proj", "down_proj"],
+             "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
+             "use_alibi": False,
+             "num_attention_heads": 32,
+             "num_key_value_heads": 8,
+             "hidden_size": 4096,
+         },
+     ),
+     trust_remote_code=True,
+ ).to("cuda")
+ 
+ 
+ def reset_awq_cache(model):
+     """Reset the AWQ fused-modules cache so the next generation starts fresh."""
+     from awq.modules.fused.attn import QuantAttentionFused
+ 
+     for module in model.modules():
+         if isinstance(module, QuantAttentionFused):
+             module.start_pos = 0
+ 
+ 
+ def ask_vlm(image, instruction):
+     prompts = [
+         "User:",
+         image,
+         f"{instruction}.<end_of_utterance>\n",
+         "Assistant:",
+     ]
+     inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()}
+ 
+     generated_ids = model.generate(
+         **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=25, repetition_penalty=1.2
+     )
+     generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
+     reset_awq_cache(model)
+     return generated_texts[0].split("\nAssistant: ")[1]
+ 
+ 
+ class Operator:
+     """Two-state demo loop: read a note from a person, drive to the kitchen,
+     then wait for raised hands before returning."""
+ 
+     def __init__(self):
+         self.state = "person"
+         self.last_output = False  # speak at most once while waiting in a state
+ 
+     def on_event(
+         self,
+         dora_event,
+         send_output,
+     ) -> DoraStatus:
+         if dora_event["type"] == "INPUT":
+             image = (
+                 dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
+             )
+ 
+             if self.state == "person":
+                 output = ask_vlm(image, "Can you read the note?").lower()
+                 print(output, flush=True)
+                 if "coffee" in output or "tea" in output or "water" in output:
+                     # Control vector: [x, y, z, xy_speed, z_speed, pitch, yaw],
+                     # consumed by operators/robot_minimize.py.
+                     send_output(
+                         "control",
+                         pa.array([-3.0, 0.0, 0.0, 0.8, 0.0, 10.0, 180.0]),
+                     )
+                     send_output(
+                         "speak",
+                         pa.array([output + ". Going to the kitchen."]),
+                     )
+                     time.sleep(10)
+                     self.state = "coffee"
+                     self.last_output = False
+                 elif not self.last_output:
+                     self.last_output = True
+                     send_output(
+                         "speak",
+                         pa.array([output]),
+                     )
+                     time.sleep(4)
+ 
+             elif self.state == "coffee":
+                 output = ask_vlm(image, "Is there a person with their hands up?").lower()
+                 print(output, flush=True)
+                 if "yes" in output:
+                     send_output(
+                         "speak",
+                         pa.array([output + ". Going to the office."]),
+                     )
+                     send_output(
+                         "control",
+                         pa.array([2.0, 0.0, 0.0, 0.8, 0.0, 10.0, 0.0]),
+                     )
+                     time.sleep(10)
+                     self.state = "person"
+                     self.last_output = False
+                 elif not self.last_output:
+                     self.last_output = True
+                     send_output(
+                         "speak",
+                         pa.array([output]),
+                     )
+                     time.sleep(4)
+ 
+         return DoraStatus.CONTINUE
operators/parler_op.py ADDED
@@ -0,0 +1,46 @@
+ from parler_tts import ParlerTTSForConditionalGeneration
+ from transformers import AutoTokenizer
+ import soundfile as sf
+ import pygame
+ from dora import DoraStatus
+ 
+ model = ParlerTTSForConditionalGeneration.from_pretrained(
+     "parler-tts/parler_tts_mini_v0.1"
+ ).to("cuda:0")
+ tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler_tts_mini_v0.1")
+ 
+ pygame.mixer.init()
+ 
+ # Fixed voice description that conditions every generation.
+ input_ids = tokenizer(
+     "A female speaker with a slightly low-pitched voice delivers her words quite expressively, in a very confined sounding environment with clear audio quality. She speaks very fast.",
+     return_tensors="pt",
+ ).input_ids.to("cuda:0")
+ 
+ 
+ class Operator:
+     def on_event(
+         self,
+         dora_event,
+         send_output,
+     ) -> DoraStatus:
+         if dora_event["type"] == "INPUT":
+             generation = model.generate(
+                 max_new_tokens=300,
+                 input_ids=input_ids,
+                 prompt_input_ids=tokenizer(
+                     dora_event["value"][0].as_py(), return_tensors="pt"
+                 ).input_ids.to("cuda:0"),
+             )
+             print(dora_event["value"][0].as_py(), flush=True)
+             sf.write(
+                 "parler_tts_out.wav",
+                 generation.cpu().numpy().squeeze(),
+                 model.config.sampling_rate,
+             )
+ 
+             # Let any previous utterance finish before starting the next one.
+             while pygame.mixer.get_busy():
+                 pygame.time.wait(50)
+             pygame.mixer.music.load("parler_tts_out.wav")
+             pygame.mixer.music.play()
+ 
+         return DoraStatus.CONTINUE
operators/robot_minimize.py ADDED
@@ -0,0 +1,30 @@
+ from robomaster import robot
+ from time import sleep
+ 
+ from dora import Node
+ 
+ 
+ def wait(event):
+     # Give an in-flight robomaster action a moment to settle before the next one.
+     if event is not None and not (event._event.isSet() and event.is_completed):
+         sleep(1)
+ 
+ 
+ ep_robot = robot.Robot()
+ assert ep_robot.initialize(conn_type="ap"), "Could not initialize ep_robot"
+ assert ep_robot.camera.start_video_stream(display=False), "Could not start video stream"
+ ep_robot.gimbal.recenter().wait_for_completed()
+ 
+ node = Node()
+ 
+ for dora_event in node:
+     if dora_event["type"] == "INPUT":
+         # Control vector layout: [x, y, z, xy_speed, z_speed, pitch, yaw].
+         [x, y, z, xy_speed, z_speed, pitch, yaw] = dora_event["value"].to_numpy()
+         print(dora_event["value"].to_numpy())
+         event = ep_robot.gimbal.moveto(
+             pitch=pitch, yaw=yaw, pitch_speed=50.0, yaw_speed=50.0
+         )
+         wait(event)
+         sleep(4)
+         event = ep_robot.chassis.move(x=x, y=y, z=z, xy_speed=xy_speed, z_speed=z_speed)
+         wait(event)
+         sleep(6)
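For reference, the first control command emitted by idefics2_op.py decodes against the unpacking above as follows (chassis units are presumably meters and m/s, and gimbal angles degrees, per the RoboMaster SDK):

```python
# The "going to the kitchen" command from idefics2_op.py, annotated.
# Index:    0     1    2    3         4        5      6
# Meaning:  x     y    z    xy_speed  z_speed  pitch  yaw
control = [-3.0, 0.0, 0.0, 0.8, 0.0, 10.0, 180.0]
# -> gimbal.moveto(pitch=10.0, yaw=180.0): swing the gimbal to face backwards
# -> chassis.move(x=-3.0, y=0.0, z=0.0, xy_speed=0.8): drive 3 m in reverse
```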