haixuantao committed · Commit 810a70f · Parent(s): 7195c95

Adding more info on the README

Files changed:
- README.md +23 -0
- graphs/dataflow_vlm_basic.yml +12 -4
- operators/webcam.py +76 -0
- operators/whisper_op copy.py +25 -0
README.md CHANGED

@@ -15,6 +15,14 @@ pip install -r requirements.txt
 pip install -e <PATH TO LATEST TRANSFORMERS VERSION>
 ```
 
+## Running the demo
+
+Make sure to connect using the Robomaster's wifi hotspot, which is the most stable one.
+
+The default password for the hotspot is: 12341234
+
+You might need a second wifi card if you want to run the demo with internet access.
+
 ```bash
 export HF_TOKEN=<TOKEN>
 conda activate idefics2
@@ -23,3 +31,18 @@ conda activate idefics2
 dora up
 dora start graphs/dataflow_robot_vlm.yml --attach --hot-reload
 ```
+
+The current way to interact is by typing a question to the VLM.
+
+## Running the demo without robot
+
+```bash
+export HF_TOKEN=<TOKEN>
+conda activate idefics2
+# This requires dora==0.3.3, update with:
+# cargo install dora-cli
+dora up
+dora start graphs/dataflow_vlm_basic.yml --attach --hot-reload
+```
+
+The current way to interact is by typing a question to the VLM.
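Since the basic graph requires dora==0.3.3 (per the comment in the block above), the CLI can be pinned to that exact release when installing; a minimal sketch using cargo's standard flags:

```bash
# Pin dora-cli to the release the basic graph expects
cargo install dora-cli --version 0.3.3 --locked
```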
graphs/dataflow_vlm_basic.yml CHANGED

@@ -5,17 +5,18 @@ nodes:
       python: ../operators/plot.py
       inputs:
         image: webcam/image
-
-
+        keyboard_buffer: keyboard/buffer
+        user_message: keyboard/submitted
         assistant_message: vlm/assistant_message
 
   - id: vlm
     operator:
-      python: ../operators/
+      python: ../operators/idefics2_op.py
       inputs:
         image:
           source: webcam/image
           queue_size: 1
+        instruction: keyboard/submitted
       outputs:
         - assistant_message
 
@@ -23,6 +24,13 @@ nodes:
     operator:
       python: ../operators/webcam.py
      inputs:
-        tick: dora/timer/millis/
+        tick: dora/timer/millis/50
       outputs:
         - image
+
+  - id: keyboard
+    custom:
+      source: ../operators/keyboard_op.py
+      outputs:
+        - buffer
+        - submitted
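The graph above wires a new `keyboard` custom node from `../operators/keyboard_op.py`, which is not included in this commit. A minimal sketch of what such a node could look like, assuming pynput for key capture and the dora `Node` custom-node API; the output names `buffer` and `submitted` come from the graph, everything else is an assumption:

```python
# Hypothetical keyboard_op.py sketch -- not the file the graph actually references.
import pyarrow as pa
from dora import Node
from pynput import keyboard

node = Node()
buffer = ""


def on_press(key):
    global buffer
    if key == keyboard.Key.enter:
        # Emit the finished line for vlm/instruction and plot/user_message.
        node.send_output("submitted", pa.array([buffer]))
        buffer = ""
    elif key == keyboard.Key.backspace:
        buffer = buffer[:-1]
    elif hasattr(key, "char") and key.char is not None:
        buffer += key.char
    # Stream the partial line so plot.py can render it live.
    node.send_output("buffer", pa.array([buffer]))


with keyboard.Listener(on_press=on_press) as listener:
    listener.join()
```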
operators/webcam.py ADDED

@@ -0,0 +1,76 @@
+import os
+import time
+
+import cv2
+import numpy as np
+import pyarrow as pa
+
+from dora import DoraStatus
+
+CAMERA_WIDTH = 960
+CAMERA_HEIGHT = 540
+
+CAMERA_INDEX = int(os.getenv("CAMERA_INDEX", 0))
+CI = os.environ.get("CI")
+
+font = cv2.FONT_HERSHEY_SIMPLEX
+
+
+class Operator:
+    """
+    Sending image from webcam to the dataflow
+    """
+
+    def __init__(self):
+        self.video_capture = cv2.VideoCapture(CAMERA_INDEX)
+        self.start_time = time.time()
+        self.video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
+        self.video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
+        self.failure_count = 0
+
+    def on_event(
+        self,
+        dora_event: dict,
+        send_output,
+    ) -> DoraStatus:
+        event_type = dora_event["type"]
+        if event_type == "INPUT":
+            ret, frame = self.video_capture.read()
+            if ret:
+                frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT))
+                self.failure_count = 0
+            ## Push an error image in case the camera is not available.
+            else:
+                if self.failure_count > 10:
+                    frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
+                    cv2.putText(
+                        frame,
+                        "No Webcam was found at index %d" % CAMERA_INDEX,
+                        (30, 30),
+                        font,
+                        0.75,
+                        (255, 255, 255),
+                        2,
+                        1,
+                    )
+                else:
+                    self.failure_count += 1
+                    return DoraStatus.CONTINUE
+
+            send_output(
+                "image",
+                pa.array(frame.ravel()),
+                dora_event["metadata"],
+            )
+        elif event_type == "STOP":
+            print("received stop")
+        else:
+            print("received unexpected event:", event_type)
+
+        if time.time() - self.start_time < 20 or CI != "true":
+            return DoraStatus.CONTINUE
+        else:
+            return DoraStatus.STOP
+
+    def __del__(self):
+        self.video_capture.release()
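webcam.py ships each frame flattened via `pa.array(frame.ravel())`, so consumers need the resolution to rebuild it. A minimal sketch of the inverse reshape a downstream operator (plot.py, idefics2_op.py) would perform, assuming the same 960x540 BGR layout; the stand-in array here replaces a real `dora_event["value"]`:

```python
import numpy as np
import pyarrow as pa

CAMERA_WIDTH, CAMERA_HEIGHT = 960, 540

# Stand-in for dora_event["value"]: a flattened uint8 BGR frame.
flat = pa.array(np.zeros(CAMERA_HEIGHT * CAMERA_WIDTH * 3, dtype=np.uint8))

# Undo frame.ravel(): back to an OpenCV-style (height, width, channels) image.
frame = flat.to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
assert frame.shape == (540, 960, 3) and frame.dtype == np.uint8
```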
operators/whisper_op copy.py ADDED

@@ -0,0 +1,25 @@
+import pyarrow as pa
+import whisper
+
+from dora import DoraStatus
+
+
+model = whisper.load_model("base")
+
+
+class Operator:
+    """
+    Transforming Speech to Text using OpenAI Whisper model
+    """
+
+    def on_event(
+        self,
+        dora_event,
+        send_output,
+    ) -> DoraStatus:
+        if dora_event["type"] == "INPUT":
+            audio = dora_event["value"].to_numpy()
+            audio = whisper.pad_or_trim(audio)
+            result = model.transcribe(audio, language="en")
+            send_output("text", pa.array([result["text"]]), dora_event["metadata"])
+        return DoraStatus.CONTINUE
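Nothing in this commit produces the audio input this operator consumes; note that `whisper.pad_or_trim` and `model.transcribe` expect mono float32 samples at 16 kHz. A minimal sketch of a microphone node that could feed it, assuming sounddevice for capture; the tick wiring, clip length, and `audio` output name are assumptions, not part of the repo:

```python
# Hypothetical microphone node sketch -- not included in this commit.
import pyarrow as pa
import sounddevice as sd
from dora import Node

SAMPLE_RATE = 16000  # Whisper models operate on 16 kHz mono audio
CLIP_SECONDS = 5

node = Node()
for event in node:
    if event["type"] == "INPUT":
        # Record a short float32 clip in the format whisper expects.
        audio = sd.rec(
            CLIP_SECONDS * SAMPLE_RATE,
            samplerate=SAMPLE_RATE,
            channels=1,
            dtype="float32",
        )
        sd.wait()
        node.send_output("audio", pa.array(audio.ravel()), event["metadata"])
```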