haixuantao committed
Commit c94c99e • 1 parent: b7adc94

Modify data organization

Files changed:
- README.md (+3, -3)
- graphs/{mistral_output_file.arrow → 10635604-cfd4-4134-9477-c68ca69e3930/mistral_output_file.arrow} (+2, -2)
- graphs/{raw_file.arrow → 10635604-cfd4-4134-9477-c68ca69e3930/raw_file.arrow} (+2, -2)
- graphs/{saved_file.arrow → 10635604-cfd4-4134-9477-c68ca69e3930/saved_file.arrow} (+2, -2)
- graphs/d0d8b39d-58ae-4a5a-b5e8-d2b89af125ed/mistral_output_file.arrow (+3, -0)
- graphs/d0d8b39d-58ae-4a5a-b5e8-d2b89af125ed/raw_file.arrow (+3, -0)
- graphs/d0d8b39d-58ae-4a5a-b5e8-d2b89af125ed/saved_file.arrow (+3, -0)
- graphs/dataflow.yml (+4, -2)
- operators/chatgpt_op.py (+12, -5)
- operators/file_saver_op.py (+7, -1)
- operators/keybinding_op.py (+4, -1)
- operators/mistral_op.py (+8, -1)
- operators/planning_op.py (+2, -10)
README.md CHANGED
@@ -3,15 +3,15 @@ configs:
 - config_name: mistral_output_file
   data_files:
   - split: train
-    path: graphs
+    path: graphs/*/mistral_output_file.arrow
 - config_name: raw_file
   data_files:
   - split: train
-    path: graphs
+    path: graphs/*/raw_file.arrow
 - config_name: saved_file
   data_files:
   - split: train
-    path: graphs
+    path: graphs/*/saved_file.arrow
 license: apache-2.0
 language:
 - en
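The three configs now glob per-run subdirectories instead of the whole graphs folder. As a quick illustration (not part of this commit), they could be loaded with the datasets library roughly as follows; "owner/dataset" is a placeholder for this repo's id:

# Hypothetical usage sketch for the updated README configs.
# "owner/dataset" is a placeholder; replace it with the actual dataset id.
from datasets import load_dataset

raw = load_dataset("owner/dataset", "raw_file", split="train")
mistral = load_dataset("owner/dataset", "mistral_output_file", split="train")
saved = load_dataset("owner/dataset", "saved_file", split="train")

# Each config matches graphs/*/<name>.arrow, so every run directory
# (e.g. 10635604-... and d0d8b39d-...) contributes rows to the same train split.
print(raw, mistral, saved)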
graphs/{mistral_output_file.arrow → 10635604-cfd4-4134-9477-c68ca69e3930/mistral_output_file.arrow} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:42a0eb9953098fa6a72e28382dcecd62ca4a628f34c5686d4e2c6ce825000771
+size 10672
graphs/{raw_file.arrow → 10635604-cfd4-4134-9477-c68ca69e3930/raw_file.arrow} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:8566dc6dd7ed29321a6d2dd665c435fad5929fd840d5aacab17e38ecec665126
+size 9384
graphs/{saved_file.arrow → 10635604-cfd4-4134-9477-c68ca69e3930/saved_file.arrow} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b7d4b3ae423e99fb56053a91b51f94e46232f7e392601764d1929fe2386ec952
+size 18144
graphs/d0d8b39d-58ae-4a5a-b5e8-d2b89af125ed/mistral_output_file.arrow ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40d5f7b5ae3c7b42f14b903aa5cb522aa4af7217cc3e80723ab5919131dc29ba
+size 15696
graphs/d0d8b39d-58ae-4a5a-b5e8-d2b89af125ed/raw_file.arrow ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d674448240e5d59e3b06cbf8a8ce8f66e864ea3e98e63aaf3dea479fc71a6c05
+size 13984
graphs/d0d8b39d-58ae-4a5a-b5e8-d2b89af125ed/saved_file.arrow ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd880b9252c382904bcd9f1746b2ade08b29a570a62268d645c2540470112731
+size 22832
graphs/dataflow.yml CHANGED
@@ -88,8 +88,9 @@ nodes:
       source: ../operators/keybinding_op.py
       outputs:
         - mic_on
-        -
+        - revert
         - failed
+        - error
 
   - id: microphone
     operator:
@@ -140,7 +141,8 @@ nodes:
       inputs:
         chatgpt_output_file: chatgpt/output_file
         mistral_output_file: mistral/output_file
-
+        error: keyboard/error
+        revert: keyboard/revert
         failed: keyboard/failed
       outputs:
         - saved_file
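For context, the two new keyboard outputs (revert, error) are wired into the file saver's inputs above. A minimal sketch of an operator subscribed to these signals, assuming the dora Python operator API used elsewhere in this repo (illustrative only, not code from this commit):

from dora import DoraStatus


class Operator:
    # Illustrative consumer of keyboard/revert, keyboard/error and keyboard/failed.
    def on_event(self, dora_event, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT" and dora_event["id"] in ("revert", "error", "failed"):
            # The keyboard node sends an empty pa.array([]) as payload,
            # so only the input id matters here.
            print(f"keyboard signal: {dora_event['id']}", flush=True)
        return DoraStatus.CONTINUE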
operators/chatgpt_op.py CHANGED
@@ -5,7 +5,7 @@ from openai import OpenAI
 def ask_gpt(prompt, raw):
     client = OpenAI()
 
-
+    prompt = (
         "this is a python code :\n"
         + "```python\n"
         + raw
@@ -18,12 +18,12 @@ def ask_gpt(prompt, raw):
         model="gpt-4-1106-preview",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content":
+            {"role": "user", "content": prompt},
         ],
     )
 
     answer = response.choices[0].message.content
-    return answer
+    return prompt, answer
 
 
 def extract_command(gptCommand):
@@ -76,14 +76,21 @@ class Operator:
             with open(input["path"], "r", encoding="utf8") as f:
                 raw = f.read()
             print("--- Asking chatGPT ", flush=True)
-            response = ask_gpt(input["query"], raw)
+            prompt, response = ask_gpt(input["query"], raw)
             blocks = extract_command(response)
             print(response, flush=True)
             print(blocks[0], input["path"], flush=True)
             send_output(
                 "output_file",
                 pa.array(
-                    [
+                    [
+                        {
+                            "raw": blocks[0],
+                            "path": input["path"],
+                            "response": response,
+                            "prompt": prompt,
+                        }
+                    ]
                 ),
                 dora_event["metadata"],
             )
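The output_file message is now a one-element struct array carrying raw, path, response and prompt. A small sketch (with made-up values) of how a downstream node can unpack such a payload with pyarrow:

import pyarrow as pa

# pa.array([{...}]) builds a one-element StructArray; field access goes
# through the StructScalar at index 0. The values below are placeholders.
value = pa.array(
    [
        {
            "raw": "print('patched code')",
            "path": "example.py",
            "response": "full model response",
            "prompt": "full prompt",
        }
    ]
)

record = value[0]
print(record["path"].as_py(), record["raw"].as_py())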
operators/file_saver_op.py CHANGED
@@ -41,7 +41,13 @@ class Operator:
             ),
             dora_event["metadata"],
         )
-        if dora_event["type"] == "INPUT" and dora_event["id"] in [
+        if dora_event["type"] == "INPUT" and dora_event["id"] in [
+            "revert",
+            "error",
+            "failed",
+        ]:
+            if self.last_path == "":
+                return DoraStatus.CONTINUE
 
             with open(self.last_path, "w") as file:
                 file.write(self.last_file)
operators/keybinding_op.py CHANGED
@@ -13,10 +13,13 @@ def on_key_release(key):
         node.send_output("mic_on", pa.array([]))
     elif key.char == "2":
         print("Key '2' pressed up")
-        node.send_output("
+        node.send_output("revert", pa.array([]))
     elif key.char == "3":
         print("Key '3' pressed up")
         node.send_output("failed", pa.array([]))
+    elif key.char == "4":
+        print("Key '4' pressed up")
+        node.send_output("error", pa.array([]))
     elif key.char == "0":
         exit()
 
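The key.char checks are consistent with a pynput keyboard listener; the listener setup itself is outside this diff. A hedged sketch of how such a release callback is typically wired up (an assumption, not code from this repo):

from pynput import keyboard


def on_release(key):
    try:
        # Mirrors the handler above: "2" -> revert, "3" -> failed, "4" -> error.
        print(f"released: {key.char}")
    except AttributeError:
        pass  # special keys (shift, ctrl, ...) have no .char attribute


with keyboard.Listener(on_release=on_release) as listener:
    listener.join()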
operators/mistral_op.py CHANGED
@@ -93,7 +93,14 @@ class Operator:
             send_output(
                 "output_file",
                 pa.array(
-                    [
+                    [
+                        {
+                            "raw": source_code,
+                            "path": input["path"],
+                            "response": output,
+                            "prompt": prompt,
+                        }
+                    ]
                 ),
                 dora_event["metadata"],
             )
operators/planning_op.py CHANGED
@@ -11,7 +11,7 @@ PITCH = 0
 # yaw-axis angle in degrees(int): [-55, 55]
 ROTATION = 0
 # RGB LED(int) [0, 255]
-RGB = [
+RGB = [0, 0, 0]
 BRIGHTNESS = [0]  # [0, 128]
 
 GOAL_OBJECTIVES = [X, Y, 0]
@@ -64,15 +64,7 @@ class Operator:
                 bboxs, (-1, 6)
             )  # [ min_x, min_y, max_x, max_y, confidence, label ]
             if len(self.bboxs) > 0:
-
-                target_bbox = max(self.bboxs, key=lambda x: x[4])
-                bbox_center_x = (target_bbox[0] + target_bbox[2]) / 2.0
-                ROTATION = np.clip(
-                    int((bbox_center_x - CAMERA_WIDTH / 2) * 55 / (CAMERA_WIDTH / 2)),
-                    -55,
-                    55,
-                )
-                self.objects_distances = estimated_distance(target_bbox[3])
+                self.objects_distances = estimated_distance(self.bboxs)
 
         elif dora_event["id"] == "position":
             [x, y, z, gimbal_pitch, gimbal_yaw] = dora_event["value"].to_numpy()