|
from dora import DoraStatus |
|
import os |
|
import pyarrow as pa |
|
|
|
|
|
import requests |
|
|
|
import os |
|
|
|
import base64 |
|
import requests |
|
from io import BytesIO |
|
import numpy as np |
|
import cv2 |
|
|
|
|
|
def encode_numpy_image(np_image):
    """Encode a numpy image (OpenCV BGR order assumed) as a base64 PNG string.

    Args:
        np_image: HxWx3 uint8 image array.

    Returns:
        The PNG-compressed image as a base64-encoded UTF-8 string, suitable
        for embedding in a ``data:`` URL.

    Raises:
        ValueError: if PNG encoding fails.
    """
    # Bug fix: cv2.resize returns a new array; the original call discarded
    # the result, so the full-resolution frame was encoded instead of 512x512.
    np_image = cv2.resize(np_image, (512, 512))

    # Bug fix: check the success flag instead of silently ignoring it.
    success, buffer = cv2.imencode(".png", np_image)
    if not success:
        raise ValueError("cv2.imencode failed to encode the image as PNG")

    base64_encoded_image = base64.b64encode(buffer.tobytes()).decode("utf-8")
    return base64_encoded_image
|
|
|
|
|
# Expected dimensions (pixels) of frames arriving on the "image" input;
# on_event reshapes the flat pixel buffer to (CAMERA_HEIGHT, CAMERA_WIDTH, 3).
CAMERA_WIDTH = 640

CAMERA_HEIGHT = 480


# OpenAI API key read from the environment at import time; None if unset
# (the API request will then fail with an auth error rather than at import).
API_KEY = os.getenv("OPENAI_API_KEY")
|
|
|
|
|
# Prompt template sent to the vision model. {user_message} is the accumulated
# user instructions, {actions} is the recent action history. The grammar of
# the prompt is part of the program's behavior, so the fix below ("You're ...
# where" -> "Your ... were") is a bug fix, not a cosmetic edit.
MESSAGE_SENDER_TEMPLATE = """
You control a robot. Don't get too close to objects.

{user_message}

Respond with only one of the following actions:

- FORWARD

- BACKWARD

- TURN_RIGHT

- TURN_LEFT

- NOD_YES

- NOD_NO

- STOP

Your last 5 actions were:

{actions}

"""
|
|
|
|
|
import time |
|
|
|
|
|
def understand_image(image, user_message, actions):
    """Ask the OpenAI vision model for the robot's next action.

    Args:
        image: HxWx3 numpy frame passed to encode_numpy_image.
        user_message: list of accumulated user instruction strings.
        actions: list of previously returned action strings (history).

    Returns:
        The model's reply text, expected to be one of the actions listed in
        MESSAGE_SENDER_TEMPLATE (e.g. "FORWARD", "STOP").

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not answer within the timeout.
    """
    base64_image = encode_numpy_image(image)

    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {API_KEY}"}

    now = time.time()

    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": MESSAGE_SENDER_TEMPLATE.format(
                            user_message="\n".join(user_message),
                            # Bug fix: the template promises the *last* 5
                            # actions; actions[:-5] dropped exactly those and
                            # sent everything else instead.
                            actions="\n".join(actions[-5:]),
                        ),
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            # Bug fix: encode_numpy_image emits PNG, so the
                            # data URL must declare image/png (was image/jpeg).
                            "url": f"data:image/png;base64,{base64_image}",
                            "detail": "low",
                        },
                    },
                ],
            }
        ],
        "max_tokens": 50,
    }

    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
        timeout=30,  # don't hang the operator forever on a network stall
    )
    # Surface API errors explicitly instead of failing later with a KeyError
    # on the error-shaped JSON body.
    response.raise_for_status()

    print("resp:", time.time() - now)

    return response.json()["choices"][0]["message"]["content"]
|
|
|
|
|
class Operator:
    """Dora operator: turns camera frames plus user instructions into robot actions."""

    def __init__(self):
        # Model replies so far, newest last (fed back as action history).
        self.actions = []
        # Accumulated user instruction strings.
        self.instruction = []

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        # Guard clause: only INPUT events carry data we react to.
        if dora_event["type"] != "INPUT":
            return DoraStatus.CONTINUE

        event_id = dora_event["id"]

        if event_id == "image":
            # Rebuild the HxWx3 frame from the flat pixel buffer; copy so the
            # array is writable/owned before handing it to OpenCV.
            frame = (
                dora_event["value"]
                .to_numpy()
                .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
                .copy()
            )
            action = understand_image(frame, self.instruction, self.actions)
            self.actions.append(action)
            print("response: ", action, flush=True)

            send_output(
                "assistant_message",
                pa.array([f"{action}"]),
                dora_event["metadata"],
            )
        elif event_id == "instruction":
            # Instructions arrive one string at a time; keep them all.
            self.instruction.append(dora_event["value"][0].as_py())
            print("instructions: ", self.instruction, flush=True)

        return DoraStatus.CONTINUE
|
|
|
|
|
if __name__ == "__main__":
    # Standalone smoke test: push one on-disk frame through the operator.
    op = Operator()

    current_file_path = __file__

    current_directory = os.path.dirname(current_file_path)

    path = current_directory + "/test_image.jpg"

    # Bug fix: the previous harness sent an event with id "code_modifier",
    # which on_event never handles, so nothing was exercised (and the
    # test_image.jpg path it computed went unused). Load the test image and
    # send it through the "image" input that on_event actually dispatches on.
    frame = cv2.imread(path)
    if frame is None:
        raise FileNotFoundError(f"could not read test image at {path}")
    frame = cv2.resize(frame, (CAMERA_WIDTH, CAMERA_HEIGHT))

    op.on_event(
        {
            "type": "INPUT",
            "id": "image",
            # on_event expects a flat buffer it can reshape to HxWx3.
            "value": pa.array(frame.ravel()),
            "metadata": [],
        },
        print,  # stand-in for dora's send_output in this offline run
    )
|
|