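"""Dora operator that turns incoming chat messages into robot commands.

A prompt asks an LLM (a local DeepSeek-Coder GPTQ model, or optionally the
OpenAI API) for a JSON list of {topic, data} pairs, which are then forwarded
on the matching dora output topics.
"""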
from dora import DoraStatus
import pylcs
import os
import base64
import json
import re
import time
import pyarrow as pa
import torch
import requests
from io import BytesIO
from PIL import Image
from openai import OpenAI
from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer
from transformers.image_utils import (
    to_numpy_array,
    PILImageResampling,
    ChannelDimension,
)
from transformers.image_transforms import resize, to_channel_dimension_format
API_TOKEN = os.getenv("HF_TOKEN")
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
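# Vision-language model checkpoint and its processor.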
PROCESSOR = AutoProcessor.from_pretrained(
"HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
token=API_TOKEN,
)
MODEL = AutoModelForCausalLM.from_pretrained(
"HuggingFaceM4/tr_272_bis_opt_step_15000_merge",
token=API_TOKEN,
trust_remote_code=True,
torch_dtype=torch.bfloat16,
).to(DEVICE)
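# Generation artifacts for the vision model: resampler latent count, BOS
# token, and the image placeholder tokens that must not be generated as text.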
image_seq_len = MODEL.config.perceiver_config.resampler_n_latents
BOS_TOKEN = PROCESSOR.tokenizer.bos_token
BAD_WORDS_IDS = PROCESSOR.tokenizer(
["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
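# When True, prompts go to the OpenAI API instead of the local GPTQ model.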
CHATGPT = True
MODEL_NAME_OR_PATH = "TheBloke/deepseek-coder-6.7B-instruct-GPTQ"
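# Prompt template asking the LLM for movement commands as a JSON list.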
MESSAGE_SENDER_TEMPLATE = """
### Instruction
You're a JSON expert. Format your response as JSON with a topic and a data field inside a ```json block. No explanation needed. No code needed.
The data field must be one of:
- forward
- backward
- left
- right
The response should look like this:
```json
[
    {{ "topic": "control", "data": "forward" }}
]
```
{user_message}
### Response:
"""
model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME_OR_PATH,
device_map="auto",
trust_remote_code=True,
revision="main",
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
def extract_json_code_blocks(text):
"""
Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier.
Parameters:
- text: A string that may contain one or more json code blocks.
Returns:
- A list of strings, where each string is a block of json code extracted from the text.
"""
pattern = r"```json\n(.*?)\n```"
matches = re.findall(pattern, text, re.DOTALL)
if len(matches) == 0:
pattern = r"```json\n(.*?)(?:\n```|$)"
matches = re.findall(pattern, text, re.DOTALL)
if len(matches) == 0:
return [text]
return matches
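# Example: extract_json_code_blocks('```json\n[{"topic": "control"}]\n```')
# returns ['[{"topic": "control"}]']; the raw text is returned when no
# fenced block is found.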
# Function to encode the image
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def understand_image(image_path):
# Getting the base64 string
base64_image = encode_image(image_path)
    # The OpenAI endpoint authenticates with an OpenAI key, not the HF token.
    api_key = os.getenv("OPENAI_API_KEY")
    headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
payload = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "What’s in this image? Describe it in a short sentence",
},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
},
],
}
],
"max_tokens": 300,
}
    response = requests.post(
        "https://api.openai.com/v1/chat/completions", headers=headers, json=payload
    )
    return response.json()["choices"][0]["message"]["content"]
class Operator:
def on_event(
self,
dora_event,
send_output,
) -> DoraStatus:
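        """Turn `message_sender` text into LLM-generated control outputs."""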
if dora_event["type"] == "INPUT" and dora_event["id"] == "message_sender":
user_message = dora_event["value"][0].as_py()
            prompt = MESSAGE_SENDER_TEMPLATE.format(user_message=user_message)
            # Route to the OpenAI API or the local model depending on CHATGPT.
            output = self.ask_chatgpt(prompt) if CHATGPT else self.ask_llm(prompt)
outputs = extract_json_code_blocks(output)[0]
print("response: ", output, flush=True)
try:
outputs = json.loads(outputs)
if not isinstance(outputs, list):
outputs = [outputs]
                for output in outputs:
                    # If data is not a list, wrap it so it can be sent as an array.
                    if not isinstance(output["data"], list):
                        output["data"] = [output["data"]]
                    # "control" is the topic the prompt template asks for.
                    if output["topic"] in ["control", "led", "blaster"]:
send_output(
output["topic"],
pa.array(output["data"]),
dora_event["metadata"],
)
send_output(
"assistant_message",
pa.array([f"sent: {output}"]),
dora_event["metadata"],
)
else:
send_output(
"assistant_message",
pa.array(
[f"Could not send as topic was not available: {output}"]
),
dora_event["metadata"],
)
            except (json.JSONDecodeError, KeyError, TypeError):
send_output(
"assistant_message",
pa.array([f"Could not parse json: {outputs}"]),
dora_event["metadata"],
)
return DoraStatus.CONTINUE
def ask_llm(self, prompt):
# Generate output
# prompt = PROMPT_TEMPLATE.format(system_message=system_message, prompt=prompt))
        inputs = tokenizer(prompt, return_tensors="pt")
        input_ids = inputs.input_ids.to(model.device)
        # Keep the attention mask on the same device as the input ids.
        attention_mask = inputs.attention_mask.to(model.device)
output = model.generate(
inputs=input_ids,
temperature=0.7,
do_sample=True,
top_p=0.95,
top_k=40,
max_new_tokens=512,
attention_mask=attention_mask,
eos_token_id=tokenizer.eos_token_id,
)
        # Decode and strip the prompt prefix so only the completion is returned.
        return tokenizer.decode(output[0], skip_special_tokens=True)[len(prompt):]
    def ask_chatgpt(self, prompt):
        # The OpenAI client reads OPENAI_API_KEY from the environment.
        client = OpenAI()
print("---asking chatgpt: ", prompt, flush=True)
response = client.chat.completions.create(
model="gpt-4-turbo-preview",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
answer = response.choices[0].message.content
print("Done", flush=True)
return answer
if __name__ == "__main__":
    op = Operator()
    # Smoke test: this operator only reacts to "message_sender" inputs, so
    # feed it a sample instruction and print what it would send.
    op.on_event(
        {
            "type": "INPUT",
            "id": "message_sender",
            "value": pa.array(["go forward"]),
            "metadata": {},
        },
        print,
    )