# dora-idefics2/operators/idefics2_op_demo.py
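"""Dora operator demo: answer natural-language questions about the most
recent camera frame with the Idefics2 vision-language model (4-bit AWQ).

The node buffers the latest `image` input; each `text` input triggers one
model query, and the answer is emitted on the `speak` output. Requires a
CUDA GPU and the AutoAWQ runtime for the fused quantized modules.
"""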
from dora import DoraStatus
import pyarrow as pa
from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
import torch
import gc
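
# Expected camera resolution; incoming frames arrive as flat buffers and are
# reshaped to (CAMERA_HEIGHT, CAMERA_WIDTH, 3) before being handed to the model.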
CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720
PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")

# Token ids the model must never emit: the image placeholder tokens.
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
# End-of-utterance plus EOS ids, kept for use as a stopping criterion
# (not passed to generate() below).
EOS_WORDS_IDS = PROCESSOR.tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]

# Load the 4-bit AWQ-quantized checkpoint with fused attention, MLP, and
# layernorm modules; fusion caps the cache at fuse_max_seq_len tokens.
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-tfrm-compatible-AWQ",
    quantization_config=AwqConfig(
        bits=4,
        fuse_max_seq_len=4096,
        modules_to_fuse={
            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
            "mlp": ["gate_proj", "up_proj", "down_proj"],
            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
            "use_alibi": False,
            "num_attention_heads": 32,
            "num_key_value_heads": 8,
            "hidden_size": 4096,
        },
    ),
    trust_remote_code=True,
).to("cuda")


def reset_awq_cache(model):
    """
    Reset the AWQ fused modules cache.

    The fused QuantAttentionFused modules track their position in the KV
    cache via `start_pos`; setting it back to 0 lets the model start a
    fresh generation instead of continuing from the previous prompt.
    """
    from awq.modules.fused.attn import QuantAttentionFused

    for name, module in model.named_modules():
        if isinstance(module, QuantAttentionFused):
            module.start_pos = 0
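

# ask_vlm() calls reset_awq_cache() after every generation so that repeated
# queries do not keep advancing the fused cache toward fuse_max_seq_len.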
def ask_vlm(image, instruction):
    """Ask the model one question about one image and return its reply."""
    prompts = [
        "User:",
        image,
        f"{instruction}.<end_of_utterance>\n",
        "Assistant:",
    ]
    inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()}
    generated_ids = model.generate(
        **inputs,
        bad_words_ids=BAD_WORDS_IDS,
        max_new_tokens=25,
        repetition_penalty=1.2,
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
    # Reset the fused cache and free GPU memory so the next query starts clean.
    reset_awq_cache(model)
    gc.collect()
    torch.cuda.empty_cache()
    # Keep only the text after the "Assistant: " marker.
    return generated_texts[0].split("\nAssistant: ")[1]


class Operator:
    def __init__(self):
        self.image = None
        self.text = None

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "image":
                # Camera frames arrive flat; restore the (H, W, 3) shape.
                self.image = (
                    dora_event["value"]
                    .to_numpy()
                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
                )
            elif dora_event["id"] == "text":
                self.text = dora_event["value"][0].as_py()
                # Guard against a question arriving before the first frame.
                if self.image is None:
                    return DoraStatus.CONTINUE
                output = ask_vlm(self.image, self.text).lower()
                send_output(
                    "speak",
                    pa.array([output]),
                )
                # Motion control is disabled in this demo; uncomment to steer
                # the robot from the model's answer.
                # if "sofa" in output:
                #     send_output(
                #         "control",
                #         pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 50.0]),
                #     )
                # elif "back" in self.text:
                #     send_output(
                #         "control",
                #         pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
                #     )
        return DoraStatus.CONTINUE
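

# Minimal standalone smoke test (a sketch, not part of the dora dataflow):
# runs one query on a synthetic black frame, mirroring how the Operator
# passes raw numpy frames to ask_vlm. Assumes a CUDA GPU and that the AWQ
# checkpoint above is reachable; the frame content is only a placeholder.
if __name__ == "__main__":
    import numpy as np

    dummy_frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
    print(ask_vlm(dummy_frame, "What do you see"))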