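"""Dora operator that answers questions about the latest camera frame.

Caches incoming `image` frames and, on each `text` input, queries a 4-bit
AWQ-quantized Idefics2 model, emitting the answer on the `speak` output and a
short `control` pulse when the answer contains "yes" or "no".
"""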
from dora import DoraStatus
import pyarrow as pa
from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
import torch
import gc
import time

# Incoming frames arrive as flat buffers; reshape them to this camera resolution.
CAMERA_WIDTH = 1280
CAMERA_HEIGHT = 720

PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
# Never let the image placeholder tokens leak into the generated text.
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
# Stop generation at the end-of-utterance marker or the plain EOS token.
EOS_WORDS_IDS = PROCESSOR.tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]
# 4-bit AWQ checkpoint; fusing the attention/MLP/layernorm modules speeds up decoding.
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-tfrm-compatible-AWQ",
    quantization_config=AwqConfig(
        bits=4,
        fuse_max_seq_len=4096,
        modules_to_fuse={
            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
            "mlp": ["gate_proj", "up_proj", "down_proj"],
            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
            "use_alibi": False,
            "num_attention_heads": 32,
            "num_key_value_heads": 8,
            "hidden_size": 4096,
        },
    ),
    trust_remote_code=True,
).to("cuda")


def reset_awq_cache(model):
    """Reset the AWQ fused-attention cache position so the next generate() starts fresh."""
    from awq.modules.fused.attn import QuantAttentionFused

    for _, module in model.named_modules():
        if isinstance(module, QuantAttentionFused):
            module.start_pos = 0


def ask_vlm(image, instruction):
    """Ask the model `instruction` about `image` and return the assistant's reply."""
    # Idefics2 chat-style prompt with the image interleaved between the text turns.
    prompts = [
        "User:",
        image,
        f"{instruction}.<end_of_utterance>\n",
        "Assistant:",
    ]
    inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()}

    generated_ids = model.generate(
        **inputs,
        bad_words_ids=BAD_WORDS_IDS,
        eos_token_id=EOS_WORDS_IDS,  # stop as soon as the utterance is closed
        max_new_tokens=25,
        repetition_penalty=1.2,
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
    # The fused AWQ modules keep a rolling cache position; reset it between calls.
    reset_awq_cache(model)

    gc.collect()
    torch.cuda.empty_cache()
    return generated_texts[0].split("\nAssistant: ")[1]


class Operator:
    """Caches camera frames and, on each question, speaks the VLM's answer and
    emits control pulses when that answer contains "yes" or "no"."""

    def __init__(self):
        self.image = None
        self.text = None

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            if dora_event["id"] == "image":
                self.image = (
                    dora_event["value"]
                    .to_numpy()
                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
                )
            elif dora_event["id"] == "text":
                self.text = dora_event["value"][0].as_py()
                output = ask_vlm(self.image, self.text).lower()
                send_output(
                    "speak",
                    pa.array([output]),
                )
                if "yes" in output:
                    send_output(
                        "control",
                        pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 0.0]),
                    )
                    time.sleep(2)
                    send_output(
                        "control",
                        pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]),
                    )
                elif "no" in output:
                    send_output(
                        "control",
                        pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 50.0]),
                    )
                    time.sleep(2)
                    send_output(
                        "control",
                        pa.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
                    )

        return DoraStatus.CONTINUE
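

# Minimal manual smoke test, assuming a CUDA GPU and access to the checkpoints
# above; the all-black frame is only a stand-in for a real camera image, so the
# answer itself is not meaningful. Guarded by __main__ so the dora runtime never
# executes it when loading this operator.
if __name__ == "__main__":
    import numpy as np

    dummy_frame = np.zeros((CAMERA_HEIGHT, CAMERA_WIDTH, 3), dtype=np.uint8)
    print(ask_vlm(dummy_frame, "Can you see anything"))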