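"""Dora operator driving a simple two-state fetch demo with the Idefics2 vision-language model.

In the "person" state it asks the model to read a note shown to the camera; if the note
mentions coffee, tea, or water it sends a motion command toward the kitchen and switches to
the "coffee" state, where it waits for someone to raise a hand before driving back to the
office. Outputs: "control" (motion commands) and "speak" (text to be spoken).
"""
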
from dora import DoraStatus
import pyarrow as pa
from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
import torch
import time

# Expected input frame geometry; must match the upstream camera node's output.
CAMERA_WIDTH = 960
CAMERA_HEIGHT = 540
PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
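# Token ids the model should never generate (the image placeholder tokens).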
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
EOS_WORDS_IDS = PROCESSOR.tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]
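# Load the 4-bit AWQ-quantized Idefics2 checkpoint with fused attention/MLP modules and move it to the GPU.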
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-tfrm-compatible-AWQ",
    quantization_config=AwqConfig(
        bits=4,
        fuse_max_seq_len=4096,
        modules_to_fuse={
            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
            "mlp": ["gate_proj", "up_proj", "down_proj"],
            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
            "use_alibi": False,
            "num_attention_heads": 32,
            "num_key_value_heads": 8,
            "hidden_size": 4096,
        },
    ),
    trust_remote_code=True,
).to("cuda")


def reset_awq_cache(model):
    """
    Simple method to reset the AWQ fused modules cache
    """
    from awq.modules.fused.attn import QuantAttentionFused

    for name, module in model.named_modules():
        if isinstance(module, QuantAttentionFused):
            module.start_pos = 0


def ask_vlm(image, instruction):
    global model
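    # Build the interleaved Idefics2 prompt: "User:", the image, the instruction, then the assistant turn.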
    prompts = [
        "User:",
        image,
        f"{instruction}.<end_of_utterance>\n",
        "Assistant:",
    ]
    inputs = {k: torch.tensor(v).to("cuda") for k, v in PROCESSOR(prompts).items()}

    generated_ids = model.generate(
        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=25, repetition_penalty=1.2
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
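    # Fused AWQ attention modules keep their cache position across calls; reset it for the next query.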
    reset_awq_cache(model)
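    # Return only the assistant's reply from the decoded conversation.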
    return generated_texts[0].split("\nAssistant: ")[1]


class Operator:
    def __init__(self):
        # "person": wait for someone to show a note; "coffee": wait for a raised hand in the kitchen.
        self.state = "person"
        # True once the fallback speech for the current state has been spoken, to avoid repeating it.
        self.last_output = False

    def on_event(
        self,
        dora_event,
        send_output,
    ) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            image = (
                dora_event["value"].to_numpy().reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
            )

            if self.state == "person":
                output = ask_vlm(image, "Can you read the note?").lower()
                print(output, flush=True)
                if "coffee" in output or "tea" in output or "water" in output:
                    send_output(
                        "control",
                        pa.array([-3.0, 0.0, 0.0, 0.8, 0.0, 10.0, 180.0]),
                    )
                    send_output(
                        "speak",
                        pa.array([output + ". Going to the kitchen."]),
                    )
                    time.sleep(10)
                    self.state = "coffee"
                    self.last_output = False
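                # No drink request detected: speak the model's answer once (last_output prevents repeats).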
                elif not self.last_output:
                    self.last_output = True
                    send_output(
                        "speak",
                        pa.array([output]),
                    )
                    time.sleep(4)

            elif self.state == "coffee":
                output = ask_vlm(image, "Is there a person with a hands up?").lower()
                print(output, flush=True)
                if "yes" in output:
                    send_output(
                        "speak",
                        pa.array([output + ". Going to the office."]),
                    )
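                    # Drive back toward the office; same 7-element motion command format.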
                    send_output(
                        "control",
                        pa.array([2.0, 0.0, 0.0, 0.8, 0.0, 10.0, 0.0]),
                    )
                    time.sleep(10)
                    self.state = "person"
                    self.last_output = False
                elif not self.last_output:
                    self.last_output = True
                    send_output(
                        "speak",
                        pa.array([output]),
                    )
                    time.sleep(4)

        return DoraStatus.CONTINUE