haixuantao committed on
Commit
1a7150e
Parent: 23f48d8

replace idefics by a policy

operators/idefics2_op.py DELETED
@@ -1,61 +0,0 @@
-from dora import DoraStatus
-import pyarrow as pa
-
-
-import cv2
-
-from idefics2_utils import ask_vlm
-import pyttsx3
-
-
-CAMERA_WIDTH = 960
-CAMERA_HEIGHT = 540
-
-
-FONT = cv2.FONT_HERSHEY_SIMPLEX
-
-
-engine = pyttsx3.init("espeak")
-voices = engine.getProperty("voices")
-engine.setProperty("voice", voices[11].id)  # English
-
-
-def speak(text):
-    engine.say(text)
-    engine.runAndWait()
-
-
-class Operator:
-    def __init__(self):
-        self.instruction = "What is in the image?"
-        self.last_message = ""
-        self.image = None
-
-    def on_event(
-        self,
-        dora_event,
-        send_output,
-    ) -> DoraStatus:
-        if dora_event["type"] == "INPUT":
-            if dora_event["id"] == "image":
-                self.image = (
-                    dora_event["value"]
-                    .to_numpy()
-                    .reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3))
-                )
-            elif dora_event["id"] == "instruction":
-                self.instruction = dora_event["value"][0].as_py()
-                print("instructions: ", self.instruction, flush=True)
-
-                if self.image is not None:
-                    output = ask_vlm(self.image, self.instruction)
-                    speak(output)
-                    print("response: ", output, flush=True)
-                    send_output(
-                        "assistant_message",
-                        pa.array([output]),
-                        dora_event["metadata"],
-                    )
-
-                    self.last_message = output
-        return DoraStatus.CONTINUE
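
The deleted file follows dora-rs's Python operator protocol: a class named Operator whose on_event method receives typed events and emits outputs through send_output. A minimal sketch of that protocol, assuming only the API the deleted file itself uses (the "echo" output id is hypothetical):

```python
# Minimal dora-rs operator sketch, assuming the same API as the
# deleted file above. The "echo" output id is hypothetical.
from dora import DoraStatus


class Operator:
    def on_event(self, dora_event, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            # Forward any input payload unchanged, keeping its metadata.
            send_output("echo", dora_event["value"], dora_event["metadata"])
        return DoraStatus.CONTINUE
```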
operators/idefics2_utils.py DELETED
@@ -1,69 +0,0 @@
-import requests
-import torch
-from PIL import Image
-from io import BytesIO
-
-from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
-
-
-MODE = "quantized"
-DEVICE = "cuda"
-PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
-BAD_WORDS_IDS = PROCESSOR.tokenizer(
-    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
-).input_ids
-EOS_WORDS_IDS = PROCESSOR.tokenizer(
-    "<end_of_utterance>", add_special_tokens=False
-).input_ids + [PROCESSOR.tokenizer.eos_token_id]
-
-# Load model
-if MODE == "regular":
-    model = AutoModelForVision2Seq.from_pretrained(
-        "HuggingFaceM4/idefics2-tfrm-compatible",
-        torch_dtype=torch.float16,
-        trust_remote_code=True,
-        _attn_implementation="flash_attention_2",
-        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
-    ).to(DEVICE)
-elif MODE == "quantized":
-    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
-    model = AutoModelForVision2Seq.from_pretrained(
-        quant_path, trust_remote_code=True
-    ).to(DEVICE)
-elif MODE == "fused_quantized":
-    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
-    quantization_config = AwqConfig(
-        bits=4,
-        fuse_max_seq_len=4096,
-        modules_to_fuse={
-            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
-            "mlp": ["gate_proj", "up_proj", "down_proj"],
-            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
-            "use_alibi": False,
-            "num_attention_heads": 32,
-            "num_key_value_heads": 8,
-            "hidden_size": 4096,
-        },
-    )
-    model = AutoModelForVision2Seq.from_pretrained(
-        quant_path, quantization_config=quantization_config, trust_remote_code=True
-    ).to(DEVICE)
-else:
-    raise ValueError("Unknown mode")
-
-
-def ask_vlm(image, instruction):
-    prompts = [
-        "User:",
-        image,
-        f"{instruction}.<end_of_utterance>\n",
-        "Assistant:",
-    ]
-    inputs = PROCESSOR(prompts)
-    inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
-
-    generated_ids = model.generate(
-        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
-    )
-    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
-    return generated_texts[0].split("\nAssistant: ")[1]
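
One detail worth noting in the deleted file: EOS_WORDS_IDS is computed but never passed to model.generate, so generation only ever stopped at max_new_tokens=10. A hedged variant of the generate call that actually stops at <end_of_utterance>, assuming the installed transformers version accepts a list of token ids for eos_token_id (recent versions do):

```python
# Hedged variant of the deleted generate() call; passing a list of ids
# to eos_token_id is an assumption about the transformers version.
generated_ids = model.generate(
    **inputs,
    bad_words_ids=BAD_WORDS_IDS,
    eos_token_id=EOS_WORDS_IDS,
    max_new_tokens=10,
)
```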
operators/policy.py CHANGED
@@ -10,7 +10,7 @@ HOME = np.array([[0.5, 0.0], [0.0, 0.0]]).ravel()
 
 ## Policy Operator
 class Operator:
-    def speak(text: str):
+    def speak(self, text: str):
         speak(text)
 
     def ask_model(self, image, text: str) -> bool:
@@ -21,9 +21,17 @@ class Operator:
         if dora_event["type"] == "INPUT":
             id = dora_event["id"]
             if id == "init":
-                send_output("go_to", pa.array([]))
+                send_output("go_to", pa.array(COUCH))
             elif id == "goal_reached":
+                print("goal reached", flush=True)
                 image = dora_event["value"].to_numpy().reshape((540, 960, 3))
-                pass
+                if self.ask_model(image, "Is there anyone with a bruise shirt?"):
+                    self.speak("I'm gonna go get coffee.")
+                    send_output("go_to", pa.array(KITCHEN))
+                    self.speak("I'm going to the kitchen.")
+                else:
+                    self.speak("There's no one with a bruise shirt.")
+                    send_output("go_to", pa.array(COUCH))
+                    self.speak("I'm going to the couch.")
 
         return DoraStatus.CONTINUE
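
Pieced together, the policy operator after this commit roughly reads as below. This is a hedged reconstruction: the COUCH and KITCHEN waypoint values, the imports, and the ask_model body are assumptions (only HOME appears in the diff context); ask_model presumably wraps ask_vlm from operators/utils.py and checks for an affirmative answer.

```python
# Hedged reconstruction of operators/policy.py after this commit.
# COUCH/KITCHEN values, imports, and ask_model's body are assumptions;
# only the lines shown in the diff above are confirmed.
import numpy as np
import pyarrow as pa
from dora import DoraStatus
from utils import ask_vlm, speak  # assumed import path

HOME = np.array([[0.5, 0.0], [0.0, 0.0]]).ravel()
COUCH = np.array([[1.0, 0.0], [1.0, 1.0]]).ravel()     # assumed waypoint
KITCHEN = np.array([[1.0, 0.0], [-1.0, 1.0]]).ravel()  # assumed waypoint


## Policy Operator
class Operator:
    def speak(self, text: str):
        speak(text)

    def ask_model(self, image, text: str) -> bool:
        # Assumed: treat an answer containing "yes" as affirmative.
        return "yes" in ask_vlm(image, text).lower()

    def on_event(self, dora_event, send_output) -> DoraStatus:
        if dora_event["type"] == "INPUT":
            id = dora_event["id"]
            if id == "init":
                send_output("go_to", pa.array(COUCH))
            elif id == "goal_reached":
                print("goal reached", flush=True)
                image = dora_event["value"].to_numpy().reshape((540, 960, 3))
                # "bruise shirt" is verbatim from the commit; likely "blue shirt".
                if self.ask_model(image, "Is there anyone with a bruise shirt?"):
                    self.speak("I'm gonna go get coffee.")
                    send_output("go_to", pa.array(KITCHEN))
                    self.speak("I'm going to the kitchen.")
                else:
                    self.speak("There's no one with a bruise shirt.")
                    send_output("go_to", pa.array(COUCH))
                    self.speak("I'm going to the couch.")
        return DoraStatus.CONTINUE
```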
operators/utils.py CHANGED
@@ -22,6 +22,8 @@ def speak(text):
     engine.runAndWait()
 
 
+speak("hello")
+
 MODE = "quantized"
 DEVICE = "cuda"
 PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
@@ -75,6 +77,7 @@ def ask_vlm(image, instruction):
         f"{instruction}.<end_of_utterance>\n",
         "Assistant:",
     ]
+    speak(instruction)
     inputs = PROCESSOR(prompts)
     inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
 
@@ -82,4 +85,39 @@ def ask_vlm(image, instruction):
         **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=10
     )
     generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
-    return generated_texts[0].split("\nAssistant: ")[1]
+
+    text = generated_texts[0].split("\nAssistant: ")[1]
+    speak(text)
+    return text
+
+
+# import requests
+# import torch
+# from PIL import Image
+# from io import BytesIO
+
+
+# def download_image(url):
+#     try:
+#         # Send a GET request to the URL to download the image
+#         response = requests.get(url)
+#         # Check if the request was successful (status code 200)
+#         if response.status_code == 200:
+#             # Open the image using PIL
+#             image = Image.open(BytesIO(response.content))
+#             # Return the PIL image object
+#             return image
+#         else:
+#             print(f"Failed to download image. Status code: {response.status_code}")
+#             return None
+#     except Exception as e:
+#         print(f"An error occurred: {e}")
+#         return None
+
+
+# # Create inputs
+# image1 = download_image(
+#     "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+# )
+
+# print(ask_vlm(image1, "What is this?"))
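
With this commit, importing operators/utils.py speaks "hello" at load time, and ask_vlm speaks both the instruction and the model's answer before returning it. A hedged smoke test mirroring the commented-out download_image demo above; "frame.png" is a hypothetical local capture, not a file in this repo:

```python
# Hedged smoke test for operators/utils.py after this commit.
# "frame.png" is a hypothetical local capture; importing utils also
# triggers speak("hello") and loads the AWQ model onto the GPU.
from PIL import Image

from utils import ask_vlm

frame = Image.open("frame.png")
# ask_vlm speaks the instruction, then speaks and returns the answer.
print(ask_vlm(frame, "What is in the image?"))
```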