import torch
from transformers import AutoProcessor, AutoModelForVision2Seq, AwqConfig
import numpy as np
import pyttsx3
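
# Hard-coded waypoint segments for the demo route (start -> couch -> kitchen -> start).
# Each array holds two (x, y) targets flattened to [x1, y1, x2, y2]; the frame and
# units are assumed to match whatever robot controller consumes them.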
START_TO_COUCH = np.array([[0.5, 0], [0.5, 0.5]]).ravel()
COUCH_TO_KITCHEN = np.array([[0.5, -0.5], [1.0, -1.0]]).ravel()
KITCHEN_TO_START = np.array([[0.5, -0.5], [0, 0]]).ravel()
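
# Text-to-speech setup using the espeak backend.
# NOTE: voices[3] assumes a specific locally installed voice; adjust the index
# if that voice is not available on your system.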
engine = pyttsx3.init("espeak")
voices = engine.getProperty("voices")
engine.setProperty("voice", voices[3].id)

def speak(text):
    print(f"said {text}", flush=True)
    engine.say(text)
    engine.runAndWait()

speak("hello")
MODE = "quantized"
DEVICE = "cuda"
PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
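
# Token ids the model should never emit (the image placeholder tokens) and the
# ids marking the end of an utterance. BAD_WORDS_IDS is passed to generate()
# below; EOS_WORDS_IDS is defined for the same purpose but not used in this file.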
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
EOS_WORDS_IDS = PROCESSOR.tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]

# Load model
if MODE == "regular":
    model = AutoModelForVision2Seq.from_pretrained(
        "HuggingFaceM4/idefics2-tfrm-compatible",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        _attn_implementation="flash_attention_2",
        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
    ).to(DEVICE)
elif MODE == "quantized":
    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
    model = AutoModelForVision2Seq.from_pretrained(
        quant_path, trust_remote_code=True
    ).to(DEVICE)
elif MODE == "fused_quantized":
    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
    quantization_config = AwqConfig(
        bits=4,
        fuse_max_seq_len=4096,
        modules_to_fuse={
            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
            "mlp": ["gate_proj", "up_proj", "down_proj"],
            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
            "use_alibi": False,
            "num_attention_heads": 32,
            "num_key_value_heads": 8,
            "hidden_size": 4096,
        },
    )
    model = AutoModelForVision2Seq.from_pretrained(
        quant_path, quantization_config=quantization_config, trust_remote_code=True
    ).to(DEVICE)
else:
    raise ValueError("Unknown mode")
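
# ask_vlm() builds an interleaved image + text prompt in the Idefics2 chat format
# ("User: ... <end_of_utterance> Assistant:"), speaks the instruction, generates an
# answer, speaks it, and returns the text.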
def ask_vlm(image, instruction):
    """Speak the instruction, query the VLM about the image, and speak the answer."""
    prompts = [
        "User:",
        image,
        f"{instruction}.<end_of_utterance>\n",
        "Assistant:",
    ]
    speak(instruction)
    inputs = PROCESSOR(prompts)
    inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
    generated_ids = model.generate(
        **inputs, bad_words_ids=BAD_WORDS_IDS, max_new_tokens=50
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
    # Keep only the assistant's reply from the decoded conversation.
    text = generated_texts[0].split("\nAssistant: ")[1]
    speak(text)
    return text

# import requests
# import torch
# from PIL import Image
# from io import BytesIO
# def download_image(url):
#     try:
#         # Send a GET request to the URL to download the image
#         response = requests.get(url)
#         # Check if the request was successful (status code 200)
#         if response.status_code == 200:
#             # Open the image using PIL
#             image = Image.open(BytesIO(response.content))
#             # Return the PIL image object
#             return image
#         else:
#             print(f"Failed to download image. Status code: {response.status_code}")
#             return None
#     except Exception as e:
#         print(f"An error occurred: {e}")
#         return None
#
# # Create inputs
# image1 = download_image(
#     "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
# )
# print(ask_vlm(image1, "What is this?"))