import requests
import torch
from PIL import Image
from io import BytesIO
from transformers import (
    AutoProcessor,
    AutoModelForVision2Seq,
    AwqConfig,
)
import awq_ext  # imported only to fail early if the AWQ CUDA kernels are not installed
import time
MODE = "fused_quantized"
DEVICE = "cuda"
PROCESSOR = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-tfrm-compatible")
BAD_WORDS_IDS = PROCESSOR.tokenizer(
    ["<image>", "<fake_token_around_image>"], add_special_tokens=False
).input_ids
EOS_WORDS_IDS = PROCESSOR.tokenizer(
    "<end_of_utterance>", add_special_tokens=False
).input_ids + [PROCESSOR.tokenizer.eos_token_id]
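# bad_words_ids (passed to generate() below) keeps the model from emitting the image
# placeholder tokens; EOS_WORDS_IDS gathers the token ids that mark the end of a turn.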

# Load model
if MODE == "regular":
    model = AutoModelForVision2Seq.from_pretrained(
        "HuggingFaceM4/idefics2-tfrm-compatible",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        _attn_implementation="flash_attention_2",
        revision="3dc93be345d64fb6b1c550a233fe87ddb36f183d",
    ).to(DEVICE)
elif MODE == "quantized":
    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
    model = AutoModelForVision2Seq.from_pretrained(
        quant_path, trust_remote_code=True
    ).to(DEVICE)
elif MODE == "fused_quantized":
    quant_path = "HuggingFaceM4/idefics2-tfrm-compatible-AWQ"
    quantization_config = AwqConfig(
        bits=4,
        fuse_max_seq_len=4096,
        modules_to_fuse={
            "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
            "mlp": ["gate_proj", "up_proj", "down_proj"],
            "layernorm": ["input_layernorm", "post_attention_layernorm", "norm"],
            "use_alibi": False,
            "num_attention_heads": 32,
            "num_key_value_heads": 8,
            "hidden_size": 4096,
        },
    )
    model = AutoModelForVision2Seq.from_pretrained(
        quant_path,
        quantization_config=quantization_config,
        trust_remote_code=True,
    ).to(DEVICE)
else:
    raise ValueError("Unknown mode")
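
# Optional sanity check (a sketch, not part of the original script): count the AWQ fused
# attention modules so you can confirm fusion actually happened in "fused_quantized" mode.
# It relies only on QuantAttentionFused, the same class reset_awq_cache() imports below.
if MODE == "fused_quantized":
    from awq.modules.fused.attn import QuantAttentionFused

    n_fused = sum(
        1 for _, module in model.named_modules() if isinstance(module, QuantAttentionFused)
    )
    print(f"Fused attention modules found: {n_fused}")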

def download_image(url):
    try:
        # Send a GET request to the URL to download the image
        response = requests.get(url)
        # Check if the request was successful (status code 200)
        if response.status_code == 200:
            # Open the image using PIL
            image = Image.open(BytesIO(response.content))
            # Return the PIL image object
            return image
        else:
            print(f"Failed to download image. Status code: {response.status_code}")
            return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None

# Create inputs
image1 = download_image(
    "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
)
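# Minimal guard (an addition, not in the original): download_image() returns None on
# failure, so fail fast here rather than inside generation.
if image1 is None:
    raise RuntimeError("Could not download the example image")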

def reset_awq_cache(model):
    """
    Simple method to reset the AWQ fused modules cache
    """
    from awq.modules.fused.attn import QuantAttentionFused

    for name, module in model.named_modules():
        if isinstance(module, QuantAttentionFused):
            module.start_pos = 0
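# Note: the fused attention modules track their cache position in `start_pos` across
# generate() calls; resetting it to 0 lets every ask_vlm() call start a fresh sequence
# instead of continuing from the previous one.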

def ask_vlm(image, instruction):
    global model
    prompts = [
        "User:",
        image,
        f"{instruction}.<end_of_utterance>\n",
        "Assistant:",
    ]
    inputs = PROCESSOR(prompts)
    inputs = {k: torch.tensor(v).to(DEVICE) for k, v in inputs.items()}
    generated_ids = model.generate(
        **inputs,
        bad_words_ids=BAD_WORDS_IDS,
        max_new_tokens=100,
    )
    generated_texts = PROCESSOR.batch_decode(generated_ids, skip_special_tokens=True)
    reset_awq_cache(model)
    return generated_texts

# Time the same request twice: the first call pays any one-off warm-up cost,
# the second is closer to steady-state latency.
now = time.time()
print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
print("resp:", time.time() - now)

now = time.time()
print(ask_vlm(image1, "What is this?")[0].split("\nAssistant: ")[1])
print("resp:", time.time() - now)