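Example: vision-language understanding with Emu3-Chat. The script below loads BAAI/Emu3-Chat together with the BAAI/Emu3-VisionTokenizer, tokenizes an input image, and asks the model to describe it.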
# -*- coding: utf-8 -*-
from PIL import Image
from transformers import AutoTokenizer, AutoModel, AutoImageProcessor, AutoModelForCausalLM
from transformers.generation.configuration_utils import GenerationConfig
import torch

from emu3.mllm.processing_emu3 import Emu3Processor

# model path
EMU_HUB = "BAAI/Emu3-Chat"
VQ_HUB = "BAAI/Emu3-VisionTokenizer"

# prepare model and processor
model = AutoModelForCausalLM.from_pretrained(
    EMU_HUB,
    device_map="cuda:0",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
    trust_remote_code=True,
)

tokenizer = AutoTokenizer.from_pretrained(EMU_HUB, trust_remote_code=True)
image_processor = AutoImageProcessor.from_pretrained(VQ_HUB, trust_remote_code=True)
image_tokenizer = AutoModel.from_pretrained(VQ_HUB, device_map="cuda:0", trust_remote_code=True).eval()
processor = Emu3Processor(image_processor, image_tokenizer, tokenizer)

# prepare input
text = "Please describe the image"
image = Image.open("assets/demo.png")

inputs = processor(
    text=text,
    image=image,
    mode='U',  # 'U': understanding mode (image + text -> text)
    padding_side="left",
    padding="longest",
    return_tensors="pt",
)

# prepare hyper parameters
GENERATION_CONFIG = GenerationConfig(
    pad_token_id=tokenizer.pad_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
)

# generate
outputs = model.generate(
    inputs.input_ids.to("cuda:0"),
    GENERATION_CONFIG,
    max_new_tokens=320,
)

# drop the prompt tokens and decode only the newly generated text
outputs = outputs[:, inputs.input_ids.shape[-1]:]
print(processor.batch_decode(outputs, skip_special_tokens=True)[0])
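The example assumes a CUDA GPU (device_map="cuda:0") and the flash-attn package for attn_implementation="flash_attention_2"; if flash-attn is not installed, dropping that argument (or switching it to "sdpa") should also work. The prompt text and image path (assets/demo.png) are placeholders for your own inputs, and max_new_tokens caps the length of the generated description.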