---
license: mit
---
Follow these two steps:
1. Install the required libraries and download the GitHub package [Meteor](https://github.com/ByungKwanLee/Meteor):
```bash
# run the repository's install script, then install the Python dependencies
bash install
pip install -r requirements.txt
```
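The demo below calls `.cuda()`, so it needs a CUDA-capable GPU. Before moving on, you can confirm that PyTorch sees one; this quick check is an addition for convenience, not part of the repository:

```python
import torch

# Sanity check (not part of the Meteor repo): demo.py moves the model
# to the GPU, so CUDA must be visible to PyTorch
assert torch.cuda.is_available(), "Meteor's demo requires a CUDA GPU"
print(torch.__version__, torch.cuda.get_device_name(0))
```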
2. Run `demo.py` in [Meteor](https://github.com/ByungKwanLee/Meteor).
You can choose the prompt type: `text_only` or `with_image`!
Enjoy Meteor!
```python
import torch
from config import *
from PIL import Image
from utils.utils import *
import torch.nn.functional as F
from meteor.load_mmamba import load_mmamba
from meteor.load_meteor import load_meteor
from torchvision.transforms.functional import pil_to_tensor

# User prompt
prompt_type = 'with_image'  # text_only / with_image
img_path = 'figures/demo.png'
question = 'Provide the detail of the image'

# Load the Meteor models: the Mamba rationale model and the 4-bit quantized MLM
mmamba = load_mmamba('BK-Lee/Meteor-Mamba').cuda()
meteor, tok_meteor = load_meteor('BK-Lee/Meteor-MLM', bits=4)

# Freeze both models for inference
freeze_model(mmamba)
freeze_model(meteor)

# Device
device = torch.cuda.current_device()

# Prompt type -> input prompt
# A 490x490 image with 14-pixel patches yields (490/14)^2 = 1225 image tokens
image_token_number = int((490 / 14) ** 2)
if prompt_type == 'with_image':
    # Load the image and resize it to 490x490 with bicubic interpolation
    image = F.interpolate(
        pil_to_tensor(Image.open(img_path).convert("RGB")).unsqueeze(0),
        size=(490, 490),
        mode='bicubic',
    ).squeeze(0)
    inputs = [{'image': image, 'question': question}]
elif prompt_type == 'text_only':
    inputs = [{'question': question}]

# Generate
with torch.inference_mode():
    # Meteor Mamba: embed the prompt (and CLIP image features, if any)
    # and produce the traversal-of-rationale (tor) features
    mmamba_inputs = mmamba.eval_process(inputs=inputs, tokenizer=tok_meteor, device=device, img_token_number=image_token_number)
    if 'image' in mmamba_inputs.keys():
        clip_features = meteor.clip_features(mmamba_inputs['image'])
        mmamba_inputs.update({"image_features": clip_features})
    mmamba_outputs = mmamba(**mmamba_inputs)

    # Meteor MLM: condition generation on the image and tor features
    meteor_inputs = meteor.eval_process(inputs=inputs, data='demo', tokenizer=tok_meteor, device=device, img_token_number=image_token_number)
    if 'image' in mmamba_inputs.keys():
        meteor_inputs.update({"image_features": clip_features})
    meteor_inputs.update({"tor_features": mmamba_outputs.tor_features})

    # Generation
    generate_ids = meteor.generate(**meteor_inputs, do_sample=True, max_new_tokens=128, top_p=0.95, temperature=0.9, use_cache=True)

# Text decoding: keep only the assistant's reply and strip trailing special tokens
decoded_text = tok_meteor.batch_decode(generate_ids, skip_special_tokens=True)[0].split('assistant\n')[-1].split('[U')[0].strip()
print(decoded_text)

# Paper: arxiv.org/abs/2405.15574
```
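If you want to ask several questions without reloading the models, the generation logic can be wrapped in a helper. The sketch below assumes `mmamba`, `meteor`, `tok_meteor`, and `freeze_model` were already set up exactly as in `demo.py` above; `ask_meteor` itself is a hypothetical convenience wrapper, not part of the repository:

```python
# Hypothetical wrapper (not part of the Meteor repo); assumes mmamba,
# meteor, and tok_meteor were loaded and frozen as in demo.py above.
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor

IMAGE_TOKEN_NUMBER = int((490 / 14) ** 2)  # same token budget as demo.py

def ask_meteor(question, img_path=None, max_new_tokens=128):
    """Run one Meteor query; pass img_path for with_image, omit it for text_only."""
    device = torch.cuda.current_device()
    if img_path is not None:
        image = F.interpolate(
            pil_to_tensor(Image.open(img_path).convert("RGB")).unsqueeze(0),
            size=(490, 490), mode='bicubic',
        ).squeeze(0)
        inputs = [{'image': image, 'question': question}]
    else:
        inputs = [{'question': question}]

    with torch.inference_mode():
        # Stage 1: Mamba produces the traversal-of-rationale (tor) features
        mmamba_inputs = mmamba.eval_process(
            inputs=inputs, tokenizer=tok_meteor,
            device=device, img_token_number=IMAGE_TOKEN_NUMBER)
        if 'image' in mmamba_inputs:
            clip_features = meteor.clip_features(mmamba_inputs['image'])
            mmamba_inputs.update({"image_features": clip_features})
        mmamba_outputs = mmamba(**mmamba_inputs)

        # Stage 2: the MLM generates an answer conditioned on the tor features
        meteor_inputs = meteor.eval_process(
            inputs=inputs, data='demo', tokenizer=tok_meteor,
            device=device, img_token_number=IMAGE_TOKEN_NUMBER)
        if 'image' in mmamba_inputs:
            meteor_inputs.update({"image_features": clip_features})
        meteor_inputs.update({"tor_features": mmamba_outputs.tor_features})
        generate_ids = meteor.generate(
            **meteor_inputs, do_sample=True, max_new_tokens=max_new_tokens,
            top_p=0.95, temperature=0.9, use_cache=True)

    decoded = tok_meteor.batch_decode(generate_ids, skip_special_tokens=True)[0]
    return decoded.split('assistant\n')[-1].split('[U')[0].strip()

# Example usage:
# print(ask_meteor('Provide the detail of the image', img_path='figures/demo.png'))
# print(ask_meteor('Summarize what Meteor does'))
```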