# llava-test-env / app.py
import torch
from transformers import AutoTokenizer, AutoProcessor, LlavaForConditionalGeneration
import gradio as gr
# Specify CPU usage
device = torch.device("cpu")
model_id = "hitmanonholiday/llava-1.5-7b-4bit-finetuned3"
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id)
# Load the model without quantization
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float32  # Use float32 for CPU compatibility
).to(device)
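# Optional sketch (assumption: a CUDA GPU and the bitsandbytes package are available) showing
# how the same checkpoint could be loaded in 4-bit instead of full float32:
#
#   from transformers import BitsAndBytesConfig
#   quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
#   model = LlavaForConditionalGeneration.from_pretrained(
#       model_id, quantization_config=quant_config, device_map="auto"
#   )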
# Load the processor and attach the tokenizer to it
processor = AutoProcessor.from_pretrained(model_id)
processor.tokenizer = tokenizer
# Define the LLaVA chat template and attach it to the tokenizer
LLAVA_CHAT_TEMPLATE = """A chat between a curious user and an artificial intelligence assistant.
The assistant gives helpful, detailed, and polite answers to the user's questions.
{% for message in messages %}{% if message['role'] == 'user' %}
USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}
{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}"""
tokenizer.chat_template = LLAVA_CHAT_TEMPLATE
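# Quick sanity check (hypothetical, safe to delete): render the template for a sample message
# to confirm the resulting prompt contains the <image> placeholder.
#
#   sample = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Describe this image."}]}]
#   print(tokenizer.apply_chat_template(sample, tokenize=False))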
# Define the prediction function used by the Gradio interface
def predict(image, text):
    # Wrap the user input in the chat template so the prompt contains the <image> placeholder
    messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": text}]}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False)
    # Preprocess the image/text pair into input_ids, attention_mask and pixel_values
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
    # Generate the response, passing pixel_values along with the token ids
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=200)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
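# Example of calling predict directly (hypothetical local test; assumes an image file
# named "test.jpg" exists alongside app.py):
#
#   from PIL import Image
#   print(predict(Image.open("test.jpg"), "What is shown in this picture?"))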
# Define the Gradio interface
inputs = [
    gr.Image(type="pil", label="Upload an image"),
    gr.Textbox(lines=2, placeholder="Type your text here...", label="Input Text"),
]
outputs = gr.Textbox(label="Output")
gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title="LLaVA Multimodal Chatbot").launch(share=True)
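# Note: share=True creates a temporary public Gradio link, which is mainly useful for local
# runs; it is not needed when the app is hosted on a Hugging Face Space.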