Requirements
The install cell detects the GPU's compute capability and installs the matching dependency set: flash-attn needs an Ampere-class GPU (compute capability 8.0 or higher), so older GPUs fall back to xformers alone.

%%capture
import torch

# Decide which attention backend can be installed on this GPU.
major_version, minor_version = torch.cuda.get_device_capability()

# Install Unsloth from source (Colab build).
!pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"

if major_version >= 8:
    # Ampere and newer GPUs (A100, RTX 30xx/40xx, H100) can use flash-attn.
    !pip install --no-deps packaging ninja einops flash-attn xformers trl peft accelerate bitsandbytes
else:
    # Older GPUs (T4, V100, RTX 20xx) use xformers instead of flash-attn.
    !pip install --no-deps xformers trl peft accelerate bitsandbytes

!pip install gradio
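After installation, an optional sanity check (not part of the original notebook; it assumes a CUDA runtime) confirms the key packages import cleanly and the GPU is visible:

import torch
import peft, trl, bitsandbytes, gradio

print("CUDA available:", torch.cuda.is_available())
print("GPU:", torch.cuda.get_device_name(0))
print("Compute capability:", torch.cuda.get_device_capability())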
Inference
The cell below loads the fine-tuned LoRA adapter together with its 4-bit quantized base model. Note that bitsandbytes-quantized models cannot be moved with .to("cuda"); passing device_map="auto" lets accelerate place the weights instead.

import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM
import gradio as gr

model_id = "DisgustingOzil/MIstral_Pak_Law"
dtype = torch.float16
load_in_4bit = True

model = AutoPeftModelForCausalLM.from_pretrained(
    model_id,
    load_in_4bit=load_in_4bit,
    torch_dtype=dtype,
    device_map="auto",  # 4-bit models do not support .to("cuda")
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Mistral tokenizers may ship without a pad token; padding during
# tokenization below requires one.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
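Optionally, verify that 4-bit loading kept the footprint small. get_memory_footprint is a standard transformers helper; a 7B model quantized to 4 bits should come in at roughly 4 GB:

print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")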
def generate_response(context, question):
    # Alpaca-style prompt; the format must match the one used at fine-tuning time.
    alpaca_prompt = f"""Given the context and a specific question, generate a comprehensive and detailed response that accurately addresses the query.
### Context:
{context}
### Question:
{question}
### Answer:
"""
    inputs = tokenizer(
        [alpaca_prompt], return_tensors="pt", padding=True, truncation=True, max_length=2048
    ).to("cuda")
    # max_new_tokens caps the answer length; raise it for longer responses.
    outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)
    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # The decoded output echoes the prompt, so keep only the text
    # after the "### Answer:" marker.
    answer_start = response[0].find("### Answer:")
    answer_text = response[0][answer_start:].replace("### Answer:", "").strip()
    return answer_text
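A quick smoke test before wiring up the UI; the context and question below are illustrative placeholders, not text taken from the Act:

sample_context = (
    "The Abandoned Properties (Taking Over and Management) Act 1975 provides "
    "for the taking over and management of certain abandoned properties."
)
sample_question = "What does the Act provide for?"
print(generate_response(sample_context, sample_question))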
iface = gr.Interface(
    fn=generate_response,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
    title="Abandoned Properties Act 1975 Query",
    description="Enter the context and a specific question to generate a response based on the Abandoned Properties (Taking Over and Management) Act 1975.",
)
iface.launch(debug=True)
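When running on Colab or a remote machine, the local URL may not be reachable from your browser; Gradio's standard share option serves the app through a temporary public link instead:

iface.launch(debug=True, share=True)  # alternative launch with a public URL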