Model Card for vdpappu/lora_medicalqa
A LoRA adapter fine-tuned from Gemma-2b on science Q&A.
- Developed by: Venkat
How to Get Started with the Model
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from peft import PeftModel
from typing import Optional
import os
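# Build a Question/Answer prompt, optionally prefixed with an instruction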
def generate_prompt(input_text: str, instruction: Optional[str] = None) -> str:
text = f"### Question: {input_text}\n\n### Answer: "
if instruction:
text = f"### Instruction: {instruction}\n\n{text}"
return text
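# Load the base model and tokenizer; google/gemma-2b is gated, so a Hugging Face token is read from the environment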
huggingface_token = os.environ.get('HUGGINGFACE_TOKEN')
base_model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", token=huggingface_token)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b", token=huggingface_token)
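# Attach the LoRA adapter and merge its weights into the base model for standalone inference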
lora_model = PeftModel.from_pretrained(base_model, "vdpappu/lora_medicalqa")
merged_model = lora_model.merge_and_unload()
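# Sampling-based decoding settings; generation stops at the <eos> token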
eos_token = '<eos>'
eos_token_id = tokenizer.encode(eos_token, add_special_tokens=False)[-1]
generation_config = GenerationConfig(
eos_token_id=tokenizer.eos_token_id,
min_length=5,
max_length=200,
do_sample=True,
temperature=0.7,
top_p=0.9,
top_k=50,
repetition_penalty=1.5,
no_repeat_ngram_size=3,
early_stopping=True
)
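# Example instruction and patient question used to build the prompt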
instruction = "If you are a doctor, please answer the medical questions based on the patient's description."
question = """I am a 40 year old female and have have some mildly high blood pressure readings over the past couple years.
I am not over weight and fairly physically active. My reading can vary quite a bit but my systolic is usually 120-135,
my diastolic can be around 84 up to 95 at times. I read and have gotten some conflicting recommendations if I need BP meds,
it seems that systolic is the number of more concern, is this correct or is that just for older adults?
Since I am young I would rather not be on BP meds if I do not have to. Are any supplements recommended besides reducing salt, diet, exercise,
all these things I have already done. Thank you for your answer!"""
prompt = generate_prompt(input_text=question)
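# Tokenize the prompt and generate a response without tracking gradients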
with torch.no_grad():
    inputs = tokenizer(prompt, return_tensors="pt")
    output = merged_model.generate(**inputs, generation_config=generation_config)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    print(response)
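The decoded output echoes the full prompt before the answer. A minimal sketch for keeping only the generated text, assuming the "### Answer:" marker produced by generate_prompt above:

answer = response.split("### Answer:")[-1].strip()  # drop the echoed prompt, keep only the generated answer
print(answer)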
Framework versions
- PEFT 0.12.0
Model tree for vdpappu/lora_medicalqa
- Base model: google/gemma-2b