LovnishVerma's picture
Update app.py
f235b5d
raw
history blame
304 Bytes
from transformers import LlamaForCausalLM, LlamaTokenizer

# Hugging Face Hub id of the fine-tuned Llama-2 7B model to load.
model_name = "mlabonne/llama-2-7b-guanaco"


def _load_model_and_tokenizer(name: str):
    """Load the causal-LM weights and tokenizer for the Hub model *name*.

    Downloads the checkpoint on first use (cached afterwards) and returns
    a ``(model, tokenizer)`` pair ready for generation.
    """
    model = LlamaForCausalLM.from_pretrained(name)
    tokenizer = LlamaTokenizer.from_pretrained(name)
    return model, tokenizer


# Load at import time so the module exposes ready-to-use `model` and
# `tokenizer` names, matching the original script's behavior.
# NOTE(review): a 7B model in fp32 needs roughly 28 GB of RAM — consider
# passing torch_dtype/device_map to from_pretrained if memory is tight.
model, tokenizer = _load_model_and_tokenizer(model_name)
# Now, you can use the model and tokenizer as needed