import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Local cache directory for Hugging Face downloads
hf_cache_folder = './huggingface_cache/'
os.makedirs(hf_cache_folder, exist_ok=True)

# Prefer the GPU when one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Pre-quantized 4-bit Command R checkpoint (loading it requires bitsandbytes);
# device_map="auto" places the model weights across the available devices
model_id = "CohereForAI/c4ai-command-r-v01-4bit"
tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=hf_cache_folder)
model = AutoModelForCausalLM.from_pretrained(model_id, cache_dir=hf_cache_folder, device_map="auto")
# The chat template renders a single user turn as:
# <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
async def get_answer_from_llm(question: str) -> str:
    # Format the message with the command-r chat template
    messages = [{"role": "user", "content": question}]
    input_ids = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to(device)  # move the prompt tensor to the same device as the model
    gen_tokens = model.generate(
        input_ids,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.3,
    )
    # Decode the full sequence, including the prompt and special tokens
    gen_text = tokenizer.decode(gen_tokens[0])
    return gen_text
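
# Minimal usage sketch (an assumption, not part of the original file):
# since get_answer_from_llm is a coroutine, it can be driven with asyncio
# when the script is run directly.
if __name__ == "__main__":
    import asyncio

    answer = asyncio.run(get_answer_from_llm("Hello, how are you?"))
    print(answer)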