moriire committed on
Commit
4cb18b7
·
verified ·
1 Parent(s): 6f7d1cb

Update app/llm.py

Browse files
Files changed (1) hide show
  1. app/llm.py +3 -3
app/llm.py CHANGED
@@ -30,7 +30,7 @@ class ChatModel(BaseModel):
30
  mirostat_tau: float=4.0
31
  mirostat_eta: float=1.1
32
  llm_chat = llama_cpp.Llama.from_pretrained(
33
- repo_id="moriire/healthcare-ai-q4_k_m",
34
  filename="*.gguf",
35
  #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q4_k_m"),
36
  verbose=False,
@@ -39,8 +39,8 @@ llm_chat = llama_cpp.Llama.from_pretrained(
39
  #chat_format="llama-2"
40
  )
41
  llm_generate = llama_cpp.Llama.from_pretrained(
42
- repo_id="moriire/healthcare-ai-q8_0",
43
- filename="healthcare-ai-q8_0-unsloth.Q8_0.gguf",
44
  #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q8_0"),
45
  verbose=False,
46
  n_ctx=4096,
 
30
  mirostat_tau: float=4.0
31
  mirostat_eta: float=1.1
32
  llm_chat = llama_cpp.Llama.from_pretrained(
33
+ repo_id="moriire/healthcare-ai-q2_k",
34
  filename="*.gguf",
35
  #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q4_k_m"),
36
  verbose=False,
 
39
  #chat_format="llama-2"
40
  )
41
  llm_generate = llama_cpp.Llama.from_pretrained(
42
+ repo_id="moriire/healthcare-ai-q2_k",
43
+ filename="healthcare-ai-q2_k-unsloth.Q2_k.gguf",
44
  #tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("moriire/healthcare-ai-q8_0"),
45
  verbose=False,
46
  n_ctx=4096,