vagrillo committed
Commit 3a655b5 · verified · 1 Parent(s): e13c97e

Update app.py

Files changed (1): app.py +4 -1
app.py CHANGED
@@ -7,7 +7,10 @@ from ctransformers import AutoModelForCausalLM
 
 #llm = AutoModelForCausalLM.from_pretrained("Open-Orca/Mistral-7B-OpenOrca",gpu_layers=0, max_new_tokens = 1000, context_length = 10000)
 
-llm = AutoModelForCausalLM.from_pretrained("NousResearch/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, cotext_length = 2048)
+#llm = AutoModelForCausalLM.from_pretrained("NousResearch/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, cotext_length = 2048)
+
+llm = AutoModelForCausalLM.from_pretrained("NousResearch/Nous-Hermes-2-Mistral-7B-DPO",gpu_layers=0, max_new_tokens = 500, cotext_length = 2048)
+
 # llm = AutoModelForCausalLM.from_pretrained("/home/ubuntu/.cache/gpt4all/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 6000)
 #llm = AutoModelForCausalLM.from_pretrained("Meta-Llama-3-8B-Instruct.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 6000)
 #Meta-Llama-3-8B-Instruct.Q4_0.gguf
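
For reference, a minimal sketch of the loading pattern this commit moves toward, written against the ctransformers API. Two caveats: both the removed and the newly active calls spell the kwarg cotext_length, while the parameter ctransformers documents is context_length, which the sketch uses; and the "-GGUF" repo id plus the model_file and model_type values below are assumptions for illustration, not taken from this commit.

from ctransformers import AutoModelForCausalLM

# CPU-only load of a quantized GGUF build (gpu_layers=0, as in the commit).
# Repo id and file name are assumed here; the commit itself points at
# "NousResearch/Nous-Hermes-2-Mistral-7B-DPO" with no model file.
llm = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Nous-Hermes-2-Mistral-7B-DPO-GGUF",     # assumed GGUF repo
    model_file="Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",  # assumed quant file
    model_type="mistral",     # architecture hint for the ctransformers backend
    gpu_layers=0,
    max_new_tokens=500,
    context_length=2048,      # documented spelling of the misspelled kwarg
)

# The loaded model is callable and returns generated text.
print(llm("Summarize what a context window is in one sentence."))

Note that ctransformers only runs GGML/GGUF weights, so pointing from_pretrained at a plain repo id, as the new active line does, resolves only if that repo actually ships such a file.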