vagrillo committed on
Commit
0787c0d
·
verified ·
1 Parent(s): 0669e01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -7,8 +7,10 @@ from ctransformers import AutoModelForCausalLM
7
 
8
  #llm = AutoModelForCausalLM.from_pretrained("Open-Orca/Mistral-7B-OpenOrca",gpu_layers=0, max_new_tokens = 1000, context_length = 10000)
9
 
10
- llm = AutoModelForCausalLM.from_pretrained("NousResearch/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 2048)
11
  # llm = AutoModelForCausalLM.from_pretrained("/home/ubuntu/.cache/gpt4all/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 6000)
 
 
12
 
13
 
14
 
 
7
 
8
  #llm = AutoModelForCausalLM.from_pretrained("Open-Orca/Mistral-7B-OpenOrca",gpu_layers=0, max_new_tokens = 1000, context_length = 10000)
9
 
10
+ #llm = AutoModelForCausalLM.from_pretrained("NousResearch/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 2048)
11
  # llm = AutoModelForCausalLM.from_pretrained("/home/ubuntu/.cache/gpt4all/Nous-Hermes-2-Mistral-7B-DPO.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 6000)
12
+ llm = AutoModelForCausalLM.from_pretrained("Meta-Llama-3-8B-Instruct.Q4_0.gguf",gpu_layers=0, max_new_tokens = 500, context_length = 6000)
13
+ #Meta-Llama-3-8B-Instruct.Q4_0.gguf
14
 
15
 
16