Update app.py
app.py (changed)
@@ -25,7 +25,7 @@ llm_chat = llama_cpp.Llama.from_pretrained(
     verbose=False,
     n_ctx=1024,
     n_gpu_layers=0,
-    chat_format="llama-2"
+    #chat_format="llama-2"
 )
 llm_generate = llama_cpp.Llama.from_pretrained(
     repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
@@ -36,8 +36,8 @@ llm_generate = llama_cpp.Llama.from_pretrained(
     n_gpu_layers=0,
     mirostat_mode=2,
     mirostat_tau=4.0,
-    mirostat_eta=1.1,
-    chat_format="llama-2"
+    mirostat_eta=1.1
+    #chat_format="llama-2"
 )
 # Logger setup
 logging.basicConfig(level=logging.INFO)
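With chat_format commented out, llama-cpp-python no longer forces the llama-2 template onto the Qwen model and instead falls back to the chat template stored in the GGUF metadata when one is present. A minimal standalone sketch of loading the reconfigured generate model follows; the filename glob and the prompt are illustrative assumptions, not values taken from app.py.

import llama_cpp

# Sketch: load the Qwen chat model without an explicit chat_format so the
# template embedded in the GGUF metadata is used when available.
# The filename glob is an assumption; substitute whichever .gguf file app.py uses.
llm = llama_cpp.Llama.from_pretrained(
    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
    filename="*q4_0.gguf",
    verbose=False,
    n_ctx=1024,
    n_gpu_layers=0,
)

# create_chat_completion formats the messages with the detected chat template.
response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response["choices"][0]["message"]["content"])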