Update app.py
app.py CHANGED
@@ -30,7 +30,6 @@ css = """
 def respond(
     message,
     history: list[tuple[str, str]],
-    system_message,
     max_tokens,
     temperature,
     top_p,
@@ -42,7 +41,13 @@ def respond(
     from llama_cpp_agent.providers import LlamaCppPythonProvider
     from llama_cpp_agent.chat_history import BasicChatHistory
     from llama_cpp_agent.chat_history.messages import Roles
-
+    print(message)
+    print(history)
+    print(max_tokens)
+    print(temperature)
+    print(top_p)
+    print(model)
+
     llm = Llama(
         model_path=f"models/{model}",
         n_gpu_layers=81,
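Pieced together, the net effect of the two hunks on respond() reads roughly as follows. This is a sketch reconstructed from the diff context only: the position of the model parameter in the signature, the top-level from llama_cpp import Llama, and any Llama() arguments beyond model_path and n_gpu_layers are assumptions that this commit does not show.

from llama_cpp import Llama  # assumed top-level import; not visible in this diff


def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens,
    temperature,
    top_p,
    model,  # assumed parameter: `model` is printed and used below but its declaration is outside the hunks
):
    # Imports kept inside the function, as shown in the diff context
    from llama_cpp_agent.providers import LlamaCppPythonProvider
    from llama_cpp_agent.chat_history import BasicChatHistory
    from llama_cpp_agent.chat_history.messages import Roles

    # Debug prints added by this commit
    print(message)
    print(history)
    print(max_tokens)
    print(temperature)
    print(top_p)
    print(model)

    llm = Llama(
        model_path=f"models/{model}",
        n_gpu_layers=81,
        # ... remaining constructor arguments unchanged by this commit
    )
    # ... rest of respond() unchanged by this commit

Note that the commit drops system_message from the signature while adding the prints, so any caller passing positional arguments would now pass max_tokens where system_message used to be.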