vidhiparikh committed · Commit 436241a
1 Parent(s): 5e1713a
Update app.py
app.py CHANGED
@@ -13,6 +13,8 @@ from sentence_transformers import SentenceTransformer, util
 from langchain.callbacks.manager import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 
+from ctransformers import AutoModelForCausalLM
+
 # Customized file paths
 pdf_files = ["CV_Vidhi_Parikh.pdf"]
 
@@ -73,7 +75,8 @@ callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
 # Function to create a conversational chain
 def create_conversational_chain(database):
     llama_llm = LlamaCpp(
-
+        # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
+        llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama-2-7b-Chat-GGUF", model_file="llama-2-7b-chat.q4_K_M.gguf", model_type="llama", gpu_layers=0),
         temperature=0.75,
         max_tokens=200,
         top_p=1,
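A note on the change: as committed, the ctransformers model object is passed into LangChain's LlamaCpp wrapper as an llm= keyword argument. LlamaCpp loads a GGUF file itself via model_path and, to my knowledge, accepts no llm parameter, so this call would likely fail validation; the two loaders are alternatives rather than composable. Below is a minimal sketch of using the ctransformers loader on its own, assuming the repo and model file named in the commit are available on the Hugging Face Hub; the prompt string is a placeholder.

# Minimal sketch: load the GGUF model with ctransformers alone, outside
# LangChain. gpu_layers=0 keeps inference on the CPU, matching the
# comment in the commit; raise it to offload layers to a GPU.
from ctransformers import AutoModelForCausalLM

llm = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Llama-2-7b-Chat-GGUF",
    model_file="llama-2-7b-chat.q4_K_M.gguf",
    model_type="llama",
    gpu_layers=0,
)

# The returned model object is callable and returns the generated text.
# The sampling settings mirror those in the diff; the prompt is illustrative.
print(llm("Summarize this CV in one sentence:", max_new_tokens=200, temperature=0.75, top_p=1.0))

If the goal is to keep the model inside a LangChain chain, LangChain also ships a CTransformers wrapper (from langchain.llms import CTransformers) that accepts the same model, model_file, and model_type arguments, which avoids mixing it with LlamaCpp.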