Mattral committed
Commit
7179cc3
1 Parent(s): fe7df4f

Update app.py

Files changed (1)
  1. app.py +3 -1
app.py CHANGED
@@ -6,6 +6,7 @@ from PyPDF2 import PdfReader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.callbacks.manager import CallbackManager
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+
 from langchain.vectorstores import Qdrant
 from qdrant_client.http import models
 from ctransformers import AutoModelForCausalLM
@@ -18,7 +19,7 @@ print("Embedding model loaded...")
 callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
 
 llm = AutoModelForCausalLM.from_pretrained(
-    "refuelai/Llama-3-Refueled",
+    "TheBloke/Llama-2-7B-Chat-GGUF",
     model_file="llama-2-7b-chat.Q3_K_S.gguf",
     model_type="llama",
     temperature=0.2,
@@ -28,6 +29,7 @@ llm = AutoModelForCausalLM.from_pretrained(
 
 print("LLM loaded...")
 
+
 def chat(files, question):
     def get_chunks(text):
         text_splitter = RecursiveCharacterTextSplitter(
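
Why this change matters: ctransformers downloads the file named by model_file from the given Hub repo, so the repo id must actually host that GGUF file. TheBloke/Llama-2-7B-Chat-GGUF does; the Llama-3 repo it replaces does not ship llama-2-7b-chat.Q3_K_S.gguf. A minimal sketch of the corrected load call (the prompt and max_new_tokens below are illustrative, not from app.py):

    from ctransformers import AutoModelForCausalLM

    # Fetch the quantized GGUF weights from the Hub repo that hosts them.
    llm = AutoModelForCausalLM.from_pretrained(
        "TheBloke/Llama-2-7B-Chat-GGUF",
        model_file="llama-2-7b-chat.Q3_K_S.gguf",
        model_type="llama",   # backend architecture hint
        temperature=0.2,      # sampling temperature, as kept in this commit
    )

    # A ctransformers model is callable: prompt in, completion out.
    # This prompt is an illustrative example, not part of the commit.
    print(llm("Summarize what a vector store does.", max_new_tokens=64))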
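
The hunk tail also shows chat() chunking extracted PDF text with LangChain's RecursiveCharacterTextSplitter, but the diff cuts off before the splitter's arguments. A sketch of how get_chunks plausibly completes (chunk_size and chunk_overlap are assumed values, not visible in this diff):

    from langchain.text_splitter import RecursiveCharacterTextSplitter

    def get_chunks(text):
        # chunk_size/chunk_overlap are assumptions for illustration; the
        # diff truncates before the real arguments in app.py.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=100,
        )
        return text_splitter.split_text(text)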