Update app.py
app.py
CHANGED
@@ -16,7 +16,8 @@ from langchain.vectorstores import Chroma
 
 ### 3. For Querying LLM
 # for loading HuggingFace LLM models from the hub
-from langchain.llms import HuggingFaceHub
+#from langchain.llms import HuggingFaceHub
+from langchain_community.llms import HuggingFaceEndpoint
 # for querying LLM conveniently using the context
 from langchain.chains.question_answering import load_qa_chain
 
@@ -42,8 +43,13 @@ chroma_db = Chroma(persist_directory='chromadb_earnings_transcripts_extracted/ch
 model = "mistralai/Mistral-7B-Instruct-v0.1"
 
 # This is an inference endpoint API from huggingface, the model is not run locally, it is run on huggingface
-hf_llm = HuggingFaceHub(repo_id=model,model_kwargs={'temperature':0.5,"max_new_tokens":300})
-
+# hf_llm = HuggingFaceHub(repo_id=model,model_kwargs={'temperature':0.5,"max_new_tokens":300})
+hf_llm = HuggingFaceEndpoint(
+    endpoint_url=model,
+    huggingfacehub_api_token=os.environ['HUGGINGFACEHUB_API_TOKEN'],
+    task="text-generation",
+    max_new_tokens=512
+)
 
 def source_question_answer(query:str,vectorstore:Chroma=chroma_db,llm:HuggingFaceHub=hf_llm):
     """
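
For context, a rough sketch (not part of the commit) of how the updated hf_llm plausibly gets used together with chroma_db inside source_question_answer: retrieve the closest transcript chunks from Chroma, then let a "stuff" QA chain pack them into a single prompt for the endpoint LLM. The helper name answer_with_sources, the k=4 retrieval depth, the "stuff" chain type, and the example query are illustrative assumptions, not values taken from app.py.

from langchain.chains.question_answering import load_qa_chain

# chroma_db and hf_llm are the objects built earlier in app.py.
def answer_with_sources(query, vectorstore=chroma_db, llm=hf_llm, k=4):
    # Fetch the k chunks whose embeddings are closest to the query (k=4 is an assumption).
    docs = vectorstore.similarity_search(query, k=k)
    # "stuff" concatenates all retrieved chunks into one prompt context for the LLM.
    chain = load_qa_chain(llm, chain_type="stuff")
    answer = chain.run(input_documents=docs, question=query)
    return answer, docs

# Hypothetical usage:
answer, sources = answer_with_sources("What revenue guidance did management give?")

One design note: the new code reads os.environ['HUGGINGFACEHUB_API_TOKEN'] directly, which raises KeyError at import time if the token is not configured as a Space secret; os.environ.get with an explicit error message would fail more readably when the secret is missing.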