Commit 807d3fc
1 Parent(s): bfb5920
Update app.py (#4)
- Update app.py (e7f77a8622e201f6165a1d1100dcd7ffc1543125)
Co-authored-by: Shivansh Mathur <ShivanshMathur007@users.noreply.huggingface.co>
app.py CHANGED
@@ -1,5 +1,4 @@
 from langchain.vectorstores import FAISS
-# from langchain.chains import ConversationalRetrievalChain
 from langchain.chains import RetrievalQA
 from langchain.llms import HuggingFaceHub
 import gradio as gr
@@ -9,36 +8,19 @@ from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent
 from langchain.document_loaders import PyPDFDirectoryLoader
 from langchain.document_loaders.csv_loader import CSVLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.memory import ConversationSummaryBufferMemory
 import io
 import contextlib
 
 
-embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-
+embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
 vector_store= FAISS.load_local("vector_db/", embeddings)
 
 repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1"
 llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.01, "max_new_tokens": 2048})
 
-
-
-
-# memory_key='chat_history',
-# max_token_limit=300,
-# return_messages=True)
-
-# retriever = vector_store.as_retriever(
-#     search_type="similarity",
-#     search_kwargs={"k": 10, "include_metadata": True})
-
-# qa = ConversationalRetrievalChain.from_llm(
-#     llm=llm,
-#     memory=memory,
-#     chain_type="stuff",
-#     retriever=retriever,
-#     return_source_documents=True,
-#     get_chat_history=lambda h : h,
-#     verbose=True)
+retriever = vector_store.as_retriever(
+    search_type="similarity",
+    search_kwargs={"k":3, "include_metadata": True})
 
 agent=create_csv_agent(llm,['data/Gretel_Data.csv','data/RAN_Data _T.csv'],verbose=True)
 
@@ -98,4 +80,5 @@ demo1=gr.ChatInterface(
 stop_btn="Stop",
 )
 demo2=gr.TabbedInterface([demo,demo1],["RAG","AGENT"], title='INCEDO', theme=gr.themes.Soft())
-demo2.launch(auth=("admin", "Sam&Clara"))
+demo2.launch(share=True,debug=True,auth=("admin", "Sam&Clara"))
+
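The hunks above drop the commented-out ConversationalRetrievalChain setup and add a live retriever, while the RetrievalQA import is kept. The rest of app.py is not shown in this diff, so the following is only a minimal sketch of how that retriever could feed a RetrievalQA chain; the names qa, result, and the example query are illustrative assumptions, not code from this commit.

# Sketch only (assumption): wiring the retriever added in this commit into the
# RetrievalQA chain whose import app.py keeps. `llm` and `retriever` are the
# objects defined in the hunk above; everything else here is illustrative.
from langchain.chains import RetrievalQA

qa = RetrievalQA.from_chain_type(
    llm=llm,                       # HuggingFaceHub Mixtral endpoint defined above
    chain_type="stuff",            # stuff the k=3 retrieved chunks into a single prompt
    retriever=retriever,           # similarity retriever over the FAISS "vector_db/" index
    return_source_documents=True,  # also return the chunks used for the answer
)

result = qa({"query": "What does the RAN data say about cell utilization?"})
print(result["result"])                                  # generated answer
print([d.metadata for d in result["source_documents"]])  # provenance of retrieved chunks

With return_source_documents=True, the chain returns a dict containing both the generated answer and the retrieved chunks, which is the kind of output a "RAG" tab would typically display.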