Yadvendra committed
Commit d8d204a · verified · 1 Parent(s): ac0f728

Create app.py

Files changed (1): app.py +107 -0
app.py ADDED
@@ -0,0 +1,107 @@
import os

from dotenv import load_dotenv
import streamlit as st
from langchain_community.document_loaders import UnstructuredPDFLoader
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_groq import ChatGroq
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain


# load the environment variables (ChatGroq reads GROQ_API_KEY)
load_dotenv()

working_dir = os.path.dirname(os.path.abspath(__file__))


def load_document(file_path):
    # parse the PDF into LangChain Document objects
    loader = UnstructuredPDFLoader(file_path)
    documents = loader.load()
    return documents


def setup_vectorstore(documents):
    # split the documents into overlapping chunks and index them in FAISS
    embeddings = HuggingFaceEmbeddings()
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200
    )
    doc_chunks = text_splitter.split_documents(documents)
    vectorstore = FAISS.from_documents(doc_chunks, embeddings)
    return vectorstore


def create_chain(vectorstore):
    llm = ChatGroq(
        model="llama-3.1-70b-versatile",
        temperature=0
    )
    retriever = vectorstore.as_retriever()
    # buffer memory keeps the raw chat history; it does not need an LLM
    memory = ConversationBufferMemory(
        output_key="answer",
        memory_key="chat_history",
        return_messages=True
    )
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        chain_type="map_reduce",
        memory=memory,
        verbose=True
    )
    return chain


st.set_page_config(
    page_title="Chat with Doc",
    page_icon="📄",
    layout="centered"
)

st.title("🦙 Chat with Doc - LLAMA 3.1")

# initialize the chat history in the streamlit session state
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []


uploaded_file = st.file_uploader(label="Upload your pdf file", type=["pdf"])

if uploaded_file:
    # save the upload next to the script so UnstructuredPDFLoader can read it
    file_path = os.path.join(working_dir, uploaded_file.name)
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

    if "vectorstore" not in st.session_state:
        st.session_state.vectorstore = setup_vectorstore(load_document(file_path))

    if "conversation_chain" not in st.session_state:
        st.session_state.conversation_chain = create_chain(st.session_state.vectorstore)

# replay the conversation so far
for message in st.session_state.chat_history:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])


user_input = st.chat_input("Ask Llama...")

if user_input:
    st.session_state.chat_history.append({"role": "user", "content": user_input})

    with st.chat_message("user"):
        st.markdown(user_input)

    with st.chat_message("assistant"):
        response = st.session_state.conversation_chain({"question": user_input})
        assistant_response = response["answer"]
        st.markdown(assistant_response)
        st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
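Note: ChatGroq authenticates via the GROQ_API_KEY environment variable, which load_dotenv() pulls from a .env file placed next to app.py; the app is then started with streamlit run app.py. Based on the imports above, the environment likely needs streamlit, python-dotenv, langchain, langchain-community, langchain-groq, langchain-text-splitters, faiss-cpu, unstructured, and sentence-transformers installed, though the commit does not include a requirements file to confirm this.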