amine-01 committed
Commit 62d725c · verified · 1 Parent(s): d2b6de9

Create app.py

Files changed (1)
  1. app.py +123 -0
app.py ADDED
@@ -0,0 +1,123 @@
+ import streamlit as st
+ from config.globals import SPEAKER_TYPES, initial_prompt
+ from langchain.prompts import PromptTemplate
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import Chroma
+ from langchain_community.embeddings import SentenceTransformerEmbeddings
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_core.documents import Document
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.runnables import RunnablePassthrough
+ from dotenv import load_dotenv
+ import PyPDF2
+ import os
+ import io
+ 
+ # --- Your RAG chatbot logic ---
+ load_dotenv()  # pull GOOGLE_API_KEY from a local .env file
+ source_data_folder = "MyData"
+ text_splitter = RecursiveCharacterTextSplitter(
+     separators=["\n\n", "\n", ". ", " ", ""],
+     chunk_size=2000,
+     chunk_overlap=200,
+ )
+ embeddings_model = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+ path_db = "/content/VectorDB"  # Colab-style path; adjust for local runs
+ # Read the key from the environment instead of hard-coding a credential in source
+ llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro", google_api_key=os.getenv("GOOGLE_API_KEY"))
+ 
+ # --- Streamlit app starts here ---
+ # Set up the Streamlit app configuration
+ st.set_page_config(
+     page_title="Gemini Pro RAG App",
+     page_icon="🔍",
+     layout="wide",
+     initial_sidebar_state="expanded",
+ )
+ 
+ # Initialize session state for chat history and vectorstore (PDF context)
+ if 'chat_history' not in st.session_state:
+     st.session_state.chat_history = [initial_prompt]
+ if 'vectorstore' not in st.session_state:
+     st.session_state.vectorstore = None
+ 
+ # Function to clear chat history
+ def clear_chat_history():
+     st.session_state.chat_history = [initial_prompt]
+ 
+ # Extract text from PDF
+ def extract_text_from_pdf(pdf_bytes):
+     pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_bytes))
+     text = ""
+     for page in pdf_reader.pages:
+         text += page.extract_text() or ""  # extract_text() can return None on image-only pages
+     return text
+ 
+ # Initialize vectorstore
+ def initialize_vector_index(text):
+     # split_documents expects Document objects, not plain dicts
+     docs = [Document(page_content=text)]
+     splits = text_splitter.split_documents(docs)
+     vectorstore = Chroma.from_documents(documents=splits, embedding=embeddings_model, persist_directory=path_db)
+     return vectorstore
+ 
+ # Sidebar configuration
+ with st.sidebar:
+     st.title('🔍 Gemini RAG Chatbot')
+     st.write('This chatbot uses the Gemini Pro API with RAG capabilities.')
+     st.button('Clear Chat History', on_click=clear_chat_history, type='primary')
+     uploaded_file = st.file_uploader("Upload a PDF file", type=["pdf"], help="Upload your PDF file here to start the analysis.")
+     if uploaded_file is not None:
+         st.success("PDF File Uploaded Successfully!")
+         text = extract_text_from_pdf(uploaded_file.read())
+         vectorstore = initialize_vector_index(text)
+         st.session_state.vectorstore = vectorstore
+ 
+ # Main interface
+ st.header('Gemini Pro RAG Chatbot')
+ st.subheader('Upload a PDF and ask questions about its content!')
+ 
+ # Display the welcome prompt if chat history is only the initial prompt
+ if len(st.session_state.chat_history) == 1:
+     with st.chat_message(SPEAKER_TYPES.BOT, avatar="🔍"):
+         st.write(initial_prompt['content'])
+ 
+ # Get user input
+ prompt = st.chat_input("Ask a question about the PDF content:", key="user_input")
+ 
+ # Prompt template and document formatter for the RAG chain
+ rag_prompt = PromptTemplate.from_template(
+     "Answer the question using only the context below.\n\n"
+     "Context:\n{context}\n\nQuestion: {question}"
+ )
+ 
+ def format_docs(docs):
+     return "\n\n".join(doc.page_content for doc in docs)
+ 
+ # Function to get a response from RAG chain
+ def get_rag_response(question):
+     retriever = st.session_state.vectorstore.as_retriever()  # Use the stored vectorstore retriever
+     rag_chain = (
+         {"context": retriever | format_docs, "question": RunnablePassthrough()}
+         | rag_prompt  # the prompt template, not the raw user string
+         | llm
+         | StrOutputParser()
+     )
+     return rag_chain.invoke(question)
+ 
+ # Handle the user prompt and generate response
+ if prompt:
+     # Add user prompt to chat history
+     st.session_state.chat_history.append({'role': SPEAKER_TYPES.USER, 'content': prompt})
+ 
+     # Display chat messages from the chat history
+     for message in st.session_state.chat_history[1:]:
+         with st.chat_message(message["role"], avatar="👀" if message['role'] == SPEAKER_TYPES.USER else "🔍"):
+             st.write(message["content"])
+ 
+     # Get the response using the RAG chain (only once a PDF has been indexed)
+     if st.session_state.vectorstore is None:
+         response_text = "Please upload a PDF first so there is context to search."
+     else:
+         with st.spinner(text='Generating response...'):
+             response_text = get_rag_response(prompt)
+     st.session_state.chat_history.append({'role': SPEAKER_TYPES.BOT, 'content': response_text})
+ 
+     # Display the bot response
+     with st.chat_message(SPEAKER_TYPES.BOT, avatar="🔍"):
+         st.write(response_text)
+ 
+ # Add footer for additional information or credits
+ st.markdown("""
+ <hr>
+ <div style="text-align: center;">
+     <small>Powered by Gemini Pro API | Developed by Christian Thomas BADOLO</small>
+ </div>
+ """, unsafe_allow_html=True)