Rahatara committed on
Commit 56a2342 · verified · 1 Parent(s): 2da429d

Create app.py

Files changed (1)
  1. app.py +93 -0
app.py ADDED
@@ -0,0 +1,93 @@
+ import os
+ import gradio as gr
+ import fitz  # PyMuPDF
+ from sentence_transformers import SentenceTransformer
+ import numpy as np
+ import faiss
+ from typing import List, Tuple, Dict
+
+ # Holds the app's state: extracted pages, their embeddings, and the FAISS index
+ class MyApp:
+     def __init__(self) -> None:
+         self.documents = []
+         self.embeddings = None
+         self.index = None
+         self.model = SentenceTransformer('all-MiniLM-L6-v2')
+
+     def load_pdfs(self, files: List[gr.File]) -> str:
+         """Extracts text from multiple PDF files and stores it page by page."""
+         self.documents = []
+         for file in files:
+             # Gradio may pass a tempfile wrapper (with .name) or a plain path string
+             path = getattr(file, "name", file)
+             doc = fitz.open(path)
+             for page_num in range(len(doc)):
+                 page = doc[page_num]
+                 text = page.get_text()
+                 self.documents.append({
+                     "file_name": os.path.basename(path),
+                     "page": page_num + 1,
+                     "content": text
+                 })
+             doc.close()
+         return f"Processed {len(files)} PDFs successfully!"
+
+     def build_vector_db(self) -> str:
+         """Builds a vector database using the content of the PDFs."""
+         if not self.documents:
+             return "No documents to process."
+         contents = [doc["content"] for doc in self.documents]
+         self.embeddings = self.model.encode(contents, show_progress_bar=True)
+         self.index = faiss.IndexFlatL2(self.embeddings.shape[1])
+         self.index.add(np.array(self.embeddings))
+         return "Vector database built successfully!"
+
+     def search_documents(self, query: str, k: int = 3) -> List[Dict]:
+         """Searches for the most relevant document pages using vector similarity."""
+         if self.index is None:
+             return [{"content": "Vector database is not built."}]
+         query_embedding = self.model.encode([query], show_progress_bar=False)
+         # FAISS pads results with -1 when k exceeds the number of stored vectors, so clamp and filter
+         k = min(k, len(self.documents))
+         D, I = self.index.search(np.array(query_embedding), k)
+         results = [self.documents[i] for i in I[0] if i != -1]
+         return results if results else [{"content": "No relevant documents found."}]
+
+ app = MyApp()
+
+ def upload_files(files: List[gr.File]) -> str:
+     return app.load_pdfs(files)
+
+ def build_vector_db() -> str:
+     return app.build_vector_db()
+
+ def respond(message: str, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], str]:
+     history = history or []  # the chatbot may pass None before the first turn
+     # Retrieve the most relevant pages for the query
+     retrieved_docs = app.search_documents(message)
+     # Use .get() so status entries without file_name/page don't raise KeyError
+     context = "\n".join(
+         [f"File: {doc.get('file_name', 'N/A')}, Page: {doc.get('page', 'N/A')}\n{doc['content']}" for doc in retrieved_docs]
+     )
+
+     # Generate response (placeholder for actual model inference)
+     response_content = f"Simulated response based on the following context:\n{context}"
+
+     # Append the message and generated response to the chat history
+     history.append((message, response_content))
+     return history, ""
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# PDF Chatbot")
+     gr.Markdown("Upload your PDFs, build a vector database, and start querying your documents.")
+
+     with gr.Row():
+         with gr.Column():
+             upload_btn = gr.File(label="Upload PDFs", file_types=[".pdf"], file_count="multiple")
+             upload_message = gr.Textbox(label="Upload Status", lines=2)
+             build_db_btn = gr.Button("Build Vector Database")
+             db_message = gr.Textbox(label="DB Build Status", lines=2)
+
+             upload_btn.change(upload_files, inputs=[upload_btn], outputs=[upload_message])
+             build_db_btn.click(build_vector_db, inputs=[], outputs=[db_message])
+
+         with gr.Column():
+             chatbot = gr.Chatbot(label="Chat Responses")
+             query_input = gr.Textbox(label="Enter your query here")
+             submit_btn = gr.Button("Submit")
+             submit_btn.click(respond, inputs=[query_input, chatbot], outputs=[chatbot, query_input])
+
+ demo.launch()
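
For reference, a minimal standalone sketch of the embed-index-search pattern the file relies on, using toy strings instead of PDF pages. It assumes sentence-transformers, faiss-cpu, and numpy are installed and that the all-MiniLM-L6-v2 model can be downloaded; it is an illustration of the pattern, not part of the committed app.

import numpy as np
import faiss
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")
docs = [
    "PyMuPDF extracts text from PDF pages.",
    "FAISS performs exact L2 search with IndexFlatL2.",
    "Gradio Blocks wires UI components to Python callbacks.",
]

# Encode the documents and add them to an exact L2 index, as build_vector_db() does.
embeddings = model.encode(docs)
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(np.asarray(embeddings, dtype="float32"))

# Embed the query and retrieve the two nearest documents, as search_documents() does.
query_embedding = model.encode(["How is text pulled out of a PDF?"])
D, I = index.search(np.asarray(query_embedding, dtype="float32"), 2)
print([docs[i] for i in I[0]])

If this runs as a Space, a requirements.txt listing gradio, PyMuPDF, sentence-transformers, faiss-cpu, and numpy would also be needed (an assumption based on the imports above; no such file is part of this commit).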