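"""Streamlit RAG demo.

Fetches PDF documents from GitHub, splits them into chunks, indexes them in a
FAISS vector store, and answers user questions with a Hugging Face
text-generation pipeline over the retrieved context.
"""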
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from transformers import pipeline
import streamlit as st
import requests
from io import BytesIO

# Set up the Hugging Face text-generation pipeline.
# NOTE: Llama Guard is a safety-classification model; for question answering,
# a general instruction-tuned chat model (e.g. meta-llama/Llama-3.1-8B-Instruct)
# is likely a better fit.
pipe = pipeline("text-generation", model="meta-llama/Llama-Guard-3-8B-INT8")

# Raw GitHub URLs for the source PDFs. The raw.githubusercontent.com form is
# required: plain /blob/ links return the HTML viewer page, not the PDF bytes.
PDF_URLS = [
    "https://raw.githubusercontent.com/TahirSher/GenAI_Lawyers_Guide/main/bi%20pat%20graphs.pdf",
    "https://raw.githubusercontent.com/TahirSher/GenAI_Lawyers_Guide/main/bi-partite.pdf",
    # Add more document links as needed
]

@st.cache_data
def fetch_pdf_text_from_github(urls):
    """Download each PDF and concatenate the extracted text of all pages."""
    text = ""
    for url in urls:
        response = requests.get(url, timeout=30)
        if response.status_code == 200:
            pdf_file = BytesIO(response.content)
            pdf_reader = PdfReader(pdf_file)
            for page in pdf_reader.pages:
                page_text = page.extract_text()
                if page_text:
                    text += page_text
        else:
            st.error(f"Failed to fetch PDF from URL: {url}")
    return text

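# st.cache_data memoizes the chunk list across Streamlit reruns, so the text
# splitter only re-runs when the extracted text actually changes.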
@st.cache_data
def get_text_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks

@st.cache_resource
def load_or_create_vector_store(text_chunks):
    # FAISS.get_default_embeddings() does not exist in LangChain; an embedding
    # model must be supplied. A small sentence-transformers model is assumed here.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    return vector_store

def generate_answer(user_question, context_text):
    # Format the retrieved context and the question as a chat-style message
    messages = [
        {"role": "user", "content": f"Context: {context_text}\nQuestion: {user_question}"}
    ]
    # max_new_tokens bounds only the generated reply; max_length would also count
    # the (long) context prompt and could cut the answer off entirely
    response = pipe(messages, max_new_tokens=250, do_sample=True)
    # With chat-style input, generated_text holds the conversation history;
    # the assistant's reply is the final message
    return response[0]["generated_text"][-1]["content"]

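# Retrieve the chunks most similar to the question and pass them to the
# generator as context (similarity_search returns the top 4 chunks by default).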
def user_input(user_question, vector_store):
    docs = vector_store.similarity_search(user_question)
    context_text = " ".join([doc.page_content for doc in docs])
    return generate_answer(user_question, context_text)

def main():
    st.set_page_config(page_title="RAG-based PDF Chat", layout="centered", page_icon="πŸ“„")
    st.title("πŸ“„ Query PDF Documents on GitHub")

    # Load and index documents from GitHub (cached across reruns)
    raw_text = fetch_pdf_text_from_github(PDF_URLS)
    if not raw_text:
        st.error("No text could be extracted from the configured PDFs.")
        st.stop()
    text_chunks = get_text_chunks(raw_text)
    vector_store = load_or_create_vector_store(text_chunks)

    # User question input
    user_question = st.text_input("Ask a Question:", placeholder="Type your question here...")

    if st.button("Get Response"):
        if not user_question:
            st.warning("Please enter a question before submitting.")
        else:
            with st.spinner("Generating response..."):
                answer = user_input(user_question, vector_store)
                st.markdown(f"**πŸ€– AI:** {answer}")

if __name__ == "__main__":
    main()
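
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py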