import os
from uuid import uuid4

import streamlit as st

from llm import load_llm, response_generator
from vector_store import load_vector_store, process_pdf

# Smaller model for quick local testing:
# repo_id = "Qwen/Qwen2.5-0.5B-Instruct-GGUF"
# filename = "qwen2.5-0.5b-instruct-q8_0.gguf"
repo_id = "Qwen/Qwen2.5-3B-Instruct-GGUF"
filename = "qwen2.5-3b-instruct-q5_k_m.gguf"

llm = load_llm(repo_id, filename)

st.title("PDF QA")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun. Render every message with
# st.markdown so history matches how the live response is displayed below.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("Ask a question about your PDFs"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        vector_store = load_vector_store()
        retriever = vector_store.as_retriever()
        # response_generator performs the retrieval itself via the retriever,
        # so there is no need to fetch documents separately here.
        response = response_generator(llm, st.session_state.messages, prompt, retriever)
        st.markdown(response["answer"])
    # Add assistant response to chat history
    st.session_state.messages.append(
        {"role": "assistant", "content": response["answer"]}
    )

with st.sidebar:
    st.title("PDFs")
    st.write("Upload your PDFs here")
    uploaded_files = st.file_uploader(
        "Choose a PDF file", accept_multiple_files=True, type="pdf"
    )
    # With accept_multiple_files=True, file_uploader returns a (possibly
    # empty) list, so test truthiness rather than `is not None`; otherwise
    # this branch would run on every rerun even with nothing uploaded.
    if uploaded_files:
        vector_store = load_vector_store()
        for uploaded_file in uploaded_files:
            # Write each upload to a uniquely named temp file so process_pdf
            # can read it from disk.
            temp_dir = "./temp"
            os.makedirs(temp_dir, exist_ok=True)
            temp_file = os.path.join(temp_dir, f"{uploaded_file.name}-{uuid4()}.pdf")
            with open(temp_file, "wb") as file:
                file.write(uploaded_file.getvalue())
            st.write("filename:", uploaded_file.name)
            process_pdf(temp_file, vector_store)
        st.success("PDFs uploaded successfully. ✅")
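
# ---------------------------------------------------------------------------
# Reference sketch of the two helper modules imported above. llm.py and
# vector_store.py are not part of this file; the bodies below are an
# assumption, shown only so the app reads end to end. The sketch assumes
# llama-cpp-python for the GGUF model and LangChain + Chroma with HuggingFace
# embeddings for retrieval; the real helpers may be implemented differently.
#
# llm.py (assumed):
#
#     from llama_cpp import Llama
#
#     def load_llm(repo_id: str, filename: str) -> Llama:
#         # Download the GGUF file from the Hugging Face Hub and load it.
#         return Llama.from_pretrained(repo_id=repo_id, filename=filename, n_ctx=4096)
#
#     def response_generator(llm, messages, prompt, retriever) -> dict:
#         # Retrieve context for the question and answer with it inlined.
#         context = "\n\n".join(d.page_content for d in retriever.invoke(prompt))
#         chat = [
#             {"role": "system",
#              "content": f"Answer using only this context:\n{context}"},
#             {"role": "user", "content": prompt},
#         ]
#         out = llm.create_chat_completion(messages=chat)
#         return {"answer": out["choices"][0]["message"]["content"]}
#
# vector_store.py (assumed):
#
#     from langchain_chroma import Chroma
#     from langchain_community.document_loaders import PyPDFLoader
#     from langchain_huggingface import HuggingFaceEmbeddings
#     from langchain_text_splitters import RecursiveCharacterTextSplitter
#
#     def load_vector_store() -> Chroma:
#         # Persist to disk so uploaded PDFs survive Streamlit reruns.
#         embeddings = HuggingFaceEmbeddings(
#             model_name="sentence-transformers/all-MiniLM-L6-v2"
#         )
#         return Chroma(
#             collection_name="pdfs",
#             embedding_function=embeddings,
#             persist_directory="./chroma_db",
#         )
#
#     def process_pdf(path: str, vector_store: Chroma) -> None:
#         # Load the PDF page by page, chunk it, and index the chunks.
#         pages = PyPDFLoader(path).load()
#         splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
#         vector_store.add_documents(splitter.split_documents(pages))
# ---------------------------------------------------------------------------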