import os
from concurrent.futures import ThreadPoolExecutor

import pdfplumber
from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Load environment variables
load_dotenv()

# Set OpenAI API key
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError("API key not found. Please set the OPENAI_API_KEY environment variable.")
os.environ["OPENAI_API_KEY"] = api_key


def load_retrieval_qa_chain():
    # Load embeddings
    embeddings = OpenAIEmbeddings()

    # Load vector store
    vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)

    # Initialize ChatOpenAI model
    llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0)

    # Create ConversationalRetrievalChain
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        vectorstore.as_retriever(),
        return_source_documents=True,
    )
    return qa_chain


def extract_text_from_pdf(file_path):
    documents = []
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    with pdfplumber.open(file_path) as pdf:
        for page_num, page in enumerate(pdf.pages):
            text = page.extract_text()
            if text:
                # Split the page text into overlapping chunks
                chunks = text_splitter.split_text(text)
                for chunk in chunks:
                    doc = Document(
                        page_content=chunk,
                        metadata={"source": os.path.basename(file_path), "page": page_num + 1},
                    )
                    documents.append(doc)
    return documents


def embed_documents():
    embeddings = OpenAIEmbeddings()
    vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)

    pdf_files = [f for f in os.listdir("./documents") if f.endswith(".pdf")]

    documents = []
    # Extract text from all PDFs in parallel
    with ThreadPoolExecutor() as executor:
        results = executor.map(extract_text_from_pdf, [f"./documents/{pdf_file}" for pdf_file in pdf_files])
        for result in results:
            documents.extend(result)

    if documents:
        vectorstore.add_documents(documents)


def update_embeddings():
    embeddings = OpenAIEmbeddings()
    vectorstore = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)

    # Collect the source filenames already stored in the vector store.
    # (A similarity search with an empty query only returns the top-k hits,
    # so read every stored document's metadata with get() instead.)
    existing_files = set()
    for metadata in vectorstore.get(include=["metadatas"])["metadatas"]:
        existing_files.add(metadata["source"])

    pdf_files = [f for f in os.listdir("./documents") if f.endswith(".pdf")]
    new_files = [f for f in pdf_files if f not in existing_files]

    documents = []
    with ThreadPoolExecutor() as executor:
        results = executor.map(extract_text_from_pdf, [f"./documents/{pdf_file}" for pdf_file in new_files])
        for result in results:
            documents.extend(result)

    if documents:
        vectorstore.add_documents(documents)


# Generate an answer for a query
def get_answer(qa_chain, query, chat_history):
    # chat_history is a flat list [q1, a1, q2, a2, ...]; pair it into (question, answer) tuples
    formatted_history = [(q, a) for q, a in zip(chat_history[::2], chat_history[1::2])]
    response = qa_chain.invoke({"question": query, "chat_history": formatted_history})
    answer = response["answer"]
    source_docs = response.get("source_documents", [])
    source_texts = [
        f"{os.path.basename(doc.metadata['source'])} (Page {doc.metadata['page']})" for doc in source_docs
    ]
    return {"answer": answer, "sources": source_texts}


# Example usage
if __name__ == "__main__":
    update_embeddings()  # Embed any documents not yet in the vector store
    qa_chain = load_retrieval_qa_chain()
    question = """You are a RAG (Retrieval-Augmented Generation) based AI assistant. Answer user questions following these guidelines:
1. Use the retrieval results: analyze the provided search results and answer with the relevant information.
2. Maintain accuracy: verify the information, and state clearly when it is uncertain.
3. Respond concisely: answer the question directly and focus on the key points.
4. Suggest additional information: mention related additional information when available.
5. Consider ethics: maintain an objective and neutral stance.
6. Acknowledge limits: admit honestly when you cannot answer.
7. Keep the conversation going: continue the conversation naturally and suggest follow-up questions when needed.
Always aim to provide accurate and useful information."""
    response = get_answer(qa_chain, question, [])
    print(f"Question: {question}")
    print(f"Answer: {response['answer']}")
    print(f"Sources: {response['sources']}")
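

# --- Optional: interactive multi-turn usage ---
# A minimal REPL sketch built on get_answer(). The chat() helper and its
# "exit"/"quit" convention are illustrative additions, not part of the original
# script; it assumes chat_history is the flat [q1, a1, q2, a2, ...] list that
# get_answer() expects.
def chat():
    qa_chain = load_retrieval_qa_chain()
    chat_history = []
    while True:
        query = input("You: ")
        if query.strip().lower() in {"exit", "quit"}:
            break
        result = get_answer(qa_chain, query, chat_history)
        print(f"Bot: {result['answer']}")
        print(f"Sources: {result['sources']}")
        # Append this turn so the next question can use conversational context
        chat_history.extend([query, result["answer"]])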