Update rag_system.py
rag_system.py  +23 -2
@@ -12,6 +12,7 @@ from langchain.retrievers import ContextualCompressionRetriever
 from langchain.retrievers.document_compressors import LLMChainExtractor
 from langgraph.graph import Graph
 from langchain_core.runnables import RunnablePassthrough, RunnableLambda
+from langchain.prompts import PromptTemplate
 
 # Load environment variables
 load_dotenv()
@@ -41,11 +42,31 @@ def load_retrieval_qa_chain():
         base_retriever=vectorstore.as_retriever()
     )
 
-    # Create ConversationalRetrievalChain with the new retriever
+    # Define your instruction/prompt
+    instruction = """You are a RAG (Retrieval-Augmented Generation) based AI assistant. Answer user questions according to the following guidelines:
+
+1. Use the search results: analyze the provided search results and answer using the relevant information.
+2. Maintain accuracy: verify the accuracy of the information, and state it clearly when something is uncertain.
+3. Respond concisely: answer the question directly and focus on the key points.
+4. Suggest additional information: mention related additional information if available.
+5. Consider ethics: maintain an objective and neutral stance.
+6. Acknowledge limitations: admit honestly when you cannot answer.
+7. Maintain the conversation: continue the dialogue naturally and suggest follow-up questions when needed.
+
+Always aim to provide accurate and useful information."""
+
+    # Create a prompt template
+    prompt_template = PromptTemplate(
+        input_variables=["context", "question"],
+        template=instruction + "\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer:"
+    )
+
+    # Create ConversationalRetrievalChain with the new retriever and prompt
     qa_chain = ConversationalRetrievalChain.from_llm(
         llm,
         retriever=compression_retriever,
-        return_source_documents=True
+        return_source_documents=True,
+        combine_docs_chain_kwargs={"prompt": prompt_template}
    )
 
    return qa_chain
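
For reference, the PromptTemplate added above simply concatenates the instruction with a context/question scaffold. A minimal sketch of how that template renders, with a shortened stand-in instruction and illustrative context/question values that are not part of this commit:

from langchain.prompts import PromptTemplate

# Shortened stand-in for the instruction string added in this commit.
instruction = "You are a RAG-based AI assistant. Answer from the retrieved context."

prompt_template = PromptTemplate(
    input_variables=["context", "question"],
    template=instruction + "\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer:"
)

# format() fills the declared variables and returns the final string that the
# combine-docs chain sends to the LLM for each question.
print(prompt_template.format(
    context="LangGraph builds stateful multi-step LLM workflows.",
    question="What is LangGraph used for?",
))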
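
A hedged usage sketch of the updated chain, assuming rag_system.py is importable and the rest of load_retrieval_qa_chain() (the llm, vectorstore, and compression retriever setup visible in the context lines) is configured as in the surrounding file:

from rag_system import load_retrieval_qa_chain

qa_chain = load_retrieval_qa_chain()

# ConversationalRetrievalChain expects the current question plus prior
# (human, ai) turns; an empty list starts a fresh conversation.
result = qa_chain({"question": "What does this project do?", "chat_history": []})

print(result["answer"])

# Because return_source_documents=True, the retrieved (and LLM-compressed)
# documents ride along in the result for citation or debugging.
for doc in result["source_documents"]:
    print(doc.metadata)

Note that combine_docs_chain_kwargs={"prompt": prompt_template} routes the custom prompt into the chain's internal combine-documents ("stuff") step, so the instruction applies when the answer is generated from the retrieved context, not during question condensing.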