JUNGU committed on
Commit
413ca2d
β€’
1 Parent(s): 14bbe59

Update rag_system.py

Browse files
Files changed (1) hide show
  1. rag_system.py +23 -2
rag_system.py CHANGED
@@ -12,6 +12,7 @@ from langchain.retrievers import ContextualCompressionRetriever
12
  from langchain.retrievers.document_compressors import LLMChainExtractor
13
  from langgraph.graph import Graph
14
  from langchain_core.runnables import RunnablePassthrough, RunnableLambda
 
15
 
16
  # Load environment variables
17
  load_dotenv()
@@ -41,11 +42,31 @@ def load_retrieval_qa_chain():
41
  base_retriever=vectorstore.as_retriever()
42
  )
43
 
44
- # Create ConversationalRetrievalChain with the new retriever
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  qa_chain = ConversationalRetrievalChain.from_llm(
46
  llm,
47
  retriever=compression_retriever,
48
- return_source_documents=True
 
49
  )
50
 
51
  return qa_chain
 
12
  from langchain.retrievers.document_compressors import LLMChainExtractor
13
  from langgraph.graph import Graph
14
  from langchain_core.runnables import RunnablePassthrough, RunnableLambda
15
+ from langchain.prompts import PromptTemplate
16
 
17
  # Load environment variables
18
  load_dotenv()
 
42
  base_retriever=vectorstore.as_retriever()
43
  )
44
 
45
+ # Define your instruction/prompt
46
+ instruction = """당신은 RAG(Retrieval-Augmented Generation) 기반 AI μ–΄μ‹œμŠ€ν„΄νŠΈμž…λ‹ˆλ‹€. λ‹€μŒ 지침을 따라 μ‚¬μš©μž μ§ˆλ¬Έμ— λ‹΅ν•˜μ„Έμš”:
47
+
48
+ 1. 검색 κ²°κ³Ό ν™œμš©: 제곡된 검색 κ²°κ³Όλ₯Ό λΆ„μ„ν•˜κ³  κ΄€λ ¨ 정보λ₯Ό μ‚¬μš©ν•΄ λ‹΅λ³€ν•˜μ„Έμš”.
49
+ 2. μ •ν™•μ„± μœ μ§€: μ •λ³΄μ˜ 정확성을 ν™•μΈν•˜κ³ , λΆˆν™•μ‹€ν•œ 경우 이λ₯Ό λͺ…μ‹œν•˜μ„Έμš”.
50
+ 3. κ°„κ²°ν•œ 응닡: μ§ˆλ¬Έμ— 직접 λ‹΅ν•˜κ³  핡심 λ‚΄μš©μ— μ§‘μ€‘ν•˜μ„Έμš”.
51
+ 4. μΆ”κ°€ 정보 μ œμ•ˆ: κ΄€λ ¨λœ μΆ”κ°€ 정보가 μžˆλ‹€λ©΄ μ–ΈκΈ‰ν•˜μ„Έμš”.
52
+ 5. μœ€λ¦¬μ„± κ³ λ €: 객관적이고 쀑립적인 νƒœλ„λ₯Ό μœ μ§€ν•˜μ„Έμš”.
53
+ 6. ν•œκ³„ 인정: λ‹΅λ³€ν•  수 μ—†λŠ” 경우 μ†”μ§νžˆ μΈμ •ν•˜μ„Έμš”.
54
+ 7. λŒ€ν™” μœ μ§€: μžμ—°μŠ€λŸ½κ²Œ λŒ€ν™”λ₯Ό 이어가고, ν•„μš”μ‹œ 후속 μ§ˆλ¬Έμ„ μ œμ•ˆν•˜μ„Έμš”.
55
+
56
+ 항상 μ •ν™•ν•˜κ³  μœ μš©ν•œ 정보λ₯Ό μ œκ³΅ν•˜λŠ” 것을 λͺ©ν‘œλ‘œ ν•˜μ„Έμš”."""
57
+
58
+ # Create a prompt template
59
+ prompt_template = PromptTemplate(
60
+ input_variables=["context", "question"],
61
+ template=instruction + "\n\nContext: {context}\n\nQuestion: {question}\n\nAnswer:"
62
+ )
63
+
64
+ # Create ConversationalRetrievalChain with the new retriever and prompt
65
  qa_chain = ConversationalRetrievalChain.from_llm(
66
  llm,
67
  retriever=compression_retriever,
68
+ return_source_documents=True,
69
+ combine_docs_chain_kwargs={"prompt": prompt_template}
70
  )
71
 
72
  return qa_chain