Update prompt
utils/llm_generation.py (+30, -16)
@@ -3,6 +3,7 @@ from langchain.prompts import (
     SystemMessagePromptTemplate,
     HumanMessagePromptTemplate,
 )
+from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
 from langchain_community.chat_models import ChatOpenAI
 from langchain.chains import LLMChain
 from langchain.schema import Document
@@ -38,31 +39,43 @@ class LLMGeneration:
             },
         )
 
-        self.…
+        self.create_initial_prompt()
 
-    def …
+    def create_initial_prompt(self):
         # System message for the chain
-        system_message = (
-            …
-            …
-            …
-            …
-            …
+        system_message = SystemMessage(
+            content=(
+                "You are a helpful assistant. Use ONLY the provided context to answer the user's question mentioned in the <question> tag. "
+                "Answer the following question in a structured JSON format. "
+                "Adhere to the JSON schema strictly. Make sure the answer is an exact match. "
+                """If you do not find the answer in the provided contexts mentioned in the <context> tag, return '{"answer": "Data Not Available"}'."""
+            )
         )
 
+        few_shots = [
+            HumanMessage(
+                content="<context>John traveled to Paris last summer. He stayed at a small boutique hotel and visited the Louvre museum.</context>"
+                "<question>Where did John travel?</question>"
+            ),
+            AIMessage(
+                content="""{"answer": "Paris"}"""
+            ),
+        ]
+
+        self.initial_prompt_messages = [system_message] + few_shots
+
     def create_human_message_prompt(self, query: str, docs: List[Document]):
 
         # Prepare the context from the retrieved chunks
-        context = "\n\n".join(
+        context = "\n\n".join(
+            [f"<context>{doc.page_content}</context>" for doc in docs]
+        )
 
         # Human message, instructing the assistant to use the context and produce a structured JSON answer
-        human_message = f"""
-        …
+        human_message = f"""
         {context}
 
-        …
-        …
-        Question: {query}"""
+        <question>{query}</question>"""
 
         return HumanMessagePromptTemplate.from_template(human_message)
 
@@ -70,7 +83,8 @@ class LLMGeneration:
 
         # Create the prompt template
         prompt = ChatPromptTemplate.from_messages(
-            …
+            self.initial_prompt_messages
+            + [self.create_human_message_prompt(query, docs)]
         )
 
         # Create and run the chain with the hypothetical gpt-40-mini model
@@ -81,4 +95,4 @@ class LLMGeneration:
         result = chain.invoke({})
 
         print(f"Query: {query} Result: {result}")
-        return result.get("answer", "")
+        return json.loads(result.get("text", "{}")).get("answer", "")
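For reference, a minimal standalone sketch of what the new prompt assembly produces at run time, using the same LangChain classes the diff imports. The document text and question below are made-up placeholders, not part of the commit:

# Sketch only: mirrors the diff's prompt assembly with placeholder data.
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain.schema import Document

system_message = SystemMessage(
    content="You are a helpful assistant. Use ONLY the provided context ..."
)

# One few-shot turn teaches the model the <context>/<question> input format
# and the {"answer": ...} reply shape.
few_shots = [
    HumanMessage(
        content="<context>John traveled to Paris last summer.</context>"
        "<question>Where did John travel?</question>"
    ),
    AIMessage(content='{"answer": "Paris"}'),
]

# Placeholder retrieved chunks (hypothetical content).
docs = [Document(page_content="The Eiffel Tower is 330 metres tall.")]
context = "\n\n".join(f"<context>{d.page_content}</context>" for d in docs)

human = HumanMessagePromptTemplate.from_template(
    f"{context}\n\n<question>How tall is the Eiffel Tower?</question>"
)

prompt = ChatPromptTemplate.from_messages([system_message, *few_shots, human])

# Prints the system message, the two few-shot turns, then the real question.
for message in prompt.format_messages():
    print(f"{message.type}: {message.content[:60]}")

One caveat: because the human message goes through from_template, any literal braces in a retrieved chunk would be read as template variables.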
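The new return statement parses the chain's output as JSON. It assumes `import json` exists elsewhere in the module (the shown hunks do not add it), and json.loads raises json.JSONDecodeError if the model replies with anything other than valid JSON. A possible defensive variant, sketched here and not part of this commit:

import json

def extract_answer(raw_text: str) -> str:
    """Sketch only: tolerate malformed or non-object JSON replies."""
    try:
        parsed = json.loads(raw_text)
    except json.JSONDecodeError:
        return ""
    # The model may return valid JSON that is not an object, e.g. a bare string.
    return parsed.get("answer", "") if isinstance(parsed, dict) else ""

print(extract_answer('{"answer": "Paris"}'))   # Paris
print(extract_answer("Data Not Available"))    # "" (reply was not JSON)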