from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.schema import Document
from typing import List
import json
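# Structured-output schema passed to the OpenAI API via response_format:
# it constrains every completion to a JSON object with a single required
# "answer" string field. (Assumes a model that supports structured
# outputs, e.g. gpt-4o-mini.)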
json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
        "required": ["answer"],
        "additionalProperties": False,
    },
}


class LLMGeneration:
    def __init__(self, llm_model_name="gpt-4o-mini"):
        """
        Initialize the LLMGeneration class with a specified LLM model.

        Parameters:
            llm_model_name (str): The name of the LLM model to be used. Default is "gpt-4o-mini".

        Returns:
            None
        """
        self.llm_model_name = llm_model_name
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )
        self.create_initial_prompt()

    def create_initial_prompt(self):
        """
        Prepares the initial prompt for the LLMChain.

        This function creates a system message and few-shot examples for the LLMChain.
        The system message instructs the assistant to use the provided context to answer the user's question
        and to follow a structured JSON format for the answer. It also specifies the conditions for providing an answer.
        The few-shot examples include a context and a question, along with the expected answer in JSON format.

        Returns:
            None. The initial prompt messages are stored in the `initial_prompt_messages` attribute of the class instance.
        """
        # System message for the chain
        system_message = SystemMessage(
            content=(
                "You are a helpful assistant. Use ONLY the provided context to answer the user's question mentioned in the <question> tag. "
                "Answer the question in a structured JSON format. "
                "Adhere to the JSON schema strictly, and make sure the answer is an exact match. "
                """If you do not find the answer in the provided contexts mentioned in the <context> tag, return '{"answer": "Data Not Available"}'."""
            )
        )
        # Few-shot example demonstrating the expected <context>/<question>
        # input format and the JSON answer format.
        few_shots = [
            HumanMessage(
                content="<context>John traveled to Paris last summer. He stayed at a small boutique hotel and visited the Louvre museum.</context>"
                "<question>Where did John travel?</question>"
            ),
            AIMessage(content="""{"answer": "Paris"}"""),
        ]
        self.initial_prompt_messages = [system_message] + few_shots

    def create_human_message_prompt(self, query: str, docs: List[Document]) -> HumanMessagePromptTemplate:
        """
        Prepares a human message prompt for the LLMChain.

        This function constructs a human message that includes the provided context and a question.
        The context is extracted from the list of documents and formatted as per the required structure.
        The question is included in the human message.

        Parameters:
            query (str): The user's question for which an answer needs to be generated.
            docs (List[Document]): A list of documents retrieved from the search engine. Each document contains a `page_content` attribute.

        Returns:
            HumanMessagePromptTemplate: A human message prompt template that can be used with the LLMChain.
        """
        # Prepare the context from the retrieved chunks, one <context> block per document
        context = "\n\n".join(
            f"<context>{doc.page_content}</context>" for doc in docs
        )
        # Human message combining the retrieved context with the user's question
        human_message = f"""
{context}
<question>{query}</question>"""
        # Escape literal braces so the prompt template does not treat
        # JSON-like text in the retrieved documents as template variables.
        human_message = human_message.replace("{", "{{").replace("}", "}}")
        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]) -> str:
        """
        Generate an answer to the user's query using the provided documents and the LLM model.

        Parameters:
            query (str): The user's question for which an answer needs to be generated.
            docs (List[Document]): A list of documents retrieved from the search engine. Each document contains a `page_content` attribute.

        Returns:
            str: The answer to the user's query. If no answer is found, returns an empty string.
        """
        # Create the prompt template: system message + few-shot examples + the new question
        prompt = ChatPromptTemplate.from_messages(
            self.initial_prompt_messages
            + [self.create_human_message_prompt(query, docs)]
        )
        # Create and run the chain with the configured model (default: gpt-4o-mini)
        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({})
        print(f"Query: {query} Result: {result}")
        # The structured response_format should guarantee valid JSON, but
        # guard against malformed output and fall back to an empty answer.
        try:
            return json.loads(result.get("text", "{}")).get("answer", "")
        except json.JSONDecodeError:
            return ""
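

# Usage sketch. Assumptions not in the original: OPENAI_API_KEY is set in the
# environment, and the Document objects below stand in for real retriever output.
if __name__ == "__main__":
    docs = [
        Document(page_content="The Eiffel Tower was completed in 1889."),
        Document(page_content="It was built as the entrance arch to the Paris World's Fair."),
    ]
    generator = LLMGeneration()
    answer = generator.generate_answer("When was the Eiffel Tower completed?", docs)
    print(answer)  # e.g. "1889", or "Data Not Available" if the context lacks the answer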