# AskMyPDF/utils/llm_generation.py
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.schema import Document
from typing import List
import json
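
# JSON schema used with OpenAI structured outputs: the model must reply with an
# object containing a single string field, e.g. {"answer": "..."}.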
json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
        "required": ["answer"],
        "additionalProperties": False,
    },
}


class LLMGeneration:
    """Answer user questions from retrieved document chunks using an OpenAI chat model."""

    def __init__(self, llm_model_name="gpt-4o-mini"):
        self.llm_model_name = llm_model_name
        # Low temperature plus the strict JSON schema keeps answers focused and machine-parseable
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )
        self.create_initial_prompt()

    def create_initial_prompt(self):
        # System message: restrict the model to the provided context and to the JSON schema
        system_message = SystemMessage(
            content=(
                "You are a helpful assistant. Use ONLY the provided context to answer the user's question given in the <question> tag. "
                "Answer the question in a structured JSON format and adhere strictly to the JSON schema. "
                "Make sure the answer is an exact match from the context. "
                """If the answer is not found in the contexts given in the <context> tags, return '{"answer": "Data Not Available"}'."""
            )
        )
        # One few-shot example showing the expected input tags and JSON output
        few_shots = [
            HumanMessage(
                content="<context>John traveled to Paris last summer. He stayed at a small boutique hotel and visited the Louvre museum.</context>"
                "<question>Where did John travel?</question>"
            ),
            AIMessage(content="""{"answer": "Paris"}"""),
        ]
        self.initial_prompt_messages = [system_message] + few_shots

    def create_human_message_prompt(self, query: str, docs: List[Document]):
        # Wrap each retrieved chunk in its own <context> tag
        context = "\n\n".join(
            [f"<context>{doc.page_content}</context>" for doc in docs]
        )
        # Human message carrying the tagged context and the user's question
        human_message = f"""
{context}
<question>{query}</question>"""
        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]):
        # Assemble the full prompt: system message + few-shot example + the new question with its context
        prompt = ChatPromptTemplate.from_messages(
            self.initial_prompt_messages
            + [self.create_human_message_prompt(query, docs)]
        )

        # Create and run the chain with the configured chat model
        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({})
        print(f"Query: {query} Result: {result}")

        # The model returns a JSON string such as {"answer": "..."}; extract the answer field
        return json.loads(result.get("text", "{}")).get("answer", "")
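

# --- Hypothetical usage sketch (illustrative, not part of the original module) ---
# Assumes OPENAI_API_KEY is set in the environment; the Document chunks below stand in
# for whatever the retrieval step of AskMyPDF actually returns.
if __name__ == "__main__":
    sample_docs = [
        Document(page_content="John traveled to Paris last summer."),
        Document(page_content="He stayed at a small boutique hotel and visited the Louvre museum."),
    ]
    generator = LLMGeneration()
    print(generator.generate_answer("Where did John travel?", sample_docs))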