# AskMyPDF/utils/llm_generation.py
import json
from typing import List

from langchain.prompts import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.schema import Document
from langchain_openai import ChatOpenAI  # replaces the deprecated langchain_community import
# OpenAI structured-output schema: constrains the model to return a JSON
# object with a single string field, "answer".
json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
        "required": ["answer"],
        "additionalProperties": False,
    },
}
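# With "strict": True, a conforming model reply has exactly this shape,
# e.g. (illustrative example): {"answer": "Data Not Available"}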

class LLMGeneration:
    def __init__(self, llm_model_name="gpt-4o-mini"):
        self.llm_model_name = llm_model_name
        # Low temperature for grounded answers; response_format forces the
        # model to emit JSON matching json_schema.
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )
        self.create_system_message()

    def create_system_message(self):
        # System message: restrict the model to the supplied context and
        # define the fallback answer when the context is insufficient.
        system_message = (
            "You are a helpful assistant. Use ONLY the provided context to answer the user's question. "
            'If you cannot find the answer in the context, return the answer key as "Data Not Available".'
        )
        self.system_message_prompt = SystemMessagePromptTemplate.from_template(
            system_message
        )

    def create_human_message_prompt(self):
        # Use {context} and {query} as template variables instead of f-string
        # interpolation, so braces inside document text or the query cannot
        # break the prompt template. The fallback answer matches the one in
        # the system message.
        human_message = (
            "Given the following context:\n\n"
            "{context}\n\n"
            'Answer the following question in a structured JSON format with a key called "answer". '
            'If you do not find the answer in the provided context, return "answer" as "Data Not Available".\n\n'
            "Question: {query}"
        )
        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]) -> str:
        # Join the retrieved chunks into a single context block
        context = "\n\n".join(doc.page_content for doc in docs)

        # Build the prompt from the system and human message templates
        prompt = ChatPromptTemplate.from_messages(
            [self.system_message_prompt, self.create_human_message_prompt()]
        )

        # Create and run the chain with the configured model (gpt-4o-mini by default)
        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({"context": context, "query": query})
        print(f"Query: {query} Result: {result['text']}")

        # LLMChain returns its output under the "text" key; that output is a
        # JSON string matching json_schema, so parse it before reading "answer".
        try:
            return json.loads(result["text"]).get("answer", "")
        except json.JSONDecodeError:
            return ""
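
# Minimal usage sketch (illustrative, not part of the original module).
# Assumes OPENAI_API_KEY is set in the environment; the Document contents
# below are hypothetical stand-ins for chunks returned by a retriever.
if __name__ == "__main__":
    docs = [
        Document(page_content="AskMyPDF lets users upload a PDF and ask questions about it."),
        Document(page_content="Retrieved chunks are passed to the LLM as grounding context."),
    ]
    generator = LLMGeneration()
    answer = generator.generate_answer("What does AskMyPDF do?", docs)
    print(answer)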