from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.schema import Document
from typing import List
import json

# Structured-output schema passed through OpenAI's "response_format"
# json_schema mode: every model reply must be exactly {"answer": "<string>"}.
json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
        "required": ["answer"],
        "additionalProperties": False,
    },
}


class LLMGeneration:
    """Generate answers to questions from retrieved context documents.

    Wraps a ChatOpenAI model configured for strict JSON structured output
    and builds a RAG-style prompt: system instructions, one few-shot
    example, and the user's tagged context/question pair.
    """

    def __init__(self, llm_model_name: str = "gpt-4o-mini"):
        """Create the chat model and the reusable prompt prefix.

        Args:
            llm_model_name: OpenAI chat model identifier.
        """
        self.llm_model_name = llm_model_name
        # Low temperature keeps the extractive answer near-deterministic;
        # response_format forces replies to conform to json_schema above.
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )
        self.create_initial_prompt()

    def create_initial_prompt(self):
        """Build the static prompt prefix: system message plus one few-shot."""
        # NOTE(review): the original instructions referred to content
        # "mentioned in tag" but no tags appeared anywhere in the built
        # messages (the markup looks stripped). The <context>/<question>
        # tags are spelled out here and mirrored by
        # create_human_message_prompt — confirm against the intended prompt.
        system_message = SystemMessage(
            content=(
                "You are a helpful assistant. Use ONLY the provided context "
                "in the <context> tag to answer the user's question in the "
                "<question> tag. "
                "Answer the question in a structured JSON format. "
                "Adhere to the JSON schema strictly. Make sure the answer "
                "is an exact match. "
                "If you do not find the answer in the provided <context>, "
                """return '{"answer": "Data Not Available"}'."""
            )
        )
        # One worked example so the model sees the expected JSON shape.
        few_shots = [
            HumanMessage(
                content=(
                    "<context>John traveled to Paris last summer. He stayed "
                    "at a small boutique hotel and visited the Louvre "
                    "museum.</context>\n"
                    "<question>Where did John travel?</question>"
                )
            ),
            AIMessage(content="""{"answer": "Paris"}"""),
        ]
        self.initial_prompt_messages = [system_message] + few_shots

    def create_human_message_prompt(self, query: str, docs: List[Document]):
        """Build the final human message from the retrieved docs and query.

        Args:
            query: The user's question.
            docs: Retrieved context chunks; their page_content is joined
                with blank lines to form the context.

        Returns:
            A HumanMessagePromptTemplate holding the tagged context/query.
        """
        context = "\n\n".join(doc.page_content for doc in docs)
        human_message = (
            f"<context>\n{context}\n</context>\n"
            f"<question>{query}</question>"
        )
        # Escape literal braces: from_template treats {...} as template
        # variables, so unescaped braces in the docs or the query would
        # raise (or demand phantom input variables) at invoke time.
        human_message = human_message.replace("{", "{{").replace("}", "}}")
        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]):
        """Run the LLM chain and return the model's answer string.

        Args:
            query: The user's question.
            docs: Retrieved context chunks.

        Returns:
            The "answer" field of the model's JSON reply, or "" when the
            reply is missing that field or is not valid JSON.
        """
        prompt = ChatPromptTemplate.from_messages(
            self.initial_prompt_messages
            + [self.create_human_message_prompt(query, docs)]
        )
        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({})
        print(f"Query: {query} Result: {result}")
        try:
            return json.loads(result.get("text", "{}")).get("answer", "")
        except json.JSONDecodeError:
            # Defensive: strict structured output should prevent this, but
            # a malformed reply must not crash the caller.
            return ""