from typing import List
import json

from langchain.prompts import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.schema import Document

json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
        "required": ["answer"],
        "additionalProperties": False,
    },
}
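
# Note: with "strict": True, OpenAI structured outputs constrain each
# completion to a JSON string matching this schema, i.e. of the shape
# '{"answer": "..."}', with no extra keys or free-form text around it.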


class LLMGeneration:
    def __init__(self, llm_model_name="gpt-4o-mini"):
        self.llm_model_name = llm_model_name
        # Attach the response schema so every completion is a JSON object
        # conforming to json_schema (gpt-4o-mini supports structured outputs).
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )

        self.create_system_message()

    def create_system_message(self):
        system_message = (
            "You are a helpful assistant. Use ONLY the provided context to answer the user's question. "
            'If you cannot find the answer in the context, set the "answer" key to "Data Not Available".'
        )
        self.system_message_prompt = SystemMessagePromptTemplate.from_template(
            system_message
        )

    def create_human_message_prompt(self):
        # Leave {context} and {query} as template variables rather than
        # interpolating them with an f-string, so that any braces inside the
        # retrieved documents are not misread as template placeholders. The
        # fallback value matches the one in the system message.
        human_message = """Given the following context:

{context}

Answer the following question in a structured JSON format with a key called "answer". If you do not find the answer in the provided context, return "answer" as "Data Not Available".

Question: {query}"""
        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]):
        context = "\n\n".join(doc.page_content for doc in docs)

        prompt = ChatPromptTemplate.from_messages(
            [self.system_message_prompt, self.create_human_message_prompt()]
        )

        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({"context": context, "query": query})

        # LLMChain returns its output under the "text" key; with the
        # structured-output setting, that value is a JSON string matching
        # json_schema, so parse it before reading the answer.
        parsed = json.loads(result["text"])
        print(f"Query: {query} Result: {parsed}")
        return parsed.get("answer", "")
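

# Minimal usage sketch, assuming OPENAI_API_KEY is set in the environment
# and that a retrieval step has already produced a list of Documents; the
# sample documents and question below are illustrative only.
if __name__ == "__main__":
    sample_docs = [
        Document(page_content="The Eiffel Tower is 330 metres tall."),
        Document(page_content="It was completed in 1889."),
    ]
    generator = LLMGeneration()
    print(generator.generate_answer("How tall is the Eiffel Tower?", sample_docs))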