File size: 2,672 Bytes
9f58735
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
from langchain.prompts import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.schema import Document
from typing import List
import json


# Structured-output schema passed to OpenAI's "json_schema" response format:
# the model must reply with a JSON object containing exactly one required
# string field, "answer", and nothing else.
_answer_object_schema = {
    "type": "object",
    "properties": {"answer": {"type": "string"}},
    "required": ["answer"],
    "additionalProperties": False,
}

json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": _answer_object_schema,
}


class LLMGeneration:
    """Generate answers to user queries grounded in retrieved document context.

    Builds a chat prompt (cached system message + per-call human message) from
    a query and a list of retrieved ``Document`` chunks, invokes an OpenAI chat
    model constrained to the module-level ``json_schema`` structured output,
    and returns the parsed ``"answer"`` string.
    """

    def __init__(self, llm_model_name: str = "gpt-4o-mini"):
        """Create the chat model and the reusable system prompt.

        Args:
            llm_model_name: OpenAI chat model identifier.
        """
        self.llm_model_name = llm_model_name
        # Low temperature for deterministic, context-grounded answers.
        # response_format forces the model to emit an object matching
        # `json_schema` (a single required "answer" string key).
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )

        self.create_system_message()

    def create_system_message(self):
        """Build and cache the system prompt template on the instance."""
        system_message = (
            "You are a helpful assistant. Use ONLY the provided context to answer the user's question. "
            'If you cannot find the answer in the context, return answer key as  "Data Not Available".'
        )
        self.system_message_prompt = SystemMessagePromptTemplate.from_template(
            system_message
        )

    @staticmethod
    def _escape_braces(text: str) -> str:
        # from_template treats {...} as template placeholders; literal braces
        # in retrieved text or the query must be doubled to survive formatting.
        return text.replace("{", "{{").replace("}", "}}")

    def create_human_message_prompt(self, query: str, docs: List[Document]):
        """Build the human message embedding the retrieved context.

        Args:
            query: The user's question.
            docs: Retrieved chunks whose ``page_content`` forms the context.

        Returns:
            A ``HumanMessagePromptTemplate`` with context and query inlined.
        """
        # Prepare the context from the retrieved chunks.
        context = "\n\n".join(doc.page_content for doc in docs)

        # BUG FIX: escape literal braces before handing text to from_template —
        # otherwise any "{" / "}" in real-world documents raises or corrupts
        # the template. Also unified the fallback string with the system
        # message ("Data Not Available" — the original said "No answer" here,
        # contradicting the system instruction).
        human_message = f"""Given the following context:

        {self._escape_braces(context)}

        Answer the following question in a structured JSON format with a key called "answer". If you do not find the answer in the provided context, return "answer" as "Data Not Available".

        Question: {self._escape_braces(query)}"""

        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]) -> str:
        """Run the LLM chain and return the parsed answer string.

        Args:
            query: The user's question.
            docs: Retrieved context documents.

        Returns:
            The ``"answer"`` value from the model's JSON output, or ``""`` if
            the output is missing or not valid JSON.
        """
        # Create the prompt template (context/query are already inlined, so
        # the chain needs no runtime input variables).
        prompt = ChatPromptTemplate.from_messages(
            [self.system_message_prompt, self.create_human_message_prompt(query, docs)]
        )

        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({})

        print(f"Query: {query} Result: {result}")

        # Backward-compat: honor a direct "answer" key if present.
        if "answer" in result:
            return result.get("answer", "")

        # BUG FIX: LLMChain.invoke returns its output under the "text" key
        # (the raw model output string), so the original
        # `result.get("answer", "")` always returned "". Parse the JSON
        # payload and extract "answer" from it.
        raw = result.get("text", "")
        try:
            parsed = json.loads(raw)
        except (json.JSONDecodeError, TypeError):
            return ""
        return parsed.get("answer", "") if isinstance(parsed, dict) else ""