add llm generation
utils/llm_generation.py
ADDED (+84 -0)
@@ -0,0 +1,84 @@
from langchain.prompts import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.schema import Document
from typing import List
import json


# JSON schema for OpenAI structured outputs: the model must return an object
# with a single required string key, "answer".
json_schema = {
    "name": "generate_answer",
    "strict": True,
    "schema": {
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
        },
        "required": ["answer"],
        "additionalProperties": False,
    },
}


class LLMGeneration:
    def __init__(self, llm_model_name="gpt-4o-mini"):
        self.llm_model_name = llm_model_name
        # Enforce structured JSON output via OpenAI's json_schema response format
        self.llm = ChatOpenAI(
            model_name=self.llm_model_name,
            temperature=0.1,
            model_kwargs={
                "response_format": {
                    "type": "json_schema",
                    "json_schema": json_schema,
                }
            },
        )

        self.create_system_message()

    def create_system_message(self):
        # System message for the chain
        system_message = (
            "You are a helpful assistant. Use ONLY the provided context to answer the user's question. "
            'If you cannot find the answer in the context, return the "answer" key as "Data Not Available".'
        )
        self.system_message_prompt = SystemMessagePromptTemplate.from_template(
            system_message
        )

    def create_human_message_prompt(self):
        # Human message template. Context and question are left as template
        # variables (filled in at invoke time) so that literal braces inside
        # retrieved documents are not misparsed as placeholders.
        human_message = """Given the following context:

{context}

Answer the following question in a structured JSON format with a key called "answer". If you do not find the answer in the provided context, return "answer" as "Data Not Available".

Question: {question}"""

        return HumanMessagePromptTemplate.from_template(human_message)

    def generate_answer(self, query: str, docs: List[Document]):
        # Prepare the context from the retrieved chunks
        context = "\n\n".join(doc.page_content for doc in docs)

        # Create the prompt template
        prompt = ChatPromptTemplate.from_messages(
            [self.system_message_prompt, self.create_human_message_prompt()]
        )

        # Create and run the chain with the configured model (gpt-4o-mini by default)
        chain = LLMChain(
            llm=self.llm,
            prompt=prompt,
        )
        result = chain.invoke({"context": context, "question": query})

        # LLMChain returns its output as a raw string under the "text" key;
        # the structured response format constrains it to a JSON object with
        # an "answer" key, so parse it before returning.
        parsed = json.loads(result["text"])

        print(f"Query: {query} Result: {parsed}")
        return parsed.get("answer", "")
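
For reference, a minimal usage sketch. It is not part of the commit: the documents and question are made up, it assumes OPENAI_API_KEY is set in the environment, and in a real pipeline the Document list would come from the retriever.

    # Hypothetical usage sketch (not part of this commit). Assumes OPENAI_API_KEY
    # is set; the hand-built Document objects stand in for real retriever output.
    from langchain.schema import Document

    from utils.llm_generation import LLMGeneration

    docs = [
        Document(page_content="The Eiffel Tower is 330 metres tall."),
        Document(page_content="It was completed in 1889 for the World's Fair."),
    ]

    generator = LLMGeneration()  # defaults to gpt-4o-mini
    answer = generator.generate_answer("How tall is the Eiffel Tower?", docs)
    print(answer)  # expected to be grounded in the context, e.g. "330 metres"

If the context does not contain the answer, the prompts instruct the model to return "Data Not Available" in the "answer" key.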