markpeace committed
Commit a93b719
1 Parent(s): 33b89d4
agent/_create.py CHANGED
@@ -98,6 +98,9 @@ def agent(payload):
     print(response[-1].content)
     '''
 
-    print(response[-1].content)
-    return response[-1].content
+    response = response[-1].content[:-1] + ', "thread_id": "' + str(memory.thread_id) + '"}'
+
+
+    print(response);
+    return response
 
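Note on the new return path: it splices the thread_id into the model's JSON output by chopping the trailing "}" off the string and concatenating. A less fragile alternative (not part of this commit) is to parse and re-serialise the payload with the json module. This is only a sketch; it assumes, as in the diff above, that response[-1].content holds a JSON object string and that memory.thread_id is available inside agent():

    import json

    # Sketch only, not in the commit: parse the model output instead of slicing
    # off the trailing "}". Assumes response[-1].content is a JSON object string
    # and memory.thread_id exists, as in the hunk above.
    payload = json.loads(response[-1].content)
    payload["thread_id"] = str(memory.thread_id)
    response = json.dumps(payload)
    print(response)
    return response
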
agent/datastructures.py CHANGED
@@ -26,7 +26,6 @@ class FrontEndActions(BaseModel):
 class ResponseSchema(BaseModel):
     """Always use this to format the final response to the user. This will be passed back to the frontend."""
     message: str = Field(description="final answer to respond to the user")
-    thread_id: int = Field(description="The ID of the checkpointer memory thread that this response is associated with. This is used to keep track of the conversation.")
     tools: List[str] = Field(description="A list of the tools used to generate the response.")
     actions: List[FrontEndActions] = Field(description="List of suggested actions that should be passed back to the frontend to display. The use will click these to enact them. ")
 
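For reference, the trimmed ResponseSchema pairs naturally with a Pydantic output parser. The definition of the `parser` object that agent/jsonencoder.py imports is not shown in this diff, so the following is only a guess at how it could be declared; FrontEndActions is stubbed out because its fields are not shown either:

    from typing import List
    from langchain_core.output_parsers import PydanticOutputParser
    from langchain_core.pydantic_v1 import BaseModel, Field

    class FrontEndActions(BaseModel):
        """Stub: the real FrontEndActions fields are not shown in this diff."""
        text: str = Field(description="hypothetical label for the action button")

    class ResponseSchema(BaseModel):
        """Always use this to format the final response to the user."""
        message: str = Field(description="final answer to respond to the user")
        tools: List[str] = Field(description="tools used to generate the response")
        actions: List[FrontEndActions] = Field(description="suggested actions for the frontend")

    # One plausible definition of the `parser` imported by agent/jsonencoder.py
    parser = PydanticOutputParser(pydantic_object=ResponseSchema)
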
agent/jsonencoder.py CHANGED
@@ -1,16 +1,9 @@
 from langchain_openai import ChatOpenAI
-from agent.datastructures import parser
+from agent.datastructures import parser,ResponseSchema
 
 
 model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
-
-#from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
-#from agent.datastructures import ResponseSchema
-
-#converted_tools = [convert_pydantic_to_openai_function(ResponseSchema)]
-
-#model.bind_functions(convert_pydantic_to_openai_function(ResponseSchema))
-
+model.with_structured_output(ResponseSchema)
 
 from langchain.prompts import ChatPromptTemplate,PromptTemplate, MessagesPlaceholder,SystemMessagePromptTemplate
 
@@ -30,9 +23,7 @@ prompt = PromptTemplate(
 
 prompt = ChatPromptTemplate.from_messages(
     [
-        ("system", "The thread_id of this conversation is {thread_id}."),
         ("system", "You will be given the chat so far, you should render the final answer as a JSON object"),
-        SystemMessagePromptTemplate.from_template("{format_instructions}").format(format_instructions=parser.get_format_instructions()),
         MessagesPlaceholder(variable_name="conversation"),
     ]
 )
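One thing to watch in the new jsonencoder.py: with_structured_output() on a ChatOpenAI model returns a new runnable rather than modifying the model in place, so calling it without keeping the result has no effect. A minimal sketch of the intended binding, assuming ResponseSchema is the Pydantic model from agent/datastructures.py:

    from langchain_openai import ChatOpenAI
    from agent.datastructures import ResponseSchema

    model = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)

    # Keep the return value: with_structured_output() does not mutate `model`.
    structured_model = model.with_structured_output(ResponseSchema)
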
agent/prompt.py CHANGED
@@ -40,7 +40,7 @@ prompt = {
 
     1: ChatPromptTemplate.from_messages([
         ("system", "The thread_id of this conversation is {thread_id}."),
-        ("system", "In your answer you should list the tools used to produce this answer"),
+        ("system", "You should always "),
         MessagesPlaceholder(variable_name="conversation")
     ])
 
agent/toolset.py CHANGED
@@ -9,7 +9,9 @@ from langchain_core.pydantic_v1 import BaseModel, Field
 def frequently_asked_questions(input: str):
 
     """
-    Please always use this tool if the user has questions about our offer
+    You MUST use this tool to answer questions that users have.
+    Never try to answer a question without a tool.
+    If you cannot find the answer using this tool, you should pass the 'refer' action
     """
 
     # Load from local storage
@@ -21,7 +23,7 @@ def frequently_asked_questions(input: str):
         llm=ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0),
         chain_type="stuff",
         return_source_documents=False,
-        retriever=persisted_vectorstore.as_retriever(search_type="similarity_score_threshold",search_kwargs={"k":3, "score_threshold":0.5}))
+        retriever=persisted_vectorstore.as_retriever(search_type="similarity_score_threshold",search_kwargs={"k":2, "score_threshold":0.75}))
     result = qa.invoke(input)
     return result
 
@@ -32,6 +34,8 @@ def check_eligibility(input: str):
     """
     from flask import request
 
+    print("https://rise.mmu.ac.uk/wp-content/themes/rise/helpers/user/student_eligibility/chatbotquery.php?query=eligibility&wpid="+request.values.get("user_id"))
+
     from langchain_community.document_loaders import WebBaseLoader
     document = WebBaseLoader("https://rise.mmu.ac.uk/wp-content/themes/rise/helpers/user/student_eligibility/chatbotquery.php?query=eligibility&wpid="+request.values.get("user_id")).load()
     return document[0].page_content
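The retriever change tightens retrieval for the FAQ tool: at most 2 chunks instead of 3, and only chunks scoring at least 0.75 instead of 0.5, so fewer but higher-confidence passages reach the "stuff" chain. A standalone sketch of the same configuration, assuming persisted_vectorstore is the locally loaded vector store used in toolset.py (the query string is made up for illustration):

    # Sketch of the new retriever settings in isolation; `persisted_vectorstore`
    # is assumed to be the vector store loaded from local storage in toolset.py.
    retriever = persisted_vectorstore.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 2, "score_threshold": 0.75},  # at most 2 chunks, each scoring >= 0.75
    )
    docs = retriever.invoke("What support does Rise offer?")  # hypothetical query
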