basic FAQ functionality is working

- agent/_create.py        +11 -7
- agent/datastructures.py  +1 -1
- agent/prompt.py         +20 -3
- agent/toolset.py         +0 -7
- app.py                   +5 -2
- requirements.txt         +3 -13
- train/faq.py             +2 -1
agent/_create.py
CHANGED
@@ -1,11 +1,12 @@
 
 def agent(payload):
 
-
-
+
     from langchain_openai import ChatOpenAI
-
-
+    llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+
+    from agent.toolset import tool_executor, converted_tools
+    model = llm.bind_functions(converted_tools)
 
     from langgraph.prebuilt import ToolInvocation
     import json
@@ -44,18 +45,21 @@ def agent(payload):
 
     app = workflow.compile(checkpointer=memory.checkpoints)
 
-
     from agent.prompt import prompt
     prompt=prompt[memory.isNew]
 
-    input = payload.get("input") or "
+    input = payload.get("input") or "What is Rise for?"
 
     prompt = prompt.format(input=input, thread_id=memory.thread_id)
 
+
+
     response = app.invoke(prompt, {"configurable": {"thread_id": memory.thread_id}})
     print(response[-1].content)
 
-
+    #for s in app.stream(prompt, {"configurable": {"thread_id": memory.thread_id}}):
+    #    print(list(s.values())[0])
+    #    print("----")
 
     return response[-1].content
 
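Only the model setup and the final invoke appear in these hunks; the graph construction in the middle of the file is untouched by the commit. As a rough sketch of the function-calling LangGraph loop this file implies (AgentState, call_model, call_tool and should_continue are illustrative names, not taken from the repo):

from typing import TypedDict, Annotated, Sequence
import operator
import json

from langchain_core.messages import BaseMessage, FunctionMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolInvocation

from agent.toolset import tool_executor, converted_tools

llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
model = llm.bind_functions(converted_tools)

class AgentState(TypedDict):
    # Conversation history; operator.add appends each node's new messages.
    messages: Annotated[Sequence[BaseMessage], operator.add]

def should_continue(state):
    # Keep looping through the tool node while the model requests function calls.
    last = state["messages"][-1]
    return "continue" if "function_call" in last.additional_kwargs else "end"

def call_model(state):
    return {"messages": [model.invoke(state["messages"])]}

def call_tool(state):
    call = state["messages"][-1].additional_kwargs["function_call"]
    action = ToolInvocation(tool=call["name"], tool_input=json.loads(call["arguments"]))
    result = tool_executor.invoke(action)
    return {"messages": [FunctionMessage(content=str(result), name=action.tool)]}

workflow = StateGraph(AgentState)
workflow.add_node("agent", call_model)
workflow.add_node("action", call_tool)
workflow.set_entry_point("agent")
workflow.add_conditional_edges("agent", should_continue, {"continue": "action", "end": END})
workflow.add_edge("action", "agent")
# The file then compiles this with the memory checkpointer, as shown above.
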
agent/datastructures.py
CHANGED
@@ -22,7 +22,7 @@ class FrontEndActions(BaseModel):
     type: ActionTypes = Field(description="This should be a string that identifies the type of action. It can be one of: SuggestGoal, SuggestActivity")
 
 class ResponseSchema(BaseModel):
-    """
+    """Always use this to format the final response to the user. This will be passed back to the frontend."""
     message: str = Field(description="final answer to respond to the user")
     thread_id: int = Field(description="The ID of the checkpointer memory thread that this response is associated with. This is used to keep track of the conversation.")
     tools: Optional[List[str]] = Field(description="A list of the tools used to generate the response.")
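The restored docstring is what the model sees when ResponseSchema is exposed as a function, so it should not be empty. A hypothetical illustration of how this schema can back the `parser` whose format instructions agent/prompt.py injects (the repo's actual parser construction is not shown in this commit, and the example reply assumes no required fields beyond those listed above):

from langchain_core.output_parsers import PydanticOutputParser

from agent.datastructures import ResponseSchema

parser = PydanticOutputParser(pydantic_object=ResponseSchema)

# This string is what {response_format} expands to in agent/prompt.py.
print(parser.get_format_instructions())

# A compliant model reply parses back into the typed object.
reply = '{"message": "Rise helps Manchester Met students make the most of their time at university.", "thread_id": 7, "tools": ["FrequentlyAskedQuestions"]}'
print(parser.parse(reply))
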
agent/prompt.py
CHANGED
@@ -10,13 +10,30 @@ prompt = {
     1: # IF THE THREAD IS NEW, THE CHATBOT NEEDS TO BE PUMP-PROMPTED
     ChatPromptTemplate.from_messages([
         SystemMessagePromptTemplate.from_template("""
+
+            You are a backend service to a JavaScript frontend chatbot hosted on another website.
 
-            You are
+            You are interacting with students from Manchester Metropolitan University as part of its mission to help students to make the most of their time here.
+
+            Your tone should be personable and enthusiastic
+
+            ### ANSWERING QUESTIONS ABOUT RISE AND FUTURE ME ###
 
-
+            You have been provided with the FrequentlyAskedQuestions tool to answer questions that students might have about the Rise programme and Future me initiative. Please rely on this tool and do not make up answers if you are unsure.
 
-
+            If a question seems relevant to Rise and Future me, but you are unsure of the answer, you are able to refer it to the Rise team using the EmailTeam tool. Before you do this, please confirm with the user.
+
+            ###########
 
+            ### PROVIDING A FINAL ANSWER ###
+
+            {response_format}
+
+            Never output anything outside of the JSON Blob beginning with '{{' and ending with '}}'
+
+            ###########
+
+
         """
 
         ).format(response_format=parser.get_format_instructions()),
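A hedged usage sketch of the prompt table above, mirroring agent/_create.py (the 0-key template for existing threads and the human message that consumes {input} and {thread_id} sit outside this hunk):

from agent.prompt import prompt

# memory.isNew selects between the pump-prompted (1) and existing-thread (0) templates.
template = prompt[1]
messages = template.format_messages(input="What is Rise for?", thread_id=17)
for message in messages:
    print(message.type, message.content[:80])
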
agent/toolset.py
CHANGED
@@ -49,13 +49,6 @@ def recommend_activity(profile: str) -> str:
 
 tools = [frequently_asked_questions]
 
-
-## NEW FROM HERE
-
-from langchain_community.tools.tavily_search import TavilySearchResults
-
-tools = [TavilySearchResults(max_results=1)]
-
 from langgraph.prebuilt import ToolExecutor
 
 tool_executor = ToolExecutor(tools)
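The commit drops the Tavily experiment and goes back to the single FAQ tool. The tool body and the `converted_tools` that agent/_create.py imports live elsewhere in this file; a plausible sketch of both, assuming the FAISS index written by train/faq.py and a sentence-transformers embedding model (both assumptions, not confirmed by this diff):

from langchain.tools import tool
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.utils.function_calling import convert_to_openai_function
from langgraph.prebuilt import ToolExecutor

@tool
def frequently_asked_questions(question: str) -> str:
    """Answer questions about the Rise programme from the local FAQ index."""
    store = FAISS.load_local(
        "_rise_faq_db",
        HuggingFaceEmbeddings(),
        allow_dangerous_deserialization=True,  # required on newer langchain releases
    )
    docs = store.similarity_search(question, k=3)
    return "\n\n".join(doc.page_content for doc in docs)

tools = [frequently_asked_questions]
tool_executor = ToolExecutor(tools)

# What the model is bound to in agent/_create.py via llm.bind_functions(converted_tools).
converted_tools = [convert_to_openai_function(t) for t in tools]
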
app.py
CHANGED
@@ -26,5 +26,8 @@ def train_products():
     from train.products import train
     return train();
 
-from
-
+#from train.faq import train
+#train();
+
+#from agent._create import agent
+#agent({})
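The hunk only touches the training route and leaves the manual test calls commented out; a minimal sketch of how the agent entry point might be exposed over HTTP (the route name and response shape are assumptions, not part of this commit):

from flask import Flask, request, jsonify

from agent._create import agent

app = Flask(__name__)

@app.route("/chat", methods=["POST"])
def chat():
    # Forward the JSON body straight into the agent and return its final answer.
    payload = request.get_json(force=True) or {}
    return jsonify({"response": agent(payload)})
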
requirements.txt
CHANGED
@@ -3,6 +3,7 @@ Flask
 Flask-cors
 gunicorn
 python-dotenv
+playwright
 
 #LLM
 bs4
@@ -13,19 +14,8 @@ transformers
 sentence-transformers
 datasets
 faiss-cpu
-
-#LLAMA TEST
-torch
-transformers
-langchain
-chromadb
-xformers
-sentence_transformers
-tokenizers
-optimum
-auto-gptq
-unstructured
+langchain-fireworks
 
 #LANGRAPH
 Langgraph
-Operator
+#Operator
train/faq.py
CHANGED
@@ -6,6 +6,7 @@ def train():
     from langchain_community.document_loaders import WebBaseLoader
 
     documents = WebBaseLoader("https://rise.mmu.ac.uk/what-is-rise/").load()
+    documents[0].page_content = documents[0].page_content.split("Everything You Need To Know About Rise – Students")[1].strip();
 
     # Split document in chunks
     text_splitter = RecursiveCharacterTextSplitter(
@@ -21,4 +22,4 @@ def train():
     # Persist the vectors locally on disk
     vectorstore.save_local("_rise_faq_db");
 
-    return {"trained":"success"}
+    return {"trained":"success"}
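Read together, the two hunks slot into a train() that loads the FAQ page, strips the page chrome, chunks it, embeds it and persists a FAISS index. A sketch of the whole flow, where the splitter settings and the embedding model are assumptions and the rest mirrors the lines shown above:

def train():
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain_community.document_loaders import WebBaseLoader
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import FAISS

    documents = WebBaseLoader("https://rise.mmu.ac.uk/what-is-rise/").load()
    # Keep only the content after the page title, dropping navigation chrome.
    documents[0].page_content = documents[0].page_content.split(
        "Everything You Need To Know About Rise – Students")[1].strip()

    # Split the page into overlapping chunks before embedding (sizes are assumed).
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = text_splitter.split_documents(documents)

    # Embed the chunks and persist the vectors locally on disk.
    vectorstore = FAISS.from_documents(chunks, HuggingFaceEmbeddings())
    vectorstore.save_local("_rise_faq_db")

    return {"trained": "success"}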