messing
- __pycache__/app.cpython-39.pyc +0 -0
- app.py +47 -32
- test2.py +94 -0
- test3.py +81 -0
__pycache__/app.cpython-39.pyc CHANGED
Binary files a/__pycache__/app.cpython-39.pyc and b/__pycache__/app.cpython-39.pyc differ
app.py CHANGED
@@ -1,54 +1,69 @@
+#ESTABLISH THE SERVER
 from flask import Flask,request
 from dotenv import load_dotenv

 # Initializing flask app
 app = Flask(__name__)
 load_dotenv()

+@app.route("/", methods=['GET','POST'])
+def index():
+    from typing import List

+    from langchain.prompts import PromptTemplate
+    from langchain_core.output_parsers import JsonOutputParser
+    from langchain_core.pydantic_v1 import BaseModel, Field
+    from langchain_openai import ChatOpenAI
+    from langchain_community.tools.convert_to_openai import format_tool_to_openai_function
+    from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder


+    # Define your desired data structure.
+    class FrontEndActions(BaseModel):
+        """Structure to pass actions back to the frontend"""
+        text: str = Field(description="The text to display on the button")
+        type: str = Field(description="This should be a string that identifies the type of action. It can be one of: SuggestGoal, SuggestRiseActivity")

+    class ResponseSchema(BaseModel):
+        """Final response to the question being asked"""
+        message: str = Field(description="final answer to respond to the user")
+        #characters: str = Field(description="number of characters in the answer")
+        #actions: List[FrontEndActions] = Field(description="List of suggested actions that should be passed back to the frontend to display. The use will click these to enact them. ")
+        #tokens: int = Field(description="Count the number of used to produce the response")

+    # Set up a parser + inject instructions into the prompt template.
+    parser = JsonOutputParser(pydantic_object=ResponseSchema)

+    prompt = PromptTemplate(
+        template="""Answer the user query.\n{format_instructions}\n{input}\n{agent_scratchpad}""",
+        input_variables=["input"],
+        partial_variables={"format_instructions": parser.get_format_instructions()}
+    )

+    print(parser)

     llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

+    from langchain.agents import tool
+
+    @tool
+    def get_word_length():
+        """Returns the length of a word."""
+        return 1
+
+
+    tools = [get_word_length]
+
+    from langchain_openai import ChatOpenAI
+
+    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+    from langchain.agents import create_openai_functions_agent

     agent = create_openai_functions_agent(llm, tools, prompt)

+    from langchain.agents import AgentExecutor

+    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+    response = agent_executor.invoke({"input": "What are you?"})

+    return response['output']
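For context, the new app.py now builds the prompt, the single tool, and the OpenAI-functions agent inside the "/" route on every request, and returns the agent's answer to the hardcoded input "What are you?". A minimal sketch of exercising that route, assuming the app is started with the Flask dev server on the default port 5000 and an OPENAI_API_KEY is available via .env (neither is stated in the diff):

import requests  # hypothetical client script, not part of the commit

# GET (or POST) to "/" triggers index(), which assembles and runs the agent
resp = requests.get("http://127.0.0.1:5000/")
print(resp.text)  # the agent's reply to the hardcoded "What are you?" input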
test2.py ADDED
@@ -0,0 +1,94 @@
+#import json
+
+from flask import Flask,request
+from dotenv import load_dotenv
+
+from langchain.agents import tool
+
+
+# Initializing flask app
+app = Flask(__name__)
+load_dotenv()
+
+
+@tool
+def FAQ(question: str):
+    """Answers the question 1+1"""
+    return 23
+
+tools=[FAQ]
+
+
+@app.route('/', methods=['GET','POST'])
+def index():
+
+    from langchain_openai import ChatOpenAI
+    from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
+    from langchain.agents import AgentExecutor
+    from typing import List
+    from pydantic import BaseModel, Field
+    import json
+
+    from langchain.utils.openai_functions import convert_pydantic_to_openai_function
+    from langchain.agents.format_scratchpad import format_to_openai_function_messages
+
+    from langchain_core.agents import AgentActionMessageLog, AgentFinish
+
+
+    class Response(BaseModel):
+        """Final response to the question being asked. This is consumed by a frontend chatbot engine that has the ability to execute suggested actions"""
+
+        message: str = Field(description="The final answer to be displayed to the user")
+        tokens: int = Field(description="Count the number of tokens used to produce the response")
+        actions: List[int] = Field(
+            description="List of actions to be executed. Only include an action if it contains relevant information"
+        )
+
+
+    def parse(output): ##I DON'T UNDERSTAND THIS :)
+        if "function_call" not in output.additional_kwargs: return AgentFinish(return_values={"output": output.content}, log=output.content)
+        function_call = output.additional_kwargs["function_call"]
+        name = function_call["name"]
+        inputs = json.loads(function_call["arguments"])
+        if name == "Response":
+            return AgentFinish(return_values=inputs, log=str(function_call))
+        else:
+            return AgentActionMessageLog(
+                tool=name, tool_input=inputs, log="", message_log=[output]
+            )
+
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            ("system", "You are a helpful assistant"),
+
+            ("user", "{input}"),
+            MessagesPlaceholder(variable_name="agent_scratchpad"),
+        ]
+    )
+
+    llm = ChatOpenAI(temperature=2)
+
+    llm_with_tools = llm.bind(
+        functions=[
+            convert_pydantic_to_openai_function(Response), #RESPONSE SCHEMA
+        ]
+    )
+
+    agent = (
+        {
+            "input": lambda x: x["input"],
+            "agent_scratchpad": lambda x: format_to_openai_function_messages(
+                x["intermediate_steps"]
+            ),
+        }
+        | prompt
+        | llm_with_tools
+        | parse
+    )
+
+    agent_executor = AgentExecutor(tools=[], agent=agent, verbose=True, handle_parsing_errors="Check your output and make sure it conforms, use the Action/Action Input syntax")
+
+    return agent_executor.invoke(
+        {"input": "what did the president say about kentaji brown jackson"},
+        return_only_outputs=True,
+    )
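The parse step at the end of the chain in test2.py (the line the author flags with "I DON'T UNDERSTAND THIS") is a custom output parser: if the model made no function call, its plain text is returned as the final answer; if it "called" the Response schema, the call's arguments become the final structured answer; any other function call is wrapped so the AgentExecutor runs that tool. A rough standalone sketch of that routing, fed a hand-built AIMessage instead of a live model reply (the message contents are invented for illustration):

import json
from langchain_core.messages import AIMessage
from langchain_core.agents import AgentActionMessageLog, AgentFinish

def route(output):
    # No function call at all: the text is already the final answer.
    if "function_call" not in output.additional_kwargs:
        return AgentFinish(return_values={"output": output.content}, log=output.content)
    call = output.additional_kwargs["function_call"]
    args = json.loads(call["arguments"])
    if call["name"] == "Response":
        # The model filled in the Response schema: finish with structured fields.
        return AgentFinish(return_values=args, log=str(call))
    # Otherwise hand the tool call back to the executor to run.
    return AgentActionMessageLog(tool=call["name"], tool_input=args, log="", message_log=[output])

# Invented reply in which the model chose the Response schema:
fake = AIMessage(content="", additional_kwargs={"function_call": {
    "name": "Response",
    "arguments": '{"message": "FAQ says 1+1 is 23", "tokens": 12, "actions": []}'}})
print(route(fake))  # AgentFinish whose return_values carry message/tokens/actions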
test3.py ADDED
@@ -0,0 +1,81 @@
+#ESTABLISH THE SERVER
+from flask import Flask,request
+from dotenv import load_dotenv
+
+# Initializing flask app
+app = Flask(__name__)
+load_dotenv()
+
+@app.route("/", methods=['GET','POST'])
+def index():
+
+    import os
+    from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
+    from langchain.chat_models import ChatOpenAI
+
+    from langchain.output_parsers import PydanticOutputParser
+    from pydantic import BaseModel, Field
+    from typing import List
+
+    # Define a new Pydantic model with field descriptions and tailored for Twitter.
+
+    class FrontEndActions(BaseModel):
+        """Structure to pass actions back to the frontend"""
+        text: str = Field(description="The text to display on the button")
+        type: str = Field(description="This should be a string that identifies the type of action. It can be one of: SuggestGoal, SuggestRiseActivity")
+
+    class ResponseFormat(BaseModel):
+        """Final response to the question being asked"""
+        message: str = Field(description="The final answer to respond to the user")
+        chat_summary: str = Field(description="Summarise what the user has asked and how you have responded in this chat in a way that so that you can remember the conversation")
+        actions: List[FrontEndActions] = Field(description="List of suggested actions that should be passed back to the frontend to display. The use will click these to enact them. ")
+        tokens: int = Field(description="Count the number of used to produce the response")
+
+    # Instantiate the parser with the new model.
+    parser = PydanticOutputParser(pydantic_object=ResponseFormat)
+
+    # Update the prompt to match the new query and desired format.
+    prompt = ChatPromptTemplate(
+        messages=[
+            HumanMessagePromptTemplate.from_template(
+                """
+
+                You are a coach supporting students at post-92 university in the UK. It's students are diverse, and many come from non-traditional backgrounds and minority ethnic groups. Some may have ambitions for particular careers, others may not - and many may not be confident or have the social and financial advantages to reach their goals.
+
+                Your purpose is to help students to set aims (long term ambitions), break them into goals (things they want to achieve during their time at university) and objectives (smart targets).
+
+                If a student has a sense of what they want to achieve, you should help them to create smart targets. If they don't, you should be reassuring that its ok not to have clear goals yet, but help them to reflect and form some ambitions. These could be career-oriented, or they could be about succeeding in, and making the most of, their university experience.
+
+                You should be assertive in opening up and guiding the conversation.
+
+                \n{format_instructions}\n{question}
+
+                """
+            )
+        ],
+        input_variables=["question"],
+        partial_variables={
+            "format_instructions": parser.get_format_instructions(),
+        },
+    )
+
+    chat_model = ChatOpenAI(
+        model="gpt-3.5-turbo",
+        openai_api_key=os.getenv("OPENAI_API_KEY"),
+        max_tokens=1000
+    )
+
+    # Generate the input using the updated prompt.
+    user_query = (
+        """
+
+        I would like to be a teacher, can you give me some goals to achieve this?
+
+        """
+    )
+    _input = prompt.format_prompt(question=user_query)
+
+    output = chat_model(_input.to_messages())
+    parsed = parser.parse(output.content)
+
+    return parsed.dict()
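Unlike test2.py, test3.py uses no agent at all: the PydanticOutputParser's format instructions are injected into the prompt, and the raw model text is parsed directly into ResponseFormat, whose dict the route returns. A small sketch of that final parsing step in isolation, with a hand-written JSON string standing in for output.content (all field values are invented):

from typing import List
from pydantic import BaseModel, Field
from langchain.output_parsers import PydanticOutputParser

class FrontEndActions(BaseModel):
    text: str = Field(description="The text to display on the button")
    type: str = Field(description="SuggestGoal or SuggestRiseActivity")

class ResponseFormat(BaseModel):
    message: str
    chat_summary: str
    actions: List[FrontEndActions]
    tokens: int

parser = PydanticOutputParser(pydantic_object=ResponseFormat)

# Stand-in for the model's reply; a real run should produce similar JSON
# because the prompt embeds parser.get_format_instructions().
fake_reply = '''{"message": "Start by shadowing a teacher this term.",
 "chat_summary": "Student wants to become a teacher; suggested first goals.",
 "actions": [{"text": "Set this as a goal", "type": "SuggestGoal"}],
 "tokens": 120}'''

parsed = parser.parse(fake_reply)
print(parsed.dict())  # the same shape index() returns to the caller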