# rise-ai / structure.py
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
from dotenv import load_dotenv

load_dotenv()

from typing import List
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from pydantic import BaseModel, Field
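
# These Pydantic models describe the structured output passed to the frontend chatbot;
# Response is converted to an OpenAI function schema further down so the model can
# use it as its final, structured answer.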
class FrontendActions(BaseModel):
    """Commands and options that can be passed to the frontend chatbot to elicit a response or student action"""

    label: str = Field(description="Label which will appear on the frontend chatbot button. Omit this if you are just making a server request for more information")
    link: str = Field(description="Link to take the user to a different place.")


class Response(BaseModel):
    """Final response to the question being asked. This will be passed to the frontend chatbot for processing"""

    message: str = Field(description="The final answer to respond to the user")
    tokens: int = Field(description="The number of tokens used to produce the response")
    # actions: List[FrontendActions] = Field(description="List of actions taken to produce the response.")

import json
from langchain_core.agents import AgentActionMessageLog, AgentFinish
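
# Custom output parser: decide whether the model's message is a tool call, the
# structured Response (treated as the final answer), or plain text.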
def parse(output):
    # If no function was invoked, return to user
    if "function_call" not in output.additional_kwargs:
        return AgentFinish(return_values={"output": output.content}, log=output.content)

    # Parse out the function call
    function_call = output.additional_kwargs["function_call"]
    name = function_call["name"]
    inputs = json.loads(function_call["arguments"])

    # If the Response function was invoked, return to the user with the function inputs
    if name == "Response":
        return AgentFinish(return_values=inputs, log=str(function_call))

    # Otherwise, return an agent action
    else:
        return AgentActionMessageLog(
            tool=name, tool_input=inputs, log="", message_log=[output]
        )

from langchain.agents import tool


@tool
def placeholder():
    """This is just a placeholder function"""
    return "placeholder"


tools = [placeholder]

from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain_community.tools.convert_to_openai import format_tool_to_openai_function
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
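
# Build the agent: a prompt that instructs the model to answer via the Response
# schema, gpt-4 with the placeholder tool and Response bound as OpenAI functions,
# and the parse() router above, composed as a LangChain runnable pipeline.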
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Please only make two suggestions at a time, and output a JSON object using the response schema provided to you in the associated tool. If you suggest objectives and goals, please make them actions in the schema with the link 'plan'"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
)

llm = ChatOpenAI(model="gpt-4", temperature=0)

llm_with_tools = llm.bind(
    functions=[
        # The placeholder tool
        format_tool_to_openai_function(placeholder),
        # Response schema
        convert_pydantic_to_openai_function(Response),
    ]
)

agent = (
    {
        "input": lambda x: x["input"],
        # Format agent scratchpad from intermediate steps
        "agent_scratchpad": lambda x: format_to_openai_function_messages(
            x["intermediate_steps"]
        ),
    }
    | prompt
    | llm_with_tools
    | parse
)

agent_executor = AgentExecutor(tools=tools, agent=agent, verbose=True)

agent_executor.invoke(
    {"input": "Can you suggest to me some actions I could take to become a teacher?"},
    return_only_outputs=True,
)
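
# The executor returns the Response fields (e.g. "message" and "tokens") as its
# output dict, because parse() maps the Response function call to AgentFinish.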