import os

import utils

# load_env() is assumed to read the required API keys (e.g. OPENAI_API_KEY
# and any Google Maps key the tools use) from a local .env file.
utils.load_env()
# os.environ['LANGCHAIN_TRACING_V2'] = "true"

from typing import Literal

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph, MessagesState

import tools
# Wire up the prebuilt tool node exposed by the local tools module.
tool_node = tools.tool_node

# Load the model and bind the same tools the tool node executes, so the model
# can actually emit tool calls. (This assumes the tools module also exposes
# the tool list as `tools.tools`; without bind_tools the "tools" branch of
# the graph below would never be taken.)
llm = ChatOpenAI(model="gpt-4o-mini").bind_tools(tools.tools)
# Define the function that determines whether to continue or not.
def should_continue(state: MessagesState) -> Literal["tools", END]:
    messages = state["messages"]
    last_message = messages[-1]
    # If the LLM makes a tool call, then we route to the "tools" node.
    if last_message.tool_calls:
        return "tools"
    # Otherwise, we stop (reply to the user).
    return END
# Define the function that calls the model.
def call_model(state: MessagesState):
    messages = state["messages"]
    # Prepend the analyst instructions as a system message on every call.
    # The checkpointer only persists what nodes return, so the prompt is
    # rebuilt locally each turn rather than inserted into the state list
    # (in-place insertion would be lost or duplicated across turns).
    if not messages or not isinstance(messages[0], SystemMessage):
        initial_prompt = SystemMessage(content="You are a market analyst specializing in feasibility studies. Your task is to analyze the potential of a new business location based on specific criteria. You will gather data from Google Maps about the presence and distribution of relevant points of interest, such as coffee shops, restaurants, or other businesses, near a specified location.")
        messages = [initial_prompt] + messages
    # Call the model with the updated messages.
    response = llm.invoke(messages)
    # Return only the new message; MessagesState appends it to the history.
    return {"messages": [response]}
# Define a new graph.
workflow = StateGraph(MessagesState)

# Define the two nodes we will cycle between.
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)

# Set the entrypoint as `agent`.
workflow.set_entry_point("agent")

# We now add a conditional edge.
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
)

# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, the `agent` node is called next.
workflow.add_edge("tools", "agent")
# Initialize memory to persist state between graph runs
checkpointer = MemorySaver()
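# Note: MemorySaver keeps a separate persisted message history per thread_id
# passed at invoke time, held in process memory only (lost on restart).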
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable.
# Note that we're (optionally) passing the memory when compiling the graph
app = workflow.compile(checkpointer=checkpointer)
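
# Since the compiled app is a Runnable, it also supports streaming, e.g.
# `for step in app.stream(inputs, config): ...`, if you want to surface
# intermediate tool calls instead of only the final answer.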
def submitUserMessage(message: str) -> str:
    """Send one user message through the agent graph and return the reply."""
    final_state = app.invoke(
        {"messages": [HumanMessage(content=message)]},
        # A fixed thread_id means every call shares one conversation thread.
        config={"configurable": {"thread_id": 42}},
    )
    return final_state["messages"][-1].content
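

if __name__ == "__main__":
    # Quick smoke test, assuming utils.load_env() has provided the OpenAI
    # and Google Maps credentials the tools need; the location below is
    # just an example query.
    print(submitUserMessage("How suitable is the area around Siam Square for a new coffee shop?"))
    # Because submitUserMessage reuses thread_id 42, a follow-up message
    # continues the same checkpointed conversation:
    print(submitUserMessage("Summarize the nearby competitors you found."))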