import os
import utils
utils.load_env()
# os.environ['LANGCHAIN_TRACING_V2'] = "true"
from typing import Literal
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph, MessagesState
import tools
# Define the tool-execution node (provided by the tools module).
tool_node = tools.tool_node
# Load the chat model.
llm = ChatOpenAI(model="gpt-4o-mini")
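# NOTE: for the conditional edge below to ever route to "tools", the model
# must be bound to the tool schemas so it can emit tool calls, e.g.
# (assuming the tools module exposes its tool list; `tools.tools` here is a
# hypothetical name):
# llm = llm.bind_tools(tools.tools)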
# Define the function that determines whether to continue or not.
def should_continue(state: MessagesState) -> Literal["tools", END]:
    messages = state["messages"]
    last_message = messages[-1]
    # If the LLM made a tool call, route to the "tools" node.
    if last_message.tool_calls:
        return "tools"
    # Otherwise, stop and reply to the user.
    return END
# Define the function that calls the model.
def call_model(state: MessagesState):
    messages = state["messages"]
    # On the first turn, prepend the analyst instructions before the user's
    # first message. (A SystemMessage would be the more conventional carrier
    # for these instructions; the HumanMessage below preserves the original
    # behaviour.)
    if len(messages) == 1:
        initial_prompt = HumanMessage(content=(
            "You are a market analyst specializing in feasibility studies. "
            "Your task is to analyze the potential of a new business location "
            "based on specific criteria. You will gather data from Google Maps "
            "about the presence and distribution of relevant points of "
            "interest, such as coffee shops, restaurants, or other businesses, "
            "near a specified location."
        ))
        messages.insert(0, initial_prompt)
    # Call the model with the updated messages.
    response = llm.invoke(messages)
    # MessagesState appends the returned messages to the running history.
    return {"messages": [response]}
# Define a new graph
workflow = StateGraph(MessagesState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)
# Set the entrypoint as `agent`
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
)
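# `should_continue` returns the name of the next node directly, so no explicit
# path map is needed. The equivalent explicit form would be:
# workflow.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})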
# We now add a normal edge from `tools` to `agent`.
# This means that after `tools` is called, `agent` node is called next.
workflow.add_edge("tools", "agent")
# Initialize memory to persist state between graph runs
checkpointer = MemorySaver()
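# Note that MemorySaver keeps checkpoints in process memory only, so
# conversation state is lost when the process restarts; a persistent
# checkpointer (e.g. one backed by SQLite) would be needed for durability.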
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable.
# Note that we're (optionally) passing the memory when compiling the graph
app = workflow.compile(checkpointer=checkpointer)
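# Because the compiled graph is a Runnable, it can be streamed as well as
# invoked. A minimal sketch (the thread_id value is arbitrary):
# for chunk in app.stream(
#     {"messages": [HumanMessage(content="hello")]},
#     config={"configurable": {"thread_id": 1}},
# ):
#     print(chunk)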
def submitUserMessage(message: str):
    final_state = app.invoke(
        {"messages": [HumanMessage(content=message)]},
        config={"configurable": {"thread_id": 42}},
    )
    return final_state["messages"][-1].content
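
# Quick manual test; a minimal sketch (the example question is illustrative only).
if __name__ == "__main__":
    print(submitUserMessage("How many coffee shops are near the specified location?"))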