import os

import utils

utils.load_env()
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
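
# `utils` is a project-local module not shown in this file. load_env() is
# assumed to load API keys (e.g. OPENAI_API_KEY) into the environment; a
# minimal sketch of what it might contain:
#
#   # utils.py (sketch; dotenv usage is an assumption)
#   from dotenv import load_dotenv
#
#   def load_env():
#       load_dotenv()  # read variables from a local .env file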

from typing import Literal

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph, MessagesState

import tools
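
# `tools` is another project-local module. It is assumed to expose a prebuilt
# langgraph ToolNode wrapping the agent's tools, roughly like:
#
#   # tools.py (sketch; the `search` tool is an assumption)
#   from langchain_core.tools import tool
#   from langgraph.prebuilt import ToolNode
#
#   @tool
#   def search(query: str) -> str:
#       """Look up information for the agent."""
#       ...
#
#   tool_node = ToolNode([search])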

# Tool-execution node (a prebuilt ToolNode from the tools module).
tool_node = tools.tool_node

# Chat model that drives the agent.
model = ChatOpenAI(model="gpt-4o-mini")
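
# NOTE: for `should_continue` below to ever route to "tools", the model must
# be bound to the same tools that tool_node executes, e.g. (assuming the
# tools module also exports the raw tool list under this name):
# model = model.bind_tools(tools.tool_list)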

# Define the function that determines whether to continue or not.
def should_continue(state: MessagesState) -> Literal["tools", END]:
    messages = state["messages"]
    last_message = messages[-1]
    # If the LLM made a tool call, route to the "tools" node.
    if last_message.tool_calls:
        return "tools"
    # Otherwise, stop (reply to the user).
    return END

# Define the function that calls the model.
def call_model(state: MessagesState):
    messages = state["messages"]
    response = model.invoke(messages)
    # Return a list, because it will be appended to the existing message list.
    return {"messages": [response]}

# Define a new graph.
workflow = StateGraph(MessagesState)

# Define the two nodes we will cycle between.
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_node)

# Set the entry point as `agent`.
workflow.set_entry_point("agent")

# We now add a conditional edge.
workflow.add_conditional_edges(
    # First, we define the start node: `agent`.
    # These are the edges taken after the `agent` node is called.
    "agent",
    # Next, the function that determines which node is called next.
    should_continue,
)
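
# Equivalently, an explicit path map could be passed as a third argument,
# mapping return values of should_continue to node names:
# workflow.add_conditional_edges("agent", should_continue, {"tools": "tools", END: END})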

# We now add a normal edge from `tools` to `agent`:
# after `tools` is called, the `agent` node runs next.
workflow.add_edge("tools", "agent")

# Initialize memory to persist state between graph runs.
checkpointer = MemorySaver()

# Finally, compile the graph into a LangChain Runnable, usable like any other
# runnable. Passing the checkpointer is optional; it enables per-thread memory.
app = workflow.compile(checkpointer=checkpointer)

def submitUserMessage(message: str) -> str:
    final_state = app.invoke(
        {"messages": [HumanMessage(content=message)]},
        # A fixed thread_id means every call shares one conversation history.
        config={"configurable": {"thread_id": 42}},
    )
    return final_state["messages"][-1].content
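

if __name__ == "__main__":
    # Quick smoke test (assumes OPENAI_API_KEY is set and tools.py is present).
    print(submitUserMessage("Hello! What can you do?"))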