DylanonWic committed on
Commit
edd1568
1 Parent(s): 5831cdb

Upload 11 files

Browse files
Files changed (2) hide show
  1. chatbot.ipynb +1 -0
  2. chatbot.py +18 -3
chatbot.ipynb CHANGED
@@ -35,6 +35,7 @@
35
  "\n",
36
  "\n",
37
  "tool_node = tools.tool_node\n",
 
38
  "\n",
39
  "# model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0).bind_tools(tools)\n",
40
  "model = ChatOpenAI(model=\"gpt-4o-mini\")\n",
 
35
  "\n",
36
  "\n",
37
  "tool_node = tools.tool_node\n",
38
+ "tools.tools\n",
39
  "\n",
40
  "# model = ChatAnthropic(model=\"claude-3-5-sonnet-20240620\", temperature=0).bind_tools(tools)\n",
41
  "model = ChatOpenAI(model=\"gpt-4o-mini\")\n",
chatbot.py CHANGED
@@ -9,13 +9,17 @@ from langchain_core.messages import HumanMessage
9
  from langchain_openai import ChatOpenAI
10
  from langgraph.checkpoint.memory import MemorySaver
11
  from langgraph.graph import END, StateGraph, MessagesState
 
 
 
12
  import tools
13
 
14
  # define tools node.
15
  tool_node = tools.tool_node
16
 
 
17
  # load models.
18
- model = ChatOpenAI(model="gpt-4o-mini")
19
 
20
  # Define the function that determines whether to continue or not
21
  def should_continue(state: MessagesState) -> Literal["tools", END]:
@@ -28,11 +32,21 @@ def should_continue(state: MessagesState) -> Literal["tools", END]:
28
  return END
29
 
30
 
 
31
  # Define the function that calls the model
32
  def call_model(state: MessagesState):
33
  messages = state['messages']
34
- response = model.invoke(messages)
35
- # We return a list, because this will get added to the existing list
 
 
 
 
 
 
 
 
 
36
  return {"messages": [response]}
37
 
38
 
@@ -46,6 +60,7 @@ workflow.add_node("tools", tool_node)
46
  # Set the entrypoint as `agent`
47
  workflow.set_entry_point("agent")
48
 
 
49
  # We now add a conditional edge
50
  workflow.add_conditional_edges(
51
  # First, we define the start node. We use `agent`.
 
9
  from langchain_openai import ChatOpenAI
10
  from langgraph.checkpoint.memory import MemorySaver
11
  from langgraph.graph import END, StateGraph, MessagesState
12
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
13
+ from langchain.agents import AgentExecutor, create_openai_tools_agent
14
+ import functools
15
  import tools
16
 
17
  # define tools node.
18
  tool_node = tools.tool_node
19
 
20
+
21
  # load models.
22
+ llm = ChatOpenAI(model="gpt-4o-mini")
23
 
24
  # Define the function that determines whether to continue or not
25
  def should_continue(state: MessagesState) -> Literal["tools", END]:
 
32
  return END
33
 
34
 
35
+ # Define the function that calls the model
36
  # Define the function that calls the model
37
  def call_model(state: MessagesState):
38
  messages = state['messages']
39
+
40
+ # Check if this is the first message in the conversation
41
+ if len(messages) == 1:
42
+ # Add the initial prompt before the user's first message
43
+ initial_prompt = HumanMessage(content="You are a market analyst specializing in feasibility studies. Your task is to analyze the potential of a new business location based on specific criteria. You will gather data from Google Maps about the presence and distribution of relevant points of interest, such as coffee shops, restaurants, or other businesses, near a specified location.")
44
+ messages.insert(0, initial_prompt)
45
+
46
+ # Call the model with the updated messages
47
+ response = llm.invoke(messages)
48
+
49
+ # Return the updated messages state
50
  return {"messages": [response]}
51
 
52
 
 
60
  # Set the entrypoint as `agent`
61
  workflow.set_entry_point("agent")
62
 
63
+
64
  # We now add a conditional edge
65
  workflow.add_conditional_edges(
66
  # First, we define the start node. We use `agent`.