bstraehle committed on
Commit
78765d2
1 Parent(s): 2fb08c5

Update rag_langgraph.py

Browse files
Files changed (1) hide show
  1. rag_langgraph.py +21 -33
rag_langgraph.py CHANGED
@@ -18,13 +18,16 @@ import functools
18
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
19
  from langgraph.graph import StateGraph, END
20
 
 
 
 
 
21
  def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
22
- # Each worker node will be given a name and some tools.
23
  prompt = ChatPromptTemplate.from_messages(
24
  [
25
  (
26
- "system",
27
- system_prompt,
28
  ),
29
  MessagesPlaceholder(variable_name="messages"),
30
  MessagesPlaceholder(variable_name="agent_scratchpad"),
@@ -38,30 +41,21 @@ def agent_node(state, agent, name):
38
  result = agent.invoke(state)
39
  return {"messages": [HumanMessage(content=result["output"], name=name)]}
40
 
41
- # The agent state is the input to each node in the graph
42
- class AgentState(TypedDict):
43
- # The annotation tells the graph that new messages will always
44
- # be added to the current states
45
- messages: Annotated[Sequence[BaseMessage], operator.add]
46
- # The 'next' field indicates where to route to next
47
- next: str
48
-
49
  def create_graph(topic, word_count):
50
- tavily_tool = TavilySearchResults(max_results=5)
51
- python_repl_tool = PythonREPLTool()
52
-
53
- members = ["Researcher", "Coder"]
54
  system_prompt = (
55
  "You are a supervisor tasked with managing a conversation between the"
56
- " following workers: {members}. Given the following user request,"
57
  " respond with the worker to act next. Each worker will perform a"
58
  " task and respond with their results and status. When finished,"
59
  " respond with FINISH."
60
  )
61
- # Our team supervisor is an LLM node. It just picks the next agent to process
62
- # and decides when the work is completed
63
  options = ["FINISH"] + members
64
- # Using openai function calling can make output parsing easier for us
65
  function_def = {
66
  "name": "route",
67
  "description": "Select the next role.",
@@ -79,6 +73,7 @@ def create_graph(topic, word_count):
79
  "required": ["next"],
80
  },
81
  }
 
82
  prompt = ChatPromptTemplate.from_messages(
83
  [
84
  ("system", system_prompt),
@@ -99,31 +94,24 @@ def create_graph(topic, word_count):
99
  | JsonOutputFunctionsParser()
100
  )
101
 
102
- research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
103
  research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
104
 
105
- # NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION
106
- code_agent = create_agent(
107
- llm,
108
- [python_repl_tool],
109
- "You may generate safe python code to analyze data and generate charts using matplotlib.",
110
- )
111
- code_node = functools.partial(agent_node, agent=code_agent, name="Coder")
112
 
113
  workflow = StateGraph(AgentState)
114
  workflow.add_node("Researcher", research_node)
115
- workflow.add_node("Coder", code_node)
116
  workflow.add_node("supervisor", supervisor_chain)
117
 
118
  for member in members:
119
- # We want our workers to ALWAYS "report back" to the supervisor when done
120
  workflow.add_edge(member, "supervisor")
121
- # The supervisor populates the "next" field in the graph state
122
- # which routes to a node or finishes
123
  conditional_map = {k: k for k in members}
124
  conditional_map["FINISH"] = END
 
125
  workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
126
- # Finally, add entrypoint
127
  workflow.set_entry_point("supervisor")
128
 
129
  return workflow.compile()
@@ -132,6 +120,6 @@ def run_multi_agent(topic, word_count):
132
  graph = create_graph(topic, word_count)
133
  result = graph.invoke({
134
  "messages": [
135
- HumanMessage(content="Code hello world and print it to the terminal")
136
  ]
137
  })
 
18
  from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
19
  from langgraph.graph import StateGraph, END
20
 
21
+ # Shared graph state: the input/output record passed between every LangGraph node.
+ class AgentState(TypedDict):
22
+ # operator.add annotation: each node's new messages are appended to the
+ # accumulated message list rather than replacing it
+ messages: Annotated[Sequence[BaseMessage], operator.add]
23
+ # name of the worker node to route to next (or "FINISH" to end the graph)
+ next: str
24
+
25
  def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
 
26
  prompt = ChatPromptTemplate.from_messages(
27
  [
28
  (
29
+ "system",
30
+ system_prompt
31
  ),
32
  MessagesPlaceholder(variable_name="messages"),
33
  MessagesPlaceholder(variable_name="agent_scratchpad"),
 
41
  result = agent.invoke(state)
42
  return {"messages": [HumanMessage(content=result["output"], name=name)]}
43
 
 
 
 
 
 
 
 
 
44
  def create_graph(topic, word_count):
45
+ tavily_tool = TavilySearchResults(max_results=10)
46
+
47
+ members = ["Researcher", "Blogger"]
48
+
49
  system_prompt = (
50
  "You are a supervisor tasked with managing a conversation between the"
51
+ " following workers: {members}. Given the following user request,"
52
  " respond with the worker to act next. Each worker will perform a"
53
  " task and respond with their results and status. When finished,"
54
  " respond with FINISH."
55
  )
56
+
 
57
  options = ["FINISH"] + members
58
+
59
  function_def = {
60
  "name": "route",
61
  "description": "Select the next role.",
 
73
  "required": ["next"],
74
  },
75
  }
76
+
77
  prompt = ChatPromptTemplate.from_messages(
78
  [
79
  ("system", system_prompt),
 
94
  | JsonOutputFunctionsParser()
95
  )
96
 
97
+ research_agent = create_agent(llm, [tavily_tool], "Research content on topic {topic}")
98
  research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")
99
 
100
+ blogger_agent = create_agent(llm, [], "Write a {word_count}-word blog post on topic {topic}")
101
+ blogger_node = functools.partial(agent_node, agent=code_agent, name="Blogger")
 
 
 
 
 
102
 
103
  workflow = StateGraph(AgentState)
104
  workflow.add_node("Researcher", research_node)
105
+ workflow.add_node("Blogger", blogger_node)
106
  workflow.add_node("supervisor", supervisor_chain)
107
 
108
  for member in members:
 
109
  workflow.add_edge(member, "supervisor")
110
+
 
111
  conditional_map = {k: k for k in members}
112
  conditional_map["FINISH"] = END
113
+
114
  workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
 
115
  workflow.set_entry_point("supervisor")
116
 
117
  return workflow.compile()
 
120
  graph = create_graph(topic, word_count)
121
  result = graph.invoke({
122
  "messages": [
123
+ HumanMessage(content="Evolution of Retrieval-Augmented Generation from Naive RAG to Agentic RAG")
124
  ]
125
  })