bstraehle committed on
Commit
7cd8d83
1 Parent(s): 96013a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +157 -5
app.py CHANGED
@@ -1,9 +1,153 @@
1
  import gradio as gr
2
- import agentops, os
 
3
 
4
- #from crew import get_crew
 
 
5
 
6
- LLM = "gpt-4o"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  def invoke(openai_api_key, topic, word_count=500):
9
  if (openai_api_key == ""):
@@ -15,8 +159,16 @@ def invoke(openai_api_key, topic, word_count=500):
15
 
16
  os.environ["OPENAI_API_KEY"] = openai_api_key
17
 
18
- #result = get_crew(LLM).kickoff(inputs={"topic": topic, "word_count": word_count})
19
- result = "TODO"
 
 
 
 
 
 
 
 
20
 
21
  return result
22
 
 
1
  import gradio as gr
2
+ import getpass
3
+ import os
4
 
5
+ def _set_if_undefined(var: str):
6
+ if not os.environ.get(var):
7
+ os.environ[var] = getpass.getpass(f"Please provide your {var}")
8
 
9
# Make sure every API key this app depends on is present, prompting
# interactively for any that are missing from the environment.
_set_if_undefined("OPENAI_API_KEY")
_set_if_undefined("LANGCHAIN_API_KEY")
_set_if_undefined("TAVILY_API_KEY")

# Optional, add tracing in LangSmith
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "Multi-agent Collaboration"

from typing import Annotated, List, Tuple, Union

from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.tools import tool
from langchain_experimental.tools import PythonREPLTool

# Web-search tool used by the "Researcher" worker; capped at 5 hits per query.
tavily_tool = TavilySearchResults(max_results=5)

# This executes code locally, which can be unsafe
python_repl_tool = PythonREPLTool()
27
+
28
+ from langchain.agents import AgentExecutor, create_openai_tools_agent
29
+ from langchain_core.messages import BaseMessage, HumanMessage
30
+ from langchain_openai import ChatOpenAI
31
+
32
def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str):
    """Build an ``AgentExecutor`` for one worker node.

    Each worker node will be given a name and some tools: the prompt pairs
    the supplied system instructions with placeholders for the running
    conversation ("messages") and the agent's tool-calling scratchpad.
    """
    message_templates = [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ]
    worker_prompt = ChatPromptTemplate.from_messages(message_templates)
    worker_agent = create_openai_tools_agent(llm, tools, worker_prompt)
    return AgentExecutor(agent=worker_agent, tools=tools)
47
+
48
def agent_node(state, agent, name):
    """Run *agent* on the current graph state and wrap its answer.

    The agent's "output" text is re-emitted as a ``HumanMessage`` tagged with
    *name*, so downstream nodes can tell which worker produced it.
    """
    outcome = agent.invoke(state)
    reply = HumanMessage(content=outcome["output"], name=name)
    return {"messages": [reply]}
51
+
52
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser

# Worker nodes the supervisor may delegate to.
members = ["Researcher", "Coder"]
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers: {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# Our team supervisor is an LLM node. It just picks the next agent to process
# and decides when the work is completed
options = ["FINISH"] + members
# Using openai function calling can make output parsing easier for us
# (the model must answer by "calling" this function, so its arguments are
# guaranteed to be one of `options`).
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
# Supervisor prompt: system instructions, the conversation so far, then an
# explicit routing question. `options`/`members` are baked in via .partial().
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))

llm = ChatOpenAI(model="gpt-4-1106-preview")

# prompt -> forced function call -> JSON parse of the call's arguments,
# yielding a dict like {"next": "<role>"} used for conditional routing below.
supervisor_chain = (
    prompt
    | llm.bind_functions(functions=[function_def], function_call="route")
    | JsonOutputFunctionsParser()
)
103
+
104
import operator
from typing import Annotated, Any, Dict, List, Optional, Sequence, TypedDict
import functools

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import StateGraph, END


# The agent state is the input to each node in the graph
class AgentState(TypedDict):
    # The annotation tells the graph that new messages will always
    # be added to the current states (operator.add tells langgraph to
    # append node output to the list instead of replacing it)
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next
    next: str
119
+
120
+
121
# Wire the two workers and the supervisor into a langgraph state machine.
research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

# NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

workflow = StateGraph(AgentState)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("supervisor", supervisor_chain)

for member in members:
    # We want our workers to ALWAYS "report back" to the supervisor when done
    workflow.add_edge(member, "supervisor")
# The supervisor populates the "next" field in the graph state
# which routes to a node or finishes
conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
# Finally, add entrypoint
workflow.set_entry_point("supervisor")

# Compile into a runnable graph; callers drive it via graph.stream(...).
graph = workflow.compile()
149
+
150
+ ###
151
 
152
  def invoke(openai_api_key, topic, word_count=500):
153
  if (openai_api_key == ""):
 
159
 
160
  os.environ["OPENAI_API_KEY"] = openai_api_key
161
 
162
+ for s in graph.stream(
163
+ {
164
+ "messages": [
165
+ HumanMessage(content="Code hello world and print it to the terminal")
166
+ ]
167
+ }
168
+ ):
169
+ if "__end__" not in s:
170
+ print(s)
171
+ print("----")
172
 
173
  return result
174