Spaces: Paused
netman19731 committed
Commit 3d53b23 · Parent(s): c9586ba
Create app.py
app.py
ADDED
@@ -0,0 +1,149 @@
from langchain_openai.chat_models import ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain.tools.render import format_tool_to_openai_function
from langgraph.prebuilt import ToolExecutor, ToolInvocation
from typing import TypedDict, Annotated, Sequence
import operator
from langchain_core.messages import BaseMessage, FunctionMessage, HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain.tools import ShellTool, tool
import json
import os
import gradio as gr

# LangSmith tracing and API credentials (hardcoded as committed).
os.environ["LANGCHAIN_TRACING_V2"] = "true"  # LangChain checks for the lowercase string "true"
os.environ["LANGCHAIN_API_KEY"] = "ls__54e16f70b2b0455aad0f2cbf47777d30"
os.environ["OPENAI_API_KEY"] = "20a79668d6113e99b35fcd541c65bfeaec497b8262c111bd328ef5f1ad8c6335"
# os.environ["OPENAI_API_KEY"] = "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"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_PROJECT"] = "default"
os.environ["TAVILY_API_KEY"] = "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"

# The shared graph state: a message list that every node appends to via operator.add.
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]

model = ChatOpenAI(model="gpt-3.5-turbo-1106", api_key="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")
# model = ChatOpenAI(model="Qwen/Qwen1.5-72B-Chat", api_key="20a79668d6113e99b35fcd541c65bfeaec497b8262c111bd328ef5f1ad8c6335", base_url="https://api.together.xyz/v1")


prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a restaurant manager named Tang Seng (唐僧). You serve the customers and have three staff members: the cook Bajie, the waiter Sha Seng, and the cashier Wukong. Based on each customer's request, issue instructions to the right staff member; your exchanges with the staff are also shown to the customer. Once checkout is finished, the service is complete."),
    ("user", "{input}")
])
# Stub tools: each one ignores its query and returns a fixed status string.
# The docstrings double as the tool descriptions sent to the model.
@tool(return_direct=True)
def chushi(query: str) -> str:
    """You are the restaurant cook Bajie; following the manager's instruction, you prepare a dish."""
    return "The dish is ready"

@tool
def shizhe(query: str) -> str:
    """You are the restaurant waiter Sha Seng; following the manager's instruction, you bring the dish to the customer."""
    return "The dish has been served"

@tool
def shouyin(query: str) -> str:
    """You are the restaurant cashier Wukong; following the manager's instruction, you settle the customer's bill."""
    return "Checkout complete, we look forward to your next visit"

tools = [chushi, shizhe, shouyin]

# Convert each tool to an OpenAI function schema, bind the schemas to the model,
# and build an executor that can run any tool by name.
functions = [format_tool_to_openai_function(t) for t in tools]
model = model.bind_functions(functions)
# model = model.bind(tools=tools)
tool_executor = ToolExecutor(tools)
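# For reference, format_tool_to_openai_function produces an OpenAI
# function-calling schema roughly of this shape (illustrative sketch,
# not output captured from this app):
#
#   {"name": "chushi",
#    "description": "You are the restaurant cook Bajie; ...",
#    "parameters": {"type": "object",
#                   "properties": {"query": {"type": "string"}},
#                   "required": ["query"]}}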

# Route after each model turn: if the last message requested a function call,
# continue to the tool node; otherwise finish.
def should_continue(state):
    messages = state['messages']
    last_message = messages[-1]
    # If there is no function call, then we finish
    if "function_call" not in last_message.additional_kwargs:
        return "end"
    # Otherwise if there is, we continue
    else:
        return "continue"

# Define the function that calls the model
def call_model(state):
    messages = state['messages']
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}

# Define the function to execute tools
def call_tool(state):
    messages = state['messages']
    # Based on the continue condition
    # we know the last message involves a function call
    last_message = messages[-1]
    # We construct a ToolInvocation from the function_call
    action = ToolInvocation(
        tool=last_message.additional_kwargs["function_call"]["name"],
        tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
    )
    # We call the tool_executor and get back a response
    response = tool_executor.invoke(action)
    # We use the response to create a FunctionMessage
    function_message = FunctionMessage(content=str(response), name=action.tool)
    # We return a list, because this will get added to the existing list
    return {"messages": [function_message]}
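# For context, an OpenAI function-call request arrives on the AIMessage in
# additional_kwargs, in the standard shape below (illustrative sketch with a
# hypothetical argument value):
#
#   {"function_call": {"name": "chushi",
#                      "arguments": '{"query": "prepare a vegetable dish"}'}}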

from langgraph.graph import StateGraph, END
# Define a new graph
workflow = StateGraph(AgentState)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", call_tool)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `continue`, then we call the `action` (tool) node.
        "continue": "action",
        # Otherwise we finish.
        "end": END
    }
)

# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, `agent` is called next.
workflow.add_edge('action', 'agent')

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
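# A minimal smoke test of the compiled graph, mirroring what predict() does
# below (assumes a valid OPENAI_API_KEY; the order text is hypothetical):
#
#   out = app.invoke({"messages": [HumanMessage(content="I'd like to order a dish")]})
#   print(out["messages"][-1].content)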


async def predict(message, history):
    # history_langchain_format = []
    # for human, ai in history:
    #     history_langchain_format.append(HumanMessage(content=human))
    #     history_langchain_format.append(AIMessage(content=ai))
    # history_langchain_format.append(HumanMessage(content=message))
    # que = {"messages": history_langchain_format}
    que = {"messages": [HumanMessage(content=message)]}
    res = app.invoke(que)
    if res:
        response = res["messages"][-1].content
        return response
    else:
        # Return the apology so the customer sees it in the chat
        # (the original printed it to the server log and returned None).
        return "Sorry, a small problem occurred. Please contact me on WeChat: 13603634456"

demo = gr.ChatInterface(
    fn=predict,
    title="西游餐厅 (Journey to the West Restaurant)",
    description="The Journey to the West Restaurant is now open! I am the manager, Tang Seng. Welcome! Whatever you need, just ask me.",
)
demo.launch()
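The commented-out block in predict() points at a multi-turn variant that replays the Gradio chat history into the graph on every call. A minimal sketch of that variant, assuming history arrives as (user, assistant) string pairs (Gradio's default tuple format for ChatInterface):

    async def predict(message, history):
        msgs = []
        for human, ai in history:
            msgs.append(HumanMessage(content=human))
            msgs.append(AIMessage(content=ai))
        msgs.append(HumanMessage(content=message))
        res = app.invoke({"messages": msgs})
        return res["messages"][-1].content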