class AgentState(TypedDict):
    """State shared between the nodes of the LangGraph agent graph."""

    # The input string
    input: str
    # The list of previous messages in the conversation
    chat_history: list[BaseMessage]
    # The outcome of a given call to the agent
    # Needs `None` as a valid type, since this is what this will start as
    agent_outcome: Union[AgentAction, AgentFinish, None]
    # List of actions and corresponding observations
    # Here we annotate this with `operator.add` to indicate that operations to
    # this state should be ADDED to the existing values (not overwrite it)
    intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
# This is a helper class we have that is useful for running tools
# It takes in an agent action and calls that tool and returns the result
tool_executor = ToolExecutor(tools)
# Define the agent node function
def run_agent(data):
    """Invoke the agent runnable on the current state.

    Returns a partial state update containing the new ``agent_outcome``
    (an AgentAction list or an AgentFinish, per the agent runnable).
    """
    agent_outcome = agent_runnable.invoke(data)
    return {"agent_outcome": agent_outcome}
# Define the function to execute tools
def execute_tools(data):
    """Execute the tool chosen by the latest agent action and record the observation.

    Returns a partial state update for ``intermediate_steps``; because that
    field is annotated with ``operator.add``, the new pair is appended to the
    existing list rather than replacing it.
    """
    # Get the most recent agent_outcome - this is the key added in the `agent` above
    agent_action = data["agent_outcome"]
    print("agent action:{}".format(agent_action))
    # NOTE(review): `agent_action[-1]` assumes `agent_outcome` is a *list* of
    # AgentActions (as produced by the OpenAI tools agent) — confirm upstream.
    output = tool_executor.invoke(agent_action[-1])
    return {"intermediate_steps": [(agent_action[-1], str(output))]}
# Define logic that will be used to determine which conditional edge to go down
def should_continue(data):
    """Route the graph: 'end' once the agent has finished, else 'continue'."""
    # If the agent outcome is an AgentFinish, then we return `end` string
    # This will be used when setting up the graph to define the flow
    if isinstance(data["agent_outcome"], AgentFinish):
        return "end"
    # Otherwise, an AgentAction is returned
    # Here we return `continue` string
    # This will be used when setting up the graph to define the flow
    else:
        return "continue"
# Get the prompt to use - you can modify this!
prompt = hub.pull("hwchase17/openai-tools-agent")

# Construct the OpenAI tools agent
agent_runnable = create_openai_tools_agent(chat_zhipu, tools, prompt)

# Define a new graph
workflow = StateGraph(AgentState)

# Define the two nodes we will cycle between
workflow.add_node("agent", run_agent)
workflow.add_node("action", execute_tools)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    {
        # If `continue`, then we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END,
    },
)

# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, `agent` node is called next.
workflow.add_edge("action", "agent")

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
运行示例
最后,我们需要准备输入数据并运行应用。输入应包含用户的初始请求和空的对话历史。
# Build the initial graph state: the user's question plus an empty chat history.
inputs = dict(input="what is the weather in NewYork", chat_history=[])
# Run the compiled graph to completion and read out the final agent outcome.
result = app.invoke(inputs)
# NOTE(review): relies on the final `agent_outcome` exposing a `.messages`
# list whose first entry carries the answer text — confirm against the
# installed langchain_core version.
print(result["agent_outcome"].messages[0].content)
执行流程解析:
程序启动,进入 agent 节点。
run_agent 函数被调用,LLM 分析输入 "what is the weather in NewYork"。