[Doc] Name variable langgraph_agent_executor (#26799)

Commit 6a861b0ad9 (parent 5346c7b27e) by William FH, 2024-09-30 15:52:23 -07:00, committed via GitHub.
GPG Key ID: B5690EEEBB952194 — no known key found for this signature in the database.

Changed file (diff follows):

@ -34,6 +34,12 @@
"LangChain agents (the [AgentExecutor](https://python.langchain.com/api_reference/langchain/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor) in particular) have multiple configuration parameters.\n",
"In this notebook we will show how those parameters map to the LangGraph react agent executor using the [create_react_agent](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) prebuilt helper method.\n",
"\n",
"\n",
":::note\n",
"In LangGraph, the graph replaces LangChain's agent executor. It manages the agent's cycles and tracks the scratchpad as messages within its state. The LangChain \"agent\" corresponds to the state_modifier and LLM you've provided.\n",
":::\n",
"\n",
"\n",
"#### Prerequisites\n",
"\n",
"This how-to guide uses OpenAI as the LLM. Install the dependencies to run."
@ -183,10 +189,10 @@
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"app = create_react_agent(model, tools)\n",
"langgraph_agent_executor = create_react_agent(model, tools)\n",
"\n",
"\n",
"messages = app.invoke({\"messages\": [(\"human\", query)]})\n",
"messages = langgraph_agent_executor.invoke({\"messages\": [(\"human\", query)]})\n",
"{\n",
" \"input\": query,\n",
" \"output\": messages[\"messages\"][-1].content,\n",
@ -216,7 +222,9 @@
"\n",
"new_query = \"Pardon?\"\n",
"\n",
"messages = app.invoke({\"messages\": message_history + [(\"human\", new_query)]})\n",
"messages = langgraph_agent_executor.invoke(\n",
" {\"messages\": message_history + [(\"human\", new_query)]}\n",
")\n",
"{\n",
" \"input\": new_query,\n",
" \"output\": messages[\"messages\"][-1].content,\n",
@ -309,10 +317,12 @@
"# This could also be a SystemMessage object\n",
"# system_message = SystemMessage(content=\"You are a helpful assistant. Respond only in Spanish.\")\n",
"\n",
"app = create_react_agent(model, tools, state_modifier=system_message)\n",
"langgraph_agent_executor = create_react_agent(\n",
" model, tools, state_modifier=system_message\n",
")\n",
"\n",
"\n",
"messages = app.invoke({\"messages\": [(\"user\", query)]})"
"messages = langgraph_agent_executor.invoke({\"messages\": [(\"user\", query)]})"
]
},
{
@ -356,10 +366,12 @@
" ]\n",
"\n",
"\n",
"app = create_react_agent(model, tools, state_modifier=_modify_state_messages)\n",
"langgraph_agent_executor = create_react_agent(\n",
" model, tools, state_modifier=_modify_state_messages\n",
")\n",
"\n",
"\n",
"messages = app.invoke({\"messages\": [(\"human\", query)]})\n",
"messages = langgraph_agent_executor.invoke({\"messages\": [(\"human\", query)]})\n",
"print(\n",
" {\n",
" \"input\": query,\n",
@ -503,13 +515,13 @@
"# system_message = SystemMessage(content=\"You are a helpful assistant. Respond only in Spanish.\")\n",
"\n",
"memory = MemorySaver()\n",
"app = create_react_agent(\n",
"langgraph_agent_executor = create_react_agent(\n",
" model, tools, state_modifier=system_message, checkpointer=memory\n",
")\n",
"\n",
"config = {\"configurable\": {\"thread_id\": \"test-thread\"}}\n",
"print(\n",
" app.invoke(\n",
" langgraph_agent_executor.invoke(\n",
" {\n",
" \"messages\": [\n",
" (\"user\", \"Hi, I'm polly! What's the output of magic_function of 3?\")\n",
@ -520,15 +532,15 @@
")\n",
"print(\"---\")\n",
"print(\n",
" app.invoke({\"messages\": [(\"user\", \"Remember my name?\")]}, config)[\"messages\"][\n",
" -1\n",
" ].content\n",
" langgraph_agent_executor.invoke(\n",
" {\"messages\": [(\"user\", \"Remember my name?\")]}, config\n",
" )[\"messages\"][-1].content\n",
")\n",
"print(\"---\")\n",
"print(\n",
" app.invoke({\"messages\": [(\"user\", \"what was that output again?\")]}, config)[\n",
" \"messages\"\n",
" ][-1].content\n",
" langgraph_agent_executor.invoke(\n",
" {\"messages\": [(\"user\", \"what was that output again?\")]}, config\n",
" )[\"messages\"][-1].content\n",
")"
]
},
@ -636,9 +648,13 @@
" return prompt.invoke({\"messages\": state[\"messages\"]}).to_messages()\n",
"\n",
"\n",
"app = create_react_agent(model, tools, state_modifier=_modify_state_messages)\n",
"langgraph_agent_executor = create_react_agent(\n",
" model, tools, state_modifier=_modify_state_messages\n",
")\n",
"\n",
"for step in app.stream({\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"):\n",
"for step in langgraph_agent_executor.stream(\n",
" {\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"\n",
"):\n",
" print(step)"
]
},
@ -707,9 +723,9 @@
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"app = create_react_agent(model, tools=tools)\n",
"langgraph_agent_executor = create_react_agent(model, tools=tools)\n",
"\n",
"messages = app.invoke({\"messages\": [(\"human\", query)]})\n",
"messages = langgraph_agent_executor.invoke({\"messages\": [(\"human\", query)]})\n",
"\n",
"messages"
]
@ -839,10 +855,10 @@
"\n",
"RECURSION_LIMIT = 2 * 3 + 1\n",
"\n",
"app = create_react_agent(model, tools=tools)\n",
"langgraph_agent_executor = create_react_agent(model, tools=tools)\n",
"\n",
"try:\n",
" for chunk in app.stream(\n",
" for chunk in langgraph_agent_executor.stream(\n",
" {\"messages\": [(\"human\", query)]},\n",
" {\"recursion_limit\": RECURSION_LIMIT},\n",
" stream_mode=\"values\",\n",
@ -953,12 +969,12 @@
"source": [
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"app = create_react_agent(model, tools=tools)\n",
"langgraph_agent_executor = create_react_agent(model, tools=tools)\n",
"# Set the max timeout for each step here\n",
"app.step_timeout = 2\n",
"langgraph_agent_executor.step_timeout = 2\n",
"\n",
"try:\n",
" for chunk in app.stream({\"messages\": [(\"human\", query)]}):\n",
" for chunk in langgraph_agent_executor.stream({\"messages\": [(\"human\", query)]}):\n",
" print(chunk)\n",
" print(\"------\")\n",
"except TimeoutError:\n",
@ -994,17 +1010,21 @@
"\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"app = create_react_agent(model, tools=tools)\n",
"langgraph_agent_executor = create_react_agent(model, tools=tools)\n",
"\n",
"\n",
"async def stream(app, inputs):\n",
" async for chunk in app.astream({\"messages\": [(\"human\", query)]}):\n",
"async def stream(langgraph_agent_executor, inputs):\n",
" async for chunk in langgraph_agent_executor.astream(\n",
" {\"messages\": [(\"human\", query)]}\n",
" ):\n",
" print(chunk)\n",
" print(\"------\")\n",
"\n",
"\n",
"try:\n",
" task = asyncio.create_task(stream(app, {\"messages\": [(\"human\", query)]}))\n",
" task = asyncio.create_task(\n",
" stream(langgraph_agent_executor, {\"messages\": [(\"human\", query)]})\n",
" )\n",
" await asyncio.wait_for(task, timeout=3)\n",
"except TimeoutError:\n",
" print(\"Task Cancelled.\")"
@ -1108,10 +1128,10 @@
"\n",
"RECURSION_LIMIT = 2 * 1 + 1\n",
"\n",
"app = create_react_agent(model, tools=tools)\n",
"langgraph_agent_executor = create_react_agent(model, tools=tools)\n",
"\n",
"try:\n",
" for chunk in app.stream(\n",
" for chunk in langgraph_agent_executor.stream(\n",
" {\"messages\": [(\"human\", query)]},\n",
" {\"recursion_limit\": RECURSION_LIMIT},\n",
" stream_mode=\"values\",\n",
@ -1289,10 +1309,14 @@
" return [(\"system\", \"You are a helpful assistant\"), state[\"messages\"][0]]\n",
"\n",
"\n",
"app = create_react_agent(model, tools, state_modifier=_modify_state_messages)\n",
"langgraph_agent_executor = create_react_agent(\n",
" model, tools, state_modifier=_modify_state_messages\n",
")\n",
"\n",
"try:\n",
" for step in app.stream({\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"):\n",
" for step in langgraph_agent_executor.stream(\n",
" {\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"\n",
" ):\n",
" pass\n",
"except GraphRecursionError as e:\n",
" print(\"Stopping agent prematurely due to triggering stop condition\")"