update agent docs (#10894)

pull/10902/head
Harrison Chase 1 year ago committed by GitHub
parent 40e836c67e
commit a1ade48e8f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -1,10 +0,0 @@
# Structured tool chat
The structured tool chat agent is capable of using multi-input tools.
Older agents are configured to specify an action input as a single string, but this agent can use the provided tools' `args_schema` to populate the action input.
import Example from "@snippets/modules/agents/agent_types/structured_chat.mdx"
<Example/>

@ -9,7 +9,7 @@ In agents, a language model is used as a reasoning engine to determine which act
Some important terminology (and schema) to know:
1. `AgentAction`: This is a dataclass that represents the action an agent should take. It has a `tool` property (which is the tool that should be invoked) and a `tool_input` property (the input to that tool)
1. `AgentAction`: This is a dataclass that represents the action an agent should take. It has a `tool` property (which is the name of the tool that should be invoked) and a `tool_input` property (the input to that tool)
2. `AgentFinish`: This is a dataclass that signifies that the agent has finished and should return to the user. It has a `return_values` parameter, which is a dictionary to return. It often only has one key - `output` - that is a string, and so often it is just this key that is returned.
3. `intermediate_steps`: These represent previous agent actions and corresponding outputs that are passed around. These are important to pass to future iterations so the agent knows what work it has already done. This is typed as a `List[Tuple[AgentAction, Any]]`. Note that observation is currently left as type `Any` to be maximally flexible. In practice, this is often a string.

@ -74,7 +74,8 @@
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import ReActSingleInputOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain import hub"
]
},
{
@ -84,7 +85,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-chat\")"
]
},
@ -125,6 +125,16 @@
"} | prompt | llm_with_stop | ReActSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0b354cfe",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 23,
@ -132,7 +142,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)"
]
@ -278,24 +287,23 @@
},
{
"cell_type": "code",
"execution_count": 5,
"id": "16b17ca8",
"execution_count": null,
"id": "a5a705b2",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")"
"from langchain.chat_models import ChatOpenAI\n",
"from langchain import hub"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "e93c0832",
"execution_count": 5,
"id": "16b17ca8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")\n",
"chat_model = ChatOpenAI(temperature=0, model='gpt-4')"
]
},
@ -322,6 +330,17 @@
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f50a5ea8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_messages"
]
},
{
"cell_type": "code",
"execution_count": 26,
@ -329,9 +348,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_messages\n",
"\n",
"# We need some extra steering, or the chat model forgets how to respond sometimes\n",
"TEMPLATE_TOOL_RESPONSE = \"\"\"TOOL RESPONSE: \n",
"---------------------\n",
@ -349,6 +365,16 @@
"} | prompt | chat_model_with_stop | JSONAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6cc033fc",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 27,
@ -356,7 +382,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)"
]
@ -491,13 +516,21 @@
{
"cell_type": "code",
"execution_count": null,
"id": "734d1b21",
"id": "141f2469",
"metadata": {},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "734d1b21",
"metadata": {},
"outputs": [],
"source": [
"memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY, temperature=0)\n",
"agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)"

@ -90,6 +90,16 @@
"We will first use LangChain Expression Language to create this agent"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eac103f1",
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder"
]
},
{
"cell_type": "code",
"execution_count": 3,
@ -97,7 +107,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"prompt = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"You are a helpful assistant\"),\n",
" (\"user\", \"{input}\"),\n",
@ -105,6 +114,16 @@
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50f40df4",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import format_tool_to_openai_function"
]
},
{
"cell_type": "code",
"execution_count": 4,
@ -112,12 +131,22 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import format_tool_to_openai_function\n",
"llm_with_tools = llm.bind(\n",
" functions=[format_tool_to_openai_function(t) for t in tools]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3cafa0a3",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 5,
@ -125,14 +154,22 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.format_scratchpad import format_to_openai_functions\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_to_openai_functions(x['intermediate_steps'])\n",
"} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5125573e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 6,
@ -140,7 +177,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},

@ -78,7 +78,8 @@
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import ReActSingleInputOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain import hub"
]
},
{
@ -88,7 +89,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react\")\n",
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
@ -119,6 +119,16 @@
"} | prompt | llm_with_stop | ReActSingleInputOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a0a57769",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 8,
@ -126,7 +136,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
@ -250,6 +259,16 @@
"The main difference here is a different prompt. We will use JSON to encode the agent's actions (chat models are a bit tougher to steet, so using JSON helps to enforce the output format)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6eeb1693",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 29,
@ -257,8 +276,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"chat_model = ChatOpenAI(temperature=0)"
]
},
@ -286,6 +303,16 @@
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "deaeb1f6",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 31,
@ -293,7 +320,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps'])\n",

@ -51,7 +51,8 @@
"outputs": [],
"source": [
"from langchain.agents.output_parsers import SelfAskOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain import hub"
]
},
{
@ -61,7 +62,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/self-ask-with-search\")"
]
},
@ -93,6 +93,16 @@
"} | prompt | llm_with_stop | SelfAskOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "643c3bfa",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 14,
@ -100,7 +110,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},

@ -86,6 +86,16 @@
"We can first construct this agent using LangChain Expression Language"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bf35a623",
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub"
]
},
{
"cell_type": "code",
"execution_count": 19,
@ -93,10 +103,19 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/react-multi-input-json\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "38c6496f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description_and_args"
]
},
{
"cell_type": "code",
"execution_count": 20,
@ -104,7 +123,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.render import render_text_description_and_args\n",
"prompt = prompt.partial(\n",
" tools=render_text_description_and_args(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
@ -122,6 +140,17 @@
"llm_with_stop = llm.bind(stop=[\"Observation\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2ceceadb",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str"
]
},
{
"cell_type": "code",
"execution_count": 22,
@ -129,14 +158,22 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.output_parsers import JSONAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"agent = {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x['intermediate_steps']),\n",
"} | prompt | llm_with_stop | JSONAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "470b0859",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 23,
@ -144,7 +181,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},

@ -75,7 +75,8 @@
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain.agents.output_parsers import XMLAgentOutputParser\n",
"from langchain.agents.format_scratchpad import format_xml"
"from langchain.agents.format_scratchpad import format_xml\n",
"from langchain import hub"
]
},
{
@ -85,7 +86,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"prompt = hub.pull(\"hwchase17/xml-agent\")"
]
},
@ -125,6 +125,16 @@
"} | prompt | llm_with_stop | XMLAgentOutputParser()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4e2bb03e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor"
]
},
{
"cell_type": "code",
"execution_count": 16,
@ -132,7 +142,6 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},

@ -170,7 +170,7 @@ Let's fix that by adding in memory.
In order to do this, we need to do two things:
1. Add a place for memory variables to go in the prompt
2. Add memory to the `AgentExecutor` (note that we add it here, and NOT to the agent, as this is the outermost chain)
2. Keep track of the chat history
First, let's add a place for memory in the prompt.
We do this by adding a placeholder for messages with the key `"chat_history"`.
@ -187,15 +187,10 @@ prompt = ChatPromptTemplate.from_messages([
MessagesPlaceholder(variable_name="agent_scratchpad"),
])
```
Next, let's create a memory object.
We will do this by using `ConversationBufferMemory`.
Importantly, we set `memory_key` also equal to `"chat_history"` (to align it with the prompt) and set `return_messages` (to make it return messages rather than a string).
```python
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key=MEMORY_KEY, return_messages=True)
We can then set up a list to track the chat history
```
from langchain.schema.messages import HumanMessage, AIMessage
chat_history = []
```
We can then put it all together!
@ -206,7 +201,13 @@ agent = {
"agent_scratchpad": lambda x: format_to_openai_functions(x['intermediate_steps']),
"chat_history": lambda x: x["chat_history"]
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
agent_executor.run("how many letters in the word educa?")
agent_executor.run("is that a real word?")
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
```
When running, we now need to track the inputs and outputs as chat history
```
input1 = "how many letters in the word educa?"
result = agent_executor.invoke({"input": input1, "chat_history": chat_history})
chat_history.append(HumanMessage(content=input1))
chat_history.append(AIMessage(content=result['output']))
agent_executor.invoke({"input": "is that a real word?", "chat_history": chat_history})
```

@ -1,3 +1,10 @@
"""Logic for formatting intermediate steps into an agent scratchpad.
Intermediate steps refers to the list of (AgentAction, observation) tuples
that result from previous iterations of the agent.
Depending on the prompting strategy you are using, you may want to format these
differently before passing them into the LLM.
"""
from langchain.agents.format_scratchpad.log import format_log_to_str
from langchain.agents.format_scratchpad.log_to_messages import format_log_to_messages
from langchain.agents.format_scratchpad.openai_functions import (

@ -1,3 +1,9 @@
"""Different methods for rendering Tools to be passed to LLMs.
Depending on the LLM you are using and the prompting strategy you are using,
you may want Tools to be rendered in a different way.
This module contains various ways to render tools.
"""
from typing import List
from langchain.tools.base import BaseTool

Loading…
Cancel
Save