From 10dab053b40ab8b60e3105a6f9673faeb9b91945 Mon Sep 17 00:00:00 2001 From: Shrined <45953733+Shrined@users.noreply.github.com> Date: Mon, 3 Apr 2023 23:56:20 -0500 Subject: [PATCH] Add Enum for agent types (#2321) This pull request adds an enum class for the various types of agents used in the project, located in the `agent_types.py` file. Currently, the project is using hardcoded strings for the initialization of these agents, which can lead to errors and make the code harder to maintain. With the introduction of the new enums, the code will be more readable and less error-prone. The new enum members include: - ZERO_SHOT_REACT_DESCRIPTION - REACT_DOCSTORE - SELF_ASK_WITH_SEARCH - CONVERSATIONAL_REACT_DESCRIPTION - CHAT_ZERO_SHOT_REACT_DESCRIPTION - CHAT_CONVERSATIONAL_REACT_DESCRIPTION In this PR, I have also replaced the hardcoded strings with the appropriate enum members throughout the codebase, ensuring a smooth transition to the new approach. --- docs/ecosystem/aim_tracking.ipynb | 5 +++-- docs/ecosystem/clearml_tracking.ipynb | 3 ++- docs/ecosystem/google_serper.md | 3 ++- docs/ecosystem/wandb_tracking.ipynb | 5 +++-- docs/getting_started/getting_started.md | 8 ++++--- .../examples/agent_vectorstore.ipynb | 7 ++++--- .../examples/async_agent.ipynb | 7 ++++--- .../examples/intermediate_steps.ipynb | 3 ++- .../examples/max_iterations.ipynb | 7 ++++--- .../examples/chat_conversation_agent.ipynb | 5 +++-- .../examples/conversational_agent.ipynb | 3 ++- .../modules/agents/agents/examples/mrkl.ipynb | 5 +++-- .../agents/agents/examples/mrkl_chat.ipynb | 3 ++- .../agents/agents/examples/react.ipynb | 3 ++- .../examples/self_ask_with_search.ipynb | 3 ++- docs/modules/agents/getting_started.ipynb | 2 +- docs/modules/agents/tools/custom_tools.ipynb | 12 ++++++----- .../tools/examples/chatgpt_plugins.ipynb | 3 ++- .../agents/tools/examples/google_serper.ipynb | 3 ++- .../agents/tools/examples/human_tools.ipynb | 3 ++- .../agents/tools/examples/search_tools.ipynb | 9 ++++---- .../agents/tools/examples/zapier.ipynb | 3 ++- .../agents/tools/multi_input_tool.ipynb | 5 +++-- .../models/llms/examples/fake_llm.ipynb | 5 +++-- .../llms/examples/token_usage_tracking.ipynb | 3 ++- docs/tracing/agent_with_tracing.ipynb | 5 +++-- .../evaluation/agent_benchmarking.ipynb | 3 ++- .../evaluation/agent_vectordb_sota_pg.ipynb | 3 ++- langchain/agents/agent_types.py | 10 +++++++++ langchain/agents/conversational/base.py | 3 ++- langchain/agents/initialize.py | 16 +++++--------- langchain/agents/loading.py | 13 ++++++------ langchain/agents/mrkl/base.py | 3 ++- langchain/agents/react/base.py | 3 ++- langchain/agents/self_ask_with_search/base.py | 3 ++- langchain/tools/zapier/tool.py | 2 +- tests/unit_tests/agents/test_agent.py | 21 ++++++++++++------- 37 files changed, 123 insertions(+), 80 deletions(-) create mode 100644 langchain/agents/agent_types.py diff --git a/docs/ecosystem/aim_tracking.ipynb b/docs/ecosystem/aim_tracking.ipynb index a3314fe5..435b4544 100644 --- a/docs/ecosystem/aim_tracking.ipynb +++ b/docs/ecosystem/aim_tracking.ipynb @@ -205,7 +205,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools" + "from langchain.agents import initialize_agent, load_tools\n", + "from langchain.agents.agent_types import AgentType" ] }, { @@ -252,7 +253,7 @@ "agent = initialize_agent(\n", " tools,\n", " llm,\n", - " agent=\"zero-shot-react-description\",\n", + " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", " callback_manager=manager,\n", " verbose=True,\n", ")\n", 
diff --git a/docs/ecosystem/clearml_tracking.ipynb b/docs/ecosystem/clearml_tracking.ipynb index 568b920e..e3b337a2 100644 --- a/docs/ecosystem/clearml_tracking.ipynb +++ b/docs/ecosystem/clearml_tracking.ipynb @@ -520,13 +520,14 @@ ], "source": [ "from langchain.agents import initialize_agent, load_tools\n", + "from langchain.agents.agent_types import AgentType\n", "\n", "# SCENARIO 2 - Agent with Tools\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callback_manager=manager)\n", "agent = initialize_agent(\n", " tools,\n", " llm,\n", - " agent=\"zero-shot-react-description\",\n", + " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", " callback_manager=manager,\n", " verbose=True,\n", ")\n", diff --git a/docs/ecosystem/google_serper.md b/docs/ecosystem/google_serper.md index 446c346b..807c254c 100644 --- a/docs/ecosystem/google_serper.md +++ b/docs/ecosystem/google_serper.md @@ -23,6 +23,7 @@ You can use it as part of a Self Ask chain: from langchain.utilities import GoogleSerperAPIWrapper from langchain.llms.openai import OpenAI from langchain.agents import initialize_agent, Tool +from langchain.agents.agent_types import AgentType import os @@ -39,7 +40,7 @@ tools = [ ) ] -self_ask_with_search = initialize_agent(tools, llm, agent="self-ask-with-search", verbose=True) +self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True) self_ask_with_search.run("What is the hometown of the reigning men's U.S. Open champion?") ``` diff --git a/docs/ecosystem/wandb_tracking.ipynb b/docs/ecosystem/wandb_tracking.ipynb index 37797883..cd6aed3d 100644 --- a/docs/ecosystem/wandb_tracking.ipynb +++ b/docs/ecosystem/wandb_tracking.ipynb @@ -505,7 +505,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools" + "from langchain.agents import initialize_agent, load_tools\n", + "from langchain.agents.agent_types import AgentType" ] }, { @@ -580,7 +581,7 @@ "agent = initialize_agent(\n", " tools,\n", " llm,\n", - " agent=\"zero-shot-react-description\",\n", + " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", " callback_manager=manager,\n", " verbose=True,\n", ")\n", diff --git a/docs/getting_started/getting_started.md b/docs/getting_started/getting_started.md index 4d276edd..10b329fe 100644 --- a/docs/getting_started/getting_started.md +++ b/docs/getting_started/getting_started.md @@ -197,6 +197,7 @@ Now we can get started! ```python from langchain.agents import load_tools from langchain.agents import initialize_agent +from langchain.agents.agent_types import AgentType from langchain.llms import OpenAI # First, let's load the language model we're going to use to control the agent. @@ -207,7 +208,7 @@ tools = load_tools(["serpapi", "llm-math"], llm=llm) # Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. -agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True) +agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True) # Now let's test it out! agent.run("What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?") @@ -404,11 +405,12 @@ chain.run(input_language="English", output_language="French", text="I love progr ````` `````{dropdown} Agents with Chat Models -Agents can also be used with chat models, you can initialize one using `"chat-zero-shot-react-description"` as the agent type. 
+Agents can also be used with chat models; you can initialize one using `AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION` as the agent type. ```python from langchain.agents import load_tools from langchain.agents import initialize_agent +from langchain.agents.agent_types import AgentType from langchain.chat_models import ChatOpenAI from langchain.llms import OpenAI @@ -421,7 +423,7 @@ tools = load_tools(["serpapi", "llm-math"], llm=llm) # Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use. -agent = initialize_agent(tools, chat, agent="chat-zero-shot-react-description", verbose=True) +agent = initialize_agent(tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True) # Now let's test it out! agent.run("Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?") diff --git a/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb b/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb index 0175cdfa..7bdbf3bd 100644 --- a/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb +++ b/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb @@ -154,6 +154,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.tools import BaseTool\n", "from langchain.llms import OpenAI\n", "from langchain import LLMMathChain, SerpAPIWrapper" ] @@ -189,7 +190,7 @@ "source": [ "# Construct the agent. We will use the default agent type here.\n", "# See documentation for a full list of options.\n", - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -316,7 +317,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -433,7 +434,7 @@ "source": [ "# Construct the agent. 
We will use the default agent type here.\n", "# See documentation for a full list of options.\n", - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/agents/agent_executors/examples/async_agent.ipynb b/docs/modules/agents/agent_executors/examples/async_agent.ipynb index 216ddcd3..a945988c 100644 --- a/docs/modules/agents/agent_executors/examples/async_agent.ipynb +++ b/docs/modules/agents/agent_executors/examples/async_agent.ipynb @@ -39,6 +39,7 @@ "import time\n", "\n", "from langchain.agents import initialize_agent, load_tools\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms import OpenAI\n", "from langchain.callbacks.stdout import StdOutCallbackHandler\n", "from langchain.callbacks.base import CallbackManager\n", @@ -175,7 +176,7 @@ " llm = OpenAI(temperature=0)\n", " tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm)\n", " agent = initialize_agent(\n", - " tools, llm, agent=\"zero-shot-react-description\", verbose=True\n", + " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", " )\n", " agent.run(q)\n", "\n", @@ -311,7 +312,7 @@ " llm = OpenAI(temperature=0, callback_manager=manager)\n", " async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession, callback_manager=manager)\n", " agents.append(\n", - " initialize_agent(async_tools, llm, agent=\"zero-shot-react-description\", verbose=True, callback_manager=manager)\n", + " initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n", " )\n", " tasks = [async_agent.arun(q) for async_agent, q in zip(agents, questions)]\n", " await asyncio.gather(*tasks)\n", @@ -381,7 +382,7 @@ "llm = OpenAI(temperature=0, callback_manager=manager)\n", "\n", "async_tools = load_tools([\"llm-math\", \"serpapi\"], llm=llm, aiosession=aiosession)\n", - "async_agent = initialize_agent(async_tools, llm, agent=\"zero-shot-react-description\", verbose=True, callback_manager=manager)\n", + "async_agent = initialize_agent(async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager)\n", "await async_agent.arun(questions[0])\n", "await aiosession.close()" ] diff --git a/docs/modules/agents/agent_executors/examples/intermediate_steps.ipynb b/docs/modules/agents/agent_executors/examples/intermediate_steps.ipynb index 4f2995bb..e1df52e4 100644 --- a/docs/modules/agents/agent_executors/examples/intermediate_steps.ipynb +++ b/docs/modules/agents/agent_executors/examples/intermediate_steps.ipynb @@ -19,6 +19,7 @@ "source": [ "from langchain.agents import load_tools\n", "from langchain.agents import initialize_agent\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms import OpenAI" ] }, @@ -56,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True, return_intermediate_steps=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, return_intermediate_steps=True)" ] }, { diff --git a/docs/modules/agents/agent_executors/examples/max_iterations.ipynb b/docs/modules/agents/agent_executors/examples/max_iterations.ipynb index 754a4e3a..97b7ccf9 100644 --- a/docs/modules/agents/agent_executors/examples/max_iterations.ipynb +++ 
b/docs/modules/agents/agent_executors/examples/max_iterations.ipynb @@ -19,6 +19,7 @@ "source": [ "from langchain.agents import load_tools\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms import OpenAI" ] }, @@ -59,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -139,7 +140,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True, max_iterations=2)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2)" ] }, { @@ -198,7 +199,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True, max_iterations=2, early_stopping_method=\"generate\")" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, max_iterations=2, early_stopping_method=\"generate\")" ] }, { diff --git a/docs/modules/agents/agents/examples/chat_conversation_agent.ipynb b/docs/modules/agents/agents/examples/chat_conversation_agent.ipynb index 061a5a60..f1fa0ece 100644 --- a/docs/modules/agents/agents/examples/chat_conversation_agent.ipynb +++ b/docs/modules/agents/agents/examples/chat_conversation_agent.ipynb @@ -34,7 +34,8 @@ "from langchain.memory import ConversationBufferMemory\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.utilities import SerpAPIWrapper\n", - "from langchain.agents import initialize_agent" + "from langchain.agents import initialize_agent\n", + "from langchain.agents.agent_types import AgentType" ] }, { @@ -72,7 +73,7 @@ "outputs": [], "source": [ "llm=ChatOpenAI(temperature=0)\n", - "agent_chain = initialize_agent(tools, llm, agent=\"chat-conversational-react-description\", verbose=True, memory=memory)" + "agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)" ] }, { diff --git a/docs/modules/agents/agents/examples/conversational_agent.ipynb b/docs/modules/agents/agents/examples/conversational_agent.ipynb index 10cb7d0f..a8709e4e 100644 --- a/docs/modules/agents/agents/examples/conversational_agent.ipynb +++ b/docs/modules/agents/agents/examples/conversational_agent.ipynb @@ -20,6 +20,7 @@ "outputs": [], "source": [ "from langchain.agents import Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain import OpenAI\n", "from langchain.utilities import GoogleSearchAPIWrapper\n", @@ -61,7 +62,7 @@ "outputs": [], "source": [ "llm=OpenAI(temperature=0)\n", - "agent_chain = initialize_agent(tools, llm, agent=\"conversational-react-description\", verbose=True, memory=memory)" + "agent_chain = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)" ] }, { diff --git a/docs/modules/agents/agents/examples/mrkl.ipynb b/docs/modules/agents/agents/examples/mrkl.ipynb index 0afc3da0..fa05c438 100644 --- a/docs/modules/agents/agents/examples/mrkl.ipynb +++ b/docs/modules/agents/agents/examples/mrkl.ipynb @@ -27,7 +27,8 @@ "outputs": [], "source": [ "from langchain import LLMMathChain, OpenAI, SerpAPIWrapper, SQLDatabase, 
SQLDatabaseChain\n", - "from langchain.agents import initialize_agent, Tool" + "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType" ] }, { @@ -68,7 +69,7 @@ "metadata": {}, "outputs": [], "source": [ - "mrkl = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "mrkl = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/agents/agents/examples/mrkl_chat.ipynb b/docs/modules/agents/agents/examples/mrkl_chat.ipynb index 91303afa..48099496 100644 --- a/docs/modules/agents/agents/examples/mrkl_chat.ipynb +++ b/docs/modules/agents/agents/examples/mrkl_chat.ipynb @@ -28,6 +28,7 @@ "source": [ "from langchain import OpenAI, LLMMathChain, SerpAPIWrapper, SQLDatabase, SQLDatabaseChain\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.chat_models import ChatOpenAI" ] }, @@ -70,7 +71,7 @@ "metadata": {}, "outputs": [], "source": [ - "mrkl = initialize_agent(tools, llm, agent=\"chat-zero-shot-react-description\", verbose=True)" + "mrkl = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/agents/agents/examples/react.ipynb b/docs/modules/agents/agents/examples/react.ipynb index 5519e0fa..b5d5bf0e 100644 --- a/docs/modules/agents/agents/examples/react.ipynb +++ b/docs/modules/agents/agents/examples/react.ipynb @@ -19,6 +19,7 @@ "source": [ "from langchain import OpenAI, Wikipedia\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.agents.react.base import DocstoreExplorer\n", "docstore=DocstoreExplorer(Wikipedia())\n", "tools = [\n", @@ -35,7 +36,7 @@ "]\n", "\n", "llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n", - "react = initialize_agent(tools, llm, agent=\"react-docstore\", verbose=True)" + "react = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)" ] }, { diff --git a/docs/modules/agents/agents/examples/self_ask_with_search.ipynb b/docs/modules/agents/agents/examples/self_ask_with_search.ipynb index 6dbb01e6..f5265ceb 100644 --- a/docs/modules/agents/agents/examples/self_ask_with_search.ipynb +++ b/docs/modules/agents/agents/examples/self_ask_with_search.ipynb @@ -46,6 +46,7 @@ "source": [ "from langchain import OpenAI, SerpAPIWrapper\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "search = SerpAPIWrapper()\n", @@ -57,7 +58,7 @@ " )\n", "]\n", "\n", - "self_ask_with_search = initialize_agent(tools, llm, agent=\"self-ask-with-search\", verbose=True)\n", + "self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)\n", "self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. 
Open champion?\")" ] } diff --git a/docs/modules/agents/getting_started.ipynb b/docs/modules/agents/getting_started.ipynb index bd9961f4..307b148a 100644 --- a/docs/modules/agents/getting_started.ipynb +++ b/docs/modules/agents/getting_started.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/agents/tools/custom_tools.ipynb b/docs/modules/agents/tools/custom_tools.ipynb index 07f26e2e..3ab084d4 100644 --- a/docs/modules/agents/tools/custom_tools.ipynb +++ b/docs/modules/agents/tools/custom_tools.ipynb @@ -27,6 +27,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.tools import BaseTool\n", "from langchain.llms import OpenAI\n", "from langchain import LLMMathChain, SerpAPIWrapper" @@ -102,7 +103,7 @@ "source": [ "# Construct the agent. We will use the default agent type here.\n", "# See documentation for a full list of options.\n", - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -217,7 +218,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -410,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -484,6 +485,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms import OpenAI\n", "from langchain import LLMMathChain, SerpAPIWrapper\n", "search = SerpAPIWrapper()\n", @@ -500,7 +502,7 @@ " )\n", "]\n", "\n", - "agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -576,7 +578,7 @@ "outputs": [], "source": [ "llm = OpenAI(temperature=0)\n", - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/agents/tools/examples/chatgpt_plugins.ipynb b/docs/modules/agents/tools/examples/chatgpt_plugins.ipynb index e1dbfa08..de016bc9 100644 --- a/docs/modules/agents/tools/examples/chatgpt_plugins.ipynb +++ b/docs/modules/agents/tools/examples/chatgpt_plugins.ipynb @@ -23,6 +23,7 @@ "source": [ "from langchain.chat_models import ChatOpenAI\n", "from langchain.agents import load_tools, initialize_agent\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.tools import AIPluginTool" ] }, @@ -83,7 +84,7 @@ "tools = load_tools([\"requests\"] )\n", "tools += [tool]\n", "\n", - "agent_chain = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", 
verbose=True)\n", + "agent_chain = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION verbose=True)\n", "agent_chain.run(\"what t shirts are available in klarna?\")" ] }, diff --git a/docs/modules/agents/tools/examples/google_serper.ipynb b/docs/modules/agents/tools/examples/google_serper.ipynb index e71e8a67..ba59a870 100644 --- a/docs/modules/agents/tools/examples/google_serper.ipynb +++ b/docs/modules/agents/tools/examples/google_serper.ipynb @@ -115,6 +115,7 @@ "from langchain.utilities import GoogleSerperAPIWrapper\n", "from langchain.llms.openai import OpenAI\n", "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "\n", "llm = OpenAI(temperature=0)\n", "search = GoogleSerperAPIWrapper()\n", @@ -126,7 +127,7 @@ " )\n", "]\n", "\n", - "self_ask_with_search = initialize_agent(tools, llm, agent=\"self-ask-with-search\", verbose=True)\n", + "self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True)\n", "self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")" ], "metadata": { diff --git a/docs/modules/agents/tools/examples/human_tools.ipynb b/docs/modules/agents/tools/examples/human_tools.ipynb index 498c471c..868462a1 100644 --- a/docs/modules/agents/tools/examples/human_tools.ipynb +++ b/docs/modules/agents/tools/examples/human_tools.ipynb @@ -20,6 +20,7 @@ "from langchain.chat_models import ChatOpenAI\n", "from langchain.llms import OpenAI\n", "from langchain.agents import load_tools, initialize_agent\n", + "from langchain.agents.agent_types import AgentType\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "math_llm = OpenAI(temperature=0.0)\n", @@ -31,7 +32,7 @@ "agent_chain = initialize_agent(\n", " tools,\n", " llm,\n", - " agent=\"zero-shot-react-description\",\n", + " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", " verbose=True,\n", ")" ] diff --git a/docs/modules/agents/tools/examples/search_tools.ipynb b/docs/modules/agents/tools/examples/search_tools.ipynb index e59f8b8b..fadcf37b 100644 --- a/docs/modules/agents/tools/examples/search_tools.ipynb +++ b/docs/modules/agents/tools/examples/search_tools.ipynb @@ -23,6 +23,7 @@ "source": [ "from langchain.agents import load_tools\n", "from langchain.agents import initialize_agent\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms import OpenAI" ] }, @@ -63,7 +64,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -131,7 +132,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -199,7 +200,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { @@ -266,7 +267,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git 
a/docs/modules/agents/tools/examples/zapier.ipynb b/docs/modules/agents/tools/examples/zapier.ipynb index 75ccd1cb..4891f9a1 100644 --- a/docs/modules/agents/tools/examples/zapier.ipynb +++ b/docs/modules/agents/tools/examples/zapier.ipynb @@ -77,6 +77,7 @@ "from langchain.llms import OpenAI\n", "from langchain.agents import initialize_agent\n", "from langchain.agents.agent_toolkits import ZapierToolkit\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.utilities.zapier import ZapierNLAWrapper" ] }, @@ -105,7 +106,7 @@ "llm = OpenAI(temperature=0)\n", "zapier = ZapierNLAWrapper()\n", "toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n", - "agent = initialize_agent(toolkit.get_tools(), llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/agents/tools/multi_input_tool.ipynb b/docs/modules/agents/tools/multi_input_tool.ipynb index e06deba8..cbec9d69 100644 --- a/docs/modules/agents/tools/multi_input_tool.ipynb +++ b/docs/modules/agents/tools/multi_input_tool.ipynb @@ -23,7 +23,8 @@ "outputs": [], "source": [ "from langchain.llms import OpenAI\n", - "from langchain.agents import initialize_agent, Tool" + "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType" ] }, { @@ -64,7 +65,7 @@ " description=\"useful for when you need to multiply two numbers together. The input to this tool should be a comma separated list of numbers of length two, representing the two numbers you want to multiply together. For example, `1,2` would be the input if you wanted to multiply 1 by 2.\"\n", " )\n", "]\n", - "mrkl = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "mrkl = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/models/llms/examples/fake_llm.ipynb b/docs/modules/models/llms/examples/fake_llm.ipynb index 508d0fd2..bacbc557 100644 --- a/docs/modules/models/llms/examples/fake_llm.ipynb +++ b/docs/modules/models/llms/examples/fake_llm.ipynb @@ -31,7 +31,8 @@ "outputs": [], "source": [ "from langchain.agents import load_tools\n", - "from langchain.agents import initialize_agent" + "from langchain.agents import initialize_agent\n", + "from langchain.agents.agent_types import AgentType" ] }, { @@ -65,7 +66,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { diff --git a/docs/modules/models/llms/examples/token_usage_tracking.ipynb b/docs/modules/models/llms/examples/token_usage_tracking.ipynb index 51cb420a..3c9f85eb 100644 --- a/docs/modules/models/llms/examples/token_usage_tracking.ipynb +++ b/docs/modules/models/llms/examples/token_usage_tracking.ipynb @@ -107,11 +107,12 @@ "source": [ "from langchain.agents import load_tools\n", "from langchain.agents import initialize_agent\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.llms import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)" + "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)" ] }, { 
diff --git a/docs/tracing/agent_with_tracing.ipynb b/docs/tracing/agent_with_tracing.ipynb index 59095c26..37687b7c 100644 --- a/docs/tracing/agent_with_tracing.ipynb +++ b/docs/tracing/agent_with_tracing.ipynb @@ -35,6 +35,7 @@ "\n", "import langchain\n", "from langchain.agents import Tool, initialize_agent, load_tools\n", + "from langchain.agents.agent_types import AgentType\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.llms import OpenAI" ] @@ -93,7 +94,7 @@ ], "source": [ "agent = initialize_agent(\n", - " tools, llm, agent=\"zero-shot-react-description\", verbose=True\n", + " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", ")\n", "\n", "agent.run(\"What is 2 raised to .123243 power?\")" @@ -177,7 +178,7 @@ "source": [ "# Agent run with tracing using a chat model\n", "agent = initialize_agent(\n", - " tools, ChatOpenAI(temperature=0), agent=\"chat-zero-shot-react-description\", verbose=True\n", + " tools, ChatOpenAI(temperature=0), agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", ")\n", "\n", "agent.run(\"What is 2 raised to .123243 power?\")" diff --git a/docs/use_cases/evaluation/agent_benchmarking.ipynb b/docs/use_cases/evaluation/agent_benchmarking.ipynb index 26fcb4ad..398e328b 100644 --- a/docs/use_cases/evaluation/agent_benchmarking.ipynb +++ b/docs/use_cases/evaluation/agent_benchmarking.ipynb @@ -85,9 +85,10 @@ "from langchain.llms import OpenAI\n", "from langchain.chains import LLMMathChain\n", "from langchain.agents import initialize_agent, Tool, load_tools\n", + "from langchain.agents.agent_types import AgentType\n", "\n", "tools = load_tools(['serpapi', 'llm-math'], llm=OpenAI(temperature=0))\n", - "agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\")\n" + "agent = initialize_agent(tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n" ] }, { diff --git a/docs/use_cases/evaluation/agent_vectordb_sota_pg.ipynb b/docs/use_cases/evaluation/agent_vectordb_sota_pg.ipynb index 45861e99..d36efada 100644 --- a/docs/use_cases/evaluation/agent_vectordb_sota_pg.ipynb +++ b/docs/use_cases/evaluation/agent_vectordb_sota_pg.ipynb @@ -255,6 +255,7 @@ "outputs": [], "source": [ "from langchain.agents import initialize_agent, Tool\n", + "from langchain.agents.agent_types import AgentType\n", "tools = [\n", " Tool(\n", " name = \"State of Union QA System\",\n", @@ -276,7 +277,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(tools, OpenAI(temperature=0), agent=\"zero-shot-react-description\", max_iterations=3)" + "agent = initialize_agent(tools, OpenAI(temperature=0), agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, max_iterations=3)" ] }, { diff --git a/langchain/agents/agent_types.py b/langchain/agents/agent_types.py new file mode 100644 index 00000000..117f59a5 --- /dev/null +++ b/langchain/agents/agent_types.py @@ -0,0 +1,10 @@ +from enum import Enum + + +class AgentType(str, Enum): + ZERO_SHOT_REACT_DESCRIPTION = "zero-shot-react-description" + REACT_DOCSTORE = "react-docstore" + SELF_ASK_WITH_SEARCH = "self-ask-with-search" + CONVERSATIONAL_REACT_DESCRIPTION = "conversational-react-description" + CHAT_ZERO_SHOT_REACT_DESCRIPTION = "chat-zero-shot-react-description" + CHAT_CONVERSATIONAL_REACT_DESCRIPTION = "chat-conversational-react-description" diff --git a/langchain/agents/conversational/base.py b/langchain/agents/conversational/base.py index 23995797..921649d1 100644 --- a/langchain/agents/conversational/base.py +++ 
b/langchain/agents/conversational/base.py @@ -5,6 +5,7 @@ import re from typing import Any, List, Optional, Sequence, Tuple from langchain.agents.agent import Agent +from langchain.agents.agent_types import AgentType from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain @@ -21,7 +22,7 @@ class ConversationalAgent(Agent): @property def _agent_type(self) -> str: """Return Identifier of agent type.""" - return "conversational-react-description" + return AgentType.CONVERSATIONAL_REACT_DESCRIPTION @property def observation_prefix(self) -> str: diff --git a/langchain/agents/initialize.py b/langchain/agents/initialize.py index 04277c60..72784b89 100644 --- a/langchain/agents/initialize.py +++ b/langchain/agents/initialize.py @@ -2,6 +2,7 @@ from typing import Any, Optional, Sequence from langchain.agents.agent import AgentExecutor +from langchain.agents.agent_types import AgentType from langchain.agents.loading import AGENT_TO_CLASS, load_agent from langchain.callbacks.base import BaseCallbackManager from langchain.schema import BaseLanguageModel @@ -11,7 +12,7 @@ from langchain.tools.base import BaseTool def initialize_agent( tools: Sequence[BaseTool], llm: BaseLanguageModel, - agent: Optional[str] = None, + agent: Optional[AgentType] = None, callback_manager: Optional[BaseCallbackManager] = None, agent_path: Optional[str] = None, agent_kwargs: Optional[dict] = None, @@ -22,15 +23,8 @@ def initialize_agent( Args: tools: List of tools this agent has access to. llm: Language model to use as the agent. - agent: A string that specified the agent type to use. Valid options are: - `zero-shot-react-description` - `react-docstore` - `self-ask-with-search` - `conversational-react-description` - `chat-zero-shot-react-description`, - `chat-conversational-react-description`, - If None and agent_path is also None, will default to - `zero-shot-react-description`. + agent: Agent type to use. If None and agent_path is also None, will default to + AgentType.ZERO_SHOT_REACT_DESCRIPTION. callback_manager: CallbackManager to use. Global callback manager is used if not provided. Defaults to None. agent_path: Path to serialized agent to use. 
@@ -41,7 +35,7 @@ def initialize_agent( An agent executor """ if agent is None and agent_path is None: - agent = "zero-shot-react-description" + agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION if agent is not None and agent_path is not None: raise ValueError( "Both `agent` and `agent_path` are specified, " diff --git a/langchain/agents/loading.py b/langchain/agents/loading.py index 930ce925..01196b31 100644 --- a/langchain/agents/loading.py +++ b/langchain/agents/loading.py @@ -6,6 +6,7 @@ from typing import Any, List, Optional, Union import yaml from langchain.agents.agent import Agent +from langchain.agents.agent_types import AgentType from langchain.agents.chat.base import ChatAgent from langchain.agents.conversational.base import ConversationalAgent from langchain.agents.conversational_chat.base import ConversationalChatAgent @@ -18,12 +19,12 @@ from langchain.llms.base import BaseLLM from langchain.utilities.loading import try_load_from_hub AGENT_TO_CLASS = { - "zero-shot-react-description": ZeroShotAgent, - "react-docstore": ReActDocstoreAgent, - "self-ask-with-search": SelfAskWithSearchAgent, - "conversational-react-description": ConversationalAgent, - "chat-zero-shot-react-description": ChatAgent, - "chat-conversational-react-description": ConversationalChatAgent, + AgentType.ZERO_SHOT_REACT_DESCRIPTION: ZeroShotAgent, + AgentType.REACT_DOCSTORE: ReActDocstoreAgent, + AgentType.SELF_ASK_WITH_SEARCH: SelfAskWithSearchAgent, + AgentType.CONVERSATIONAL_REACT_DESCRIPTION: ConversationalAgent, + AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION: ChatAgent, + AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION: ConversationalChatAgent, } URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/" diff --git a/langchain/agents/mrkl/base.py b/langchain/agents/mrkl/base.py index a388a120..576c1bd8 100644 --- a/langchain/agents/mrkl/base.py +++ b/langchain/agents/mrkl/base.py @@ -5,6 +5,7 @@ import re from typing import Any, Callable, List, NamedTuple, Optional, Sequence, Tuple from langchain.agents.agent import Agent, AgentExecutor +from langchain.agents.agent_types import AgentType from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS, PREFIX, SUFFIX from langchain.agents.tools import Tool from langchain.callbacks.base import BaseCallbackManager @@ -56,7 +57,7 @@ class ZeroShotAgent(Agent): @property def _agent_type(self) -> str: """Return Identifier of agent type.""" - return "zero-shot-react-description" + return AgentType.ZERO_SHOT_REACT_DESCRIPTION @property def observation_prefix(self) -> str: diff --git a/langchain/agents/react/base.py b/langchain/agents/react/base.py index bae6478b..68ba9476 100644 --- a/langchain/agents/react/base.py +++ b/langchain/agents/react/base.py @@ -5,6 +5,7 @@ from typing import Any, List, Optional, Sequence, Tuple from pydantic import BaseModel from langchain.agents.agent import Agent, AgentExecutor +from langchain.agents.agent_types import AgentType from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT from langchain.agents.react.wiki_prompt import WIKI_PROMPT from langchain.agents.tools import Tool @@ -21,7 +22,7 @@ class ReActDocstoreAgent(Agent, BaseModel): @property def _agent_type(self) -> str: """Return Identifier of agent type.""" - return "react-docstore" + return AgentType.REACT_DOCSTORE @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: diff --git a/langchain/agents/self_ask_with_search/base.py b/langchain/agents/self_ask_with_search/base.py index 694273e3..4bc888e7 100644 
--- a/langchain/agents/self_ask_with_search/base.py +++ b/langchain/agents/self_ask_with_search/base.py @@ -2,6 +2,7 @@ from typing import Any, Optional, Sequence, Tuple, Union from langchain.agents.agent import Agent, AgentExecutor +from langchain.agents.agent_types import AgentType from langchain.agents.self_ask_with_search.prompt import PROMPT from langchain.agents.tools import Tool from langchain.llms.base import BaseLLM @@ -17,7 +18,7 @@ class SelfAskWithSearchAgent(Agent): @property def _agent_type(self) -> str: """Return Identifier of agent type.""" - return "self-ask-with-search" + return AgentType.SELF_ASK_WITH_SEARCH @classmethod def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate: diff --git a/langchain/tools/zapier/tool.py b/langchain/tools/zapier/tool.py index 275c9002..36a459f9 100644 --- a/langchain/tools/zapier/tool.py +++ b/langchain/tools/zapier/tool.py @@ -65,7 +65,7 @@ toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier) agent = initialize_agent( toolkit.get_tools(), llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True ) diff --git a/tests/unit_tests/agents/test_agent.py b/tests/unit_tests/agents/test_agent.py index f928efa7..51a152f3 100644 --- a/tests/unit_tests/agents/test_agent.py +++ b/tests/unit_tests/agents/test_agent.py @@ -5,6 +5,7 @@ from typing import Any, List, Mapping, Optional from pydantic import BaseModel from langchain.agents import AgentExecutor, initialize_agent +from langchain.agents.agent_types import AgentType from langchain.agents.tools import Tool from langchain.callbacks.base import CallbackManager from langchain.llms.base import LLM @@ -55,7 +56,11 @@ def _get_agent(**kwargs: Any) -> AgentExecutor: ), ] agent = initialize_agent( - tools, fake_llm, agent="zero-shot-react-description", verbose=True, **kwargs + tools, + fake_llm, + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, + verbose=True, + **kwargs, ) return agent @@ -98,7 +103,7 @@ def test_agent_with_callbacks_global() -> None: agent = initialize_agent( tools, fake_llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager, ) @@ -144,7 +149,7 @@ def test_agent_with_callbacks_local() -> None: agent = initialize_agent( tools, fake_llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True, callback_manager=manager, ) @@ -191,7 +196,7 @@ def test_agent_with_callbacks_not_verbose() -> None: agent = initialize_agent( tools, fake_llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, callback_manager=manager, ) @@ -223,7 +228,7 @@ def test_agent_tool_return_direct() -> None: agent = initialize_agent( tools, fake_llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) output = agent.run("when was langchain made") @@ -249,7 +254,7 @@ def test_agent_tool_return_direct_in_intermediate_steps() -> None: agent = initialize_agent( tools, fake_llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, return_intermediate_steps=True, ) @@ -280,7 +285,7 @@ def test_agent_with_new_prefix_suffix() -> None: agent = initialize_agent( tools=tools, llm=fake_llm, - agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, agent_kwargs={"prefix": prefix, "suffix": suffix}, ) @@ -307,7 +312,7 @@ def test_agent_lookup_tool() -> None: agent = initialize_agent( tools=tools, llm=fake_llm, - 
agent="zero-shot-react-description", + agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) assert agent.lookup_tool("Search") == tools[0]