langchain/tests/unit_tests/agents/test_agent.py

"""Unit tests for agents."""
from typing import Any, List, Mapping, Optional
from langchain.agents import AgentExecutor, AgentType, initialize_agent
from langchain.agents.tools import Tool
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


class FakeListLLM(LLM):
    """Fake LLM for testing that outputs elements of a list."""

    responses: List[str]
    # Starts at -1 so the first call to _call returns responses[0].
    i: int = -1

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Increment counter, and then return response at that index."""
        self.i += 1
        print(f"=== Mock Response #{self.i} ===")
        print(self.responses[self.i])
        return self.responses[self.i]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        return {}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "fake_list"
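

# _get_agent scripts an LLM whose first reply names a tool that does not exist
# ("BadAction"); the executor is expected to surface an invalid-tool observation
# and fall through to the second reply, which carries the final answer.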
def _get_agent(**kwargs: Any) -> AgentExecutor:
    """Get agent for testing."""
    bad_action_name = "BadAction"
    responses = [
        f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment",
        "Oh well\nFinal Answer: curses foiled again",
    ]
    fake_llm = FakeListLLM(responses=responses)
    tools = [
        Tool(
            name="Search",
            func=lambda x: x,
            description="Useful for searching",
        ),
        Tool(
            name="Lookup",
            func=lambda x: x,
            description="Useful for looking up things in a table",
        ),
    ]
    agent = initialize_agent(
        tools,
        fake_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=True,
        **kwargs,
    )
    return agent


def test_agent_bad_action() -> None:
    """Test react chain when a bad action is given."""
    agent = _get_agent()
    output = agent.run("when was langchain made")
    assert output == "curses foiled again"
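

# max_iterations=0 and max_execution_time=0.0 should stop the executor before it
# takes a single step, so it returns its default early-stopping message.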
def test_agent_stopped_early() -> None:
    """Test react chain when max iterations or max execution time is exceeded."""
    # iteration limit
    agent = _get_agent(max_iterations=0)
    output = agent.run("when was langchain made")
    assert output == "Agent stopped due to iteration limit or time limit."

    # execution time limit
    agent = _get_agent(max_execution_time=0.0)
    output = agent.run("when was langchain made")
    assert output == "Agent stopped due to iteration limit or time limit."
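

# handler1 is passed at run time, so it should observe chain, LLM, and tool
# events; handler2 is attached only to the fake LLM, so it should only observe
# LLM events.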
def test_agent_with_callbacks() -> None:
    """Test react chain with callbacks."""
    handler1 = FakeCallbackHandler()
    handler2 = FakeCallbackHandler()

    tool = "Search"
    responses = [
        f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
        "Oh well\nFinal Answer: curses foiled again",
    ]
    # Only the fake LLM gets callbacks for handler2
    fake_llm = FakeListLLM(responses=responses, callbacks=[handler2])
    tools = [
        Tool(
            name="Search",
            func=lambda x: x,
            description="Useful for searching",
        ),
    ]
    agent = initialize_agent(
        tools,
        fake_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    )

    output = agent.run("when was langchain made", callbacks=[handler1])
    assert output == "curses foiled again"

    # 1 top-level chain run, 2 LLMChain runs, 2 LLM runs, 1 tool run
    assert handler1.chain_starts == handler1.chain_ends == 3
    assert handler1.llm_starts == handler1.llm_ends == 2
    assert handler1.tool_starts == 1
    assert handler1.tool_ends == 1
    # 1 extra agent action
    assert handler1.starts == 7
    # 1 extra agent end
    assert handler1.ends == 7
    assert handler1.errors == 0
    # text events emitted during the LLMChain runs
    assert handler1.text == 2

    assert handler2.llm_starts == 2
    assert handler2.llm_ends == 2
    assert (
        handler2.chain_starts
        == handler2.tool_starts
        == handler2.tool_ends
        == handler2.chain_ends
        == 0
    )
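

# With return_direct=True the tool's observation is returned as the final output.
# The Search tool echoes its input, so the expected output is "misalignment" and
# the second scripted response should never be consumed.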
def test_agent_tool_return_direct() -> None:
    """Test agent using tools that return directly."""
    tool = "Search"
    responses = [
        f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
        "Oh well\nFinal Answer: curses foiled again",
    ]
    fake_llm = FakeListLLM(responses=responses)
    tools = [
        Tool(
            name="Search",
            func=lambda x: x,
            description="Useful for searching",
            return_direct=True,
        ),
    ]
    agent = initialize_agent(
        tools,
        fake_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    )
    output = agent.run("when was langchain made")
    assert output == "misalignment"
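

# return_intermediate_steps=True exposes the (AgentAction, observation) pairs the
# executor recorded, in addition to the final output.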
def test_agent_tool_return_direct_in_intermediate_steps() -> None:
    """Test agent using tools that return directly, with intermediate steps returned."""
    tool = "Search"
    responses = [
        f"FooBarBaz\nAction: {tool}\nAction Input: misalignment",
        "Oh well\nFinal Answer: curses foiled again",
    ]
    fake_llm = FakeListLLM(responses=responses)
    tools = [
        Tool(
            name="Search",
            func=lambda x: x,
            description="Useful for searching",
            return_direct=True,
        ),
    ]
    agent = initialize_agent(
        tools,
        fake_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        return_intermediate_steps=True,
    )
    resp = agent("when was langchain made")
    assert resp["output"] == "misalignment"
    assert len(resp["intermediate_steps"]) == 1
    action, _action_input = resp["intermediate_steps"][0]
    assert action.tool == "Search"
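

# agent_kwargs passed to initialize_agent are forwarded to the agent's
# create_prompt classmethod, so a custom prefix/suffix should end up in the
# rendered prompt template.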
def test_agent_with_new_prefix_suffix() -> None:
    """Test agent initialization kwargs with new prefix and suffix."""
    fake_llm = FakeListLLM(
        responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"]
    )
    tools = [
        Tool(
            name="Search",
            func=lambda x: x,
            description="Useful for searching",
            return_direct=True,
        ),
    ]
    prefix = "FooBarBaz"
    suffix = "Begin now!\nInput: {input}\nThought: {agent_scratchpad}"

    agent = initialize_agent(
        tools=tools,
        llm=fake_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        agent_kwargs={"prefix": prefix, "suffix": suffix},
    )

    # avoids "BasePromptTemplate" has no attribute "template" error
    assert hasattr(agent.agent.llm_chain.prompt, "template")  # type: ignore
    prompt_str = agent.agent.llm_chain.prompt.template  # type: ignore
    assert prompt_str.startswith(prefix), "Prompt does not start with prefix"
    assert prompt_str.endswith(suffix), "Prompt does not end with suffix"
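

# AgentExecutor.lookup_tool should return the registered Tool instance by name.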
def test_agent_lookup_tool() -> None:
    """Test agent lookup tool."""
    fake_llm = FakeListLLM(
        responses=["FooBarBaz\nAction: Search\nAction Input: misalignment"]
    )
    tools = [
        Tool(
            name="Search",
            func=lambda x: x,
            description="Useful for searching",
            return_direct=True,
        ),
    ]
    agent = initialize_agent(
        tools=tools,
        llm=fake_llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    )
    assert agent.lookup_tool("Search") == tools[0]