# forked from Archives/langchain
"""Test MRKL functionality."""
|
|
|
|
import pytest
|
|
|
|
from langchain.prompts import PromptTemplate
|
|
from langchain.routing_chains.mrkl.base import ZeroShotRouter, get_action_and_input
|
|
from langchain.routing_chains.mrkl.prompt import BASE_TEMPLATE
|
|
from langchain.routing_chains.tools import Tool
|
|
from tests.unit_tests.llms.fake_llm import FakeLLM
|
|
|
|
|
|
def test_get_action_and_input() -> None:
    """A well-formed Action/Action Input pair is parsed out of LLM text."""
    text = "\n".join(
        [
            "Thought: I need to search for NBA",
            "Action: Search",
            "Action Input: NBA",
        ]
    )
    parsed_action, parsed_input = get_action_and_input(text)
    assert parsed_action == "Search"
    assert parsed_input == "NBA"
|
|
|
|
|
|
def test_get_final_answer() -> None:
    """A `Final Answer:` line wins out over earlier action lines."""
    text = "\n".join(
        [
            "Thought: I need to search for NBA",
            "Action: Search",
            "Action Input: NBA",
            "Observation: founded in 1994",
            "Thought: I can now answer the question",
            "Final Answer: 1994",
        ]
    )
    parsed_action, parsed_input = get_action_and_input(text)
    assert parsed_action == "Final Answer"
    assert parsed_input == "1994"
|
|
|
|
|
|
def test_bad_action_input_line() -> None:
    """Output missing an `Action Input:` line raises ValueError."""
    text = "Thought: I need to search for NBA\n" "Action: Search\n" "Thought: NBA"
    with pytest.raises(ValueError):
        get_action_and_input(text)
|
|
|
|
|
|
def test_bad_action_line() -> None:
    """Output missing an `Action:` line raises ValueError."""
    text = (
        "Thought: I need to search for NBA\n"
        "Thought: Search\n"
        "Action Input: NBA"
    )
    with pytest.raises(ValueError):
        get_action_and_input(text)
|
|
|
|
|
|
def test_from_chains() -> None:
    """Building a ZeroShotRouter from tools fills BASE_TEMPLATE as expected."""
    tools = [
        Tool(name="foo", func=lambda x: "foo", description="foobar1"),
        Tool(name="bar", func=lambda x: "bar", description="foobar2"),
    ]
    router = ZeroShotRouter.from_llm_and_tools(FakeLLM(), tools)
    # The prompt should list each tool as "name: description", one per line,
    # and the comma-separated tool names, substituted into BASE_TEMPLATE.
    tools_section = "foo: foobar1\nbar: foobar2"
    names_section = "foo, bar"
    wanted_template = BASE_TEMPLATE.format(
        tools=tools_section, tool_names=names_section
    )
    prompt = router.llm_chain.prompt
    assert isinstance(prompt, PromptTemplate)
    assert prompt.template == wanted_template
|