Mirror of https://github.com/hwchase17/langchain, synced 2024-11-08 07:10:35 +00:00
add initial anthropic agent (#8468)
Co-authored-by: Nuno Campos <nuno@boringbits.io>
This commit is contained in:
parent
a795c3d860
commit
6556a8fcfd
274 docs/extras/modules/agents/agent_types/anthropic_agent.ipynb Normal file
@@ -0,0 +1,274 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "9926203f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
    "os.environ[\"LANGCHAIN_ENDPOINT\"] = \"https://api.smith.langchain.com\"\n",
    "os.environ[\"LANGCHAIN_API_KEY\"] = \"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "45bc4149",
   "metadata": {},
   "outputs": [],
   "source": [
    "agent_instructions = \"\"\"You are a helpful assistant. Help the user answer any questions.\n",
    "\n",
    "You have access to the following tools:\n",
    "\n",
    "{tools}\n",
    "\n",
    "In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \\\n",
    "You will then get back a response in the form <observation></observation>\n",
    "For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n",
    "\n",
    "<tool>search</tool><tool_input>weather in SF</tool_input>\n",
    "<observation>64 degrees</observation>\n",
    "\n",
    "When you are done, respond with a final answer between <final_answer></final_answer>. For example:\n",
    "\n",
    "<final_answer>The weather in SF is 64 degrees</final_answer>\n",
    "\n",
    "Begin!\n",
    "\n",
    "Question: {question}\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4da4c0d2",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/harrisonchase/.pyenv/versions/3.9.1/envs/langchain/lib/python3.9/site-packages/deeplake/util/check_latest_version.py:32: UserWarning: A newer version of deeplake (3.6.14) is available. It's recommended that you update to the latest version using `pip install -U deeplake`.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from langchain.chat_models import ChatAnthropic\n",
    "from langchain.prompts import ChatPromptTemplate, AIMessagePromptTemplate\n",
    "from langchain.agents import tool"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b81e9120",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = ChatAnthropic(model=\"claude-2\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "5271f612",
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt_template = ChatPromptTemplate.from_template(agent_instructions) + AIMessagePromptTemplate.from_template(\"{intermediate_steps}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "83780d81",
   "metadata": {},
   "outputs": [],
   "source": [
    "chain = prompt_template | model.bind(stop=[\"</tool_input>\", \"</final_answer>\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c091d0e1",
   "metadata": {},
   "outputs": [],
   "source": [
    "@tool\n",
    "def search(query: str) -> str:\n",
    "    \"\"\"Search things about current events.\"\"\"\n",
    "    return \"32 degrees\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "1e81b05d",
   "metadata": {},
   "outputs": [],
   "source": [
    "tool_list = [search]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5f0d986f",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.agents import Tool, AgentExecutor, BaseSingleActionAgent\n",
    "from typing import List, Tuple, Any, Union\n",
    "from langchain.schema import AgentAction, AgentFinish\n",
    "\n",
    "\n",
    "class AnthropicAgent(BaseSingleActionAgent):\n",
    "    \n",
    "    tools: List[Tool]\n",
    "    chain: Any\n",
    "\n",
    "    @property\n",
    "    def input_keys(self):\n",
    "        return [\"input\"]\n",
    "\n",
    "    def plan(\n",
    "        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
    "    ) -> Union[AgentAction, AgentFinish]:\n",
    "        \"\"\"Given input, decide what to do.\n",
    "\n",
    "        Args:\n",
    "            intermediate_steps: Steps the LLM has taken to date,\n",
    "                along with observations\n",
    "            **kwargs: User inputs.\n",
    "\n",
    "        Returns:\n",
    "            Action specifying what tool to use.\n",
    "        \"\"\"\n",
    "        log = \"\"\n",
    "        for action, observation in intermediate_steps:\n",
    "            log += f\"<tool>{action.tool}</tool><tool_input>{action.tool_input}</tool_input><observation>{observation}</observation>\"\n",
    "        tools = \"\"\n",
    "        for tool in self.tools:\n",
    "            tools += f\"{tool.name}: {tool.description}\\n\"\n",
    "        response = self.chain.invoke({\"intermediate_steps\": log, \"tools\": tools, \"question\": kwargs[\"input\"]})\n",
    "        if \"</tool>\" in response.content:\n",
    "            t, ti = response.content.split(\"</tool>\")\n",
    "            _t = t.split(\"<tool>\")[1]\n",
    "            _ti = ti.split(\"<tool_input>\")[1]\n",
    "            return AgentAction(tool=_t, tool_input=_ti, log=response.content)\n",
    "        elif \"<final_answer>\" in response.content:\n",
    "            t, ti = response.content.split(\"<final_answer>\")\n",
    "            return AgentFinish(return_values={\"output\": ti}, log=response.content)\n",
    "        else:\n",
    "            raise ValueError\n",
    "\n",
    "    async def aplan(\n",
    "        self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any\n",
    "    ) -> Union[AgentAction, AgentFinish]:\n",
    "        \"\"\"Given input, decide what to do.\n",
    "\n",
    "        Args:\n",
    "            intermediate_steps: Steps the LLM has taken to date,\n",
    "                along with observations\n",
    "            **kwargs: User inputs.\n",
    "\n",
    "        Returns:\n",
    "            Action specifying what tool to use.\n",
    "        \"\"\"\n",
    "        raise ValueError"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "315361c5",
   "metadata": {},
   "outputs": [],
   "source": [
    "agent = AnthropicAgent(tools=tool_list, chain=chain)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "bca6096f",
   "metadata": {},
   "outputs": [],
   "source": [
    "agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "71b872b1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
      "\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
      "<tool_input>weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n",
      "\n",
      "<final_answer>The weather in New York is 32 degrees\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'The weather in New York is 32 degrees'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "agent_executor.run(\"whats the weather in New york?\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cca87246",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
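The heart of the notebook is the plan method above: because the chain binds stop=["</tool_input>", "</final_answer>"], the model's output is either an unterminated tool call or an unterminated final answer, and closing tags never appear. A minimal standalone sketch of that parsing step (illustrative names like parse_xml_action and the "__finish__" sentinel are not part of the commit):

from typing import Tuple

def parse_xml_action(text: str) -> Tuple[str, str]:
    """Sketch of the notebook's parsing logic.

    Returns (tool_name, tool_input) for a tool call, or
    ("__finish__", answer) for a final answer. Closing tags are absent
    because generation stops at </tool_input> / </final_answer>.
    """
    if "</tool>" in text:
        tool_part, input_part = text.split("</tool>")
        return tool_part.split("<tool>")[1], input_part.split("<tool_input>")[1]
    elif "<final_answer>" in text:
        return "__finish__", text.split("<final_answer>")[1]
    raise ValueError(f"Could not parse agent output: {text!r}")

# Mirrors the run shown in the last cell:
assert parse_xml_action("<tool>search</tool><tool_input>weather in new york") == (
    "search",
    "weather in new york",
)
assert parse_xml_action("<final_answer>The weather in New York is 32 degrees") == (
    "__finish__",
    "The weather in New York is 32 degrees",
)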
libs/langchain/langchain/agents/__init__.py
@@ -39,6 +39,7 @@ from langchain.agents.react.base import ReActChain, ReActTextWorldAgent
 from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
 from langchain.agents.structured_chat.base import StructuredChatAgent
 from langchain.agents.tools import Tool, tool
+from langchain.agents.xml.base import XMLAgent
 
 __all__ = [
     "Agent",
@@ -78,4 +79,5 @@ __all__ = [
     "load_tools",
     "tool",
     "create_xorbits_agent",
+    "XMLAgent",
 ]
0 libs/langchain/langchain/agents/xml/__init__.py Normal file
118 libs/langchain/langchain/agents/xml/base.py Normal file
@@ -0,0 +1,118 @@
from typing import Any, List, Tuple, Union

from langchain.agents.agent import AgentOutputParser, BaseSingleActionAgent
from langchain.agents.xml.prompt import agent_instructions
from langchain.callbacks.base import Callbacks
from langchain.chains.llm import LLMChain
from langchain.prompts.chat import AIMessagePromptTemplate, ChatPromptTemplate
from langchain.schema import AgentAction, AgentFinish
from langchain.tools.base import BaseTool


class XMLAgentOutputParser(AgentOutputParser):
    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        if "</tool>" in text:
            tool, tool_input = text.split("</tool>")
            _tool = tool.split("<tool>")[1]
            _tool_input = tool_input.split("<tool_input>")[1]
            return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
        elif "<final_answer>" in text:
            _, answer = text.split("<final_answer>")
            return AgentFinish(return_values={"output": answer}, log=text)
        else:
            raise ValueError

    def get_format_instructions(self) -> str:
        raise NotImplementedError

    @property
    def _type(self) -> str:
        return "xml-agent"


class XMLAgent(BaseSingleActionAgent):
    """Agent that uses XML tags.

    Args:
        tools: list of tools the agent can choose from
        llm_chain: The LLMChain to call to predict the next action

    Examples:

        .. code-block:: python

            from langchain.agents import XMLAgent
            from langchain.chains import LLMChain
            from langchain.chat_models import ChatAnthropic

            tools = ...
            model = ChatAnthropic(model="claude-2")

            chain = LLMChain(
                llm=model,
                prompt=XMLAgent.get_default_prompt(),
                output_parser=XMLAgent.get_default_output_parser(),
            )
            agent = XMLAgent(tools=tools, llm_chain=chain)
    """

    tools: List[BaseTool]
    """List of tools this agent has access to."""
    llm_chain: LLMChain
    """Chain to use to predict action."""

    @property
    def input_keys(self) -> List[str]:
        return ["input"]

    @staticmethod
    def get_default_prompt() -> ChatPromptTemplate:
        return ChatPromptTemplate.from_template(
            agent_instructions
        ) + AIMessagePromptTemplate.from_template("{intermediate_steps}")

    @staticmethod
    def get_default_output_parser() -> XMLAgentOutputParser:
        return XMLAgentOutputParser()

    def plan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        log = ""
        for action, observation in intermediate_steps:
            log += (
                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
                f"</tool_input><observation>{observation}</observation>"
            )
        tools = ""
        for tool in self.tools:
            tools += f"{tool.name}: {tool.description}\n"
        inputs = {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            "stop": ["</tool_input>", "</final_answer>"],
        }
        response = self.llm_chain(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]

    async def aplan(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> Union[AgentAction, AgentFinish]:
        log = ""
        for action, observation in intermediate_steps:
            log += (
                f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
                f"</tool_input><observation>{observation}</observation>"
            )
        tools = ""
        for tool in self.tools:
            tools += f"{tool.name}: {tool.description}\n"
        inputs = {
            "intermediate_steps": log,
            "tools": tools,
            "question": kwargs["input"],
            "stop": ["</tool_input>", "</final_answer>"],
        }
        response = await self.llm_chain.acall(inputs, callbacks=callbacks)
        return response[self.llm_chain.output_key]
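End to end, the class composes the same way the notebook does. A minimal usage sketch, assuming the claude-2 setup from the docs notebook above (this wiring is not itself part of the diff):

from langchain.agents import AgentExecutor, XMLAgent, tool
from langchain.chains import LLMChain
from langchain.chat_models import ChatAnthropic

@tool
def search(query: str) -> str:
    """Search things about current events."""
    return "32 degrees"  # stub observation, as in the docs notebook

model = ChatAnthropic(model="claude-2")
chain = LLMChain(
    llm=model,
    prompt=XMLAgent.get_default_prompt(),
    output_parser=XMLAgent.get_default_output_parser(),
)
agent = XMLAgent(tools=[search], llm_chain=chain)
executor = AgentExecutor(agent=agent, tools=[search], verbose=True)
executor.run("whats the weather in New york?")  # -> 'The weather in New York is 32 degrees'

Note that unlike the notebook's model.bind(...) approach, the stop sequences here are passed through the "stop" key that plan/aplan add to the LLMChain inputs.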
21 libs/langchain/langchain/agents/xml/prompt.py Normal file
@@ -0,0 +1,21 @@
# flake8: noqa
agent_instructions = """You are a helpful assistant. Help the user answer any questions.

You have access to the following tools:

{tools}

In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. \
You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:

<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>

When you are done, respond with a final answer between <final_answer></final_answer>. For example:

<final_answer>The weather in SF is 64 degrees</final_answer>

Begin!

Question: {question}"""
@@ -19,6 +19,7 @@ _EXPECTED = [
     "SelfAskWithSearchChain",
     "StructuredChatAgent",
     "Tool",
+    "XMLAgent",
     "ZeroShotAgent",
     "create_csv_agent",
     "create_json_agent",
File diff suppressed because one or more lines are too long
libs/langchain/tests/unit_tests/schema/runnable/test_runnable.py
@@ -33,16 +33,38 @@ from langchain.schema.runnable import (
 
 
 class FakeTracer(BaseTracer):
-    """Fake tracer that records LangChain execution."""
+    """Fake tracer that records LangChain execution.
+
+    It replaces run ids with deterministic UUIDs for snapshotting."""
 
     def __init__(self) -> None:
         """Initialize the tracer."""
         super().__init__()
         self.runs: List[Run] = []
+        self.uuids_map: Dict[UUID, UUID] = {}
+        self.uuids_generator = (
+            UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
+        )
+
+    def _replace_uuid(self, uuid: UUID) -> UUID:
+        if uuid not in self.uuids_map:
+            self.uuids_map[uuid] = next(self.uuids_generator)
+        return self.uuids_map[uuid]
+
+    def _copy_run(self, run: Run) -> Run:
+        return run.copy(
+            update={
+                "id": self._replace_uuid(run.id),
+                "parent_run_id": self.uuids_map[run.parent_run_id]
+                if run.parent_run_id
+                else None,
+                "child_runs": [self._copy_run(child) for child in run.child_runs],
+            }
+        )
 
     def _persist_run(self, run: Run) -> None:
         """Persist a run."""
-        self.runs.append(run)
+        self.runs.append(self._copy_run(run))
 
 
 class FakeRunnable(Runnable[str, int]):
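The remapping above is what makes run traces snapshot-deterministic: the first id seen always maps to ...-000000000000, the second to ...-000000000001, and cross-references between runs stay consistent. A standalone sketch of the same idea (illustrative, not part of the diff):

from typing import Dict
from uuid import UUID, uuid4

# Random UUIDs are replaced, in order of first appearance, by UUIDs drawn
# from a fixed sequence, so identical traces always serialize identically.
uuids_map: Dict[UUID, UUID] = {}
uuids_generator = (
    UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)

def replace_uuid(u: UUID) -> UUID:
    if u not in uuids_map:
        uuids_map[u] = next(uuids_generator)
    return uuids_map[u]

a, b = uuid4(), uuid4()
assert replace_uuid(a) == UUID("00000000-0000-4000-8000-000000000000")
assert replace_uuid(b) == UUID("00000000-0000-4000-8000-000000000001")
assert replace_uuid(a) == UUID("00000000-0000-4000-8000-000000000000")  # stable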
@@ -78,20 +100,6 @@ class FakeRetriever(BaseRetriever):
         return [Document(page_content="foo"), Document(page_content="bar")]
 
 
-@pytest.fixture()
-def fixed_uuids(mocker: MockerFixture) -> MockerFixture._Patcher:
-    """Note this mock only works with `import uuid; uuid.uuid4()`,
-    it does not work with `from uuid import uuid4; uuid4()`."""
-
-    # Disable tracing to avoid fixed UUIDs causing tracing errors.
-    mocker.patch.dict("os.environ", {"LANGCHAIN_TRACING_V2": "false"})
-
-    side_effect = (
-        UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
-    )
-    return mocker.patch("uuid.uuid4", side_effect=side_effect)
-
-
 @pytest.mark.asyncio
 async def test_default_method_implementations(mocker: MockerFixture) -> None:
     fake = FakeRunnable()
@@ -206,13 +214,13 @@ async def test_prompt() -> None:
 @pytest.mark.asyncio
 @freeze_time("2023-01-01")
 async def test_prompt_with_chat_model(
-    mocker: MockerFixture, snapshot: SnapshotAssertion, fixed_uuids: None
+    mocker: MockerFixture, snapshot: SnapshotAssertion
 ) -> None:
     prompt = (
         SystemMessagePromptTemplate.from_template("You are a nice assistant.")
         + "{question}"
     )
-    chat = FakeListChatModel(responses=["foo", "bar"])
+    chat = FakeListChatModel(responses=["foo"])
 
     chain = prompt | chat
 
@@ -251,7 +259,7 @@ async def test_prompt_with_chat_model(
         ],
         dict(callbacks=[tracer]),
     ) == [
-        AIMessage(content="bar"),
+        AIMessage(content="foo"),
         AIMessage(content="foo"),
     ]
     assert prompt_spy.call_args.args[1] == [
@@ -272,7 +280,16 @@ async def test_prompt_with_chat_model(
             ]
         ),
     ]
-    assert tracer.runs == snapshot
+    assert (
+        len(
+            [
+                r
+                for r in tracer.runs
+                if r.parent_run_id is None and len(r.child_runs) == 2
+            ]
+        )
+        == 2
+    ), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)"
     mocker.stop(prompt_spy)
     mocker.stop(chat_spy)
 
@@ -282,7 +299,7 @@ async def test_prompt_with_chat_model(
     tracer = FakeTracer()
     assert [
         *chain.stream({"question": "What is your name?"}, dict(callbacks=[tracer]))
-    ] == [AIMessage(content="bar")]
+    ] == [AIMessage(content="foo")]
     assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
     assert chat_spy.call_args.args[1] == ChatPromptValue(
         messages=[
@@ -295,7 +312,7 @@ async def test_prompt_with_chat_model(
 @pytest.mark.asyncio
 @freeze_time("2023-01-01")
 async def test_prompt_with_llm(
-    mocker: MockerFixture, snapshot: SnapshotAssertion, fixed_uuids: None
+    mocker: MockerFixture, snapshot: SnapshotAssertion
 ) -> None:
     prompt = (
         SystemMessagePromptTemplate.from_template("You are a nice assistant.")
@@ -386,7 +403,7 @@ async def test_prompt_with_llm(
 
 @freeze_time("2023-01-01")
 def test_prompt_with_chat_model_and_parser(
-    mocker: MockerFixture, snapshot: SnapshotAssertion, fixed_uuids: None
+    mocker: MockerFixture, snapshot: SnapshotAssertion
 ) -> None:
     prompt = (
         SystemMessagePromptTemplate.from_template("You are a nice assistant.")
@@ -424,7 +441,7 @@ def test_prompt_with_chat_model_and_parser(
 
 @freeze_time("2023-01-01")
 def test_seq_dict_prompt_llm(
-    mocker: MockerFixture, snapshot: SnapshotAssertion, fixed_uuids: None
+    mocker: MockerFixture, snapshot: SnapshotAssertion
 ) -> None:
     passthrough = mocker.Mock(side_effect=lambda x: x)
 
@@ -487,13 +504,16 @@ What is your name?"""
         ]
     )
     assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar")
-    assert tracer.runs == snapshot
+    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
+    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
+    assert len(parent_run.child_runs) == 4
+    map_run = parent_run.child_runs[0]
+    assert map_run.name == "RunnableMap"
+    assert len(map_run.child_runs) == 3
 
 
 @freeze_time("2023-01-01")
-def test_seq_prompt_dict(
-    mocker: MockerFixture, snapshot: SnapshotAssertion, fixed_uuids: None
-) -> None:
+def test_seq_prompt_dict(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
     passthrough = mocker.Mock(side_effect=lambda x: x)
 
     prompt = (
@@ -544,13 +564,16 @@ def test_seq_prompt_dict(
             HumanMessage(content="What is your name?"),
         ]
     )
-    assert tracer.runs == snapshot
+    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
+    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
+    assert len(parent_run.child_runs) == 3
+    map_run = parent_run.child_runs[2]
+    assert map_run.name == "RunnableMap"
+    assert len(map_run.child_runs) == 2
 
 
 @freeze_time("2023-01-01")
-def test_seq_prompt_map(
-    mocker: MockerFixture, snapshot: SnapshotAssertion, fixed_uuids: None
-) -> None:
+def test_seq_prompt_map(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
     passthrough = mocker.Mock(side_effect=lambda x: x)
 
     prompt = (
@@ -608,7 +631,12 @@ def test_seq_prompt_map(
             HumanMessage(content="What is your name?"),
         ]
     )
-    assert tracer.runs == snapshot
+    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
+    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
+    assert len(parent_run.child_runs) == 3
+    map_run = parent_run.child_runs[2]
+    assert map_run.name == "RunnableMap"
+    assert len(map_run.child_runs) == 3
 
 
 def test_bind_bind() -> None: