diff --git a/docs/extras/expression_language/cookbook/agent.ipynb b/docs/extras/expression_language/cookbook/agent.ipynb
new file mode 100644
index 0000000000..5be6b9d4d1
--- /dev/null
+++ b/docs/extras/expression_language/cookbook/agent.ipynb
@@ -0,0 +1,203 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "e89f490d",
+ "metadata": {},
+ "source": [
+ "# Agents\n",
+ "\n",
+ "You can pass a Runnable into an agent."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "af4381de",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.agents import XMLAgent, tool, AgentExecutor\n",
+ "from langchain.chat_models import ChatAnthropic"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "24cc8134",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = ChatAnthropic(model=\"claude-2\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "67c0b0e4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@tool\n",
+ "def search(query: str) -> str:\n",
+ " \"\"\"Search things about current events.\"\"\"\n",
+ " return \"32 degrees\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "7203b101",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tool_list = [search]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "b68e756d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Get prompt to use\n",
+ "prompt = XMLAgent.get_default_prompt()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "61ab3e9a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Logic for going from intermediate steps to a string to pass into model\n",
+ "# This is pretty tied to the prompt\n",
+ "def convert_intermediate_steps(intermediate_steps):\n",
+ " log = \"\"\n",
+ " for action, observation in intermediate_steps:\n",
+ " log += (\n",
+ " f\"<tool>{action.tool}</tool><tool_input>{action.tool_input}\"\n",
+ " f\"</tool_input><observation>{observation}</observation>\"\n",
+ " )\n",
+ " return log\n",
+ "\n",
+ "\n",
+ "# Logic for converting tools to string to go in prompt\n",
+ "def convert_tools(tools):\n",
+ " return \"\\n\".join([f\"{tool.name}: {tool.description}\" for tool in tools])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "260f5988",
+ "metadata": {},
+ "source": [
+ "Building an agent from a runnable usually involves a few things:\n",
+ "\n",
+ "1. Data processing for the intermediate steps. These need to be represented in a way that the language model can recognize them. This should be pretty tightly coupled to the instructions in the prompt\n",
+ "\n",
+ "2. The prompt itself\n",
+ "\n",
+ "3. The model, complete with stop tokens if needed\n",
+ "\n",
+ "4. The output parser - should be in sync with how the prompt specifies things to be formatted."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "e92f1d6f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "agent = (\n",
+ " {\n",
+ " \"question\": lambda x: x[\"question\"],\n",
+ " \"intermediate_steps\": lambda x: convert_intermediate_steps(x[\"intermediate_steps\"])\n",
+ " }\n",
+ " | prompt.partial(tools=convert_tools(tool_list))\n",
+ " | model.bind(stop=[\"</tool_input>\", \"</final_answer>\"])\n",
+ " | XMLAgent.get_default_output_parser()\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "6ce6ec7a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "agent_executor = AgentExecutor(agent=agent, tools=tool_list, verbose=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "fb5cb2e3",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "\n",
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
+ "\u001b[32;1m\u001b[1;3m <tool>search</tool>\n",
+ "<tool_input>weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m <final_answer>\n",
+ "\n",
+ "The weather in New York is 32 degrees\u001b[0m\n",
+ "\n",
+ "\u001b[1m> Finished chain.\u001b[0m\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "{'question': 'whats the weather in New york?',\n",
+ " 'output': 'The weather in New York is 32 degrees'}"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "agent_executor.invoke({\"question\": \"whats the weather in New york?\"})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bce86dd8",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/extras/modules/agents/agent_types/xml_agent.ipynb b/docs/extras/modules/agents/agent_types/xml_agent.ipynb
index ed183d0467..251c94c171 100644
--- a/docs/extras/modules/agents/agent_types/xml_agent.ipynb
+++ b/docs/extras/modules/agents/agent_types/xml_agent.ipynb
@@ -141,7 +141,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.9.1"
+ "version": "3.10.1"
}
},
"nbformat": 4,
diff --git a/libs/langchain/langchain/agents/agent.py b/libs/langchain/langchain/agents/agent.py
index bc266d7c1e..2912cc57fd 100644
--- a/libs/langchain/langchain/agents/agent.py
+++ b/libs/langchain/langchain/agents/agent.py
@@ -7,7 +7,16 @@ import logging
import time
from abc import abstractmethod
from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
import yaml
@@ -36,6 +45,7 @@ from langchain.schema import (
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import BaseMessage
+from langchain.schema.runnable import Runnable
from langchain.tools.base import BaseTool
from langchain.utilities.asyncio import asyncio_timeout
from langchain.utils.input import get_color_mapping
@@ -307,6 +317,71 @@ class AgentOutputParser(BaseOutputParser):
"""Parse text into agent action/finish."""
+class RunnableAgent(BaseSingleActionAgent):
+ """Agent powered by runnables."""
+
+ runnable: Runnable[dict, Union[AgentAction, AgentFinish]]
+ """Runnable to call to get agent action."""
+ _input_keys: List[str] = []
+ """Input keys."""
+
+ class Config:
+ """Configuration for this pydantic object."""
+
+ arbitrary_types_allowed = True
+
+ @property
+ def input_keys(self) -> List[str]:
+ """Return the input keys.
+
+ Returns:
+ List of input keys.
+ """
+ return self._input_keys
+
+ def plan(
+ self,
+ intermediate_steps: List[Tuple[AgentAction, str]],
+ callbacks: Callbacks = None,
+ **kwargs: Any,
+ ) -> Union[AgentAction, AgentFinish]:
+ """Given input, decided what to do.
+
+ Args:
+ intermediate_steps: Steps the LLM has taken to date,
+ along with the observations.
+ callbacks: Callbacks to run.
+ **kwargs: User inputs.
+
+ Returns:
+ Action specifying what tool to use.
+ """
+ inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}}
+ output = self.runnable.invoke(inputs, config={"callbacks": callbacks})
+ return output
+
+ async def aplan(
+ self,
+ intermediate_steps: List[Tuple[AgentAction, str]],
+ callbacks: Callbacks = None,
+ **kwargs: Any,
+ ) -> Union[AgentAction, AgentFinish]:
+ """Given input, decided what to do.
+
+ Args:
+ intermediate_steps: Steps the LLM has taken to date,
+ along with observations
+ callbacks: Callbacks to run.
+ **kwargs: User inputs.
+
+ Returns:
+ Action specifying what tool to use.
+ """
+ inputs = {**kwargs, **{"intermediate_steps": intermediate_steps}}
+ output = await self.runnable.ainvoke(inputs, config={"callbacks": callbacks})
+ return output
+
+
class LLMSingleActionAgent(BaseSingleActionAgent):
"""Base class for single action agents."""
@@ -725,6 +800,14 @@ s
)
return values
+ @root_validator(pre=True)
+ def validate_runnable_agent(cls, values: Dict) -> Dict:
+ """Convert runnable to agent if passed in."""
+ agent = values["agent"]
+ if isinstance(agent, Runnable):
+ values["agent"] = RunnableAgent(runnable=agent)
+ return values
+
def save(self, file_path: Union[Path, str]) -> None:
"""Raise error - saving not supported for Agent Executors."""
raise ValueError(