diff --git a/docs/docs/modules/agents/how_to/structured_tools.ipynb b/docs/docs/modules/agents/how_to/structured_tools.ipynb new file mode 100644 index 0000000000..ee9293e826 --- /dev/null +++ b/docs/docs/modules/agents/how_to/structured_tools.ipynb @@ -0,0 +1,142 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "473081cc", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 1\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "16ee4216", + "metadata": {}, + "source": [ + "# Structured Tools" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "670078c4", + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "\n", + "from langchain_core.tools import tool\n", + "\n", + "\n", + "@tool\n", + "def get_data(n: int) -> List[dict]:\n", + " \"\"\"Get n datapoints.\"\"\"\n", + " return [{\"name\": \"foo\", \"value\": \"bar\"}] * n\n", + "\n", + "\n", + "tools = [get_data]" + ] + }, + { + "cell_type": "markdown", + "id": "5e04164b", + "metadata": {}, + "source": [ + "We will use a prompt from the hub - you can inspect the prompt more at [https://smith.langchain.com/hub/hwchase17/openai-functions-agent](https://smith.langchain.com/hub/hwchase17/openai-functions-agent)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d8c5d907", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain import hub\n", + "from langchain.agents import AgentExecutor, create_openai_functions_agent\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "# Get the prompt to use - you can modify this!\n", + "# If you want to see the prompt in full, you can at: https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n", + "prompt = hub.pull(\"hwchase17/openai-functions-agent\")\n", + "\n", + "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", + "\n", + "agent = create_openai_functions_agent(llm, tools, prompt)\n", + "agent_executor = AgentExecutor(agent=agent, tools=tools)" + ] + 
}, + { + "cell_type": "markdown", + "id": "cba9a9eb", + "metadata": {}, + "source": [ + "## Stream intermediate steps\n", + "\n", + "Let's look at how to stream intermediate steps. We can do this easily by just using the `.stream` method on the AgentExecutor\n", + "\n", + "We can then parse the results to get actions (tool inputs) and observations (tool outputs)." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "b6bd9bf2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Calling Tool ```get_data``` with input ```{'n': 3}```\n", + "Got result: ```[{'name': 'foo', 'value': 'bar'}, {'name': 'foo', 'value': 'bar'}, {'name': 'foo', 'value': 'bar'}]```\n" + ] + } + ], + "source": [ + "for chunk in agent_executor.stream({\"input\": \"get me three datapoints\"}):\n", + " # Agent Action\n", + " if \"actions\" in chunk:\n", + " for action in chunk[\"actions\"]:\n", + " print(\n", + " f\"Calling Tool ```{action.tool}``` with input ```{action.tool_input}```\"\n", + " )\n", + " # Observation\n", + " elif \"steps\" in chunk:\n", + " for step in chunk[\"steps\"]:\n", + " print(f\"Got result: ```{step.observation}```\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af9e32fe", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}