diff --git a/docs/extras/modules/agents/agent_types/openai_multi_functions_agent.ipynb b/docs/extras/modules/agents/agent_types/openai_multi_functions_agent.ipynb
index a86aa3bcbc..372efa6785 100644
--- a/docs/extras/modules/agents/agent_types/openai_multi_functions_agent.ipynb
+++ b/docs/extras/modules/agents/agent_types/openai_multi_functions_agent.ipynb
@@ -16,7 +16,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "id": "c0a83623",
    "metadata": {},
    "outputs": [],
@@ -38,6 +38,18 @@
    ">This initializes the SerpAPIWrapper for search functionality (search).\n"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "a2b0a215",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "os.environ[\"SERPAPI_API_KEY\"] = \"<your-serpapi-api-key>\""
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 3,
@@ -199,20 +211,232 @@
    ")"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "d31d4c09",
+   "metadata": {},
+   "source": [
+    "## Configuring max iteration behavior\n",
+    "\n",
+    "To make sure that our agent doesn't get stuck in excessively long loops, we can set max_iterations. We can also set an early stopping method, which will determine our agent's behavior once the number of max iterations is hit. By default, the early stopping uses method `force` which just returns a constant string. Alternatively, you could specify method `generate` which then does one FINAL pass through the LLM to generate an output."
+ ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "id": "9f5f6743", "metadata": {}, "outputs": [], + "source": [ + "mrkl = initialize_agent(\n", + " tools, \n", + " llm, \n", + " agent=AgentType.OPENAI_FUNCTIONS, \n", + " verbose=True, \n", + " max_iterations=2, \n", + " early_stopping_method=\"generate\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "4362ebc7", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor] Entering Chain run with input:\n", + "\u001b[0m{\n", + " \"input\": \"What is the weather in NYC today, yesterday, and the day before?\"\n", + "}\n", + "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:llm:ChatOpenAI] Entering LLM run with input:\n", + "\u001b[0m{\n", + " \"prompts\": [\n", + " \"System: You are a helpful AI assistant.\\nHuman: What is the weather in NYC today, yesterday, and the day before?\"\n", + " ]\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 2:llm:ChatOpenAI] [1.27s] Exiting LLM run with output:\n", + "\u001b[0m{\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"\",\n", + " \"generation_info\": null,\n", + " \"message\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain\",\n", + " \"schema\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"function_call\": {\n", + " \"name\": \"Search\",\n", + " \"arguments\": \"{\\n \\\"query\\\": \\\"weather in NYC today\\\"\\n}\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llm_output\": {\n", + " \"token_usage\": {\n", + " \"prompt_tokens\": 79,\n", + " \"completion_tokens\": 17,\n", + " \"total_tokens\": 96\n", + " 
},\n", + " \"model_name\": \"gpt-3.5-turbo-0613\"\n", + " },\n", + " \"run\": null\n", + "}\n", + "\u001b[32;1m\u001b[1;3m[tool/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 3:tool:Search] Entering Tool run with input:\n", + "\u001b[0m\"{'query': 'weather in NYC today'}\"\n", + "\u001b[36;1m\u001b[1;3m[tool/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 3:tool:Search] [3.84s] Exiting Tool run with output:\n", + "\u001b[0m\"10:00 am · Feels Like85° · WindSE 4 mph · Humidity78% · UV Index3 of 11 · Cloud Cover81% · Rain Amount0 in ...\"\n", + "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 4:llm:ChatOpenAI] Entering LLM run with input:\n", + "\u001b[0m{\n", + " \"prompts\": [\n", + " \"System: You are a helpful AI assistant.\\nHuman: What is the weather in NYC today, yesterday, and the day before?\\nAI: {'name': 'Search', 'arguments': '{\\\\n \\\"query\\\": \\\"weather in NYC today\\\"\\\\n}'}\\nFunction: 10:00 am · Feels Like85° · WindSE 4 mph · Humidity78% · UV Index3 of 11 · Cloud Cover81% · Rain Amount0 in ...\"\n", + " ]\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 4:llm:ChatOpenAI] [1.24s] Exiting LLM run with output:\n", + "\u001b[0m{\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"\",\n", + " \"generation_info\": null,\n", + " \"message\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain\",\n", + " \"schema\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"function_call\": {\n", + " \"name\": \"Search\",\n", + " \"arguments\": \"{\\n \\\"query\\\": \\\"weather in NYC yesterday\\\"\\n}\"\n", + " }\n", + " }\n", + " }\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llm_output\": {\n", + " \"token_usage\": {\n", + " \"prompt_tokens\": 142,\n", + " \"completion_tokens\": 17,\n", + " \"total_tokens\": 159\n", + " },\n", 
+ " \"model_name\": \"gpt-3.5-turbo-0613\"\n", + " },\n", + " \"run\": null\n", + "}\n", + "\u001b[32;1m\u001b[1;3m[tool/start]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 5:tool:Search] Entering Tool run with input:\n", + "\u001b[0m\"{'query': 'weather in NYC yesterday'}\"\n", + "\u001b[36;1m\u001b[1;3m[tool/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor > 5:tool:Search] [1.15s] Exiting Tool run with output:\n", + "\u001b[0m\"New York Temperature Yesterday. Maximum temperature yesterday: 81 °F (at 1:51 pm) Minimum temperature yesterday: 72 °F (at 7:17 pm) Average temperature ...\"\n", + "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:llm:ChatOpenAI] Entering LLM run with input:\n", + "\u001b[0m{\n", + " \"prompts\": [\n", + " \"System: You are a helpful AI assistant.\\nHuman: What is the weather in NYC today, yesterday, and the day before?\\nAI: {'name': 'Search', 'arguments': '{\\\\n \\\"query\\\": \\\"weather in NYC today\\\"\\\\n}'}\\nFunction: 10:00 am · Feels Like85° · WindSE 4 mph · Humidity78% · UV Index3 of 11 · Cloud Cover81% · Rain Amount0 in ...\\nAI: {'name': 'Search', 'arguments': '{\\\\n \\\"query\\\": \\\"weather in NYC yesterday\\\"\\\\n}'}\\nFunction: New York Temperature Yesterday. Maximum temperature yesterday: 81 °F (at 1:51 pm) Minimum temperature yesterday: 72 °F (at 7:17 pm) Average temperature ...\"\n", + " ]\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:llm:ChatOpenAI] [2.68s] Exiting LLM run with output:\n", + "\u001b[0m{\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"Today in NYC, the weather is currently 85°F with a southeast wind of 4 mph. The humidity is at 78% and there is 81% cloud cover. 
There is no rain expected today.\\n\\nYesterday in NYC, the maximum temperature was 81°F at 1:51 pm, and the minimum temperature was 72°F at 7:17 pm.\\n\\nFor the day before yesterday, I do not have the specific weather information.\",\n", + " \"generation_info\": null,\n", + " \"message\": {\n", + " \"lc\": 1,\n", + " \"type\": \"constructor\",\n", + " \"id\": [\n", + " \"langchain\",\n", + " \"schema\",\n", + " \"messages\",\n", + " \"AIMessage\"\n", + " ],\n", + " \"kwargs\": {\n", + " \"content\": \"Today in NYC, the weather is currently 85°F with a southeast wind of 4 mph. The humidity is at 78% and there is 81% cloud cover. There is no rain expected today.\\n\\nYesterday in NYC, the maximum temperature was 81°F at 1:51 pm, and the minimum temperature was 72°F at 7:17 pm.\\n\\nFor the day before yesterday, I do not have the specific weather information.\",\n", + " \"additional_kwargs\": {}\n", + " }\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llm_output\": {\n", + " \"token_usage\": {\n", + " \"prompt_tokens\": 160,\n", + " \"completion_tokens\": 91,\n", + " \"total_tokens\": 251\n", + " },\n", + " \"model_name\": \"gpt-3.5-turbo-0613\"\n", + " },\n", + " \"run\": null\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:AgentExecutor] [10.18s] Exiting Chain run with output:\n", + "\u001b[0m{\n", + " \"output\": \"Today in NYC, the weather is currently 85°F with a southeast wind of 4 mph. The humidity is at 78% and there is 81% cloud cover. There is no rain expected today.\\n\\nYesterday in NYC, the maximum temperature was 81°F at 1:51 pm, and the minimum temperature was 72°F at 7:17 pm.\\n\\nFor the day before yesterday, I do not have the specific weather information.\"\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "'Today in NYC, the weather is currently 85°F with a southeast wind of 4 mph. The humidity is at 78% and there is 81% cloud cover. 
There is no rain expected today.\\n\\nYesterday in NYC, the maximum temperature was 81°F at 1:51 pm, and the minimum temperature was 72°F at 7:17 pm.\\n\\nFor the day before yesterday, I do not have the specific weather information.'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mrkl.run(\n", + " \"What is the weather in NYC today, yesterday, and the day before?\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "067a8d3e", + "metadata": {}, + "source": [ + "Notice that we never get around to looking up the weather the day before yesterday, due to hitting our max_iterations limit." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c3318a11", + "metadata": {}, + "outputs": [], "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "venv", "language": "python", - "name": "python3" + "name": "venv" }, "language_info": { "codemirror_mode": { @@ -224,7 +448,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.11.3" } }, "nbformat": 4, diff --git a/langchain/agents/openai_functions_agent/base.py b/langchain/agents/openai_functions_agent/base.py index fcd1ca50f6..f3e2fc095c 100644 --- a/langchain/agents/openai_functions_agent/base.py +++ b/langchain/agents/openai_functions_agent/base.py @@ -189,6 +189,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent): self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks = None, + with_functions: bool = True, **kwargs: Any, ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. 
@@ -207,9 +208,17 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
         prompt = self.prompt.format_prompt(**full_inputs)
         messages = prompt.to_messages()
-        predicted_message = self.llm.predict_messages(
-            messages, functions=self.functions, callbacks=callbacks
-        )
+        if with_functions:
+            predicted_message = self.llm.predict_messages(
+                messages,
+                functions=self.functions,
+                callbacks=callbacks,
+            )
+        else:
+            predicted_message = self.llm.predict_messages(
+                messages,
+                callbacks=callbacks,
+            )
         agent_decision = _parse_ai_message(predicted_message)
         return agent_decision
 
@@ -242,6 +251,35 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
         agent_decision = _parse_ai_message(predicted_message)
         return agent_decision
 
+    def return_stopped_response(
+        self,
+        early_stopping_method: str,
+        intermediate_steps: List[Tuple[AgentAction, str]],
+        **kwargs: Any,
+    ) -> AgentFinish:
+        """Return response when agent has been stopped due to max iterations."""
+        if early_stopping_method == "force":
+            # `force` just returns a constant string
+            return AgentFinish(
+                {"output": "Agent stopped due to iteration limit or time limit."}, ""
+            )
+        elif early_stopping_method == "generate":
+            # Generate does one final forward pass
+            agent_decision = self.plan(
+                intermediate_steps, with_functions=False, **kwargs
+            )
+            if isinstance(agent_decision, AgentFinish):
+                return agent_decision
+            else:
+                raise ValueError(
+                    f"got AgentAction with no functions provided: {agent_decision}"
+                )
+        else:
+            raise ValueError(
+                "early_stopping_method should be one of `force` or `generate`, "
+                f"got {early_stopping_method}"
+            )
+
     @classmethod
     def create_prompt(
         cls,