From 1f4abb265a9fd6c520835c3bebe8243b077495b5 Mon Sep 17 00:00:00 2001 From: Nathan Azrak <42650258+nathan-az@users.noreply.github.com> Date: Sun, 4 Jun 2023 07:59:09 +1000 Subject: [PATCH] Adds the option to pass the original prompt into the AgentExecutor for PlanAndExecute agents (#5401) # Adds the option to pass the original prompt into the AgentExecutor for PlanAndExecute agents This PR allows the user to optionally specify that they wish for the original prompt/objective to be passed into the Executor agent used by the PlanAndExecute agent. This solves a potential problem where the plan is formed referring to some context contained in the original prompt, but which is not included in the current prompt. Currently, the prompt format given to the Executor is: ``` System: Respond to the human as helpfully and accurately as possible. You have access to the following tools: Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought: Human: Previous steps: {previous_steps} Current objective: {current_step} {agent_scratchpad} ``` This PR changes the final part after `Human:` to optionally insert the objective: ``` Human: {objective} Previous steps: {previous_steps} Current objective: {current_step} {agent_scratchpad} ``` I have given a specific example in #5400 where the context of a database path is lost, since the plan refers to the "given path". The PR has been linted and formatted. So that existing behaviour is not changed, I have defaulted the argument to `False` and added it as the last argument in the signature, so it does not cause issues for any users passing args positionally as opposed to using keywords. Happy to take any feedback or make required changes! Fixes #5400 ## Who can review? Community members can review the PR once tests pass. 
Tag maintainers/contributors who might be interested: @vowelparrot --------- Co-authored-by: Nathan Azrak --- .../plan_and_execute/agent_executor.py | 6 +++++- .../executors/agent_executor.py | 20 ++++++++++++++++--- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/langchain/experimental/plan_and_execute/agent_executor.py b/langchain/experimental/plan_and_execute/agent_executor.py index e268ca34..dccf9eb2 100644 --- a/langchain/experimental/plan_and_execute/agent_executor.py +++ b/langchain/experimental/plan_and_execute/agent_executor.py @@ -39,7 +39,11 @@ class PlanAndExecute(Chain): if run_manager: run_manager.on_text(str(plan), verbose=self.verbose) for step in plan.steps: - _new_inputs = {"previous_steps": self.step_container, "current_step": step} + _new_inputs = { + "previous_steps": self.step_container, + "current_step": step, + "objective": inputs[self.input_key], + } new_inputs = {**_new_inputs, **inputs} response = self.executor.step( new_inputs, diff --git a/langchain/experimental/plan_and_execute/executors/agent_executor.py b/langchain/experimental/plan_and_execute/executors/agent_executor.py index d3c21bec..201a25df 100644 --- a/langchain/experimental/plan_and_execute/executors/agent_executor.py +++ b/langchain/experimental/plan_and_execute/executors/agent_executor.py @@ -12,15 +12,29 @@ Current objective: {current_step} {agent_scratchpad}""" +TASK_PREFIX = """{objective} + +""" + def load_agent_executor( - llm: BaseLanguageModel, tools: List[BaseTool], verbose: bool = False + llm: BaseLanguageModel, + tools: List[BaseTool], + verbose: bool = False, + include_task_in_prompt: bool = False, ) -> ChainExecutor: + input_variables = ["previous_steps", "current_step", "agent_scratchpad"] + template = HUMAN_MESSAGE_TEMPLATE + + if include_task_in_prompt: + input_variables.append("objective") + template = TASK_PREFIX + template + agent = StructuredChatAgent.from_llm_and_tools( llm, tools, - human_message_template=HUMAN_MESSAGE_TEMPLATE, - 
input_variables=["previous_steps", "current_step", "agent_scratchpad"], + human_message_template=template, + input_variables=input_variables, ) agent_executor = AgentExecutor.from_agent_and_tools( agent=agent, tools=tools, verbose=verbose