forked from Archives/langchain
Adds the option to pass the original prompt into the AgentExecutor for PlanAndExecute agents (#5401)
# Adds the option to pass the original prompt into the AgentExecutor for PlanAndExecute agents This PR allows the user to optionally specify that they wish for the original prompt/objective to be passed into the Executor agent used by the PlanAndExecute agent. This solves a potential problem where the plan is formed referring to some context contained in the original prompt, but which is not included in the current prompt. Currently, the prompt format given to the Executor is: ``` System: Respond to the human as helpfully and accurately as possible. You have access to the following tools: <Tool and Action Description> <Output Format Description> Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought: Human: <Previous steps> <Current step> ``` This PR changes the final part after `Human:` to optionally insert the objective: ``` Human: <objective> <Previous steps> <Current step> ``` I have given a specific example in #5400 where the context of a database path is lost, since the plan refers to the "given path". The PR has been linted and formatted. So that existing behaviour is not changed, I have defaulted the argument to `False` and added it as the last argument in the signature, so it does not cause issues for any users passing args positionally as opposed to using keywords. Happy to take any feedback or make required changes! Fixes #5400 ## Who can review? Community members can review the PR once tests pass. Tag maintainers/contributors who might be interested: @vowelparrot --------- Co-authored-by: Nathan Azrak <nathan.azrak@gmail.com>
This commit is contained in:
parent
ae2cf1f598
commit
1f4abb265a
```diff
@@ -39,7 +39,11 @@ class PlanAndExecute(Chain):
         if run_manager:
             run_manager.on_text(str(plan), verbose=self.verbose)
         for step in plan.steps:
-            _new_inputs = {"previous_steps": self.step_container, "current_step": step}
+            _new_inputs = {
+                "previous_steps": self.step_container,
+                "current_step": step,
+                "objective": inputs[self.input_key],
+            }
             new_inputs = {**_new_inputs, **inputs}
             response = self.executor.step(
                 new_inputs,
```
```diff
@@ -12,15 +12,29 @@ Current objective: {current_step}

 {agent_scratchpad}"""

+TASK_PREFIX = """{objective}
+
+"""
+

 def load_agent_executor(
-    llm: BaseLanguageModel, tools: List[BaseTool], verbose: bool = False
+    llm: BaseLanguageModel,
+    tools: List[BaseTool],
+    verbose: bool = False,
+    include_task_in_prompt: bool = False,
 ) -> ChainExecutor:
+    input_variables = ["previous_steps", "current_step", "agent_scratchpad"]
+    template = HUMAN_MESSAGE_TEMPLATE
+
+    if include_task_in_prompt:
+        input_variables.append("objective")
+        template = TASK_PREFIX + template
+
     agent = StructuredChatAgent.from_llm_and_tools(
         llm,
         tools,
-        human_message_template=HUMAN_MESSAGE_TEMPLATE,
-        input_variables=["previous_steps", "current_step", "agent_scratchpad"],
+        human_message_template=template,
+        input_variables=input_variables,
     )
     agent_executor = AgentExecutor.from_agent_and_tools(
         agent=agent, tools=tools, verbose=verbose
```
|
Loading…
Reference in New Issue
Block a user