diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py
index 70228ce273..ce41f2d296 100644
--- a/libs/langchain/langchain/agents/json_chat/base.py
+++ b/libs/langchain/langchain/agents/json_chat/base.py
@@ -17,6 +17,7 @@ def create_json_chat_agent(
     prompt: ChatPromptTemplate,
     stop_sequence: Union[bool, List[str]] = True,
     tools_renderer: ToolsRenderer = render_text_description,
+    template_tool_response: str = TEMPLATE_TOOL_RESPONSE,
 ) -> Runnable:
     """Create an agent that uses JSON to format its logic, build for Chat Models.
 
@@ -33,6 +34,8 @@
             does not support stop sequences.
         tools_renderer: This controls how the tools are converted into a string and
             then passed into the LLM. Default is `render_text_description`.
+        template_tool_response: Template prompt that uses the tool response (observation)
+            to make the LLM generate the next action to take.
 
     Returns:
         A Runnable sequence representing an agent. It takes as input all the same input
@@ -157,6 +160,11 @@ def create_json_chat_agent(
     if missing_vars:
         raise ValueError(f"Prompt missing required variables: {missing_vars}")
 
+    if "{observation}" not in template_tool_response:
+        raise ValueError(
+            "Template tool response missing required variable 'observation'"
+        )
+
     prompt = prompt.partial(
         tools=tools_renderer(list(tools)),
         tool_names=", ".join([t.name for t in tools]),
@@ -170,7 +178,7 @@
     agent = (
         RunnablePassthrough.assign(
             agent_scratchpad=lambda x: format_log_to_messages(
-                x["intermediate_steps"], template_tool_response=TEMPLATE_TOOL_RESPONSE
+                x["intermediate_steps"], template_tool_response=template_tool_response
             )
         )
         | prompt
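
A minimal usage sketch for the new parameter. The model, hub prompt, and tool below are illustrative assumptions, not part of this diff; only `create_json_chat_agent` and its `template_tool_response` keyword come from the change above. Note that the custom template must contain `{observation}`, or the new validation raises a `ValueError`.

```python
from langchain import hub
from langchain.agents import AgentExecutor, create_json_chat_agent
from langchain_community.tools.tavily_search import TavilySearchResults  # illustrative tool
from langchain_openai import ChatOpenAI  # illustrative model

prompt = hub.pull("hwchase17/react-chat-json")
llm = ChatOpenAI()
tools = [TavilySearchResults(max_results=1)]

# Custom tool-response template. It must include "{observation}", which is
# filled with each tool's output when the scratchpad messages are built.
template_tool_response = (
    "TOOL RESPONSE:\n"
    "---------------------\n"
    "{observation}\n\n"
    "Given the tool response, decide on the next action. Respond with a "
    "markdown code snippet of a json blob with a single action."
)

agent = create_json_chat_agent(
    llm, tools, prompt, template_tool_response=template_tool_response
)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "what is LangChain?"})
```

Omitting `template_tool_response` preserves the previous behavior, since the parameter defaults to the module-level `TEMPLATE_TOOL_RESPONSE` that was hardcoded before this change.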