From 824dbc49eef0b5c30a5f67c033d31d59f836c0ac Mon Sep 17 00:00:00 2001 From: Davide Menini <48685774+dmenini@users.noreply.github.com> Date: Thu, 28 Mar 2024 21:59:54 +0100 Subject: [PATCH] langchain[patch]: add template_tool_response arg to create_json_chat (#19696) In this small PR I added the `template_tool_response` arg to the `create_json_chat_agent` function, so that users can customize this prompt in case of need. Thanks for your reviews! --------- Co-authored-by: taamedag --- libs/langchain/langchain/agents/json_chat/base.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py index 70228ce273..ce41f2d296 100644 --- a/libs/langchain/langchain/agents/json_chat/base.py +++ b/libs/langchain/langchain/agents/json_chat/base.py @@ -17,6 +17,7 @@ def create_json_chat_agent( prompt: ChatPromptTemplate, stop_sequence: Union[bool, List[str]] = True, tools_renderer: ToolsRenderer = render_text_description, + template_tool_response: str = TEMPLATE_TOOL_RESPONSE, ) -> Runnable: """Create an agent that uses JSON to format its logic, build for Chat Models. @@ -33,6 +34,8 @@ def create_json_chat_agent( does not support stop sequences. tools_renderer: This controls how the tools are converted into a string and then passed into the LLM. Default is `render_text_description`. + template_tool_response: Template prompt that uses the tool response (observation) + to make the LLM generate the next action to take. Returns: A Runnable sequence representing an agent. 
It takes as input all the same input @@ -157,6 +160,11 @@ def create_json_chat_agent( if missing_vars: raise ValueError(f"Prompt missing required variables: {missing_vars}") + if "{observation}" not in template_tool_response: + raise ValueError( + "Template tool response missing required variable 'observation'" + ) + prompt = prompt.partial( tools=tools_renderer(list(tools)), tool_names=", ".join([t.name for t in tools]), @@ -170,7 +178,7 @@ def create_json_chat_agent( agent = ( RunnablePassthrough.assign( agent_scratchpad=lambda x: format_log_to_messages( - x["intermediate_steps"], template_tool_response=TEMPLATE_TOOL_RESPONSE + x["intermediate_steps"], template_tool_response=template_tool_response ) ) | prompt