langchain[patch]: add template_tool_response arg to create_json_chat (#19696)

In this small PR I added the `template_tool_response` arg to the
`create_json_chat_agent` function, so that users can customize this
prompt if needed.
Thanks for your reviews!

---------

Co-authored-by: taamedag <Davide.Menini@swisscom.com>
pull/19737/head
Davide Menini 5 months ago committed by GitHub
parent 688ca48019
commit 824dbc49ee
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@@ -17,6 +17,7 @@ def create_json_chat_agent(
prompt: ChatPromptTemplate,
stop_sequence: Union[bool, List[str]] = True,
tools_renderer: ToolsRenderer = render_text_description,
template_tool_response: str = TEMPLATE_TOOL_RESPONSE,
) -> Runnable:
"""Create an agent that uses JSON to format its logic, build for Chat Models.
@@ -33,6 +34,8 @@ def create_json_chat_agent(
does not support stop sequences.
tools_renderer: This controls how the tools are converted into a string and
then passed into the LLM. Default is `render_text_description`.
template_tool_response: Template prompt that uses the tool response (observation)
to make the LLM generate the next action to take.
Returns:
A Runnable sequence representing an agent. It takes as input all the same input
@@ -157,6 +160,11 @@ def create_json_chat_agent(
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
if "{observation}" not in template_tool_response:
raise ValueError(
"Template tool response missing required variable 'observation'"
)
prompt = prompt.partial(
tools=tools_renderer(list(tools)),
tool_names=", ".join([t.name for t in tools]),
@@ -170,7 +178,7 @@ def create_json_chat_agent(
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_log_to_messages(
x["intermediate_steps"], template_tool_response=TEMPLATE_TOOL_RESPONSE
x["intermediate_steps"], template_tool_response=template_tool_response
)
)
| prompt

Loading…
Cancel
Save