docs: Update trim message usage in migrating_memory (#26722)

Make sure trimmed chat histories never start with a ToolMessage that precedes the AIMessage which requested the tool call, since some chat models raise an error on such sequences. This is done by passing `start_on=("human", "ai")` to `trim_messages`.
This commit is contained in:
Eugene Yurtsev 2024-09-20 16:20:27 -04:00 committed by GitHub
parent 91f4711e53
commit 8a9f7091c0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -54,12 +54,12 @@
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install --upgrade --quiet langchain-openai langchain langchain-community"
"%pip install --upgrade --quiet langchain-openai langchain"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "7127478f-4413-48be-bfec-d0cd91b8cf70",
"metadata": {},
"outputs": [],
@ -83,7 +83,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "371616e1-ca41-4a57-99e0-5fbf7d63f2ad",
"metadata": {},
"outputs": [
@ -91,7 +91,8 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'text': 'Nice to meet you, Bob! How can I assist you today?', 'chat_history': []}\n"
"{'text': 'Nice to meet you, Bob! How can I assist you today?', 'chat_history': []}\n",
"{'text': 'Your name is Bob. How can I assist you further, Bob?', 'chat_history': [HumanMessage(content='my name is bob', additional_kwargs={}, response_metadata={}), AIMessage(content='Nice to meet you, Bob! How can I assist you today?', additional_kwargs={}, response_metadata={})]}\n"
]
}
],
@ -129,7 +130,8 @@
"legacy_result = legacy_chain.invoke({\"text\": \"my name is bob\"})\n",
"print(legacy_result)\n",
"\n",
"legacy_result = legacy_chain.invoke({\"text\": \"what was my name\"})"
"legacy_result = legacy_chain.invoke({\"text\": \"what was my name\"})\n",
"print(legacy_result)"
]
},
{
@ -150,7 +152,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "0a92b3f3-0315-46ac-bb28-d07398dd23ea",
"metadata": {},
"outputs": [],
@ -184,7 +186,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "e7ddf8dc-ea27-43e2-8800-9f7c1d4abdc1",
"metadata": {},
"outputs": [
@ -220,6 +222,12 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
")\n",
@ -240,7 +248,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"id": "6442f74b-2c36-48fd-a3d1-c7c5d18c050f",
"metadata": {},
"outputs": [
@ -271,6 +279,12 @@
" # Please see API reference for trim_messages for other ways to specify a token counter.\n",
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
" max_tokens=80, # <-- token limit\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" strategy=\"last\",\n",
" include_system=True, # <-- Keep the system message\n",
")\n",
@ -301,7 +315,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "7d6f79a3-fda7-48fd-9128-bbe4aad84199",
"metadata": {},
"outputs": [
@ -320,7 +334,7 @@
"what was my name?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"Your name is Bob. How can I assist you today, Bob?\n"
"Your name is Bob. How can I help you, Bob?\n"
]
}
],
@ -347,6 +361,12 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
" )\n",
@ -410,7 +430,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"id": "f671db87-8f01-453e-81fd-4e603140a512",
"metadata": {},
"outputs": [
@ -423,8 +443,8 @@
"hi! I'm bob. What is my age?\n",
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
" get_user_age (call_8bCAm5B4H6WzebGzMnUj2xmN)\n",
" Call ID: call_8bCAm5B4H6WzebGzMnUj2xmN\n",
" get_user_age (call_jsMvoIFv970DhqqLCJDzPKsp)\n",
" Call ID: call_jsMvoIFv970DhqqLCJDzPKsp\n",
" Args:\n",
" name: bob\n",
"=================================\u001b[1m Tool Message \u001b[0m=================================\n",
@ -481,6 +501,12 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
" )\n",
@ -547,7 +573,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"id": "072046bb-3892-4206-8ae5-025e93110dcc",
"metadata": {},
"outputs": [
@ -557,8 +583,8 @@
"text": [
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"Tool Calls:\n",
" what_did_the_cow_say (call_M4OGGCfbN2EF5yGBjdM7qNiz)\n",
" Call ID: call_M4OGGCfbN2EF5yGBjdM7qNiz\n",
" what_did_the_cow_say (call_urHTB5CShhcKz37QiVzNBlIS)\n",
" Call ID: call_urHTB5CShhcKz37QiVzNBlIS\n",
" Args:\n"
]
}
@ -588,6 +614,12 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" allow_partial=False,\n",
")\n",