docs: Fix trim_messages invocations in the memory migration guide (#26902)

The argument should be start_on="human" only, not start_on=("human", "ai"): including "ai" can produce a trimmed history that starts with an AIMessage, which some chat models reject.
This commit is contained in:
Eugene Yurtsev 2024-09-26 13:02:30 -04:00 committed by GitHub
parent 7a99a4d4f8
commit 2a0d9d05fb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -222,13 +222,15 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" # Most chat models expect that chat history starts with either:\n",
" # (1) a HumanMessage or\n",
" # (2) a SystemMessage followed by a HumanMessage\n",
" # start_on=\"human\" makes sure we produce a valid chat history\n",
" start_on=\"human\",\n",
" # Usually, we want to keep the SystemMessage\n",
" # if it's present in the original history.\n",
" # The SystemMessage has special instructions for the model.\n",
" include_system=True,\n",
" allow_partial=False,\n",
")\n",
"\n",
@ -280,13 +282,16 @@
" token_counter=ChatOpenAI(model=\"gpt-4o\"),\n",
" max_tokens=80, # <-- token limit\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" # Most chat models expect that chat history starts with either:\n",
" # (1) a HumanMessage or\n",
" # (2) a SystemMessage followed by a HumanMessage\n",
" # start_on=\"human\" makes sure we produce a valid chat history\n",
" start_on=\"human\",\n",
" # Usually, we want to keep the SystemMessage\n",
" # if it's present in the original history.\n",
" # The SystemMessage has special instructions for the model.\n",
" include_system=True,\n",
" strategy=\"last\",\n",
" include_system=True, # <-- Keep the system message\n",
")\n",
"\n",
"for msg in selected_messages:\n",
@ -361,13 +366,15 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" # Most chat models expect that chat history starts with either:\n",
" # (1) a HumanMessage or\n",
" # (2) a SystemMessage followed by a HumanMessage\n",
" # start_on=\"human\" makes sure we produce a valid chat history\n",
" start_on=\"human\",\n",
" # Usually, we want to keep the SystemMessage\n",
" # if it's present in the original history.\n",
" # The SystemMessage has special instructions for the model.\n",
" include_system=True,\n",
" allow_partial=False,\n",
" )\n",
"\n",
@ -501,13 +508,15 @@
" token_counter=len, # <-- len will simply count the number of messages rather than tokens\n",
" max_tokens=5, # <-- allow up to 5 messages.\n",
" strategy=\"last\",\n",
" # The start_on is specified\n",
" # to make sure we do not generate a sequence where\n",
" # a ToolMessage that contains the result of a tool invocation\n",
" # appears before the AIMessage that requested a tool invocation\n",
" # as this will cause some chat models to raise an error.\n",
" start_on=(\"human\", \"ai\"),\n",
" include_system=True, # <-- Keep the system message\n",
" # Most chat models expect that chat history starts with either:\n",
" # (1) a HumanMessage or\n",
" # (2) a SystemMessage followed by a HumanMessage\n",
" # start_on=\"human\" makes sure we produce a valid chat history\n",
" start_on=\"human\",\n",
" # Usually, we want to keep the SystemMessage\n",
" # if it's present in the original history.\n",
" # The SystemMessage has special instructions for the model.\n",
" include_system=True,\n",
" allow_partial=False,\n",
" )\n",
"\n",