From 47a685adcf282102d562baeb964d2d1c3d9d6212 Mon Sep 17 00:00:00 2001
From: Jef Packer
Date: Mon, 1 May 2023 09:21:42 -0700
Subject: [PATCH] count tokens instead of chars in autogpt prompt (#3841)

This looks like a bug. By using len() (a character count) instead of
token_counter, the prompt believes it has less context window remaining
than it actually does, so it includes fewer historical messages. The
reduced previous-message context makes the agent repetitive when
selecting tasks.
---
 langchain/experimental/autonomous_agents/autogpt/prompt.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/langchain/experimental/autonomous_agents/autogpt/prompt.py b/langchain/experimental/autonomous_agents/autogpt/prompt.py
index 95647b7c..909e9b0c 100644
--- a/langchain/experimental/autonomous_agents/autogpt/prompt.py
+++ b/langchain/experimental/autonomous_agents/autogpt/prompt.py
@@ -63,7 +63,7 @@ class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
             f"from your past:\n{relevant_memory}\n\n"
         )
         memory_message = SystemMessage(content=content_format)
-        used_tokens += len(memory_message.content)
+        used_tokens += self.token_counter(memory_message.content)
         historical_messages: List[BaseMessage] = []
         for message in previous_messages[-10:][::-1]:
             message_tokens = self.token_counter(message.content)
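
For context, a minimal sketch of the discrepancy the fix addresses. The
token_counter name matches the callable the diff already invokes via
self.token_counter; the tiktoken-backed implementation, the model name,
and the sample string below are assumptions for illustration, not code
from the patch.

# Sketch: character counts overstate token usage, so the old line
# (used_tokens += len(...)) inflated used_tokens and shrank the
# apparent budget left for historical messages.
import tiktoken  # assumed tokenizer backend; not part of the patch

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")  # assumed model


def token_counter(text: str) -> int:
    # Count tokens the way the model's tokenizer would.
    return len(enc.encode(text))


memory = "This reminds you of these events from your past: ..."  # made-up sample
print(len(memory), "chars vs", token_counter(memory), "tokens")
# English prose averages roughly 4 characters per token, so len()
# overstates usage several-fold and fewer previous_messages fit.

With the fix, used_tokens grows at the same rate as the real token
budget, so the loop over previous_messages[-10:] packs in as much
history as the window actually allows.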