From 42d725223ea3765a7699e19d46a6e0c70b4baa79 Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Mon, 27 Mar 2023 08:16:32 -0700
Subject: [PATCH] Harrison/num token calculation (#2041)

Co-authored-by: Aratako <127325395+Aratako@users.noreply.github.com>
---
 langchain/chat_models/openai.py | 54 ++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 17 deletions(-)

diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py
index 900e89ff88..df717211e6 100644
--- a/langchain/chat_models/openai.py
+++ b/langchain/chat_models/openai.py
@@ -335,10 +335,11 @@ class ChatOpenAI(BaseChatModel, BaseModel):
         # calculate the number of tokens in the encoded text
         return len(tokenized_text)
 
-    def get_num_tokens_from_messages(
-        self, messages: List[BaseMessage], model: str = "gpt-3.5-turbo-0301"
-    ) -> int:
-        """Calculate num tokens for gpt-3.5-turbo with tiktoken package."""
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
+
+        Official documentation: https://github.com/openai/openai-cookbook/blob/
+        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
         try:
             import tiktoken
         except ImportError:
@@ -348,23 +349,31 @@ class ChatOpenAI(BaseChatModel, BaseModel):
                 "Please it install it with `pip install tiktoken`."
             )
 
-        """Returns the number of tokens used by a list of messages."""
+        model = self.model_name
+        if model == "gpt-3.5-turbo":
+            # gpt-3.5-turbo may change over time.
+            # Returning num tokens assuming gpt-3.5-turbo-0301.
+            model = "gpt-3.5-turbo-0301"
+        elif model == "gpt-4":
+            # gpt-4 may change over time.
+            # Returning num tokens assuming gpt-4-0314.
+            model = "gpt-4-0314"
+
+        # Returns the number of tokens used by a list of messages.
         try:
             encoding = tiktoken.encoding_for_model(model)
         except KeyError:
+            logger.warning("Warning: model not found. Using cl100k_base encoding.")
             encoding = tiktoken.get_encoding("cl100k_base")
-        if model == "gpt-3.5-turbo-0301":  # note: future models may deviate from this
-            num_tokens = 0
-            messages_dict = [_convert_message_to_dict(m) for m in messages]
-            for message in messages_dict:
-                # every message follows <im_start>{role/name}\n{content}<im_end>\n
-                num_tokens += 4
-                for key, value in message.items():
-                    num_tokens += len(encoding.encode(value))
-                    if key == "name":  # if there's a name, the role is omitted
-                        num_tokens += -1  # role is always required and always 1 token
-            num_tokens += 2  # every reply is primed with <im_start>assistant
-            return num_tokens
+
+        if model == "gpt-3.5-turbo-0301":
+            # every message follows <im_start>{role/name}\n{content}<im_end>\n
+            tokens_per_message = 4
+            # if there's a name, the role is omitted
+            tokens_per_name = -1
+        elif model == "gpt-4-0314":
+            tokens_per_message = 3
+            tokens_per_name = 1
         else:
             raise NotImplementedError(
                 f"get_num_tokens_from_messages() is not presently implemented "
@@ -372,3 +381,14 @@ class ChatOpenAI(BaseChatModel, BaseModel):
                 "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                 "information on how messages are converted to tokens."
             )
+        num_tokens = 0
+        messages_dict = [_convert_message_to_dict(m) for m in messages]
+        for message in messages_dict:
+            num_tokens += tokens_per_message
+            for key, value in message.items():
+                num_tokens += len(encoding.encode(value))
+                if key == "name":
+                    num_tokens += tokens_per_name
+        # every reply is primed with <im_start>assistant
+        num_tokens += 2
+        return num_tokens
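
Note: for readers who want the counting rule outside of langchain, here is a
minimal standalone sketch of the scheme this patch adopts from the OpenAI
cookbook. It assumes tiktoken is installed and that messages are already plain
role/content dicts (langchain produces these via _convert_message_to_dict);
the function name and the sample messages below are illustrative, not part of
the patch.

    import tiktoken

    def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
        """Count prompt tokens as gpt-3.5-turbo-0301 / gpt-4-0314 bill them."""
        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            # unknown model: fall back to the ChatGPT-family encoding
            encoding = tiktoken.get_encoding("cl100k_base")
        if model == "gpt-3.5-turbo-0301":
            # every message follows <im_start>{role/name}\n{content}<im_end>\n
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
        elif model == "gpt-4-0314":
            tokens_per_message = 3
            tokens_per_name = 1
        else:
            raise NotImplementedError(f"no counting rule for {model}")
        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        # every reply is primed with <im_start>assistant
        num_tokens += 2
        return num_tokens

    print(num_tokens_from_messages([
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]))

Under gpt-4-0314 the same messages cost one token less per message (3 vs. 4)
plus the same 2 priming tokens, which is why the patch turns the constants into
data (tokens_per_message / tokens_per_name) instead of folding them into the
counting loop.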
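
And a sketch of calling the changed method, assuming the langchain surface at
the time of this patch (ChatOpenAI in langchain.chat_models, message classes
in langchain.schema) and an OPENAI_API_KEY set in the environment for the
constructor's validation:

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage, SystemMessage

    # the model now comes from the instance; there is no `model` argument anymore
    chat = ChatOpenAI(model_name="gpt-4")
    messages = [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Hello!"),
    ]
    print(chat.get_num_tokens_from_messages(messages))

Because self.model_name defaults to "gpt-3.5-turbo", and the bare
"gpt-3.5-turbo" / "gpt-4" aliases are pinned to -0301 / -0314 before the
tiktoken lookup, callers get a stable count even as the aliases move to newer
snapshots.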