From abe4c361f920cb1062301f59b4b195974b2cd5ba Mon Sep 17 00:00:00 2001
From: lvisdd
Date: Sat, 29 Jul 2023 07:07:03 +0900
Subject: [PATCH] update get_num_tokens_from_messages model (#8431) (#8430)

Co-authored-by: Kano Kunihiko
---
 libs/langchain/langchain/chat_models/openai.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py
index 520c205c08..de4108e069 100644
--- a/libs/langchain/langchain/chat_models/openai.py
+++ b/libs/langchain/langchain/chat_models/openai.py
@@ -550,12 +550,12 @@ class ChatOpenAI(BaseChatModel):
         if sys.version_info[1] <= 7:
             return super().get_num_tokens_from_messages(messages)
         model, encoding = self._get_encoding_model()
-        if model.startswith("gpt-3.5-turbo"):
+        if model.startswith("gpt-3.5-turbo-0301"):
             # every message follows {role/name}\n{content}\n
             tokens_per_message = 4
             # if there's a name, the role is omitted
             tokens_per_name = -1
-        elif model.startswith("gpt-4"):
+        elif model.startswith("gpt-3.5-turbo") or model.startswith("gpt-4"):
             tokens_per_message = 3
             tokens_per_name = 1
         else:
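
For context, the constants this patch reorders follow the OpenAI cookbook's message-token accounting: newer gpt-3.5-turbo and gpt-4 snapshots add 3 tokens of overhead per message (plus 1 when a "name" field is present), while the legacy gpt-3.5-turbo-0301 snapshot adds 4 per message and -1 per name. The previous prefix check matched "gpt-3.5-turbo" before "gpt-3.5-turbo-0301" could be reached, so 0301 was counted with the wrong constants. Below is a minimal standalone sketch of that counting convention, assuming tiktoken is installed; count_tokens is a hypothetical helper written for illustration, not part of LangChain or of this patch.

# Sketch of the cookbook-style token accounting the patch aligns with.
# Assumes tiktoken is installed; `count_tokens` is a hypothetical helper.
import tiktoken


def count_tokens(messages: list, model: str = "gpt-3.5-turbo") -> int:
    encoding = tiktoken.encoding_for_model(model)
    if model.startswith("gpt-3.5-turbo-0301"):
        tokens_per_message = 4  # legacy per-message framing overhead
        tokens_per_name = -1    # role is omitted when a name is present
    else:  # newer gpt-3.5-turbo and gpt-4 snapshots
        tokens_per_message = 3
        tokens_per_name = 1
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with an assistant message prefix
    return num_tokens


print(count_tokens([{"role": "user", "content": "Hello!"}]))

Running the sketch against both branches shows why the ordering matters: with the old check, a "gpt-3.5-turbo-0301" model name would silently fall into the 3/+1 branch and undercount or overcount depending on the presence of name fields.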