From b9db20481f28c1172e4d3221c5948c5c292bf108 Mon Sep 17 00:00:00 2001 From: Tim Asp <707699+timothyasp@users.noreply.github.com> Date: Sat, 15 Apr 2023 16:09:17 -0700 Subject: [PATCH] Fix wrong token counts from `get_num_tokens` from openai llms (#2952) The encoding fetch was out of date. Luckily OpenAI has a nice [`encoding_for_model`](https://github.com/openai/tiktoken/blob/46287bfa493f8ccca4d927386d7ea9cc20487525/tiktoken/model.py) function in `tiktoken` we can use now. --- langchain/llms/openai.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index 8a54c5d3..32edad00 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -446,15 +446,9 @@ class BaseOpenAI(BaseLLM): "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) - encoder = "gpt2" - if self.model_name in ("text-davinci-003", "text-davinci-002"): - encoder = "p50k_base" - if self.model_name.startswith("code"): - encoder = "p50k_base" - # create a GPT-3 encoder instance - enc = tiktoken.get_encoding(encoder) - - # encode the text using the GPT-3 encoder + + enc = tiktoken.encoding_for_model(self.model_name) + tokenized_text = enc.encode(text) # calculate the number of tokens in the encoded text