From df6865cd5265d311f89c14e33d1db7bb4d3b2cf3 Mon Sep 17 00:00:00 2001 From: kahkeng Date: Mon, 6 Mar 2023 13:18:55 -0800 Subject: [PATCH] Allow no token limit for ChatGPT API (#1481) The endpoint default is inf if we don't specify max_tokens, so unlike regular completion API, we don't need to calculate this based on the prompt. --- langchain/llms/openai.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index 6289f256..adc0c55e 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -619,6 +619,9 @@ class OpenAIChat(BaseLLM, BaseModel): if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop + if params.get("max_tokens") == -1: + # for ChatGPT API, omitting max_tokens is equivalent to having no limit + del params["max_tokens"] return messages, params def _generate(