forked from Archives/langchain
Allow no token limit for ChatGPT API (#1481)
The endpoint's default is infinite if we don't specify max_tokens, so unlike the regular completion API, we don't need to calculate this based on the prompt.
This commit is contained in:
parent
312c319d8b
commit
df6865cd52
@ -619,6 +619,9 @@ class OpenAIChat(BaseLLM, BaseModel):
|
|||||||
if "stop" in params:
|
if "stop" in params:
|
||||||
raise ValueError("`stop` found in both the input and default params.")
|
raise ValueError("`stop` found in both the input and default params.")
|
||||||
params["stop"] = stop
|
params["stop"] = stop
|
||||||
|
if params.get("max_tokens") == -1:
|
||||||
|
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
|
||||||
|
del params["max_tokens"]
|
||||||
return messages, params
|
return messages, params
|
||||||
|
|
||||||
def _generate(
|
def _generate(
|
||||||
|
Loading…
Reference in New Issue
Block a user