With longer contexts and completions, gpt-3.5-turbo and, especially, gpt-4 will more often than not take > 60 seconds to respond.