From 0af48b06d00b23be65d0a10ff27aff4db0f6c85f Mon Sep 17 00:00:00 2001
From: jacobswe
Date: Tue, 25 Jul 2023 14:30:22 -0400
Subject: [PATCH] Bug Fix #6462 (#8241)

- Description: Small change to fix broken Azure streaming. More complete
  migration probably still necessary once the new API behavior is finalized.
- Issue: Implements fix by @rock-you in #6462
- Dependencies: N/A

There don't seem to be any tests specifically for this, and I was having
some trouble adding some. This is just a small temporary fix to allow for
the new API changes that OpenAI are releasing without breaking any other
code.

---------

Co-authored-by: Jacob Swe
Co-authored-by: Bagatur
---
 .../langchain/langchain/chat_models/openai.py | 50 +++++++++++--------
 1 file changed, 28 insertions(+), 22 deletions(-)

diff --git a/libs/langchain/langchain/chat_models/openai.py b/libs/langchain/langchain/chat_models/openai.py
index a3071b8d6b..92a6da820b 100644
--- a/libs/langchain/langchain/chat_models/openai.py
+++ b/libs/langchain/langchain/chat_models/openai.py
@@ -330,17 +330,20 @@ class ChatOpenAI(BaseChatModel):
             for stream_resp in self.completion_with_retry(
                 messages=message_dicts, **params
             ):
-                role = stream_resp["choices"][0]["delta"].get("role", role)
-                token = stream_resp["choices"][0]["delta"].get("content") or ""
-                inner_completion += token
-                _function_call = stream_resp["choices"][0]["delta"].get("function_call")
-                if _function_call:
-                    if function_call is None:
-                        function_call = _function_call
-                    else:
-                        function_call["arguments"] += _function_call["arguments"]
-                if run_manager:
-                    run_manager.on_llm_new_token(token)
+                if len(stream_resp["choices"]) > 0:
+                    role = stream_resp["choices"][0]["delta"].get("role", role)
+                    token = stream_resp["choices"][0]["delta"].get("content") or ""
+                    inner_completion += token
+                    _function_call = stream_resp["choices"][0]["delta"].get(
+                        "function_call"
+                    )
+                    if _function_call:
+                        if function_call is None:
+                            function_call = _function_call
+                        else:
+                            function_call["arguments"] += _function_call["arguments"]
+                    if run_manager:
+                        run_manager.on_llm_new_token(token)
             message = _convert_dict_to_message(
                 {
                     "content": inner_completion,
@@ -393,17 +396,20 @@ class ChatOpenAI(BaseChatModel):
             async for stream_resp in await acompletion_with_retry(
                 self, messages=message_dicts, **params
             ):
-                role = stream_resp["choices"][0]["delta"].get("role", role)
-                token = stream_resp["choices"][0]["delta"].get("content", "")
-                inner_completion += token or ""
-                _function_call = stream_resp["choices"][0]["delta"].get("function_call")
-                if _function_call:
-                    if function_call is None:
-                        function_call = _function_call
-                    else:
-                        function_call["arguments"] += _function_call["arguments"]
-                if run_manager:
-                    await run_manager.on_llm_new_token(token)
+                if len(stream_resp["choices"]) > 0:
+                    role = stream_resp["choices"][0]["delta"].get("role", role)
+                    token = stream_resp["choices"][0]["delta"].get("content", "")
+                    inner_completion += token or ""
+                    _function_call = stream_resp["choices"][0]["delta"].get(
+                        "function_call"
+                    )
+                    if _function_call:
+                        if function_call is None:
+                            function_call = _function_call
+                        else:
+                            function_call["arguments"] += _function_call["arguments"]
+                    if run_manager:
+                        await run_manager.on_llm_new_token(token)
             message = _convert_dict_to_message(
                 {
                     "content": inner_completion,