From e74733ab9e5e307fd828ea600ea929a1cb24320f Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Tue, 13 Jun 2023 15:26:26 -0700
Subject: [PATCH] support streaming for functions (#6115)

---
 langchain/chat_models/openai.py | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)

diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py
index e78ee222..db21ff8b 100644
--- a/langchain/chat_models/openai.py
+++ b/langchain/chat_models/openai.py
@@ -321,16 +321,27 @@ class ChatOpenAI(BaseChatModel):
             inner_completion = ""
             role = "assistant"
             params["stream"] = True
+            function_call: Optional[dict] = None
             for stream_resp in self.completion_with_retry(
                 messages=message_dicts, **params
             ):
                 role = stream_resp["choices"][0]["delta"].get("role", role)
-                token = stream_resp["choices"][0]["delta"].get("content", "")
+                token = stream_resp["choices"][0]["delta"].get("content") or ""
                 inner_completion += token
+                _function_call = stream_resp["choices"][0]["delta"].get("function_call")
+                if _function_call:
+                    if function_call is None:
+                        function_call = _function_call
+                    else:
+                        function_call["arguments"] += _function_call["arguments"]
                 if run_manager:
                     run_manager.on_llm_new_token(token)
             message = _convert_dict_to_message(
-                {"content": inner_completion, "role": role}
+                {
+                    "content": inner_completion,
+                    "role": role,
+                    "function_call": function_call,
+                }
             )
             return ChatResult(generations=[ChatGeneration(message=message)])
         response = self.completion_with_retry(messages=message_dicts, **params)
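
Note for readers tracing the change: the new loop merges streamed function_call
deltas into a single dict. The first delta carries the function name (and the
start of the arguments), and each later delta only appends to the JSON
"arguments" string. The switch from .get("content", "") to
.get("content") or "" also matters, because during a function call the API
sends an explicit content: None rather than omitting the key, and None would
break the += concatenation. What follows is a minimal, self-contained sketch of
that accumulation logic, replayed over hand-written chunk dicts that only
approximate real OpenAI stream output; it is illustrative, not part of the
patch.

from typing import Optional


def accumulate_stream(chunks: list) -> dict:
    """Replay the patch's accumulation logic over pre-recorded chunks.

    `chunks` mimics the `stream_resp` dicts yielded by the OpenAI
    chat-completions API with stream=True; the samples below are
    invented for illustration, not captured API output.
    """
    inner_completion = ""
    role = "assistant"
    function_call: Optional[dict] = None
    for stream_resp in chunks:
        delta = stream_resp["choices"][0]["delta"]
        # Later deltas omit "role", so fall back to the last seen value.
        role = delta.get("role", role)
        # `or ""` guards against content=None during a function call.
        inner_completion += delta.get("content") or ""
        _function_call = delta.get("function_call")
        if _function_call:
            if function_call is None:
                # First delta: carries the function name and opening arguments.
                function_call = _function_call
            else:
                # Subsequent deltas: extend the JSON arguments string.
                function_call["arguments"] += _function_call["arguments"]
    return {
        "content": inner_completion,
        "role": role,
        "function_call": function_call,
    }


# Invented sample chunks approximating a streamed function call.
chunks = [
    {"choices": [{"delta": {
        "role": "assistant",
        "content": None,
        "function_call": {"name": "get_weather", "arguments": ""},
    }}]},
    {"choices": [{"delta": {"function_call": {"arguments": '{"city": '}}}]},
    {"choices": [{"delta": {"function_call": {"arguments": '"Paris"}'}}}]},
]

print(accumulate_stream(chunks))
# {'content': '', 'role': 'assistant',
#  'function_call': {'name': 'get_weather', 'arguments': '{"city": "Paris"}'}}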