From e2f36ee6082506049419875fa4a374f8fa2a88fe Mon Sep 17 00:00:00 2001 From: ju-bezdek Date: Mon, 19 Jun 2023 02:05:16 +0200 Subject: [PATCH] OpenAI functions don't work with async streaming... #6225 (#6226) Related to this https://github.com/hwchase17/langchain/issues/6225 Just copied the implementation from `generate` function to `agenerate` and tested it. Didn't run any official tests, though. Fixes #6225 #### Before submitting #### Who can review? Tag maintainers/contributors who might be interested: @hwchase17, @agola11 --- langchain/chat_models/openai.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py index 674bf5e9..4785b053 100644 --- a/langchain/chat_models/openai.py +++ b/langchain/chat_models/openai.py @@ -386,16 +386,27 @@ class ChatOpenAI(BaseChatModel): inner_completion = "" role = "assistant" params["stream"] = True + function_call: Optional[dict] = None async for stream_resp in await acompletion_with_retry( self, messages=message_dicts, **params ): role = stream_resp["choices"][0]["delta"].get("role", role) token = stream_resp["choices"][0]["delta"].get("content", "") - inner_completion += token + inner_completion += token or "" + _function_call = stream_resp["choices"][0]["delta"].get("function_call") + if _function_call: + if function_call is None: + function_call = _function_call + else: + function_call["arguments"] += _function_call["arguments"] if run_manager: await run_manager.on_llm_new_token(token) message = _convert_dict_to_message( - {"content": inner_completion, "role": role} + { + "content": inner_completion, + "role": role, + "function_call": function_call, + } ) return ChatResult(generations=[ChatGeneration(message=message)]) else: