From a265878d711b63836d25e2442362bacbd3cdabe1 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Thu, 1 Feb 2024 16:43:10 -0800 Subject: [PATCH] langchain_openai[patch]: Invoke callback prior to yielding token (#16909) All models should be calling the callback for new token prior to yielding the token. Not doing this can cause callbacks for downstream steps to be called prior to the callback for the new token; causing issues in astream_events APIs and other things that depend on callback ordering being correct. We need to make this change for all chat models. --- libs/partners/openai/langchain_openai/chat_models/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index d0bed6f921..b5b7199edb 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -424,9 +424,9 @@ class ChatOpenAI(BaseChatModel): chunk = ChatGenerationChunk( message=chunk, generation_info=generation_info or None ) - yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, chunk=chunk, logprobs=logprobs) + yield chunk def _generate( self, @@ -516,11 +516,11 @@ class ChatOpenAI(BaseChatModel): chunk = ChatGenerationChunk( message=chunk, generation_info=generation_info or None ) - yield chunk if run_manager: await run_manager.on_llm_new_token( token=chunk.text, chunk=chunk, logprobs=logprobs ) + yield chunk async def _agenerate( self,