From 5ee76fccd53e5ed66b5d0e816846bd2c84ee88d8 Mon Sep 17 00:00:00 2001
From: William De Vena <60664495+williamdevena@users.noreply.github.com>
Date: Thu, 29 Feb 2024 00:43:16 +0100
Subject: [PATCH] langchain_groq[patch]: Invoke callback prior to yielding
 token (#18272)

## PR title
langchain_groq[patch]: Invoke callback prior to yielding

## PR message
**Description:** Invoke the callback prior to yielding the token in the
`_stream` and `_astream` methods for Groq.
Issue: https://github.com/langchain-ai/langchain/issues/16913
Dependencies: None
Twitter handle: None
---
 libs/partners/groq/langchain_groq/chat_models.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/libs/partners/groq/langchain_groq/chat_models.py b/libs/partners/groq/langchain_groq/chat_models.py
index f09da3d09e..e185d4d538 100644
--- a/libs/partners/groq/langchain_groq/chat_models.py
+++ b/libs/partners/groq/langchain_groq/chat_models.py
@@ -274,9 +274,10 @@ class ChatGroq(BaseChatModel):
             chunk = ChatGenerationChunk(
                 message=chunk, generation_info=generation_info or None
             )
-            yield chunk
+
             if run_manager:
                 run_manager.on_llm_new_token(chunk.text, chunk=chunk, logprobs=logprobs)
+            yield chunk
 
     async def _astream(
         self,
@@ -310,11 +311,12 @@ class ChatGroq(BaseChatModel):
             chunk = ChatGenerationChunk(
                 message=chunk, generation_info=generation_info or None
             )
-            yield chunk
+
             if run_manager:
                 await run_manager.on_llm_new_token(
                     token=chunk.text, chunk=chunk, logprobs=logprobs
                 )
+            yield chunk
 
     #
     # Internal methods
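
The sketch below (not part of the patch) illustrates why the ordering matters from user code: with the callback invoked before the yield, a streaming callback handler observes each token no later than the consumer of the stream does. The `TokenLogger` class, model name, and prompt are illustrative assumptions, and a `GROQ_API_KEY` is assumed to be set in the environment.

```python
# Minimal sketch, assuming a valid GROQ_API_KEY in the environment.
# The handler class, model name, and prompt are illustrative only.
from langchain_core.callbacks import BaseCallbackHandler
from langchain_groq import ChatGroq


class TokenLogger(BaseCallbackHandler):
    """Collects tokens as they are reported via on_llm_new_token."""

    def __init__(self) -> None:
        self.tokens: list[str] = []

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # With this patch, the callback fires before the matching chunk
        # is yielded from _stream / _astream.
        self.tokens.append(token)


handler = TokenLogger()
llm = ChatGroq(model="mixtral-8x7b-32768", callbacks=[handler])

for chunk in llm.stream("Write one short sentence about streaming."):
    # By the time a chunk reaches the consumer, the handler has already
    # recorded its text.
    print(chunk.content, end="", flush=True)

print("\nTokens seen by the callback:", len(handler.tokens))
```

The same ordering applies on the async path: an async handler's `on_llm_new_token` is awaited in `_astream` before the corresponding chunk is yielded to `astream` consumers.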