From a37dc83a9ea7880fe91e4e8794c8edf6bf264f64 Mon Sep 17 00:00:00 2001
From: William De Vena <60664495+williamdevena@users.noreply.github.com>
Date: Wed, 28 Feb 2024 21:19:22 +0100
Subject: [PATCH] langchain_anthropic[patch]: Invoke callback prior to yielding token (#18274)

## PR title
langchain_anthropic[patch]: Invoke callback prior to yielding

## PR message
- Description: Invoke callback prior to yielding token in `_stream` and
  `_astream` methods for anthropic.
- Issue: https://github.com/langchain-ai/langchain/issues/16913
- Dependencies: None
- Twitter handle: None
---
 libs/partners/anthropic/langchain_anthropic/llms.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/libs/partners/anthropic/langchain_anthropic/llms.py b/libs/partners/anthropic/langchain_anthropic/llms.py
index d5d04a7f3e..82f1b80433 100644
--- a/libs/partners/anthropic/langchain_anthropic/llms.py
+++ b/libs/partners/anthropic/langchain_anthropic/llms.py
@@ -300,9 +300,10 @@ class AnthropicLLM(LLM, _AnthropicCommon):
             prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params
         ):
             chunk = GenerationChunk(text=token.completion)
-            yield chunk
+
             if run_manager:
                 run_manager.on_llm_new_token(chunk.text, chunk=chunk)
+            yield chunk
 
     async def _astream(
         self,
@@ -336,9 +337,10 @@ class AnthropicLLM(LLM, _AnthropicCommon):
             **params,
         ):
             chunk = GenerationChunk(text=token.completion)
-            yield chunk
+
             if run_manager:
                 await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
+            yield chunk
 
     def get_num_tokens(self, text: str) -> int:
         """Calculate number of tokens."""
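
For context on why the ordering matters, here is a minimal, self-contained sketch. It does not use the actual LangChain classes: `Chunk`, `FakeRunManager`, and `fake_stream` are hypothetical stand-ins that mirror the shape of the patched `_stream`. The point it demonstrates: a consumer may stop iterating between yields, so if the callback runs after the `yield`, it can fire late or never.

```python
from dataclasses import dataclass
from typing import Iterator, List


@dataclass
class Chunk:
    """Stand-in for GenerationChunk: just carries the token text."""

    text: str


class FakeRunManager:
    """Stand-in for a callback run manager that records tokens it sees."""

    def __init__(self) -> None:
        self.seen: List[str] = []

    def on_llm_new_token(self, token: str, *, chunk: Chunk) -> None:
        self.seen.append(token)


def fake_stream(tokens: List[str], run_manager: FakeRunManager) -> Iterator[Chunk]:
    """Mirrors the patched _stream: callback first, then yield."""
    for token in tokens:
        chunk = Chunk(text=token)
        if run_manager:
            # Fire the callback *before* handing control back to the
            # consumer, so handlers see the token even if the consumer
            # stops iterating right after this chunk.
            run_manager.on_llm_new_token(chunk.text, chunk=chunk)
        yield chunk


manager = FakeRunManager()
stream = fake_stream(["Hello", ",", " world"], manager)
first = next(stream)  # consumer takes only one chunk...
stream.close()  # ...then abandons the stream
assert manager.seen == ["Hello"]  # the callback still saw that token
print(first.text, manager.seen)
```

With the pre-patch ordering (`yield chunk` before the callback), the assertion above would fail: the generator suspends at the `yield`, so closing it after the first `next()` means the callback for that token never runs. Invoking the callback before yielding guarantees handlers observe every token the consumer receives.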