From b66a4f48fa5656871c3e849f7e1790dfb5a4c56b Mon Sep 17 00:00:00 2001
From: balloonio
Date: Sun, 14 Apr 2024 14:32:52 -0400
Subject: [PATCH] community[patch]: Invoke callback prior to yielding token fix [DeepInfra] (#20427)

- [x] **PR title**: community[patch]: Invoke callback prior to yielding token fix for [DeepInfra]
- [x] **PR message**:
    - **Description:** Invoke callback prior to yielding token in stream method in [DeepInfra]
    - **Issue:** https://github.com/langchain-ai/langchain/issues/16913
    - **Dependencies:** None
    - **Twitter handle:** @bolun_zhang

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
---
 libs/community/langchain_community/llms/deepinfra.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/community/langchain_community/llms/deepinfra.py b/libs/community/langchain_community/llms/deepinfra.py
index 412de97cf7..65911921a2 100644
--- a/libs/community/langchain_community/llms/deepinfra.py
+++ b/libs/community/langchain_community/llms/deepinfra.py
@@ -155,9 +155,9 @@ class DeepInfra(LLM):
         for line in _parse_stream(response.iter_lines()):
             chunk = _handle_sse_line(line)
             if chunk:
-                yield chunk
                 if run_manager:
                     run_manager.on_llm_new_token(chunk.text)
+                yield chunk
 
     async def _astream(
         self,
@@ -174,9 +174,9 @@ class DeepInfra(LLM):
             async for line in _parse_stream_async(response.content):
                 chunk = _handle_sse_line(line)
                 if chunk:
-                    yield chunk
                     if run_manager:
                         await run_manager.on_llm_new_token(chunk.text)
+                    yield chunk
 
 
 def _parse_stream(rbody: Iterator[bytes]) -> Iterator[str]:
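
For context, a minimal, self-contained sketch of why the callback should fire before the `yield` (this is not the DeepInfra implementation; `stream_tokens` and `on_new_token` are invented names for illustration): if the consumer stops iterating early, code after the suspended `yield` never runs, so a callback placed there can be skipped for the last token the consumer actually received.

```python
# Minimal sketch (assumed names, not the DeepInfra code): invoke the callback
# before yielding so it cannot be skipped when the consumer closes the
# generator early.
from typing import Callable, Iterator


def stream_tokens(tokens: list[str], on_new_token: Callable[[str], None]) -> Iterator[str]:
    for token in tokens:
        on_new_token(token)  # callback runs first ...
        yield token          # ... then the token is handed to the caller


seen: list[str] = []
gen = stream_tokens(["Hello", ",", " world"], seen.append)
first = next(gen)   # the callback has already observed "Hello" at this point
gen.close()         # consumer stops early; GeneratorExit is raised at the yield,
                    # so any code placed after the yield would not have run
print(first, seen)  # Hello ['Hello']
```

With the order reversed (`yield` before the callback), closing the generator after `next()` would raise `GeneratorExit` at the suspended `yield` and the callback for "Hello" would never run, which is the behavior this patch corrects.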