From ea6a5b03e077526896071da80530bebb94eb390b Mon Sep 17 00:00:00 2001
From: Jan Pawellek <2392542+janpawellek@users.noreply.github.com>
Date: Mon, 19 Jun 2023 02:01:15 +0200
Subject: [PATCH] Fix output final text for HuggingFaceTextGenInference when
 streaming (#6211)

The LLM integration
[HuggingFaceTextGenInference](https://github.com/hwchase17/langchain/blob/master/langchain/llms/huggingface_text_gen_inference.py)
already has streaming support. However, when streaming is enabled, it always
returns an empty string as the final output text once the LLM has finished
generating. This is because `text` is initialized to an empty string and never
updated afterwards. This PR fixes the collection of the final output text by
concatenating new tokens to it as they arrive.
---
 langchain/llms/huggingface_text_gen_inference.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/langchain/llms/huggingface_text_gen_inference.py b/langchain/llms/huggingface_text_gen_inference.py
index 3d17c734..cab11789 100644
--- a/langchain/llms/huggingface_text_gen_inference.py
+++ b/langchain/llms/huggingface_text_gen_inference.py
@@ -169,4 +169,5 @@ class HuggingFaceTextGenInference(LLM):
                 if not token.special:
                     if text_callback:
                         text_callback(token.text)
+                    text += token.text
         return text
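
For illustration, below is a self-contained sketch of the fixed streaming loop.
`Token`, `StreamResponse`, and `collect_stream` are hypothetical stand-ins for
the text-generation-inference client types and the surrounding method body,
not part of the library's API.

from typing import Callable, Iterable, Optional

# Hypothetical stand-ins for the client's token and stream-response
# types, included only to make this sketch runnable on its own.
class Token:
    def __init__(self, text: str, special: bool = False) -> None:
        self.text = text
        self.special = special

class StreamResponse:
    def __init__(self, token: Token) -> None:
        self.token = token

def collect_stream(
    stream: Iterable[StreamResponse],
    text_callback: Optional[Callable[[str], None]] = None,
) -> str:
    """Concatenate streamed tokens into the final output text."""
    text = ""
    for res in stream:
        token = res.token
        if not token.special:
            if text_callback:
                text_callback(token.text)
            # The line added by this patch: without it, `text` stays ""
            # and the final return value is always empty.
            text += token.text
    return text

# Example: the callback sees each chunk; the return value is the full text.
chunks = [StreamResponse(Token(t)) for t in ["Hello", ",", " world", "!"]]
assert collect_stream(chunks) == "Hello, world!"

Note that the added line sits outside the `if text_callback:` branch, so the
final text is accumulated even when no streaming callback is registered.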