diff --git a/libs/community/langchain_community/chat_models/fireworks.py b/libs/community/langchain_community/chat_models/fireworks.py
index 7c7c127046..118126c4e6 100644
--- a/libs/community/langchain_community/chat_models/fireworks.py
+++ b/libs/community/langchain_community/chat_models/fireworks.py
@@ -223,7 +223,7 @@ class ChatFireworks(BaseChatModel):
                     message=chunk, generation_info=generation_info
                 )
                 if run_manager:
-                    run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                    run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
                 yield cg_chunk

     async def _astream(
diff --git a/libs/community/langchain_community/chat_models/konko.py b/libs/community/langchain_community/chat_models/konko.py
index afeab01312..0eab398af7 100644
--- a/libs/community/langchain_community/chat_models/konko.py
+++ b/libs/community/langchain_community/chat_models/konko.py
@@ -221,7 +221,7 @@ class ChatKonko(ChatOpenAI):
                     message=chunk, generation_info=generation_info
                 )
                 if run_manager:
-                    run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                    run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
                 yield cg_chunk

     def _generate(
diff --git a/libs/community/langchain_community/chat_models/llama_edge.py b/libs/community/langchain_community/chat_models/llama_edge.py
index 603546a9f7..064fe5f97b 100644
--- a/libs/community/langchain_community/chat_models/llama_edge.py
+++ b/libs/community/langchain_community/chat_models/llama_edge.py
@@ -192,7 +192,7 @@ class LlamaEdgeChatService(BaseChatModel):
                     message=chunk, generation_info=generation_info
                 )
                 if run_manager:
-                    run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                    run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
                 yield cg_chunk

     def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
diff --git a/libs/community/langchain_community/chat_models/openai.py b/libs/community/langchain_community/chat_models/openai.py
index 9573b9bd2b..b3d26a6c2d 100644
--- a/libs/community/langchain_community/chat_models/openai.py
+++ b/libs/community/langchain_community/chat_models/openai.py
@@ -415,7 +415,7 @@ class ChatOpenAI(BaseChatModel):
                     message=chunk, generation_info=generation_info
                 )
                 if run_manager:
-                    run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                    run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
                 yield cg_chunk

     def _generate(
@@ -507,7 +507,7 @@ class ChatOpenAI(BaseChatModel):
                     message=chunk, generation_info=generation_info
                 )
                 if run_manager:
-                    await run_manager.on_llm_new_token(token=chunk.text, chunk=cg_chunk)
+                    await run_manager.on_llm_new_token(token=cg_chunk.text, chunk=cg_chunk)
                 yield cg_chunk

     async def _agenerate(
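
For context, a minimal sketch (not part of the patch) of the objects involved, assuming langchain-core's public AIMessageChunk and ChatGenerationChunk APIs: cg_chunk is the ChatGenerationChunk that wraps the raw message chunk, and its .text is derived from the wrapped message's content. Sourcing the token from cg_chunk keeps the string argument to on_llm_new_token consistent with the chunk=cg_chunk argument, since both now come from the same object.

# Minimal sketch, not part of the patch: illustrates the relationship between
# the message chunk and the generation chunk that the fix relies on.
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

# Raw message chunk, as parsed from a provider's streaming response.
chunk = AIMessageChunk(content="Hello")

# Generation chunk wrapping the message chunk; its .text is derived from the
# wrapped message's content and is the plain string the callback expects.
cg_chunk = ChatGenerationChunk(message=chunk, generation_info=None)

# After the fix, the token and the chunk= kwarg come from the same object,
# so the two arguments to on_llm_new_token cannot disagree:
#     run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
assert cg_chunk.text == "Hello"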