community: fix openai streaming throws 'AIMessageChunk' object has no attribute 'text' (#18006)

After upgrading `langchain-community` to 0.0.22, it is no longer possible to use the OpenAI chat model from the community package with `streaming=True`:
```
  File "/home/runner/work/ragstack-ai/ragstack-ai/ragstack-e2e-tests/.tox/langchain/lib/python3.11/site-packages/langchain_community/chat_models/openai.py", line 434, in _generate
    return generate_from_stream(stream_iter)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/runner/work/ragstack-ai/ragstack-ai/ragstack-e2e-tests/.tox/langchain/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py", line 65, in generate_from_stream
    for chunk in stream:
  File "/home/runner/work/ragstack-ai/ragstack-ai/ragstack-e2e-tests/.tox/langchain/lib/python3.11/site-packages/langchain_community/chat_models/openai.py", line 418, in _stream
    run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
                                 ^^^^^^^^^^
AttributeError: 'AIMessageChunk' object has no attribute 'text'
```
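
A minimal reproduction sketch (the prompt is illustrative, and an `OPENAI_API_KEY` is assumed to be set in the environment):

```python
# Reproduction sketch: on langchain-community 0.0.22 this raises
# AttributeError: 'AIMessageChunk' object has no attribute 'text'
from langchain_community.chat_models import ChatOpenAI

llm = ChatOpenAI(streaming=True)  # streaming=True routes invoke() through _stream()
llm.invoke("Hello")  # fails inside generate_from_stream / _stream
```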

Fixes a regression introduced by https://github.com/langchain-ai/langchain/pull/17907: the token callback was invoked with `chunk.text`, but `chunk` is the raw `AIMessageChunk`, which has no `text` attribute; the wrapping `ChatGenerationChunk` (`cg_chunk`) does, so the callbacks now read the token from `cg_chunk.text`.
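
A small sketch of the distinction (on langchain-core 0.1.x, as used at the time):

```python
from langchain_core.messages import AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk

chunk = AIMessageChunk(content="Hello")
cg_chunk = ChatGenerationChunk(message=chunk)

print(cg_chunk.text)  # "Hello" -- ChatGenerationChunk derives .text from the message content
print(chunk.text)     # AttributeError: 'AIMessageChunk' object has no attribute 'text'
```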
**Twitter handle:** @nicoloboschi
pull/18038/head
Nicolò Boschi committed via GitHub
parent 9b982b2aba
commit 4c132b4cc6

```diff
@@ -223,7 +223,7 @@ class ChatFireworks(BaseChatModel):
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     async def _astream(
```

```diff
@@ -221,7 +221,7 @@ class ChatKonko(ChatOpenAI):
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     def _generate(
```

```diff
@@ -192,7 +192,7 @@ class LlamaEdgeChatService(BaseChatModel):
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
```

```diff
@@ -415,7 +415,7 @@ class ChatOpenAI(BaseChatModel):
                 message=chunk, generation_info=generation_info
             )
             if run_manager:
-                run_manager.on_llm_new_token(chunk.text, chunk=cg_chunk)
+                run_manager.on_llm_new_token(cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     def _generate(
@@ -507,7 +507,7 @@ class ChatOpenAI(BaseChatModel):
                 message=chunk, generation_info=generation_info
            )
             if run_manager:
-                await run_manager.on_llm_new_token(token=chunk.text, chunk=cg_chunk)
+                await run_manager.on_llm_new_token(token=cg_chunk.text, chunk=cg_chunk)
             yield cg_chunk
 
     async def _agenerate(
```
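
The last two hunks cover both the sync and async OpenAI paths; a hedged smoke-test sketch for the async side (illustrative prompt, `OPENAI_API_KEY` assumed to be set):

```python
import asyncio

from langchain_community.chat_models import ChatOpenAI

async def main() -> None:
    llm = ChatOpenAI(streaming=True)
    # astream() drives _astream(), which now reports cg_chunk.text per token
    async for chunk in llm.astream("Hello"):
        print(chunk.content, end="", flush=True)

asyncio.run(main())
```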
