Update ChatOpenAI._astream to respect finish_reason (#9431)
Currently, ChatOpenAI._astream does not propagate finish_reason into the generation_info of the chunks it yields. Change it so the finish reason is carried through.
parent 949b2cf177
commit 3d1095218c
@@ -381,10 +381,16 @@ class ChatOpenAI(BaseChatModel):
         ):
             if len(chunk["choices"]) == 0:
                 continue
-            delta = chunk["choices"][0]["delta"]
-            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            choice = chunk["choices"][0]
+            chunk = _convert_delta_to_message_chunk(
+                choice["delta"], default_chunk_class
+            )
+            finish_reason = choice.get("finish_reason")
+            generation_info = (
+                dict(finish_reason=finish_reason) if finish_reason is not None else None
+            )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk)
+            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
                 await run_manager.on_llm_new_token(chunk.content)
 
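For context, a minimal consumer sketch (not part of this commit) of what the change surfaces. It calls the private _astream helper directly, purely for illustration, and assumes the langchain import paths as of this commit plus an OPENAI_API_KEY in the environment.

    import asyncio

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage


    async def main() -> None:
        chat = ChatOpenAI()  # assumes OPENAI_API_KEY is set in the environment
        messages = [HumanMessage(content="Say hi in one word.")]
        # _astream is private; it is invoked here only to inspect the
        # ChatGenerationChunk objects this commit changes.
        async for gen_chunk in chat._astream(messages):
            # Intermediate chunks: generation_info is None.
            # Final chunk: generation_info == {"finish_reason": "stop"}
            # (or "length", etc.), which was dropped before this change.
            print(repr(gen_chunk.message.content), gen_chunk.generation_info)


    asyncio.run(main())

Before this change every yielded ChatGenerationChunk had generation_info=None, so callers aggregating the stream could not recover why generation ended.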