Update ChatOpenAI._astream to respect finish_reason (#9431)

Currently, ChatOpenAI._astream does not propagate the finish_reason from the streamed response into the generation_info of the ChatGenerationChunk it yields. Change it so the chunk that ends the stream carries finish_reason in its generation_info.
Kim Minjong 2023-08-22 02:56:42 +07:00 committed by GitHub
parent 949b2cf177
commit 3d1095218c


@@ -381,10 +381,16 @@ class ChatOpenAI(BaseChatModel):
         ):
             if len(chunk["choices"]) == 0:
                 continue
-            delta = chunk["choices"][0]["delta"]
-            chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
+            choice = chunk["choices"][0]
+            chunk = _convert_delta_to_message_chunk(
+                choice["delta"], default_chunk_class
+            )
+            finish_reason = choice.get("finish_reason")
+            generation_info = (
+                dict(finish_reason=finish_reason) if finish_reason is not None else None
+            )
             default_chunk_class = chunk.__class__
-            yield ChatGenerationChunk(message=chunk)
+            yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
             if run_manager:
                 await run_manager.on_llm_new_token(chunk.content)
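
For context, a minimal sketch (not part of the commit) of how the new field surfaces to a caller. It assumes a valid OPENAI_API_KEY in the environment and calls the private _astream directly purely for illustration:

    import asyncio

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage


    async def main() -> None:
        chat = ChatOpenAI(streaming=True)
        # _astream yields ChatGenerationChunk objects; with this change the
        # chunk that ends the stream carries finish_reason in generation_info.
        async for gen_chunk in chat._astream([HumanMessage(content="Say hi")]):
            print(gen_chunk.message.content, gen_chunk.generation_info)


    asyncio.run(main())

With the OpenAI streaming API, finish_reason arrives only on the final delta, so intermediate chunks still yield generation_info=None and only the closing chunk should print something like {"finish_reason": "stop"}.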