Update ChatOpenAI._astream to respect finish_reason (#9431)

Currently, ChatOpenAI._astream does not propagate finish_reason into
generation_info. Change it so that finish_reason is included.
pull/9561/head
Kim Minjong 1 year ago committed by GitHub
parent 949b2cf177
commit 3d1095218c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -381,10 +381,16 @@ class ChatOpenAI(BaseChatModel):
):
if len(chunk["choices"]) == 0:
continue
delta = chunk["choices"][0]["delta"]
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
await run_manager.on_llm_new_token(chunk.content)

Loading…
Cancel
Save