From ba0cbb4a418c4d68d90fa66f78877a246d8234c8 Mon Sep 17 00:00:00 2001 From: Diwank Singh Tomer Date: Fri, 6 Jan 2023 20:45:25 +0530 Subject: [PATCH] Add finish reason to Generation for usage downstream (#526) Add `finish_reason` to `Generation` as well as extend `BaseOpenAI._generate` to include it in the output. This can be useful for usage in downstream tasks when we need to filter for only generations that finished because of `"stop"` for example. Maybe we should add this to `LLMChain` as well? For more details, see https://beta.openai.com/docs/guides/completion/best-practices Signed-off-by: Diwank Singh Tomer --- langchain/llms/openai.py | 11 ++++++++++- langchain/schema.py | 6 +++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index a454b40c..22840dcb 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -164,7 +164,16 @@ class BaseOpenAI(BaseLLM, BaseModel): for i, prompt in enumerate(prompts): sub_choices = choices[i * self.n : (i + 1) * self.n] generations.append( - [Generation(text=choice["text"]) for choice in sub_choices] + [ + Generation( + text=choice["text"], + generation_info=dict( + finish_reason=choice["finish_reason"], + logprobs=choice["logprobs"], + ), + ) + for choice in sub_choices + ] ) return LLMResult( generations=generations, llm_output={"token_usage": token_usage} diff --git a/langchain/schema.py b/langchain/schema.py index a4b4e626..6bb53eb5 100644 --- a/langchain/schema.py +++ b/langchain/schema.py @@ -1,6 +1,6 @@ """Common schema objects.""" -from typing import List, NamedTuple, Optional +from typing import Any, Dict, List, NamedTuple, Optional class AgentAction(NamedTuple): @@ -23,6 +23,10 @@ class Generation(NamedTuple): text: str """Generated text output.""" + + generation_info: Optional[Dict[str, Any]] = None + """Raw generation info response from the provider""" + """May include things like reason for finishing (e.g. 
"stop" in OpenAI)""" # TODO: add log probs