From a8e88e1874a7c689eb6cbdb0e1e8acd0abc2967f Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Fri, 24 Feb 2023 07:37:45 -0800
Subject: [PATCH] Harrison/logprobs (#1279)

Co-authored-by: Prateek Shah <97124740+prateekspanning@users.noreply.github.com>
---
 langchain/llms/openai.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py
index 973d35dd..4f2b37bd 100644
--- a/langchain/llms/openai.py
+++ b/langchain/llms/openai.py
@@ -251,7 +251,9 @@ class BaseOpenAI(BaseLLM, BaseModel):
                     prompt=_prompts, **params
                 ):
                     self.callback_manager.on_llm_new_token(
-                        stream_resp["choices"][0]["text"], verbose=self.verbose
+                        stream_resp["choices"][0]["text"],
+                        verbose=self.verbose,
+                        logprobs=stream_resp["choices"][0]["logprobs"],
                     )
                     _update_response(response, stream_resp)
                 choices.extend(response["choices"])
@@ -285,11 +287,15 @@ class BaseOpenAI(BaseLLM, BaseModel):
                 ):
                     if self.callback_manager.is_async:
                         await self.callback_manager.on_llm_new_token(
-                            stream_resp["choices"][0]["text"], verbose=self.verbose
+                            stream_resp["choices"][0]["text"],
+                            verbose=self.verbose,
+                            logprobs=stream_resp["choices"][0]["logprobs"],
                         )
                     else:
                         self.callback_manager.on_llm_new_token(
-                            stream_resp["choices"][0]["text"], verbose=self.verbose
+                            stream_resp["choices"][0]["text"],
+                            verbose=self.verbose,
+                            logprobs=stream_resp["choices"][0]["logprobs"],
                         )
                     _update_response(response, stream_resp)
                 choices.extend(response["choices"])
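
With this change, any callback handler registered on the LLM's callback manager
receives the raw choices[0]["logprobs"] entry from each streamed OpenAI chunk as
the logprobs keyword of on_llm_new_token. Below is a minimal sketch of a
consumer, assuming only the call shape visible in the patch above (token passed
positionally, verbose and logprobs as keywords); the LogprobsPrinter class and
the sample chunk are hypothetical illustrations, and the logprobs payload
follows the OpenAI completions API (it is None unless the request set
logprobs=<n>):

    from typing import Any, Optional


    class LogprobsPrinter:
        """Hypothetical handler illustrating the new call shape.

        The patched streaming loops forward choices[0]["logprobs"] to
        on_llm_new_token alongside verbose, so a handler can read it
        out of **kwargs.
        """

        def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
            # logprobs is the raw OpenAI field; None unless the request
            # was made with logprobs=<n>.
            logprobs: Optional[dict] = kwargs.get("logprobs")
            print(f"token={token!r} logprobs={logprobs}")


    # Simulate one streamed chunk the way the patched code consumes it.
    stream_resp = {
        "choices": [
            {
                "text": " Hello",
                "logprobs": {"tokens": [" Hello"], "token_logprobs": [-0.12]},
            }
        ]
    }

    handler = LogprobsPrinter()
    handler.on_llm_new_token(
        stream_resp["choices"][0]["text"],
        verbose=False,
        logprobs=stream_resp["choices"][0]["logprobs"],
    )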