From 3e41ab7bff2e9e715d0fec9f7320011dec95db49 Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Thu, 29 Dec 2022 22:16:35 -0500
Subject: [PATCH] check keys before using (#475)

---
 langchain/llms/openai.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py
index 42ca39b5c1..820e70ca7c 100644
--- a/langchain/llms/openai.py
+++ b/langchain/llms/openai.py
@@ -142,11 +142,12 @@ class BaseOpenAI(BaseLLM, BaseModel):
         token_usage = {}
         # Get the token usage from the response.
         # Includes prompt, completion, and total tokens used.
-        _keys = ["completion_tokens", "prompt_tokens", "total_tokens"]
+        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
         for _prompts in sub_prompts:
             response = self.client.create(prompt=_prompts, **params)
             choices.extend(response["choices"])
-            for _key in _keys:
+            _keys_to_use = _keys.intersection(response["usage"])
+            for _key in _keys_to_use:
                 if _key not in token_usage:
                     token_usage[_key] = response["usage"][_key]
                 else: