Add successful request count to OpenAI callback (#2128)

I've found it useful to track the number of successful requests made to
OpenAI. This gives me a better sense of the efficiency of my prompts and
helps me compare map_reduce/refine on a cheaper model against stuffing on a
more expensive model with a larger context window.
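
For context, a minimal usage sketch of reading the new counter, mirroring the notebook cell changed below (the model name and prompt are placeholders, not part of this commit):

    from langchain.llms import OpenAI
    from langchain.callbacks import get_openai_callback

    llm = OpenAI(model_name="text-davinci-002")

    with get_openai_callback() as cb:
        # One LLM call; every call that returns llm_output bumps
        # cb.successful_requests by one.
        result = llm("Tell me a joke")
        print(f"Total Tokens: {cb.total_tokens}")
        print(f"Prompt Tokens: {cb.prompt_tokens}")
        print(f"Completion Tokens: {cb.completion_tokens}")
        print(f"Successful Requests: {cb.successful_requests}")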
Jonathan Page committed 8441cbfc03 (parent 4ab66c4f52)

@@ -45,7 +45,8 @@
     "text": [
      "Total Tokens: 42\n",
      "Prompt Tokens: 4\n",
-     "Completion Tokens: 38\n"
+     "Completion Tokens: 38\n",
+     "Successful Requests: 1\n"
     ]
    }
   ],
@@ -54,7 +55,8 @@
    " result = llm(\"Tell me a joke\")\n",
    " print(f\"Total Tokens: {cb.total_tokens}\")\n",
    " print(f\"Prompt Tokens: {cb.prompt_tokens}\")\n",
-   " print(f\"Completion Tokens: {cb.completion_tokens}\")"
+   " print(f\"Completion Tokens: {cb.completion_tokens}\")\n",
+   " print(f\"Successful Requests: {cb.successful_requests}\")"
    ]
   },
   {

@@ -11,6 +11,7 @@ class OpenAICallbackHandler(BaseCallbackHandler):
     total_tokens: int = 0
     prompt_tokens: int = 0
     completion_tokens: int = 0
+    successful_requests: int = 0

     @property
     def always_verbose(self) -> bool:
@@ -30,6 +31,7 @@ class OpenAICallbackHandler(BaseCallbackHandler):
     def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         """Collect token usage."""
         if response.llm_output is not None:
+            self.successful_requests += 1
             if "token_usage" in response.llm_output:
                 token_usage = response.llm_output["token_usage"]
                 if "total_tokens" in token_usage:
