Mirror of https://github.com/hwchase17/langchain
Add prompt and completion token tracking (#2080)
Tracking the breakdown of token usage is useful when using GPT-4, where prompt and completion tokens are priced differently.
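As a rough illustration of why the split matters, the arithmetic below applies separate input and output rates to the two counters; the per-1K-token prices and the estimate_cost helper are placeholders for this sketch, not values or code from this commit.

# Hypothetical per-1K-token rates, chosen only to illustrate split pricing.
PROMPT_PRICE_PER_1K = 0.03
COMPLETION_PRICE_PER_1K = 0.06

def estimate_cost(prompt_tokens: int, completion_tokens: int) -> float:
    """Charge prompt and completion tokens at their separate rates."""
    return (
        prompt_tokens / 1000 * PROMPT_PRICE_PER_1K
        + completion_tokens / 1000 * COMPLETION_PRICE_PER_1K
    )

print(estimate_cost(1200, 300))  # ≈ 0.054, vs. pricing all 1500 tokens at one flat rate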
parent b5020c7d9c
commit e8d9cbca3f
@@ -9,6 +9,8 @@ class OpenAICallbackHandler(BaseCallbackHandler):
     """Callback Handler that tracks OpenAI info."""
 
     total_tokens: int = 0
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
 
     @property
     def always_verbose(self) -> bool:
@@ -32,6 +34,10 @@ class OpenAICallbackHandler(BaseCallbackHandler):
                 token_usage = response.llm_output["token_usage"]
                 if "total_tokens" in token_usage:
                     self.total_tokens += token_usage["total_tokens"]
+                if "prompt_tokens" in token_usage:
+                    self.prompt_tokens += token_usage["prompt_tokens"]
+                if "completion_tokens" in token_usage:
+                    self.completion_tokens += token_usage["completion_tokens"]
 
     def on_llm_error(
         self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
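After the change, the split is available wherever the handler is used. A minimal usage sketch, assuming the get_openai_callback context manager exported from langchain.callbacks (which wraps OpenAICallbackHandler), an OpenAI API key in the environment, and an illustrative prompt:

from langchain.callbacks import get_openai_callback
from langchain.llms import OpenAI

llm = OpenAI()  # any OpenAI wrapper that reports token_usage in llm_output

with get_openai_callback() as cb:
    llm("Briefly explain what a callback handler does.")

# total_tokens existed before; the prompt/completion split is new in this commit.
print(cb.total_tokens, cb.prompt_tokens, cb.completion_tokens)

Each counter only accumulates when its key is present in token_usage, so providers that report only total_tokens keep working unchanged.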