forked from Archives/langchain
8441cbfc03
I've found it useful to track the number of successful requests to OpenAI. This gives me a better sense of how efficient my prompts are and makes it easier to compare map_reduce/refine on a cheaper model vs. stuffing on a more expensive, higher-capacity model. (A usage sketch follows the code below.)
111 lines
3.2 KiB
Python
"""Callback Handler that prints to std out."""
|
|
from typing import Any, Dict, List, Optional, Union
|
|
|
|
from langchain.callbacks.base import BaseCallbackHandler
|
|
from langchain.schema import AgentAction, AgentFinish, LLMResult
|
|
|
|
|
|
class OpenAICallbackHandler(BaseCallbackHandler):
    """Callback Handler that tracks OpenAI info."""

    # Running totals, accumulated across every successful LLM call
    # this handler observes.
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    @property
    def always_verbose(self) -> bool:
        """Whether to call verbose callbacks even if verbose is False."""
        return True

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Collect token usage."""
        # OpenAI LLMs report usage via `llm_output`, e.g.:
        #   {"token_usage": {"prompt_tokens": 12, "completion_tokens": 34,
        #                    "total_tokens": 46}, ...}
        # Each key is checked individually since not every call reports
        # every field.
        if response.llm_output is not None:
            self.successful_requests += 1
            if "token_usage" in response.llm_output:
                token_usage = response.llm_output["token_usage"]
                if "total_tokens" in token_usage:
                    self.total_tokens += token_usage["total_tokens"]
                if "prompt_tokens" in token_usage:
                    self.prompt_tokens += token_usage["prompt_tokens"]
                if "completion_tokens" in token_usage:
                    self.completion_tokens += token_usage["completion_tokens"]

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Do nothing."""
        pass

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_tool_start(
        self,
        serialized: Dict[str, Any],
        input_str: str,
        **kwargs: Any,
    ) -> None:
        """Do nothing."""
        pass

    def on_tool_end(
        self,
        output: str,
        color: Optional[str] = None,
        observation_prefix: Optional[str] = None,
        llm_prefix: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """Do nothing."""
        pass

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> None:
        """Do nothing."""
        pass

    def on_text(
        self,
        text: str,
        color: Optional[str] = None,
        end: str = "",
        **kwargs: Any,
    ) -> None:
        """Do nothing."""
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""
        pass

    def on_agent_finish(
        self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Run on agent end."""
        pass
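
As a quick check of the counters described above, here is a minimal sketch that drives the handler by hand with a synthetic LLMResult. The only assumptions are the LLMResult and Generation schema already imported by this file; the token numbers are invented for illustration.

from langchain.schema import Generation, LLMResult

handler = OpenAICallbackHandler()

# Simulate what an OpenAI LLM reports after one successful call.
# (Token counts are made up for the example.)
fake_response = LLMResult(
    generations=[[Generation(text="Hello!")]],
    llm_output={
        "token_usage": {
            "prompt_tokens": 12,
            "completion_tokens": 34,
            "total_tokens": 46,
        }
    },
)
handler.on_llm_end(fake_response)

print(handler.successful_requests)  # 1
print(handler.total_tokens)         # 46

In real use you wouldn't call on_llm_end yourself: you'd register the handler with the callback machinery (in this vintage of langchain, something like CallbackManager([handler]) from langchain.callbacks.base, though the exact wiring varies by version) and read the counters after the chain finishes. Because always_verbose is True, the counts accumulate even when the LLM isn't running in verbose mode.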