docs: Update doc-string in base callback managers (#15885)

Update doc-strings with a comment about on_llm_start vs.
on_chat_model_start.
Eugene Yurtsev 2024-01-30 12:51:45 -05:00 committed by GitHub
parent 881dc28d2c
commit ef2bd745cb

@@ -166,7 +166,12 @@ class CallbackManagerMixin:
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run when LLM starts running."""
+        """Run when LLM starts running.
+
+        **ATTENTION**: This method is called for non-chat models (regular LLMs). If
+            you're implementing a handler for a chat model,
+            you should use on_chat_model_start instead.
+        """
 
     def on_chat_model_start(
         self,
@@ -179,7 +184,13 @@ class CallbackManagerMixin:
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run when a chat model starts running."""
+        """Run when a chat model starts running.
+
+        **ATTENTION**: This method is called for chat models. If you're implementing
+            a handler for a non-chat model, you should use on_llm_start instead.
+        """
+        # NotImplementedError is thrown intentionally
+        # Callback handler will fall back to on_llm_start if this exception is thrown
         raise NotImplementedError(
             f"{self.__class__.__name__} does not implement `on_chat_model_start`"
         )
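
For context (not part of this diff): a minimal sketch of a synchronous handler that implements both start hooks, assuming langchain_core's BaseCallbackHandler. The class name and print statements are illustrative only, and the signatures are abbreviated (run_id, tags, metadata, etc. are collected in **kwargs).

from typing import Any, Dict, List

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage


class StartLoggingHandler(BaseCallbackHandler):
    """Illustrative handler that distinguishes the two start hooks."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        # Invoked for non-chat models (regular LLMs): receives plain string prompts.
        print(f"LLM start: {len(prompts)} prompt(s)")

    def on_chat_model_start(
        self,
        serialized: Dict[str, Any],
        messages: List[List[BaseMessage]],
        **kwargs: Any,
    ) -> Any:
        # Invoked for chat models: receives lists of BaseMessage objects.
        print(f"Chat model start: {len(messages)} message list(s)")
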
@@ -308,7 +319,12 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        """Run when LLM starts running."""
+        """Run when LLM starts running.
+
+        **ATTENTION**: This method is called for non-chat models (regular LLMs). If
+            you're implementing a handler for a chat model,
+            you should use on_chat_model_start instead.
+        """
 
     async def on_chat_model_start(
         self,
@@ -321,7 +337,13 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         metadata: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> Any:
-        """Run when a chat model starts running."""
+        """Run when a chat model starts running.
+
+        **ATTENTION**: This method is called for chat models. If you're implementing
+            a handler for a non-chat model, you should use on_llm_start instead.
+        """
+        # NotImplementedError is thrown intentionally
+        # Callback handler will fall back to on_llm_start if this exception is thrown
         raise NotImplementedError(
             f"{self.__class__.__name__} does not implement `on_chat_model_start`"
         )
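
The fallback noted in the comment above means a handler that only overrides on_llm_start still observes chat-model runs: the inherited on_chat_model_start raises NotImplementedError and the callback manager is expected to fall back to on_llm_start, with the messages arriving as rendered prompt strings. A minimal async sketch (again not part of this diff; the class name is hypothetical), assuming langchain_core's AsyncCallbackHandler:

from typing import Any, Dict, List

from langchain_core.callbacks import AsyncCallbackHandler


class LlmStartOnlyHandler(AsyncCallbackHandler):
    """Illustrative handler that relies on the on_llm_start fallback."""

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # Reached for regular LLMs directly, and for chat models via the
        # NotImplementedError fallback (messages arrive here as strings).
        print(f"model start: {len(prompts)} prompt(s)")
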
@@ -359,8 +381,9 @@ class AsyncCallbackHandler(BaseCallbackHandler):
         **kwargs: Any,
     ) -> None:
         """Run when LLM errors.
+
         Args:
-            error (BaseException): The error that occurred.
+            error: The error that occurred.
             kwargs (Any): Additional keyword arguments.
                 - response (LLMResult): The response which was generated before
                     the error occurred.
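
Relatedly, the kwargs documented above let an error handler inspect any partial output. A hedged sketch (not part of this diff; the class name is made up), assuming LLMResult is importable from langchain_core.outputs:

from typing import Any, Optional

from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.outputs import LLMResult


class ErrorInspectingHandler(AsyncCallbackHandler):
    """Illustrative handler that reads the optional partial response on error."""

    async def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
        # Per the docstring, a partial LLMResult may be supplied under "response".
        response: Optional[LLMResult] = kwargs.get("response")
        if response is not None:
            print(f"LLM errored after partial output: {error!r}")
        else:
            print(f"LLM errored with no partial output: {error!r}")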