From ef2bd745cbe328cc1e6e40cf7177bd7eff01e8be Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Tue, 30 Jan 2024 12:51:45 -0500 Subject: [PATCH] docs: Update doc-string in base callback managers (#15885) Update doc-strings with a comment about on_llm_start vs. on_chat_model_start. --- libs/core/langchain_core/callbacks/base.py | 33 ++++++++++++++++++---- 1 file changed, 28 insertions(+), 5 deletions(-) diff --git a/libs/core/langchain_core/callbacks/base.py b/libs/core/langchain_core/callbacks/base.py index ff5e7770c5..eb4d1de706 100644 --- a/libs/core/langchain_core/callbacks/base.py +++ b/libs/core/langchain_core/callbacks/base.py @@ -166,7 +166,12 @@ class CallbackManagerMixin: metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: - """Run when LLM starts running.""" + """Run when LLM starts running. + + **ATTENTION**: This method is called for non-chat models (regular LLMs). If + you're implementing a handler for a chat model, + you should use on_chat_model_start instead. + """ def on_chat_model_start( self, @@ -179,7 +184,13 @@ class CallbackManagerMixin: metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: - """Run when a chat model starts running.""" + """Run when a chat model starts running. + + **ATTENTION**: This method is called for chat models. If you're implementing + a handler for a non-chat model, you should use on_llm_start instead. + """ + # NotImplementedError is thrown intentionally + # Callback handler will fall back to on_llm_start if this exception is thrown raise NotImplementedError( f"{self.__class__.__name__} does not implement `on_chat_model_start`" ) @@ -308,7 +319,12 @@ class AsyncCallbackHandler(BaseCallbackHandler): metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> None: - """Run when LLM starts running.""" + """Run when LLM starts running. + + **ATTENTION**: This method is called for non-chat models (regular LLMs). If + you're implementing a handler for a chat model, + you should use on_chat_model_start instead. + """ async def on_chat_model_start( self, @@ -321,7 +337,13 @@ class AsyncCallbackHandler(BaseCallbackHandler): metadata: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> Any: - """Run when a chat model starts running.""" + """Run when a chat model starts running. + + **ATTENTION**: This method is called for chat models. If you're implementing + a handler for a non-chat model, you should use on_llm_start instead. + """ + # NotImplementedError is thrown intentionally + # Callback handler will fall back to on_llm_start if this exception is thrown raise NotImplementedError( f"{self.__class__.__name__} does not implement `on_chat_model_start`" ) @@ -359,8 +381,9 @@ class AsyncCallbackHandler(BaseCallbackHandler): **kwargs: Any, ) -> None: """Run when LLM errors. + Args: - error (BaseException): The error that occurred. + error: The error that occurred. kwargs (Any): Additional keyword arguments. - response (LLMResult): The response which was generated before the error occurred.