mirror of
https://github.com/hwchase17/langchain
synced 2024-10-31 15:20:26 +00:00
209 lines
6.8 KiB
Plaintext
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "bab2d297",
   "metadata": {},
   "source": [
    "# Multiple callback handlers\n",
    "\n",
    "In the previous examples, we passed in callback handlers upon creation of an object by using `callbacks=`. In this case, the callbacks will be scoped to that particular object. \n",
    "\n",
    "However, in many cases, it is advantageous to pass in handlers instead when running the object. When we pass through `CallbackHandlers` using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an `Agent`, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the `Tools`, `LLMChain`, and `LLM`.\n",
    "\n",
    "This prevents us from having to manually attach the handlers to each individual nested object."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f94fc171",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "on_chain_start AgentExecutor\n",
      "on_chain_start LLMChain\n",
      "on_llm_start OpenAI\n",
      "on_llm_start (I'm the second handler!!) OpenAI\n",
      "on_new_token I\n",
      "on_new_token need\n",
      "on_new_token to\n",
      "on_new_token use\n",
      "on_new_token a\n",
      "on_new_token calculator\n",
      "on_new_token to\n",
      "on_new_token solve\n",
      "on_new_token this\n",
      "on_new_token .\n",
      "on_new_token \n",
      "Action\n",
      "on_new_token :\n",
      "on_new_token Calculator\n",
      "on_new_token \n",
      "Action\n",
      "on_new_token Input\n",
      "on_new_token :\n",
      "on_new_token 2\n",
      "on_new_token ^\n",
      "on_new_token 0\n",
      "on_new_token .\n",
      "on_new_token 235\n",
      "on_new_token \n",
      "on_agent_action AgentAction(tool='Calculator', tool_input='2^0.235', log=' I need to use a calculator to solve this.\\nAction: Calculator\\nAction Input: 2^0.235')\n",
      "on_tool_start Calculator\n",
      "on_chain_start LLMMathChain\n",
      "on_chain_start LLMChain\n",
      "on_llm_start OpenAI\n",
      "on_llm_start (I'm the second handler!!) OpenAI\n",
      "on_new_token \n",
      "on_new_token ```text\n",
      "on_new_token \n",
      "\n",
      "on_new_token 2\n",
      "on_new_token **\n",
      "on_new_token 0\n",
      "on_new_token .\n",
      "on_new_token 235\n",
      "on_new_token \n",
      "\n",
      "on_new_token ```\n",
      "\n",
      "on_new_token ...\n",
      "on_new_token num\n",
      "on_new_token expr\n",
      "on_new_token .\n",
      "on_new_token evaluate\n",
      "on_new_token (\"\n",
      "on_new_token 2\n",
      "on_new_token **\n",
      "on_new_token 0\n",
      "on_new_token .\n",
      "on_new_token 235\n",
      "on_new_token \")\n",
      "on_new_token ...\n",
      "on_new_token \n",
      "\n",
      "on_new_token \n",
      "on_chain_start LLMChain\n",
      "on_llm_start OpenAI\n",
      "on_llm_start (I'm the second handler!!) OpenAI\n",
      "on_new_token I\n",
      "on_new_token now\n",
      "on_new_token know\n",
      "on_new_token the\n",
      "on_new_token final\n",
      "on_new_token answer\n",
      "on_new_token .\n",
      "on_new_token \n",
      "Final\n",
      "on_new_token Answer\n",
      "on_new_token :\n",
      "on_new_token 1\n",
      "on_new_token .\n",
      "on_new_token 17\n",
      "on_new_token 690\n",
      "on_new_token 67\n",
      "on_new_token 372\n",
      "on_new_token 187\n",
      "on_new_token 674\n",
      "on_new_token \n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'1.1769067372187674'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from typing import Dict, Union, Any, List\n",
    "\n",
    "from langchain.callbacks.base import BaseCallbackHandler\n",
    "from langchain.schema import AgentAction\n",
    "from langchain.agents import AgentType, initialize_agent, load_tools\n",
    "from langchain.callbacks import tracing_enabled\n",
    "from langchain.llms import OpenAI\n",
    "\n",
    "\n",
    "# First, define custom callback handler implementations\n",
    "class MyCustomHandlerOne(BaseCallbackHandler):\n",
    "    def on_llm_start(\n",
    "        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        print(f\"on_llm_start {serialized['name']}\")\n",
    "\n",
    "    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:\n",
    "        print(f\"on_new_token {token}\")\n",
    "\n",
    "    def on_llm_error(\n",
    "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        \"\"\"Run when LLM errors.\"\"\"\n",
    "\n",
    "    def on_chain_start(\n",
    "        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        print(f\"on_chain_start {serialized['name']}\")\n",
    "\n",
    "    def on_tool_start(\n",
    "        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any\n",
    "    ) -> Any:\n",
    "        print(f\"on_tool_start {serialized['name']}\")\n",
    "\n",
    "    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:\n",
    "        print(f\"on_agent_action {action}\")\n",
    "\n",
    "\n",
    "class MyCustomHandlerTwo(BaseCallbackHandler):\n",
    "    def on_llm_start(\n",
    "        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
    "    ) -> Any:\n",
    "        print(f\"on_llm_start (I'm the second handler!!) {serialized['name']}\")\n",
    "\n",
    "\n",
    "# Instantiate the handlers\n",
    "handler1 = MyCustomHandlerOne()\n",
    "handler2 = MyCustomHandlerTwo()\n",
    "\n",
    "# Setup the agent. Only the `llm` will issue callbacks for handler2\n",
    "llm = OpenAI(temperature=0, streaming=True, callbacks=[handler2])\n",
    "tools = load_tools([\"llm-math\"], llm=llm)\n",
    "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n",
    "\n",
    "# Callbacks for handler1 will be issued by every object involved in the\n",
    "# Agent execution (llm, llmchain, tool, agent executor)\n",
    "agent.run(\"What is 2 raised to the 0.235 power?\", callbacks=[handler1])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "venv",
   "language": "python",
   "name": "venv"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}