diff --git a/libs/langchain/langchain/callbacks/manager.py b/libs/langchain/langchain/callbacks/manager.py
index 43e6317ac1..09e3250118 100644
--- a/libs/langchain/langchain/callbacks/manager.py
+++ b/libs/langchain/langchain/callbacks/manager.py
@@ -1891,13 +1891,13 @@ def _configure(
             )
         else:
             callback_manager = callback_manager_cls(
-                handlers=inheritable_callbacks.handlers,
-                inheritable_handlers=inheritable_callbacks.inheritable_handlers,
+                handlers=inheritable_callbacks.handlers.copy(),
+                inheritable_handlers=inheritable_callbacks.inheritable_handlers.copy(),
                 parent_run_id=inheritable_callbacks.parent_run_id,
-                tags=inheritable_callbacks.tags,
-                inheritable_tags=inheritable_callbacks.inheritable_tags,
-                metadata=inheritable_callbacks.metadata,
-                inheritable_metadata=inheritable_callbacks.inheritable_metadata,
+                tags=inheritable_callbacks.tags.copy(),
+                inheritable_tags=inheritable_callbacks.inheritable_tags.copy(),
+                metadata=inheritable_callbacks.metadata.copy(),
+                inheritable_metadata=inheritable_callbacks.inheritable_metadata.copy(),
             )
     local_handlers_ = (
         local_callbacks
@@ -1991,7 +1991,7 @@ def _configure(
                     e,
                 )
         if open_ai is not None and not any(
-            isinstance(handler, OpenAICallbackHandler)
+            handler is open_ai  # identity check: is this exact handler registered?
             for handler in callback_manager.handlers
         ):
             callback_manager.add_handler(open_ai, True)
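
Note (editor, not part of the patch): a minimal sketch of the aliasing bug the
.copy() calls above guard against. Manager is a hypothetical stand-in for
CallbackManager, reduced to the one shared handler list that matters here.

    # Hypothetical stand-in for CallbackManager, reduced to its handler list.
    class Manager:
        def __init__(self, handlers: list) -> None:
            self.handlers = handlers

        def add_handler(self, handler: object) -> None:
            self.handlers.append(handler)

    parent = Manager(handlers=["tracer"])

    # Old behavior: the child manager reuses the parent's list object, so
    # adding a handler to the child also mutates the inherited manager.
    child = Manager(handlers=parent.handlers)
    child.add_handler("openai_cb")
    assert parent.handlers == ["tracer", "openai_cb"]  # parent was mutated

    # Patched behavior: .copy() gives the child its own list.
    parent = Manager(handlers=["tracer"])
    child = Manager(handlers=parent.handlers.copy())
    child.add_handler("openai_cb")
    assert parent.handlers == ["tracer"]  # parent untouched

This is exactly what the new test pins down with
assert group_manager.handlers == [tracer] after configure() runs.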
diff --git a/libs/langchain/tests/unit_tests/callbacks/test_callback_manager.py b/libs/langchain/tests/unit_tests/callbacks/test_callback_manager.py
index c25b9b3c77..fdbea1e912 100644
--- a/libs/langchain/tests/unit_tests/callbacks/test_callback_manager.py
+++ b/libs/langchain/tests/unit_tests/callbacks/test_callback_manager.py
@@ -4,8 +4,15 @@ from typing import List, Tuple
 import pytest
 
 from langchain.callbacks.base import BaseCallbackHandler
-from langchain.callbacks.manager import AsyncCallbackManager, CallbackManager
+from langchain.callbacks.manager import (
+    AsyncCallbackManager,
+    CallbackManager,
+    get_openai_callback,
+    trace_as_chain_group,
+)
 from langchain.callbacks.stdout import StdOutCallbackHandler
+from langchain.callbacks.tracers.langchain import LangChainTracer
+from langchain.llms.openai import BaseOpenAI
 from langchain.schema import AgentAction, AgentFinish, LLMResult
 from tests.unit_tests.callbacks.fake_callback_handler import (
     BaseFakeCallbackHandler,
@@ -291,3 +298,75 @@ def test_callback_manager_configure(monkeypatch: pytest.MonkeyPatch) -> None:
         handler4,
     ]
     assert isinstance(async_configured_manager, AsyncCallbackManager)
+
+
+def test_callback_manager_configure_context_vars(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
+    monkeypatch.setenv("LANGCHAIN_TRACING_V2", "false")
+    monkeypatch.setenv("LANGCHAIN_TRACING", "false")
+
+    with trace_as_chain_group("test") as group_manager:
+        assert len(group_manager.handlers) == 1
+        tracer = group_manager.handlers[0]
+        assert isinstance(tracer, LangChainTracer)
+
+        with get_openai_callback() as cb:
+            # This is a new empty callback handler
+            assert cb.successful_requests == 0
+            assert cb.total_tokens == 0
+
+            # configure adds this openai cb but doesn't modify the group manager
+            mngr = CallbackManager.configure(group_manager)
+            assert mngr.handlers == [tracer, cb]
+            assert group_manager.handlers == [tracer]
+
+            response = LLMResult(
+                generations=[],
+                llm_output={
+                    "token_usage": {
+                        "prompt_tokens": 2,
+                        "completion_tokens": 1,
+                        "total_tokens": 3,
+                    },
+                    "model_name": BaseOpenAI.__fields__["model_name"].default,
+                },
+            )
+            mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response)
+
+            # The callback handler has been updated
+            assert cb.successful_requests == 1
+            assert cb.total_tokens == 3
+            assert cb.prompt_tokens == 2
+            assert cb.completion_tokens == 1
+            assert cb.total_cost > 0
+
+        with get_openai_callback() as cb:
+            # This is a new empty callback handler
+            assert cb.successful_requests == 0
+            assert cb.total_tokens == 0
+
+            # configure adds this openai cb but doesn't modify the group manager
+            mngr = CallbackManager.configure(group_manager)
+            assert mngr.handlers == [tracer, cb]
+            assert group_manager.handlers == [tracer]
+
+            response = LLMResult(
+                generations=[],
+                llm_output={
+                    "token_usage": {
+                        "prompt_tokens": 2,
+                        "completion_tokens": 1,
+                        "total_tokens": 3,
+                    },
+                    "model_name": BaseOpenAI.__fields__["model_name"].default,
+                },
+            )
+            mngr.on_llm_start({}, ["prompt"])[0].on_llm_end(response)
+
+            # The callback handler has been updated
+            assert cb.successful_requests == 1
+            assert cb.total_tokens == 3
+            assert cb.prompt_tokens == 2
+            assert cb.completion_tokens == 1
+            assert cb.total_cost > 0
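
Note (editor, not part of the patch): why the second hunk compares by identity.
With contextvar-scoped handlers, each get_openai_callback() context presumably
creates its own OpenAICallbackHandler, so the question _configure must answer
is "is this exact object already registered?", not "is any handler of this
type present?". A hedged sketch of the difference, with Handler as a
hypothetical stand-in:

    # Hypothetical stand-in for OpenAICallbackHandler; outer_cb models a
    # handler inherited from an enclosing context, inner_cb the current one.
    class Handler:
        pass

    outer_cb = Handler()
    inner_cb = Handler()
    handlers = [outer_cb]  # inherited handler list already holds outer_cb

    # isinstance-based check: inner_cb is never added, so its token counts
    # would silently stay at zero.
    if not any(isinstance(h, Handler) for h in handlers):
        handlers.append(inner_cb)
    assert inner_cb not in handlers

    # Identity-based check (as in the patch): inner_cb is registered even
    # though another handler of the same type is already present.
    if not any(h is inner_cb for h in handlers):
        handlers.append(inner_cb)
    assert inner_cb in handlers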