mirror of
https://github.com/hwchase17/langchain
synced 2024-11-20 03:25:56 +00:00
4ee47926ca
### Add on_chat_message_start to callback manager and base tracer Goal: trace messages directly to permit reloading as chat messages (store in an integration-agnostic way) Add an `on_chat_message_start` method. Fall back to `on_llm_start()` for handlers that don't have it implemented. Does so in a non-backwards-compat breaking way (for now)
47 lines
1.6 KiB
Python
"""Test LLM callbacks."""
|
|
from langchain.schema import HumanMessage
|
|
from tests.unit_tests.callbacks.fake_callback_handler import (
|
|
FakeCallbackHandler,
|
|
FakeCallbackHandlerWithChatStart,
|
|
)
|
|
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
|
|
from tests.unit_tests.llms.fake_llm import FakeLLM
|
|
|
|
|
|
def test_llm_with_callbacks() -> None:
    """Verify a plain LLM fires start/end callbacks and records no errors."""
    callback_handler = FakeCallbackHandler()
    fake_llm = FakeLLM(callbacks=[callback_handler], verbose=True)

    # FakeLLM echoes its prompt back unchanged.
    result = fake_llm("foo")
    assert result == "foo"

    # Exactly one run: one start, one end, zero errors.
    assert callback_handler.starts == 1
    assert callback_handler.ends == 1
    assert callback_handler.errors == 0
|
|
|
|
|
|
def test_chat_model_with_v1_callbacks() -> None:
    """Verify a chat model routes chat starts through on_llm_start for v1 handlers.

    ``FakeCallbackHandler`` does not implement ``on_chat_model_start``, so the
    callback manager must fall back to the legacy ``on_llm_start`` hook.
    """
    callback_handler = FakeCallbackHandler()
    chat_model = FakeChatModel(callbacks=[callback_handler], verbose=True)

    response = chat_model([HumanMessage(content="foo")])
    assert response.content == "fake response"

    # One full run with no errors...
    assert callback_handler.starts == 1
    assert callback_handler.ends == 1
    assert callback_handler.errors == 0
    # ...and the start was delivered via the legacy LLM hook.
    assert callback_handler.llm_starts == 1
    assert callback_handler.llm_ends == 1
|
|
|
|
|
|
def test_chat_model_with_v2_callbacks() -> None:
    """Verify a chat model prefers on_chat_model_start when the handler has it.

    ``FakeCallbackHandlerWithChatStart`` implements the newer chat-start hook,
    so ``on_llm_start`` must NOT be invoked for the same run.
    """
    callback_handler = FakeCallbackHandlerWithChatStart()
    chat_model = FakeChatModel(callbacks=[callback_handler], verbose=True)

    response = chat_model([HumanMessage(content="foo")])
    assert response.content == "fake response"

    # One full run with no errors...
    assert callback_handler.starts == 1
    assert callback_handler.ends == 1
    assert callback_handler.errors == 0
    # ...delivered through the chat-specific hook, not the legacy LLM one.
    assert callback_handler.llm_starts == 0
    assert callback_handler.llm_ends == 1
    assert callback_handler.chat_model_starts == 1
|