langchain/libs/community/tests/unit_tests/llms/test_callbacks.py
ccurme 481d3855dc
patch: remove usage of llm, chat model __call__ (#20788)
- `llm(prompt)` -> `llm.invoke(prompt)`
- `llm(prompt=prompt)` -> `llm.invoke(prompt)` (same with `messages=`)
- `llm(prompt, callbacks=callbacks)` -> `llm.invoke(prompt,
config={"callbacks": callbacks})`
- `llm(prompt, **kwargs)` -> `llm.invoke(prompt, **kwargs)`
2024-04-24 19:39:23 -04:00

52 lines
1.7 KiB
Python

"""Test LLM callbacks."""
from langchain_core.messages import HumanMessage
from langchain_community.chat_models.fake import FakeListChatModel
from langchain_community.llms.fake import FakeListLLM
from tests.unit_tests.callbacks.fake_callback_handler import (
FakeCallbackHandler,
FakeCallbackHandlerWithChatStart,
)
def test_llm_with_callbacks() -> None:
    """Verify that a plain LLM fires start/end callbacks and no error callbacks."""
    callback_handler = FakeCallbackHandler()
    fake_llm = FakeListLLM(responses=["foo"], callbacks=[callback_handler], verbose=True)
    result = fake_llm.invoke("foo")
    assert result == "foo"
    # Exactly one run: one start, one end, zero errors.
    assert (callback_handler.starts, callback_handler.ends) == (1, 1)
    assert callback_handler.errors == 0
def test_chat_model_with_v1_callbacks() -> None:
    """Verify a v1 handler (no on_chat_model_start) falls back to on_llm_start."""
    callback_handler = FakeCallbackHandler()
    chat_model = FakeListChatModel(
        responses=["fake response"], callbacks=[callback_handler], verbose=True
    )
    result = chat_model.invoke([HumanMessage(content="foo")])
    assert result.content == "fake response"
    # One run observed via the generic counters...
    assert (callback_handler.starts, callback_handler.ends) == (1, 1)
    assert callback_handler.errors == 0
    # ...and routed through the legacy llm_start/llm_end hooks.
    assert callback_handler.llm_starts == 1
    assert callback_handler.llm_ends == 1
def test_chat_model_with_v2_callbacks() -> None:
    """Verify a v2 handler receives on_chat_model_start instead of on_llm_start."""
    callback_handler = FakeCallbackHandlerWithChatStart()
    chat_model = FakeListChatModel(
        responses=["fake response"], callbacks=[callback_handler], verbose=True
    )
    result = chat_model.invoke([HumanMessage(content="foo")])
    assert result.content == "fake response"
    # One run observed via the generic counters...
    assert (callback_handler.starts, callback_handler.ends) == (1, 1)
    assert callback_handler.errors == 0
    # ...started via the chat-model hook, not the legacy llm_start hook,
    # while the run still ends through llm_end.
    assert callback_handler.llm_starts == 0
    assert callback_handler.llm_ends == 1
    assert callback_handler.chat_model_starts == 1