mirror of
https://github.com/hwchase17/langchain
synced 2024-10-29 17:07:25 +00:00
068142fce2
# Add caching to BaseChatModel Fixes #1644 (Sidenote: While testing, I noticed we have multiple implementations of Fake LLMs, used for testing. I consolidated them.) ## Who can review? Community members can review the PR once tests pass. Tag maintainers/contributors who might be interested: Models - @hwchase17 - @agola11 Twitter: [@UmerHAdil](https://twitter.com/@UmerHAdil) | Discord: RicChilligerDude#7589 --------- Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
51 lines
1.7 KiB
Python
51 lines
1.7 KiB
Python
"""Test LLM callbacks."""
|
|
from langchain.chat_models.fake import FakeListChatModel
|
|
from langchain.llms.fake import FakeListLLM
|
|
from langchain.schema import HumanMessage
|
|
from tests.unit_tests.callbacks.fake_callback_handler import (
|
|
FakeCallbackHandler,
|
|
FakeCallbackHandlerWithChatStart,
|
|
)
|
|
|
|
|
|
def test_llm_with_callbacks() -> None:
    """Test that a plain LLM fires start/end callbacks and no error callback."""
    callback_handler = FakeCallbackHandler()
    fake_llm = FakeListLLM(
        responses=["foo"], callbacks=[callback_handler], verbose=True
    )
    result = fake_llm("foo")
    assert result == "foo"
    # Exactly one run: one start, one end, no errors recorded.
    assert callback_handler.starts == 1
    assert callback_handler.ends == 1
    assert callback_handler.errors == 0
|
|
|
|
|
|
def test_chat_model_with_v1_callbacks() -> None:
    """Test chat model callbacks fall back to on_llm_start."""
    callback_handler = FakeCallbackHandler()
    chat_model = FakeListChatModel(
        responses=["fake response"], callbacks=[callback_handler], verbose=True
    )
    result = chat_model([HumanMessage(content="foo")])
    assert result.content == "fake response"
    # The v1 handler has no on_chat_model_start, so the chat model routes
    # through the on_llm_start fallback — hence llm_starts is incremented.
    assert callback_handler.starts == 1
    assert callback_handler.ends == 1
    assert callback_handler.errors == 0
    assert callback_handler.llm_starts == 1
    assert callback_handler.llm_ends == 1
|
|
|
|
|
|
def test_chat_model_with_v2_callbacks() -> None:
    """Test chat model callbacks use on_chat_model_start when available.

    Unlike the v1 test above, the handler here implements
    ``on_chat_model_start``, so the ``on_llm_start`` fallback must NOT fire.
    """
    handler = FakeCallbackHandlerWithChatStart()
    llm = FakeListChatModel(
        callbacks=[handler], verbose=True, responses=["fake response"]
    )
    output = llm([HumanMessage(content="foo")])
    assert output.content == "fake response"
    assert handler.starts == 1
    assert handler.ends == 1
    assert handler.errors == 0
    # on_chat_model_start is implemented, so the v1 on_llm_start fallback
    # is never invoked; the run still ends via on_llm_end.
    assert handler.llm_starts == 0
    assert handler.llm_ends == 1
    assert handler.chat_model_starts == 1
|