mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
040597e832
This PR improves on the `CassandraCache` and `CassandraSemanticCache` classes, mainly in the constructor signature, and also introduces several minor improvements around these classes. ### Init signature A (sigh) breaking change is tentatively introduced to the constructor. To me, the advantages outweigh the possible discomfort: the new syntax places the DB-connection objects `session` and `keyspace` later in the param list, so that they can be given a default value. This is what enables the pattern of _not_ specifying them, provided one has previously initialized the Cassandra connection through the versatile utility method `cassio.init(...)`. In this way, a much less unwieldy instantiation can be done, such as `CassandraCache()` and `CassandraSemanticCache(embedding=xyz)`, everything else falling back to defaults. A downside is that, compared to the earlier signature, this might turn out to be breaking for those doing positional instantiation. As a way to mitigate this problem, this PR typechecks its first argument trying to detect the legacy usage. (And to make this point less tricky in the future, most arguments are left to be keyword-only). If this is considered too harsh, I'd like guidance on how to further smoothen this transition. **Our plan is to make the pattern of optional session/keyspace a standard across all Cassandra classes**, so that a repeatable strategy would be ideal. A possibility would be to keep positional arguments for legacy reasons but issue a deprecation warning if any of them is actually used, to later remove them with 0.2 - please advise on this point. ### Other changes - class docstrings: enriched, completely moved to class level, added note on `cassio.init(...)` pattern, added tiny sample usage code. - semantic cache: revised terminology to never mention "distance" (it is in fact a similarity!). Kept the legacy constructor param with a deprecation warning if used. 
- `llm_caching` notebook: uniform flow with the Cassandra and Astra DB separate cases; better and Cassandra-first description; all imports made explicit and from community where appropriate. - cache integration tests moved to community (incl. the imported tools), env var bugfix for `CASSANDRA_CONTACT_POINTS`. --------- Co-authored-by: Erick Friis <erick@langchain.dev>
178 lines
5.9 KiB
Python
"""Test Cassandra caches. Requires a running vector-capable Cassandra cluster."""
|
|
import asyncio
|
|
import os
|
|
import time
|
|
from typing import Any, Iterator, Tuple
|
|
|
|
import pytest
|
|
from langchain.globals import get_llm_cache, set_llm_cache
|
|
from langchain_core.outputs import Generation, LLMResult
|
|
|
|
from langchain_community.cache import CassandraCache, CassandraSemanticCache
|
|
from langchain_community.utilities.cassandra import SetupMode
|
|
from tests.integration_tests.cache.fake_embeddings import FakeEmbeddings
|
|
from tests.unit_tests.llms.fake_llm import FakeLLM
|
|
|
|
|
|
@pytest.fixture(scope="module")
def cassandra_connection() -> Iterator[Tuple[Any, str]]:
    """Provide a ``(session, keyspace)`` pair on a live Cassandra cluster.

    Contact points are read from the ``CASSANDRA_CONTACT_POINTS`` env var
    (comma-separated host list) when set; otherwise the driver default
    (localhost) is used. The test keyspace is created if missing, and the
    cluster connection is shut down at module teardown.
    """
    from cassandra.cluster import Cluster

    keyspace = "langchain_cache_test_keyspace"
    # get db connection
    if "CASSANDRA_CONTACT_POINTS" in os.environ:
        contact_points = os.environ["CASSANDRA_CONTACT_POINTS"].split(",")
        cluster = Cluster(contact_points)
    else:
        cluster = Cluster()
    session = cluster.connect()
    # ensure keyspace exists (SimpleStrategy/RF=1 is sufficient for tests)
    session.execute(
        (
            f"CREATE KEYSPACE IF NOT EXISTS {keyspace} "
            f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
        )
    )
    try:
        yield (session, keyspace)
    finally:
        # Fix: the original fixture never released the driver connection
        # pool; shut the cluster down once all module tests have run.
        cluster.shutdown()
|
|
|
|
|
|
def test_cassandra_cache(cassandra_connection: Tuple[Any, str]) -> None:
    """Exact-match cache (sync): an updated entry is served back by generate."""
    session, keyspace = cassandra_connection
    cache = CassandraCache(session=session, keyspace=keyspace)
    set_llm_cache(cache)
    llm = FakeLLM()
    params = llm.dict()
    params["stop"] = None
    # Idiom fix: sorted() takes the items view directly; the wrapping
    # list comprehension was redundant (flake8-comprehensions).
    llm_string = str(sorted(params.items()))
    get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
    output = llm.generate(["foo"])
    expected_output = LLMResult(
        generations=[[Generation(text="fizz")]],
        llm_output={},
    )
    assert output == expected_output
    cache.clear()
|
|
|
|
|
|
async def test_cassandra_cache_async(cassandra_connection: Tuple[Any, str]) -> None:
    """Exact-match cache (async): aupdate'd entry is served back by agenerate."""
    session, keyspace = cassandra_connection
    cache = CassandraCache(
        session=session, keyspace=keyspace, setup_mode=SetupMode.ASYNC
    )
    set_llm_cache(cache)
    llm = FakeLLM()
    params = llm.dict()
    params["stop"] = None
    # Idiom fix: sorted() takes the items view directly; the wrapping
    # list comprehension was redundant (flake8-comprehensions).
    llm_string = str(sorted(params.items()))
    await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
    output = await llm.agenerate(["foo"])
    expected_output = LLMResult(
        generations=[[Generation(text="fizz")]],
        llm_output={},
    )
    assert output == expected_output
    await cache.aclear()
|
|
|
|
|
|
def test_cassandra_cache_ttl(cassandra_connection: Tuple[Any, str]) -> None:
    """TTL cache (sync): the cached entry is gone after ttl_seconds elapse."""
    session, keyspace = cassandra_connection
    cache = CassandraCache(session=session, keyspace=keyspace, ttl_seconds=2)
    set_llm_cache(cache)
    llm = FakeLLM()
    params = llm.dict()
    params["stop"] = None
    # Idiom fix: sorted() takes the items view directly; the wrapping
    # list comprehension was redundant (flake8-comprehensions).
    llm_string = str(sorted(params.items()))
    get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
    expected_output = LLMResult(
        generations=[[Generation(text="fizz")]],
        llm_output={},
    )
    output = llm.generate(["foo"])
    assert output == expected_output
    # Sleep past the 2-second TTL (with slack for server-side expiry).
    time.sleep(2.5)
    # entry has expired away.
    output = llm.generate(["foo"])
    assert output != expected_output
    cache.clear()
|
|
|
|
|
|
async def test_cassandra_cache_ttl_async(cassandra_connection: Tuple[Any, str]) -> None:
    """TTL cache (async): the cached entry is gone after ttl_seconds elapse."""
    session, keyspace = cassandra_connection
    cache = CassandraCache(
        session=session, keyspace=keyspace, ttl_seconds=2, setup_mode=SetupMode.ASYNC
    )
    set_llm_cache(cache)
    llm = FakeLLM()
    params = llm.dict()
    params["stop"] = None
    # Idiom fix: sorted() takes the items view directly; the wrapping
    # list comprehension was redundant (flake8-comprehensions).
    llm_string = str(sorted(params.items()))
    await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
    expected_output = LLMResult(
        generations=[[Generation(text="fizz")]],
        llm_output={},
    )
    output = await llm.agenerate(["foo"])
    assert output == expected_output
    # Non-blocking sleep past the 2-second TTL (with slack for expiry).
    await asyncio.sleep(2.5)
    # entry has expired away.
    output = await llm.agenerate(["foo"])
    assert output != expected_output
    await cache.aclear()
|
|
|
|
|
|
def test_cassandra_semantic_cache(cassandra_connection: Tuple[Any, str]) -> None:
    """Semantic cache (sync): a similar prompt hits; clearing empties the cache.

    FakeEmbeddings maps every text to the same vector, so 'bar' retrieves
    the entry stored under 'foo'.
    """
    session, keyspace = cassandra_connection
    sem_cache = CassandraSemanticCache(
        session=session,
        keyspace=keyspace,
        embedding=FakeEmbeddings(),
    )
    set_llm_cache(sem_cache)
    llm = FakeLLM()
    params = llm.dict()
    params["stop"] = None
    # Idiom fix: sorted() takes the items view directly; the wrapping
    # list comprehension was redundant (flake8-comprehensions).
    llm_string = str(sorted(params.items()))
    get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
    output = llm.generate(["bar"])  # same embedding as 'foo'
    expected_output = LLMResult(
        generations=[[Generation(text="fizz")]],
        llm_output={},
    )
    assert output == expected_output
    # clear the cache
    sem_cache.clear()
    output = llm.generate(["bar"])  # 'fizz' is erased away now
    assert output != expected_output
    sem_cache.clear()
|
|
|
|
|
|
async def test_cassandra_semantic_cache_async(
    cassandra_connection: Tuple[Any, str],
) -> None:
    """Semantic cache (async): a similar prompt hits; aclear empties the cache.

    FakeEmbeddings maps every text to the same vector, so 'bar' retrieves
    the entry stored under 'foo'.
    """
    session, keyspace = cassandra_connection
    sem_cache = CassandraSemanticCache(
        session=session,
        keyspace=keyspace,
        embedding=FakeEmbeddings(),
        setup_mode=SetupMode.ASYNC,
    )
    set_llm_cache(sem_cache)
    llm = FakeLLM()
    params = llm.dict()
    params["stop"] = None
    # Idiom fix: sorted() takes the items view directly; the wrapping
    # list comprehension was redundant (flake8-comprehensions).
    llm_string = str(sorted(params.items()))
    await get_llm_cache().aupdate("foo", llm_string, [Generation(text="fizz")])
    output = await llm.agenerate(["bar"])  # same embedding as 'foo'
    expected_output = LLMResult(
        generations=[[Generation(text="fizz")]],
        llm_output={},
    )
    assert output == expected_output
    # clear the cache
    await sem_cache.aclear()
    output = await llm.agenerate(["bar"])  # 'fizz' is erased away now
    assert output != expected_output
    await sem_cache.aclear()
|