mirror of https://github.com/hwchase17/langchain
mongodb[minor]: Add MongoDB LLM Cache (#17470)
# Description

- **Description:** Adding MongoDB LLM Caching Layer abstraction
- **Issue:** N/A
- **Dependencies:** None
- **Twitter handle:** @mongodb

Checklist:

- [x] PR title: Please title your PR "package: description", where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes.
  - Example: "community: add foobar LLM"
- [x] PR Message (above)
- [x] Pass lint and test: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified to check that you're passing lint and tests. See the contribution guidelines for more information on how to write/run tests, lint, etc.: https://python.langchain.com/docs/contributing/
- [ ] Add tests and docs: If you're adding a new integration, please include
  1. a test for the integration, preferably unit tests that do not rely on network access, and
  2. an example notebook showing its use, which lives in the `docs/docs/integrations` directory.

Additional guidelines:

- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of @baskaryan, @efriis, @eyurtsev, @hwchase17.

---------

Co-authored-by: Jib <jib@byblack.us>
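A minimal usage sketch of the new plain cache, wired in through `set_llm_cache` (the connection URI and the database/collection names below are placeholders, not values from this PR):

```python
from langchain_core.globals import set_llm_cache

from langchain_mongodb.cache import MongoDBCache

# Placeholder URI; point this at a real MongoDB Atlas cluster.
set_llm_cache(
    MongoDBCache(
        connection_string="mongodb+srv://<user>:<password>@<cluster>/",
        database_name="langchain",
        collection_name="llm_cache",
    )
)
# From here on, LLM calls made through LangChain are looked up in, and written
# back to, that collection, keyed on the (prompt, llm_string) pair.
```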
parent 449d8781ec · commit f92f7d2e03
@@ -0,0 +1,312 @@ langchain_mongodb/cache.py (new file)
"""
LangChain MongoDB Caches

Functions "_loads_generations" and "_dumps_generations"
are duplicated in this utility from modules:
    - "libs/community/langchain_community/cache.py"
"""

import json
import logging
import time
from importlib.metadata import version
from typing import Any, Callable, Dict, Optional, Union

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
from langchain_core.embeddings import Embeddings
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.outputs import Generation
from pymongo import MongoClient
from pymongo.collection import Collection
from pymongo.database import Database
from pymongo.driver_info import DriverInfo

from langchain_mongodb.vectorstores import MongoDBAtlasVectorSearch

logger = logging.getLogger(__file__)


def _generate_mongo_client(connection_string: str) -> MongoClient:
    return MongoClient(
        connection_string,
        driver=DriverInfo(name="Langchain", version=version("langchain-mongodb")),
    )


def _dumps_generations(generations: RETURN_VAL_TYPE) -> str:
    """
    Serialization for a generic RETURN_VAL_TYPE, i.e. a sequence of `Generation`.

    Args:
        generations (RETURN_VAL_TYPE): A list of language model generations.

    Returns:
        str: A single string representing a list of generations.

    This function (and its counterpart `_loads_generations`) relies on
    the dumps/loads pair with Reviver, so it can handle all subclasses
    of Generation.

    Each item in the list is `dumps`ed to a string,
    then the whole list of strings is JSON-dumped.
    """
    return json.dumps([dumps(_item) for _item in generations])


def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]:
    """
    Deserialization of a string into a generic RETURN_VAL_TYPE
    (i.e. a sequence of `Generation`).

    See `_dumps_generations`, the inverse of this function.

    Args:
        generations_str (str): A string representing a list of generations.

    Compatible with the legacy cache-blob format.
    Does not raise exceptions for malformed entries; it just logs a warning
    and returns None: the caller should be prepared for such a cache miss.

    Returns:
        RETURN_VAL_TYPE: A list of generations.
    """
    try:
        generations = [loads(_item_str) for _item_str in json.loads(generations_str)]
        return generations
    except (json.JSONDecodeError, TypeError):
        # deferring the (soft) handling to after the legacy-format attempt
        pass

    try:
        gen_dicts = json.loads(generations_str)
        # not relying on `_load_generations_from_json` (which could disappear):
        generations = [Generation(**generation_dict) for generation_dict in gen_dicts]
        logger.warning(
            f"Legacy 'Generation' cached blob encountered: '{generations_str}'"
        )
        return generations
    except (json.JSONDecodeError, TypeError):
        logger.warning(
            f"Malformed/unparsable cached blob encountered: '{generations_str}'"
        )
        return None


def _wait_until(
    predicate: Callable, success_description: Any, timeout: float = 10.0
) -> Any:
    """Wait up to 10 seconds (by default) for predicate to be true.

    E.g.:

        wait_until(lambda: client.primary == ('a', 1),
                   'connect to the primary')

    If the lambda-expression isn't true after 10 seconds, we raise
    TimeoutError("Didn't ever connect to the primary").

    Returns the predicate's first true value.
    """
    start = time.time()
    interval = min(float(timeout) / 100, 0.1)
    while True:
        retval = predicate()
        if retval:
            return retval

        if time.time() - start > timeout:
            raise TimeoutError("Didn't ever %s" % success_description)

        time.sleep(interval)


class MongoDBCache(BaseCache):
    """MongoDB Atlas cache.

    A cache that uses MongoDB Atlas as a backend.
    """

    PROMPT = "prompt"
    LLM = "llm"
    RETURN_VAL = "return_val"
    _local_cache: Dict[str, Any]

    def __init__(
        self,
        connection_string: str,
        collection_name: str = "default",
        database_name: str = "default",
        **kwargs: Dict[str, Any],
    ) -> None:
        """
        Initialize Atlas Cache. Creates the collection on instantiation.

        Args:
            connection_string (str): Connection URI to the MongoDB Atlas cluster.
            collection_name (str): Name of the collection the cache lives in.
                Defaults to "default".
            database_name (str): Name of the database the cache lives in.
                Defaults to "default".
        """
        self.client = _generate_mongo_client(connection_string)
        self.__database_name = database_name
        self.__collection_name = collection_name
        self._local_cache = {}

        if self.__collection_name not in self.database.list_collection_names():
            self.database.create_collection(self.__collection_name)
            # Create an index on key and llm_string
            self.collection.create_index([self.PROMPT, self.LLM])

    @property
    def database(self) -> Database:
        """Returns the database used to store cache values."""
        return self.client[self.__database_name]

    @property
    def collection(self) -> Collection:
        """Returns the collection used to store cache values."""
        return self.database[self.__collection_name]

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        cache_key = self._generate_local_key(prompt, llm_string)
        if cache_key in self._local_cache:
            return self._local_cache[cache_key]

        return_doc = (
            self.collection.find_one(self._generate_keys(prompt, llm_string)) or {}
        )
        return_val = return_doc.get(self.RETURN_VAL)
        return _loads_generations(return_val) if return_val else None  # type: ignore

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        cache_key = self._generate_local_key(prompt, llm_string)
        self._local_cache[cache_key] = return_val

        self.collection.update_one(
            {**self._generate_keys(prompt, llm_string)},
            {"$set": {self.RETURN_VAL: _dumps_generations(return_val)}},
            upsert=True,
        )

    def _generate_keys(self, prompt: str, llm_string: str) -> Dict[str, str]:
        """Create keyed fields for caching layer"""
        return {self.PROMPT: prompt, self.LLM: llm_string}

    def _generate_local_key(self, prompt: str, llm_string: str) -> str:
        """Create keyed fields for local caching layer"""
        return f"{prompt}#{llm_string}"

    def clear(self, **kwargs: Any) -> None:
        """Clear cache that can take additional keyword arguments.
        Any additional arguments will propagate as filtration criteria for
        what gets deleted.

        E.g.
            # Delete only entries that have llm_string as "fake-model"
            self.clear(llm_string="fake-model")
        """
        self.collection.delete_many({**kwargs})


class MongoDBAtlasSemanticCache(BaseCache, MongoDBAtlasVectorSearch):
    """MongoDB Atlas Semantic cache.

    A cache backed by a MongoDB Atlas server with vector-store support.
    """

    LLM = "llm_string"
    RETURN_VAL = "return_val"
    _local_cache: Dict[str, Any]

    def __init__(
        self,
        connection_string: str,
        embedding: Embeddings,
        collection_name: str = "default",
        database_name: str = "default",
        wait_until_ready: bool = False,
        **kwargs: Dict[str, Any],
    ):
        """
        Initialize Atlas VectorSearch Cache.
        Assumes the collection exists before instantiation.

        Args:
            connection_string (str): MongoDB URI to connect to the MongoDB Atlas cluster.
            embedding (Embeddings): Text embedding model to use.
            collection_name (str): MongoDB Collection to add the texts to.
                Defaults to "default".
            database_name (str): MongoDB Database where to store texts.
                Defaults to "default".
            wait_until_ready (bool): Block until MongoDB Atlas finishes indexing
                the stored text. Hard timeout of 10 seconds. Defaults to False.
        """
        client = _generate_mongo_client(connection_string)
        self.collection = client[database_name][collection_name]
        self._wait_until_ready = wait_until_ready
        super().__init__(self.collection, embedding, **kwargs)  # type: ignore
        self._local_cache = dict()

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        cache_key = self._generate_local_key(prompt, llm_string)
        if cache_key in self._local_cache:
            return self._local_cache[cache_key]
        search_response = self.similarity_search_with_score(
            prompt, 1, pre_filter={self.LLM: {"$eq": llm_string}}
        )
        if search_response:
            return_val = search_response[0][0].metadata.get(self.RETURN_VAL)
            response = _loads_generations(return_val) or return_val  # type: ignore
            self._local_cache[cache_key] = response
            return response
        return None

    def update(
        self,
        prompt: str,
        llm_string: str,
        return_val: RETURN_VAL_TYPE,
        wait_until_ready: Optional[bool] = None,
    ) -> None:
        """Update cache based on prompt and llm_string."""
        cache_key = self._generate_local_key(prompt, llm_string)
        self._local_cache[cache_key] = return_val

        self.add_texts(
            [prompt],
            [
                {
                    self.LLM: llm_string,
                    self.RETURN_VAL: _dumps_generations(return_val),
                }
            ],
        )
        wait = self._wait_until_ready if wait_until_ready is None else wait_until_ready

        def is_indexed() -> bool:
            return self.lookup(prompt, llm_string) == return_val

        if wait:
            _wait_until(is_indexed, return_val)

    def _generate_local_key(self, prompt: str, llm_string: str) -> str:
        """Create keyed fields for local caching layer"""
        return f"{prompt}#{llm_string}"

    def clear(self, **kwargs: Any) -> None:
        """Clear cache that can take additional keyword arguments.
        Any additional arguments will propagate as filtration criteria for
        what gets deleted. Locally cached content is cleared regardless of
        the filter.

        E.g.
            # Delete only entries that have llm_string as "fake-model"
            self.clear(llm_string="fake-model")
        """
        self.collection.delete_many({**kwargs})
        self._local_cache.clear()
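A corresponding sketch for the semantic cache. The embedding model and the names are placeholders, and the target collection is assumed to already exist with an Atlas Vector Search index compatible with `MongoDBAtlasVectorSearch` (extra keyword arguments such as `index_name` are forwarded to it via `**kwargs`):

```python
from langchain_core.globals import set_llm_cache
from langchain_openai import OpenAIEmbeddings  # any Embeddings implementation works

from langchain_mongodb.cache import MongoDBAtlasSemanticCache

set_llm_cache(
    MongoDBAtlasSemanticCache(
        connection_string="mongodb+srv://<user>:<password>@<cluster>/",
        embedding=OpenAIEmbeddings(),
        database_name="langchain",
        collection_name="llm_semantic_cache",
        # Make update() block until Atlas has indexed the new entry
        # (hard 10-second timeout, see _wait_until above).
        wait_until_ready=True,
    )
)
```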
@@ -0,0 +1,153 @@ cache integration tests (new file; requires a live Atlas cluster via MONGODB_ATLAS_URI)
import os
import uuid
from typing import Any, List, Union

import pytest
from langchain_core.caches import BaseCache
from langchain_core.globals import get_llm_cache, set_llm_cache
from langchain_core.load.dump import dumps
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, Generation, LLMResult

from langchain_mongodb.cache import MongoDBAtlasSemanticCache, MongoDBCache
from tests.utils import ConsistentFakeEmbeddings, FakeChatModel, FakeLLM

CONN_STRING = os.environ.get("MONGODB_ATLAS_URI")
COLLECTION = "default"
DATABASE = "default"


def random_string() -> str:
    return str(uuid.uuid4())


def llm_cache(cls: Any) -> BaseCache:
    set_llm_cache(
        cls(
            embedding=ConsistentFakeEmbeddings(dimensionality=1536),
            connection_string=CONN_STRING,
            collection_name=COLLECTION,
            database_name=DATABASE,
            wait_until_ready=True,
        )
    )
    assert get_llm_cache()
    return get_llm_cache()


def _execute_test(
    prompt: Union[str, List[BaseMessage]],
    llm: Union[str, FakeLLM, FakeChatModel],
    response: List[Generation],
) -> None:
    # Fabricate an LLM String

    if not isinstance(llm, str):
        params = llm.dict()
        params["stop"] = None
        llm_string = str(sorted([(k, v) for k, v in params.items()]))
    else:
        llm_string = llm

    # If the prompt is a str then we should pass just the string
    dumped_prompt: str = prompt if isinstance(prompt, str) else dumps(prompt)

    # Update the cache
    get_llm_cache().update(dumped_prompt, llm_string, response)

    # Retrieve the cached result through 'generate' call
    output: Union[List[Generation], LLMResult, None]
    expected_output: Union[List[Generation], LLMResult]

    if isinstance(llm, str):
        output = get_llm_cache().lookup(dumped_prompt, llm)  # type: ignore
        expected_output = response
    else:
        output = llm.generate([prompt])  # type: ignore
        expected_output = LLMResult(
            generations=[response],
            llm_output={},
        )

    assert output == expected_output  # type: ignore


@pytest.mark.parametrize(
    "prompt, llm, response",
    [
        ("foo", "bar", [Generation(text="fizz")]),
        ("foo", FakeLLM(), [Generation(text="fizz")]),
        (
            [HumanMessage(content="foo")],
            FakeChatModel(),
            [ChatGeneration(message=AIMessage(content="foo"))],
        ),
    ],
    ids=[
        "plain_cache",
        "cache_with_llm",
        "cache_with_chat",
    ],
)
@pytest.mark.parametrize("cacher", [MongoDBCache, MongoDBAtlasSemanticCache])
def test_mongodb_cache(
    cacher: Union[MongoDBCache, MongoDBAtlasSemanticCache],
    prompt: Union[str, List[BaseMessage]],
    llm: Union[str, FakeLLM, FakeChatModel],
    response: List[Generation],
) -> None:
    llm_cache(cacher)
    try:
        _execute_test(prompt, llm, response)
    finally:
        get_llm_cache().clear()


@pytest.mark.parametrize(
    "prompts, generations",
    [
        # Single prompt, single generation
        ([random_string()], [[random_string()]]),
        # Single prompt, multiple generations
        ([random_string()], [[random_string(), random_string()]]),
        # Single prompt, multiple generations
        ([random_string()], [[random_string(), random_string(), random_string()]]),
        # Multiple prompts, multiple generations
        (
            [random_string(), random_string()],
            [[random_string()], [random_string(), random_string()]],
        ),
    ],
    ids=[
        "single_prompt_single_generation",
        "single_prompt_two_generations",
        "single_prompt_three_generations",
        "multiple_prompts_multiple_generations",
    ],
)
def test_mongodb_atlas_cache_matrix(
    prompts: List[str],
    generations: List[List[str]],
) -> None:
    llm_cache(MongoDBAtlasSemanticCache)
    llm = FakeLLM()

    # Fabricate an LLM String
    params = llm.dict()
    params["stop"] = None
    llm_string = str(sorted([(k, v) for k, v in params.items()]))

    llm_generations = [
        [
            Generation(text=generation, generation_info=params)
            for generation in prompt_i_generations
        ]
        for prompt_i_generations in generations
    ]

    for prompt_i, llm_generations_i in zip(prompts, llm_generations):
        _execute_test(prompt_i, llm_string, llm_generations_i)
    assert llm.generate(prompts) == LLMResult(
        generations=llm_generations, llm_output={}
    )
    get_llm_cache().clear()
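The helpers `ConsistentFakeEmbeddings`, `FakeLLM`, `FakeChatModel` (and `MockCollection` in the unit tests below) come from `tests.utils`, which is not shown in this diff. A hypothetical, deterministic stand-in for the embeddings helper, consistent with how it is called above, could look like this:

```python
from typing import List

from langchain_core.embeddings import Embeddings


class DeterministicFakeEmbeddings(Embeddings):
    """Hypothetical stand-in; NOT the actual tests.utils.ConsistentFakeEmbeddings."""

    def __init__(self, dimensionality: int = 1536) -> None:
        self.dimensionality = dimensionality
        self.known_texts: List[str] = []

    def _embed(self, text: str) -> List[float]:
        # The same text always maps to the same vector, so a semantic-cache
        # lookup of a previously cached prompt is an exact nearest-neighbor hit.
        if text not in self.known_texts:
            self.known_texts.append(text)
        vector = [1.0] * (self.dimensionality - 1)
        vector.append(float(self.known_texts.index(text)))
        return vector

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._embed(t) for t in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._embed(text)
```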
@@ -0,0 +1,211 @@ cache unit tests against a mocked collection (new file)
import uuid
from typing import Any, Dict, List, Union

import pytest
from langchain_core.caches import BaseCache
from langchain_core.embeddings import Embeddings
from langchain_core.globals import get_llm_cache, set_llm_cache
from langchain_core.load.dump import dumps
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, Generation, LLMResult
from pymongo.collection import Collection

from langchain_mongodb.cache import MongoDBAtlasSemanticCache, MongoDBCache
from langchain_mongodb.vectorstores import MongoDBAtlasVectorSearch
from tests.utils import ConsistentFakeEmbeddings, FakeChatModel, FakeLLM, MockCollection

CONN_STRING = "MockString"
COLLECTION = "default"
DATABASE = "default"


class PatchedMongoDBCache(MongoDBCache):
    def __init__(
        self,
        connection_string: str,
        collection_name: str = "default",
        database_name: str = "default",
        **kwargs: Dict[str, Any],
    ) -> None:
        self.__database_name = database_name
        self.__collection_name = collection_name
        self.client = {self.__database_name: {self.__collection_name: MockCollection()}}  # type: ignore
        self._local_cache = {}

    @property
    def database(self) -> Any:  # type: ignore
        """Returns the database used to store cache values."""
        return self.client[self.__database_name]

    @property
    def collection(self) -> Collection:
        """Returns the collection used to store cache values."""
        return self.database[self.__collection_name]


class PatchedMongoDBAtlasSemanticCache(MongoDBAtlasSemanticCache):
    def __init__(
        self,
        connection_string: str,
        embedding: Embeddings,
        collection_name: str = "default",
        database_name: str = "default",
        wait_until_ready: bool = False,
        **kwargs: Dict[str, Any],
    ):
        self.collection = MockCollection()
        self._wait_until_ready = False
        self._local_cache = dict()
        MongoDBAtlasVectorSearch.__init__(
            self,
            self.collection,
            embedding=embedding,
            **kwargs,  # type: ignore
        )


def random_string() -> str:
    return str(uuid.uuid4())


def llm_cache(cls: Any) -> BaseCache:
    set_llm_cache(
        cls(
            embedding=ConsistentFakeEmbeddings(dimensionality=1536),
            connection_string=CONN_STRING,
            collection_name=COLLECTION,
            database_name=DATABASE,
            wait_until_ready=True,
        )
    )
    assert get_llm_cache()
    return get_llm_cache()


def _execute_test(
    prompt: Union[str, List[BaseMessage]],
    llm: Union[str, FakeLLM, FakeChatModel],
    response: List[Generation],
) -> None:
    # Fabricate an LLM String

    if not isinstance(llm, str):
        params = llm.dict()
        params["stop"] = None
        llm_string = str(sorted([(k, v) for k, v in params.items()]))
    else:
        llm_string = llm

    # If the prompt is a str then we should pass just the string
    dumped_prompt: str = prompt if isinstance(prompt, str) else dumps(prompt)

    # Update the cache
    llm_cache = get_llm_cache()
    llm_cache.update(dumped_prompt, llm_string, response)

    # Retrieve the cached result through 'generate' call
    output: Union[List[Generation], LLMResult, None]
    expected_output: Union[List[Generation], LLMResult]
    if isinstance(llm_cache, PatchedMongoDBAtlasSemanticCache):
        llm_cache._collection._aggregate_result = [  # type: ignore
            data
            for data in llm_cache._collection._data  # type: ignore
            if data.get("text") == dumped_prompt
            and data.get("llm_string") == llm_string
        ]  # type: ignore
    if isinstance(llm, str):
        output = get_llm_cache().lookup(dumped_prompt, llm)  # type: ignore
        expected_output = response
    else:
        output = llm.generate([prompt])  # type: ignore
        expected_output = LLMResult(
            generations=[response],
            llm_output={},
        )

    assert output == expected_output  # type: ignore


@pytest.mark.parametrize(
    "prompt, llm, response",
    [
        ("foo", "bar", [Generation(text="fizz")]),
        ("foo", FakeLLM(), [Generation(text="fizz")]),
        (
            [HumanMessage(content="foo")],
            FakeChatModel(),
            [ChatGeneration(message=AIMessage(content="foo"))],
        ),
    ],
    ids=[
        "plain_cache",
        "cache_with_llm",
        "cache_with_chat",
    ],
)
@pytest.mark.parametrize(
    "cacher", [PatchedMongoDBCache, PatchedMongoDBAtlasSemanticCache]
)
def test_mongodb_cache(
    cacher: Union[MongoDBCache, MongoDBAtlasSemanticCache],
    prompt: Union[str, List[BaseMessage]],
    llm: Union[str, FakeLLM, FakeChatModel],
    response: List[Generation],
) -> None:
    llm_cache(cacher)
    try:
        _execute_test(prompt, llm, response)
    finally:
        get_llm_cache().clear()


@pytest.mark.parametrize(
    "prompts, generations",
    [
        # Single prompt, single generation
        ([random_string()], [[random_string()]]),
        # Single prompt, multiple generations
        ([random_string()], [[random_string(), random_string()]]),
        # Single prompt, multiple generations
        ([random_string()], [[random_string(), random_string(), random_string()]]),
        # Multiple prompts, multiple generations
        (
            [random_string(), random_string()],
            [[random_string()], [random_string(), random_string()]],
        ),
    ],
    ids=[
        "single_prompt_single_generation",
        "single_prompt_two_generations",
        "single_prompt_three_generations",
        "multiple_prompts_multiple_generations",
    ],
)
def test_mongodb_atlas_cache_matrix(
    prompts: List[str],
    generations: List[List[str]],
) -> None:
    llm_cache(PatchedMongoDBAtlasSemanticCache)
    llm = FakeLLM()

    # Fabricate an LLM String
    params = llm.dict()
    params["stop"] = None
    llm_string = str(sorted([(k, v) for k, v in params.items()]))

    llm_generations = [
        [
            Generation(text=generation, generation_info=params)
            for generation in prompt_i_generations
        ]
        for prompt_i_generations in generations
    ]

    for prompt_i, llm_generations_i in zip(prompts, llm_generations):
        _execute_test(prompt_i, llm_string, llm_generations_i)

    get_llm_cache()._collection._simluate_cache_aggregation_query = True  # type: ignore
    assert llm.generate(prompts) == LLMResult(
        generations=llm_generations, llm_output={}
    )
    get_llm_cache().clear()
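Both caches persist `return_val` through `_dumps_generations` and read it back with `_loads_generations`; a small sketch of the round-trip behavior these tests rely on (the private helpers are imported here purely for illustration):

```python
from langchain_core.outputs import Generation

from langchain_mongodb.cache import _dumps_generations, _loads_generations

gens = [Generation(text="fizz"), Generation(text="buzz")]
blob = _dumps_generations(gens)  # a JSON list of `dumps`-serialized Generations
assert _loads_generations(blob) == gens

# Malformed blobs do not raise; they log a warning and yield a cache miss (None).
assert _loads_generations("not valid json at all") is None
```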