Mirror of https://github.com/hwchase17/langchain, synced 2024-11-08 07:10:35 +00:00
Harrison/fix and test caching (#538)
This commit is contained in: parent 73f7ebd9d1, commit 1631981f84
@@ -96,7 +96,7 @@ class BaseLLM(BaseModel, ABC):
         new_results = self._generate(missing_prompts, stop=stop)
         self.callback_manager.on_llm_end(new_results)
         for i, result in enumerate(new_results.generations):
-            existing_prompts[i] = result
+            existing_prompts[missing_prompt_idxs[i]] = result
             prompt = prompts[i]
             langchain.llm_cache.update(prompt, llm_string, result)
         generations = [existing_prompts[i] for i in range(len(prompts))]
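The one-line change above is the actual caching fix: each fresh result is written back to the prompt's position in the original prompts list (missing_prompt_idxs[i]) rather than its position in the missing-prompts list (i), which had been overwriting cache hits and leaving cache misses unfilled. Below is a minimal, self-contained sketch of that remapping with illustrative values only; in the real BaseLLM.generate, existing_prompts and missing_prompt_idxs are built from cache lookups:

# Illustrative sketch of the index remapping; not the real BaseLLM internals.
prompts = ["foo", "bar", "foo"]
# Cache hits, keyed by their position in `prompts` (positions 0 and 2 were cached).
existing_prompts = {0: ["cached foo"], 2: ["cached foo"]}
# Original positions of the prompts that missed the cache.
missing_prompt_idxs = [1]
# One fresh generation per missing prompt, in the same order.
new_generations = [["fresh bar"]]

# Before the fix, `existing_prompts[i] = result` overwrote position 0 (a cache
# hit) and never filled position 1. The fix writes each result back to the
# prompt's original position:
for i, result in enumerate(new_generations):
    existing_prompts[missing_prompt_idxs[i]] = result

generations = [existing_prompts[i] for i in range(len(prompts))]
assert generations == [["cached foo"], ["fresh bar"], ["cached foo"]]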
tests/unit_tests/llms/test_base.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+"""Test base LLM functionality."""
+import langchain
+from langchain.cache import InMemoryCache
+from langchain.schema import Generation, LLMResult
+from tests.unit_tests.llms.fake_llm import FakeLLM
+
+
+def test_caching() -> None:
+    """Test caching behavior."""
+    langchain.llm_cache = InMemoryCache()
+    llm = FakeLLM()
+    params = llm._llm_dict()
+    params["stop"] = None
+    llm_string = str(sorted([(k, v) for k, v in params.items()]))
+    langchain.llm_cache.update("foo", llm_string, [Generation(text="fizz")])
+    output = llm.generate(["foo", "bar", "foo"])
+    langchain.llm_cache = None
+    expected_generations = [
+        [Generation(text="fizz")],
+        [Generation(text="foo")],
+        [Generation(text="fizz")],
+    ]
+    expected_output = LLMResult(
+        expected_generations,
+        llm_output=None,
+    )
+    assert output == expected_output
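The test seeds the cache with a canned result for "foo" before calling generate, so both occurrences of "foo" are served from the cache (Generation(text="fizz")) while "bar" misses and falls through to FakeLLM, whose output the test expects as Generation(text="foo"). The sketch below is a rough stand-in for the (prompt, llm_string)-keyed contract the test relies on; it is an illustrative assumption, not the real langchain.cache.InMemoryCache:

# Hypothetical minimal cache keyed by (prompt, serialized LLM params).
from typing import Dict, List, Optional, Tuple


class TinyLLMCache:
    """Maps (prompt, llm_string) -> cached generations."""

    def __init__(self) -> None:
        self._store: Dict[Tuple[str, str], List[str]] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[List[str]]:
        # Both the prompt and the LLM parameter string must match for a hit.
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, generations: List[str]) -> None:
        self._store[(prompt, llm_string)] = generations


cache = TinyLLMCache()
cache.update("foo", "params", ["fizz"])
assert cache.lookup("foo", "params") == ["fizz"]  # seeded hit, like "foo" above
assert cache.lookup("bar", "params") is None      # miss: the LLM itself must answer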