Commit acd86d33bc (https://github.com/hwchase17/langchain)

Provide shared memory capability for the Agent. Inspired by #1293.

## Problem

If both the Agent and its Tools (i.e., an LLMChain) use the same memory, both of them save the context, which can be annoying in some cases.

## Solution

Create a memory wrapper that ignores save and clear, thereby preventing updates from either the Agent or the Tools.
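The idea can be sketched roughly as follows. This is only an illustration built on the `BaseMemory` interface that the test below imports (`memory_variables`, `load_memory_variables`, `save_context`, `clear`); the class name `ReadOnlyMemorySketch` is made up here, and the actual class added to the library is `ReadOnlySharedMemory` in `langchain.memory`:

```python
from typing import Any, Dict, List

from langchain.schema import BaseMemory


class ReadOnlyMemorySketch(BaseMemory):
    """Read-only view over a shared memory: reads delegate, writes are ignored."""

    memory: BaseMemory

    @property
    def memory_variables(self) -> List[str]:
        # Expose the same variable names as the wrapped memory.
        return self.memory.memory_variables

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        # Reads pass straight through to the shared memory.
        return self.memory.load_memory_variables(inputs)

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        # Deliberately do nothing, so a chain holding this view cannot
        # write into the shared memory.
        pass

    def clear(self) -> None:
        # Deliberately do nothing.
        pass
```

The test file below, from this commit, exercises the actual `ReadOnlySharedMemory` class: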
38 lines | 1.1 KiB | Python
import pytest

from langchain.chains.conversation.memory import (
    ConversationBufferMemory,
    ConversationBufferWindowMemory,
    ConversationSummaryMemory,
)
from langchain.memory import ReadOnlySharedMemory, SimpleMemory
from langchain.schema import BaseMemory
from tests.unit_tests.llms.fake_llm import FakeLLM


def test_simple_memory() -> None:
    """Test SimpleMemory."""
    memory = SimpleMemory(memories={"baz": "foo"})

    output = memory.load_memory_variables({})

    assert output == {"baz": "foo"}
    assert ["baz"] == memory.memory_variables


@pytest.mark.parametrize(
    "memory",
    [
        ConversationBufferMemory(memory_key="baz"),
        ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
        ConversationBufferWindowMemory(memory_key="baz"),
    ],
)
def test_readonly_memory(memory: BaseMemory) -> None:
    read_only_memory = ReadOnlySharedMemory(memory=memory)
    memory.save_context({"input": "bar"}, {"output": "foo"})

    assert read_only_memory.load_memory_variables({}) == memory.load_memory_variables(
        {}
    )
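As a usage sketch, assuming `save_context` and `clear` on the wrapper are no-ops as described in the commit message, the Agent keeps the writable memory while a Tool's chain can be handed the read-only view:

```python
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.memory import ReadOnlySharedMemory

shared_memory = ConversationBufferMemory(memory_key="chat_history")
read_only = ReadOnlySharedMemory(memory=shared_memory)

# The Agent writes through the real memory...
shared_memory.save_context({"input": "hi"}, {"output": "hello"})

# ...while anything holding the read-only view can read the history
print(read_only.load_memory_variables({}))

# but cannot modify it: these calls are ignored by the wrapper.
read_only.save_context({"input": "x"}, {"output": "y"})
read_only.clear()
print(shared_memory.load_memory_variables({}))  # still contains the original exchange
```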