core[patch]: Fix logic in BaseChatModel that processes the llm string used as a key for caching chat model responses (#23842)
This PR should fix the following issue: https://github.com/langchain-ai/langchain/issues/23824, which was introduced as part of this PR: https://github.com/langchain-ai/langchain/pull/23416.

I am unable to reproduce the issue locally, though it is clear that we are somehow receiving a `serialized` object that is not a dictionary. The test below passes for me prior to this PR as well:

```python
def test_cache_with_sqllite() -> None:
    from langchain_community.cache import SQLiteCache
    from langchain_core.globals import set_llm_cache

    cache = SQLiteCache(database_path=".langchain.db")
    set_llm_cache(cache)
    chat_model = FakeListChatModel(responses=["hello", "goodbye"], cache=True)
    assert chat_model.invoke("How are you?").content == "hello"
    assert chat_model.invoke("How are you?").content == "hello"
```
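To illustrate the failure mode the description points at, here is a minimal hypothetical reproduction (standalone code, not langchain's): indexing into a non-dict `serialized` value the way the old cleanup logic did raises a `TypeError`:

```python
from typing import Any

# Hypothetical simplification of the pre-fix cleanup: it indexed into
# `serialized` unconditionally, assuming it was always a dict.
def _cleanup_old(serialized: Any, depth: int = 0) -> None:
    if depth > 100:
        return
    if serialized["type"] == "not_implemented" and "repr" in serialized:
        del serialized["repr"]

# A plain string nested somewhere in the serialized representation
# triggers the kind of crash the linked issue reports:
try:
    _cleanup_old("not-a-dict")
except TypeError as exc:
    print(f"TypeError: {exc}")  # string indices must be integers
```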
Parent: 9bb623381b
Commit: 9ccc4b1616
```diff
@@ -1258,7 +1258,12 @@ def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
     """Remove non-serializable objects from a serialized object."""
     if depth > 100:  # Don't cooperate for pathological cases
         return
-    if serialized["type"] == "not_implemented" and "repr" in serialized:
-        del serialized["repr"]
+
+    if not isinstance(serialized, dict):
+        return
+
+    if "type" in serialized and serialized["type"] == "not_implemented":
+        if "repr" in serialized:
+            del serialized["repr"]
+
     if "graph" in serialized:
```