From 9ccc4b1616591bd4f92ead6f15689d5d36534c21 Mon Sep 17 00:00:00 2001
From: Eugene Yurtsev
Date: Wed, 3 Jul 2024 16:23:55 -0400
Subject: [PATCH] core[patch]: Fix logic in BaseChatModel that processes the
 llm string that is used as a key for caching chat model responses (#23842)

This PR should fix the following issue:
https://github.com/langchain-ai/langchain/issues/23824

Introduced as part of this PR:
https://github.com/langchain-ai/langchain/pull/23416

I am unable to reproduce the issue locally, though it is clear that somehow
we are receiving a `serialized` object that is not a dictionary. The test
below passes for me even prior to this PR:

```python
def test_cache_with_sqllite() -> None:
    from langchain_community.cache import SQLiteCache
    from langchain_core.globals import set_llm_cache
    from langchain_core.language_models import FakeListChatModel

    cache = SQLiteCache(database_path=".langchain.db")
    set_llm_cache(cache)
    chat_model = FakeListChatModel(responses=["hello", "goodbye"], cache=True)
    assert chat_model.invoke("How are you?").content == "hello"
    assert chat_model.invoke("How are you?").content == "hello"
```
---
 libs/core/langchain_core/language_models/chat_models.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/libs/core/langchain_core/language_models/chat_models.py b/libs/core/langchain_core/language_models/chat_models.py
index 13c68dded9..45492d61d9 100644
--- a/libs/core/langchain_core/language_models/chat_models.py
+++ b/libs/core/langchain_core/language_models/chat_models.py
@@ -1258,8 +1258,13 @@ def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
     """Remove non-serializable objects from a serialized object."""
     if depth > 100:  # Don't cooperate for pathological cases
         return
-    if serialized["type"] == "not_implemented" and "repr" in serialized:
-        del serialized["repr"]
+
+    if not isinstance(serialized, dict):
+        return
+
+    if "type" in serialized and serialized["type"] == "not_implemented":
+        if "repr" in serialized:
+            del serialized["repr"]
     if "graph" in serialized:
         del serialized["graph"]
 
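
For reviewers, a minimal standalone sketch of the patched helper and the
failure mode it guards against. The trailing recursion into nested `kwargs`
values is an assumption about the surrounding code that the hunk's context
does not show; everything else mirrors the diff. Before the `isinstance`
guard, a non-dict input such as a plain string raised a `TypeError` on the
`serialized["type"]` lookup; with the guard, the call becomes a no-op.

```python
from typing import Any


def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
    """Remove non-serializable objects from a serialized object."""
    if depth > 100:  # Don't cooperate for pathological cases
        return

    # The fix: bail out instead of assuming `serialized` is a dict.
    if not isinstance(serialized, dict):
        return

    if "type" in serialized and serialized["type"] == "not_implemented":
        if "repr" in serialized:
            del serialized["repr"]

    if "graph" in serialized:
        del serialized["graph"]

    # Assumed recursion into nested values; not shown in the hunk's context.
    for value in serialized.get("kwargs", {}).values():
        _cleanup_llm_representation(value, depth + 1)


# Previously this call raised TypeError ("string indices must be integers");
# with the guard it is a harmless no-op.
_cleanup_llm_representation("not a dict", depth=0)
```

Returning early rather than raising keeps cache-key generation best-effort:
an unexpected value is simply left as-is instead of breaking `invoke` for
every chat model that has caching enabled.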