core[patch]: Fix logic in BaseChatModel that processes the llm string used as a key for caching chat model responses (#23842)

This PR should fix the following issue:
https://github.com/langchain-ai/langchain/issues/23824
The regression was introduced as part of this PR:
https://github.com/langchain-ai/langchain/pull/23416

I am unable to reproduce the issue locally, though it's clear that somehow we're getting a `serialized` object that is not a dictionary.
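A minimal sketch of the suspected failure mode (hypothetical input, since I can't reproduce it): any non-dict value reaching the helper blows up on the subscript before the old guard can run.

```python
serialized = "not a dict"  # whatever non-dict value is leaking through

try:
    # The pre-fix lookup, with no isinstance guard:
    if serialized["type"] == "not_implemented" and "repr" in serialized:
        del serialized["repr"]
except TypeError as err:
    print(err)  # "string indices must be integers"
```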

The test below passes for me both before and after this PR:

```python
def test_cache_with_sqlite() -> None:
    from langchain_community.cache import SQLiteCache
    from langchain_core.globals import set_llm_cache
    from langchain_core.language_models import FakeListChatModel

    cache = SQLiteCache(database_path=".langchain.db")
    set_llm_cache(cache)
    chat_model = FakeListChatModel(responses=["hello", "goodbye"], cache=True)
    assert chat_model.invoke("How are you?").content == "hello"
    # A cache hit must return the stored response instead of
    # advancing to the next canned response ("goodbye").
    assert chat_model.invoke("How are you?").content == "hello"
```
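For context on why the cleanup helper exists at all: when a chat model carries a non-serializable attribute (an HTTP client, for example), langchain-core serializes it as a `not_implemented` stub whose `repr` typically embeds a memory address, which would make the cache key unstable across runs. A minimal sketch; the `OpaqueClient` class is illustrative:

```python
from langchain_core.load import dumpd


class OpaqueClient:
    """Stand-in for any attribute langchain-core cannot serialize."""


print(dumpd(OpaqueClient()))
# Roughly: {'lc': 1, 'type': 'not_implemented',
#           'id': ['__main__', 'OpaqueClient'],
#           'repr': '<__main__.OpaqueClient object at 0x7f...>'}
```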
Commit 9ccc4b1616 (parent 9bb623381b)
Author: Eugene Yurtsev
Date: 2024-07-03 16:23:55 -04:00
Committed by: GitHub

```diff
@@ -1258,8 +1258,13 @@ def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
     """Remove non-serializable objects from a serialized object."""
     if depth > 100:  # Don't cooperate for pathological cases
         return
-    if serialized["type"] == "not_implemented" and "repr" in serialized:
-        del serialized["repr"]
-    if "graph" in serialized:
-        del serialized["graph"]
+
+    if not isinstance(serialized, dict):
+        return
+
+    if "type" in serialized and serialized["type"] == "not_implemented":
+        if "repr" in serialized:
+            del serialized["repr"]
+        if "graph" in serialized:
+            del serialized["graph"]
 
```
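To see the guard in action, here is a self-contained sketch of the patched helper. The recursive walk over `kwargs` comes from the surrounding function body, which this hunk doesn't show, so treat the recursion details as an approximation:

```python
from typing import Any


def _cleanup_llm_representation(serialized: Any, depth: int) -> None:
    """Remove non-serializable objects from a serialized object."""
    if depth > 100:  # Don't cooperate for pathological cases
        return

    if not isinstance(serialized, dict):
        return

    if "type" in serialized and serialized["type"] == "not_implemented":
        if "repr" in serialized:
            del serialized["repr"]
        if "graph" in serialized:
            del serialized["graph"]

    # Recurse into constructor kwargs (approximated from the full function).
    if "kwargs" in serialized:
        for value in serialized["kwargs"].values():
            _cleanup_llm_representation(value, depth + 1)


# Before the fix, the bare string value below would hit serialized["type"]
# during the recursive walk and raise a TypeError.
payload = {
    "lc": 1,
    "type": "constructor",
    "kwargs": {
        "model": "fake-model",  # plain string: previously crashed the walk
        "client": {"lc": 1, "type": "not_implemented", "repr": "<object at 0x7f3a>"},
    },
}
_cleanup_llm_representation(payload, depth=0)
assert "repr" not in payload["kwargs"]["client"]
```

With the `isinstance` guard, non-dict leaves are simply skipped, the unstable `repr` is still stripped from `not_implemented` stubs, and the llm string used as the cache key stays deterministic.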