@@ -25,7 +25,6 @@ import hashlib
 import inspect
 import json
 import logging
-import warnings
 from datetime import timedelta
 from functools import lru_cache
 from typing import (
@@ -54,7 +53,7 @@ except ImportError:
 from langchain.llms.base import LLM, get_prompts
 from langchain.load.dump import dumps
 from langchain.load.load import loads
-from langchain.schema import ChatGeneration, Generation
+from langchain.schema import Generation
 from langchain.schema.cache import RETURN_VAL_TYPE, BaseCache
 from langchain.schema.embeddings import Embeddings
 from langchain.utils import get_from_env
@@ -306,6 +305,17 @@ class RedisCache(BaseCache):
         results = self.redis.hgetall(self._key(prompt, llm_string))
         if results:
             for _, text in results.items():
-                generations.append(Generation(text=text))
+                try:
+                    generations.append(loads(text))
+                except Exception:
+                    logger.warning(
+                        "Retrieving a cache value that could not be deserialized "
+                        "properly. This is likely due to the cache being in an "
+                        "older format. Please recreate your cache to avoid this "
+                        "error."
+                    )
+                    # In a previous life we stored the raw text directly
+                    # in the table, so assume it's in that format.
+                    generations.append(Generation(text=text))
         return generations if generations else None
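The lookup change above hinges on a serialize/deserialize round trip. Below is a minimal sketch of that behavior, assuming the langchain.load helpers imported earlier; the legacy string is a stand-in for entries written by the old raw-text format:

from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import Generation

# New-format cache entries round-trip cleanly through the serializer.
cached = dumps(Generation(text="hello"))
assert loads(cached) == Generation(text="hello")

# A legacy entry holds raw text rather than serialized JSON, so loads()
# raises, and lookup() falls back to wrapping it in a Generation.
legacy = "raw cached completion"
try:
    loads(legacy)
except Exception:
    restored = Generation(text=legacy)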
@@ -317,12 +327,6 @@
                     "RedisCache only supports caching of normal LLM generations, "
                     f"got {type(gen)}"
                 )
-            if isinstance(gen, ChatGeneration):
-                warnings.warn(
-                    "NOTE: Generation has not been cached. RedisCache does not "
-                    "support caching ChatModel outputs."
-                )
-                return
         # Write to a Redis HASH
         key = self._key(prompt, llm_string)
@@ -330,7 +334,7 @@
             pipe.hset(
                 key,
                 mapping={
-                    str(idx): generation.text
+                    str(idx): dumps(generation)
                     for idx, generation in enumerate(return_val)
                 },
             )
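Because each hash field now stores the fully serialized generation via dumps() rather than the bare .text, chat outputs can survive the cache. A sketch of that round trip, assuming ChatGeneration and AIMessage from langchain.schema:

from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import ChatGeneration
from langchain.schema.messages import AIMessage

gen = ChatGeneration(message=AIMessage(content="hi there"))
restored = loads(dumps(gen))  # the full message payload survives, not just text
assert isinstance(restored, ChatGeneration)
assert restored.message.content == "hi there"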
@@ -441,6 +445,17 @@ class RedisSemanticCache(BaseCache):
         )
         if results:
             for document in results:
-                generations.extend(
-                    _load_generations_from_json(document.metadata["return_val"])
-                )
+                try:
+                    generations.extend(loads(document.metadata["return_val"]))
+                except Exception:
+                    logger.warning(
+                        "Retrieving a cache value that could not be deserialized "
+                        "properly. This is likely due to the cache being in an "
+                        "older format. Please recreate your cache to avoid this "
+                        "error."
+                    )
+                    # In a previous life we stored the raw text directly
+                    # in the table, so assume it's in that format.
+                    generations.extend(
+                        _load_generations_from_json(document.metadata["return_val"])
+                    )
@@ -454,18 +469,12 @@ class RedisSemanticCache(BaseCache):
                     "RedisSemanticCache only supports caching of "
                     f"normal LLM generations, got {type(gen)}"
                 )
-            if isinstance(gen, ChatGeneration):
-                warnings.warn(
-                    "NOTE: Generation has not been cached. RedisSentimentCache does not "
-                    "support caching ChatModel outputs."
-                )
-                return
         llm_cache = self._get_llm_cache(llm_string)
         metadata = {
             "llm_string": llm_string,
             "prompt": prompt,
-            "return_val": _dump_generations_to_json([g for g in return_val]),
+            "return_val": dumps([g for g in return_val]),
         }
         llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
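A usage sketch for the updated cache, assuming a Redis instance on the default local port and this version's global llm_cache wiring; ChatOpenAI stands in for any chat model that previously hit the removed warn-and-return path:

import langchain
from langchain.cache import RedisCache
from langchain.chat_models import ChatOpenAI
from redis import Redis

langchain.llm_cache = RedisCache(redis_=Redis())

chat = ChatOpenAI()
chat.predict("Tell me a joke.")  # first call reaches the API and caches a ChatGeneration
chat.predict("Tell me a joke.")  # repeat call is served from Redis via loads()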