From 384fa43fc305922638d22d88463c9b8b50283f40 Mon Sep 17 00:00:00 2001
From: Bryce Drennan
Date: Mon, 19 Jun 2023 22:47:59 -0700
Subject: [PATCH] fix: llm caching for replicate (#6396)

Caching wasn't accounting for which model was used, so a result cached
for the first executed model would be returned for the same prompt on a
different model. This happened because `Replicate._identifying_params`
did not include the `model` parameter.

FYI
- @cbh123
- @hwchase17
- @agola11
---
 langchain/llms/replicate.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/langchain/llms/replicate.py b/langchain/llms/replicate.py
index f4660f6b..5e932ca5 100644
--- a/langchain/llms/replicate.py
+++ b/langchain/llms/replicate.py
@@ -72,6 +72,7 @@ class Replicate(LLM):
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
         return {
+            "model": self.model,
             **{"model_kwargs": self.model_kwargs},
         }
 
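For context, a minimal sketch of the collision this commit fixes, assuming the module-level in-memory LLM cache langchain used at this time. The Replicate model identifiers and the API token below are hypothetical; the token is set only so the client's validator passes, and no network call is made:

# A sketch of the cache collision, not a definitive reproduction.
import os

# Hypothetical token: the Replicate validator only requires that a token
# be present; nothing in this sketch talks to the Replicate API.
os.environ.setdefault("REPLICATE_API_TOKEN", "dummy-token-for-illustration")

import langchain
from langchain.cache import InMemoryCache
from langchain.llms import Replicate

langchain.llm_cache = InMemoryCache()

# Two instances pointing at different models (hypothetical identifiers).
llm_a = Replicate(model="owner/model-a:hypothetical-version")
llm_b = Replicate(model="owner/model-b:hypothetical-version")

# The LLM cache key combines the prompt with a string derived from the
# LLM's parameters, which fold in `_identifying_params`. Before this
# patch, both instances reported identical identifying parameters
# ({'model_kwargs': {}}), so a completion cached for llm_a was returned
# for llm_b on the same prompt. With the fix, the "model" field differs
# and the cache keys diverge.
print(llm_a._identifying_params)
print(llm_b._identifying_params)
assert llm_a._identifying_params != llm_b._identifying_params  # holds after the fix

The broader point of the commit: any field that changes an LLM's output should appear in `_identifying_params`, since the cache key is built from it.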