fix: llm caching for replicate (#6396)
Caching wasn't accounting for which model was used, so the cached result for the first executed model would be returned for the same prompt on a different model. This was because `Replicate._identifying_params` did not include the `model` parameter. FYI @cbh123, @hwchase17, @agola11.
parent 8a604b93ab
commit 384fa43fc3
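To see why the omission matters: LangChain's LLM cache keys each entry on the prompt plus a string derived from the model's `_identifying_params`, so any field left out of that mapping cannot distinguish otherwise-identical calls. Below is a minimal sketch of the collision; `llm_string` here is a simplified stand-in for the real key derivation, not the actual langchain internals.

```python
from typing import Any, Mapping


def llm_string(params: Mapping[str, Any]) -> str:
    """Simplified stand-in for the cache-key component that langchain
    derives from an LLM's _identifying_params."""
    return repr(sorted(params.items()))


# Before the fix: `model` was absent from the mapping, so two different
# Replicate models serialized to the same key and shared cache entries.
key_a = llm_string({"model_kwargs": {}})  # meant to be Replicate model A
key_b = llm_string({"model_kwargs": {}})  # meant to be Replicate model B
assert key_a == key_b  # collision: model B returns model A's cached result

# After the fix: including `model` keeps the keys distinct.
key_a = llm_string({"model": "owner/model-a", "model_kwargs": {}})
key_b = llm_string({"model": "owner/model-b", "model_kwargs": {}})
assert key_a != key_b
```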
@@ -72,6 +72,7 @@ class Replicate(LLM):
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
         return {
+            "model": self.model,
             **{"model_kwargs": self.model_kwargs},
         }
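For completeness, a way to observe the behavior end to end with the global cache enabled. This assumes a langchain version from this era and a valid REPLICATE_API_TOKEN in the environment; the model identifiers below are illustrative placeholders, not real Replicate versions.

```python
import langchain
from langchain.cache import InMemoryCache
from langchain.llms import Replicate

# Enable the global in-memory LLM cache.
langchain.llm_cache = InMemoryCache()

# Placeholder model strings; Replicate expects "owner/name:version".
llm_a = Replicate(model="owner/model-a:version-a")
llm_b = Replicate(model="owner/model-b:version-b")

prompt = "What is the capital of France?"
first = llm_a(prompt)   # computed by model A, then cached
second = llm_b(prompt)  # before this fix: silently returned model A's cached answer
```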