From 0c95f3a981bf38275d96dfad5c16eb06aaa85d15 Mon Sep 17 00:00:00 2001
From: Erick Friis
Date: Tue, 9 Jan 2024 16:27:20 -0800
Subject: [PATCH] mistralai[patch]: warn on stop token, fix on_llm_new_token
 (#15787)

Fixes #15269

Addresses the issue with a warning: the MistralAI API doesn't support
stop tokens yet.

---------

Co-authored-by: Niels Garve
---
 .../mistralai/langchain_mistralai/chat_models.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/libs/partners/mistralai/langchain_mistralai/chat_models.py b/libs/partners/mistralai/langchain_mistralai/chat_models.py
index bf5ca6b867..dda70525d8 100644
--- a/libs/partners/mistralai/langchain_mistralai/chat_models.py
+++ b/libs/partners/mistralai/langchain_mistralai/chat_models.py
@@ -289,10 +289,12 @@ class ChatMistralAI(BaseChatModel):
         self, messages: List[BaseMessage], stop: Optional[List[str]]
     ) -> Tuple[List[MistralChatMessage], Dict[str, Any]]:
         params = self._client_params
-        if stop is not None:
+        if stop is not None or "stop" in params:
             if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
-            params["stop"] = stop
+                params.pop("stop")
+            logger.warning(
+                "Parameter `stop` not yet supported (https://docs.mistral.ai/api)"
+            )
         message_dicts = [_convert_message_to_mistral_chat_message(m) for m in messages]
         return message_dicts, params
 
@@ -319,7 +321,7 @@ class ChatMistralAI(BaseChatModel):
             default_chunk_class = chunk.__class__
             yield ChatGenerationChunk(message=chunk)
             if run_manager:
-                run_manager.on_llm_new_token(chunk.content)
+                run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
 
     async def _astream(
         self,
@@ -344,7 +346,7 @@ class ChatMistralAI(BaseChatModel):
             default_chunk_class = chunk.__class__
             yield ChatGenerationChunk(message=chunk)
             if run_manager:
-                await run_manager.on_llm_new_token(chunk.content)
+                await run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
 
     async def _agenerate(
         self,
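
For reviewers, a minimal sketch of the resulting behavior (an illustration,
not part of the patch; assumes `langchain-core` and this revision of
`langchain-mistralai` are installed, and uses a placeholder API key since
`_create_message_dicts` makes no network request):

    import logging

    from langchain_core.messages import HumanMessage
    from langchain_mistralai.chat_models import ChatMistralAI

    logging.basicConfig(level=logging.WARNING)

    # Placeholder key: building the request payload makes no API call.
    llm = ChatMistralAI(mistral_api_key="dummy")

    # Before this patch, a user-supplied `stop` was forwarded to the API
    # (raising ValueError if also present in the default params); now any
    # `stop` is dropped and a warning is logged instead:
    #   Parameter `stop` not yet supported (https://docs.mistral.ai/api)
    message_dicts, params = llm._create_message_dicts(
        [HumanMessage(content="Hello")], stop=["\n"]
    )
    assert "stop" not in params

The `on_llm_new_token` fix is independent: the streaming callbacks now
receive the chunk via the `chunk` keyword argument in addition to the plain
token string.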