diff --git a/libs/community/langchain_community/adapters/openai.py b/libs/community/langchain_community/adapters/openai.py
index e6ff136f7a..4f902a6e18 100644
--- a/libs/community/langchain_community/adapters/openai.py
+++ b/libs/community/langchain_community/adapters/openai.py
@@ -95,18 +95,18 @@ def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
     elif role == "system":
         return SystemMessage(content=_dict.get("content", ""))
     elif role == "function":
-        return FunctionMessage(content=_dict.get("content", ""), name=_dict.get("name"))
+        return FunctionMessage(content=_dict.get("content", ""), name=_dict.get("name"))  # type: ignore[arg-type]
     elif role == "tool":
         additional_kwargs = {}
         if "name" in _dict:
             additional_kwargs["name"] = _dict["name"]
         return ToolMessage(
             content=_dict.get("content", ""),
-            tool_call_id=_dict.get("tool_call_id"),
+            tool_call_id=_dict.get("tool_call_id"),  # type: ignore[arg-type]
             additional_kwargs=additional_kwargs,
         )
     else:
-        return ChatMessage(content=_dict.get("content", ""), role=role)
+        return ChatMessage(content=_dict.get("content", ""), role=role)  # type: ignore[arg-type]
 
 
 def convert_message_to_dict(message: BaseMessage) -> dict:
diff --git a/libs/community/langchain_community/agent_toolkits/azure_ai_services.py b/libs/community/langchain_community/agent_toolkits/azure_ai_services.py
index a83bd73a03..282a3aa9bc 100644
--- a/libs/community/langchain_community/agent_toolkits/azure_ai_services.py
+++ b/libs/community/langchain_community/agent_toolkits/azure_ai_services.py
@@ -21,11 +21,11 @@ class AzureAiServicesToolkit(BaseToolkit):
         """Get the tools in the toolkit."""
         tools: List[BaseTool] = [
-            AzureAiServicesDocumentIntelligenceTool(),
-            AzureAiServicesImageAnalysisTool(),
-            AzureAiServicesSpeechToTextTool(),
-            AzureAiServicesTextToSpeechTool(),
-            AzureAiServicesTextAnalyticsForHealthTool(),
+            AzureAiServicesDocumentIntelligenceTool(),  # type: ignore[call-arg]
+            AzureAiServicesImageAnalysisTool(),  # type: ignore[call-arg]
+            AzureAiServicesSpeechToTextTool(),  # type: ignore[call-arg]
+            AzureAiServicesTextToSpeechTool(),  # type: ignore[call-arg]
+            AzureAiServicesTextAnalyticsForHealthTool(),  # type: ignore[call-arg]
         ]
 
         return tools
diff --git a/libs/community/langchain_community/agent_toolkits/azure_cognitive_services.py b/libs/community/langchain_community/agent_toolkits/azure_cognitive_services.py
index 741648a138..d365486d39 100644
--- a/libs/community/langchain_community/agent_toolkits/azure_cognitive_services.py
+++ b/libs/community/langchain_community/agent_toolkits/azure_cognitive_services.py
@@ -21,13 +21,13 @@ class AzureCognitiveServicesToolkit(BaseToolkit):
         """Get the tools in the toolkit."""
         tools: List[BaseTool] = [
-            AzureCogsFormRecognizerTool(),
-            AzureCogsSpeech2TextTool(),
-            AzureCogsText2SpeechTool(),
-            AzureCogsTextAnalyticsHealthTool(),
+            AzureCogsFormRecognizerTool(),  # type: ignore[call-arg]
+            AzureCogsSpeech2TextTool(),  # type: ignore[call-arg]
+            AzureCogsText2SpeechTool(),  # type: ignore[call-arg]
+            AzureCogsTextAnalyticsHealthTool(),  # type: ignore[call-arg]
         ]
 
         # TODO: Remove check once azure-ai-vision supports MacOS.
         if sys.platform.startswith("linux") or sys.platform.startswith("win"):
-            tools.append(AzureCogsImageAnalysisTool())
+            tools.append(AzureCogsImageAnalysisTool())  # type: ignore[call-arg]
         return tools
diff --git a/libs/community/langchain_community/agent_toolkits/clickup/toolkit.py b/libs/community/langchain_community/agent_toolkits/clickup/toolkit.py
index c8d45e2711..9e0188da93 100644
--- a/libs/community/langchain_community/agent_toolkits/clickup/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/clickup/toolkit.py
@@ -102,7 +102,7 @@ class ClickupToolkit(BaseToolkit):
             )
             for action in operations
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/agent_toolkits/connery/toolkit.py b/libs/community/langchain_community/agent_toolkits/connery/toolkit.py
index c3fad66932..b48b16ac93 100644
--- a/libs/community/langchain_community/agent_toolkits/connery/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/connery/toolkit.py
@@ -45,6 +45,6 @@ class ConneryToolkit(BaseToolkit):
             ConneryToolkit: The Connery Toolkit.
         """
-        instance = cls(tools=connery_service.list_actions())
+        instance = cls(tools=connery_service.list_actions())  # type: ignore[arg-type]
 
         return instance
diff --git a/libs/community/langchain_community/agent_toolkits/file_management/toolkit.py b/libs/community/langchain_community/agent_toolkits/file_management/toolkit.py
index 8e9122aa9a..28384c0090 100644
--- a/libs/community/langchain_community/agent_toolkits/file_management/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/file_management/toolkit.py
@@ -73,7 +73,7 @@ class FileManagementToolkit(BaseToolkit):
         tools: List[BaseTool] = []
         for tool in allowed_tools:
             tool_cls = _FILE_TOOLS_MAP[tool]
-            tools.append(tool_cls(root_dir=self.root_dir))
+            tools.append(tool_cls(root_dir=self.root_dir))  # type: ignore[call-arg]
         return tools
diff --git a/libs/community/langchain_community/agent_toolkits/github/toolkit.py b/libs/community/langchain_community/agent_toolkits/github/toolkit.py
index f0675cc2f5..81ee2a7eec 100644
--- a/libs/community/langchain_community/agent_toolkits/github/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/github/toolkit.py
@@ -308,7 +308,7 @@ class GitHubToolkit(BaseToolkit):
             )
             for action in operations
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/agent_toolkits/gitlab/toolkit.py b/libs/community/langchain_community/agent_toolkits/gitlab/toolkit.py
index 6d02d6db75..739ea5f70d 100644
--- a/libs/community/langchain_community/agent_toolkits/gitlab/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/gitlab/toolkit.py
@@ -88,7 +88,7 @@ class GitLabToolkit(BaseToolkit):
             )
             for action in operations
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/agent_toolkits/jira/toolkit.py b/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
index ddc7f50f6e..1ef1acfa68 100644
--- a/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/jira/toolkit.py
@@ -64,7 +64,7 @@ class JiraToolkit(BaseToolkit):
             )
             for action in operations
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/agent_toolkits/nasa/toolkit.py b/libs/community/langchain_community/agent_toolkits/nasa/toolkit.py
index 2704f7af4c..5a18a22e32 100644
--- a/libs/community/langchain_community/agent_toolkits/nasa/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/nasa/toolkit.py
@@ -51,7 +51,7 @@ class NasaToolkit(BaseToolkit):
             )
             for action in operations
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/agent_toolkits/openapi/planner.py b/libs/community/langchain_community/agent_toolkits/openapi/planner.py
index 281c8a49be..cd4a83ff92 100644
--- a/libs/community/langchain_community/agent_toolkits/openapi/planner.py
+++ b/libs/community/langchain_community/agent_toolkits/openapi/planner.py
@@ -262,12 +262,12 @@ def _create_api_controller_agent(
     get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
     post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
     tools: List[BaseTool] = [
-        RequestsGetToolWithParsing(
+        RequestsGetToolWithParsing(  # type: ignore[call-arg]
            requests_wrapper=requests_wrapper,
            llm_chain=get_llm_chain,
            allow_dangerous_requests=allow_dangerous_requests,
         ),
-        RequestsPostToolWithParsing(
+        RequestsPostToolWithParsing(  # type: ignore[call-arg]
            requests_wrapper=requests_wrapper,
            llm_chain=post_llm_chain,
            allow_dangerous_requests=allow_dangerous_requests,
diff --git a/libs/community/langchain_community/agent_toolkits/powerbi/toolkit.py b/libs/community/langchain_community/agent_toolkits/powerbi/toolkit.py
index cf1d8397fd..07f680714d 100644
--- a/libs/community/langchain_community/agent_toolkits/powerbi/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/powerbi/toolkit.py
@@ -66,7 +66,7 @@ class PowerBIToolkit(BaseToolkit):
                 powerbi=self.powerbi,
                 examples=self.examples,
                 max_iterations=self.max_iterations,
-                output_token_limit=self.output_token_limit,
+                output_token_limit=self.output_token_limit,  # type: ignore[arg-type]
                 tiktoken_model_name=self.tiktoken_model_name,
             ),
             InfoPowerBITool(powerbi=self.powerbi),
diff --git a/libs/community/langchain_community/agent_toolkits/sql/base.py b/libs/community/langchain_community/agent_toolkits/sql/base.py
index 75f3cb97b4..e9871e076f 100644
--- a/libs/community/langchain_community/agent_toolkits/sql/base.py
+++ b/libs/community/langchain_community/agent_toolkits/sql/base.py
@@ -136,7 +136,7 @@ def create_sql_agent(
             "Must provide exactly one of 'toolkit' or 'db'. Received both."
         )
-    toolkit = toolkit or SQLDatabaseToolkit(llm=llm, db=db)
+    toolkit = toolkit or SQLDatabaseToolkit(llm=llm, db=db)  # type: ignore[arg-type]
     agent_type = agent_type or AgentType.ZERO_SHOT_REACT_DESCRIPTION
     tools = toolkit.get_tools() + list(extra_tools)
     if prompt is None:
diff --git a/libs/community/langchain_community/agent_toolkits/steam/toolkit.py b/libs/community/langchain_community/agent_toolkits/steam/toolkit.py
index 6033997ff5..f6c89b5aed 100644
--- a/libs/community/langchain_community/agent_toolkits/steam/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/steam/toolkit.py
@@ -42,7 +42,7 @@ class SteamToolkit(BaseToolkit):
             )
             for action in operations
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/agent_toolkits/zapier/toolkit.py b/libs/community/langchain_community/agent_toolkits/zapier/toolkit.py
index 3314eb3457..207133e459 100644
--- a/libs/community/langchain_community/agent_toolkits/zapier/toolkit.py
+++ b/libs/community/langchain_community/agent_toolkits/zapier/toolkit.py
@@ -29,7 +29,7 @@ class ZapierToolkit(BaseToolkit):
             )
             for action in actions
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     @classmethod
     async def async_from_zapier_nla_wrapper(
@@ -46,7 +46,7 @@ class ZapierToolkit(BaseToolkit):
             )
             for action in actions
         ]
-        return cls(tools=tools)
+        return cls(tools=tools)  # type: ignore[arg-type]
 
     def get_tools(self) -> List[BaseTool]:
         """Get the tools in the toolkit."""
diff --git a/libs/community/langchain_community/cache.py b/libs/community/langchain_community/cache.py
index 094dc3377c..eaf8b2be81 100644
--- a/libs/community/langchain_community/cache.py
+++ b/libs/community/langchain_community/cache.py
@@ -420,7 +420,7 @@ class _RedisCacheBase(BaseCache, ABC):
                 )
                 # In a previous life we stored the raw text directly
                 # in the table, so assume it's in that format.
-                generations.append(Generation(text=text))
+                generations.append(Generation(text=text))  # type: ignore[arg-type]
         return generations if generations else None
 
     @staticmethod
diff --git a/libs/community/langchain_community/chains/ernie_functions/base.py b/libs/community/langchain_community/chains/ernie_functions/base.py
index 3749d66776..80b10ec051 100644
--- a/libs/community/langchain_community/chains/ernie_functions/base.py
+++ b/libs/community/langchain_community/chains/ernie_functions/base.py
@@ -376,7 +376,7 @@ def create_ernie_fn_chain(
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:
+) -> LLMChain:  # type: ignore[valid-type]
     """[Legacy] Create an LLM chain that uses Ernie functions.
 
     Args:
@@ -453,7 +453,7 @@
     }
     if len(ernie_functions) == 1:
         llm_kwargs["function_call"] = {"name": ernie_functions[0]["name"]}
-    llm_chain = LLMChain(
+    llm_chain = LLMChain(  # type: ignore[misc]
         llm=llm,
         prompt=prompt,
         output_parser=output_parser,
@@ -472,7 +472,7 @@
     output_key: str = "function",
     output_parser: Optional[BaseLLMOutputParser] = None,
     **kwargs: Any,
-) -> LLMChain:
+) -> LLMChain:  # type: ignore[valid-type]
     """[Legacy] Create an LLMChain that uses an Ernie function to get a structured output.
 
     Args:
diff --git a/libs/community/langchain_community/chat_loaders/imessage.py b/libs/community/langchain_community/chat_loaders/imessage.py
index 8e924bcef5..b8d4e610d6 100644
--- a/libs/community/langchain_community/chat_loaders/imessage.py
+++ b/libs/community/langchain_community/chat_loaders/imessage.py
@@ -148,7 +148,7 @@ class IMessageChatLoader(BaseChatLoader):
                 continue
 
             results.append(
-                HumanMessage(
+                HumanMessage(  # type: ignore[call-arg]
                     role=sender,
                     content=content,
                     additional_kwargs={
diff --git a/libs/community/langchain_community/chat_loaders/slack.py b/libs/community/langchain_community/chat_loaders/slack.py
index b31a0f521a..fcce989aa9 100644
--- a/libs/community/langchain_community/chat_loaders/slack.py
+++ b/libs/community/langchain_community/chat_loaders/slack.py
@@ -51,7 +51,7 @@ class SlackChatLoader(BaseChatLoader):
                 )
             else:
                 results.append(
-                    HumanMessage(
+                    HumanMessage(  # type: ignore[call-arg]
                         role=sender,
                         content=text,
                         additional_kwargs={
diff --git a/libs/community/langchain_community/chat_loaders/utils.py b/libs/community/langchain_community/chat_loaders/utils.py
index 3fe9384e3d..151bc9d776 100644
--- a/libs/community/langchain_community/chat_loaders/utils.py
+++ b/libs/community/langchain_community/chat_loaders/utils.py
@@ -77,7 +77,7 @@ def map_ai_messages_in_session(chat_sessions: ChatSession, sender: str) -> ChatS
             message = AIMessage(
                 content=message.content,
                 additional_kwargs=message.additional_kwargs.copy(),
-                example=getattr(message, "example", None),
+                example=getattr(message, "example", None),  # type: ignore[arg-type]
             )
             num_converted += 1
         messages.append(message)
diff --git a/libs/community/langchain_community/chat_loaders/whatsapp.py b/libs/community/langchain_community/chat_loaders/whatsapp.py
index 9378744b96..1b3173459e 100644
--- a/libs/community/langchain_community/chat_loaders/whatsapp.py
+++ b/libs/community/langchain_community/chat_loaders/whatsapp.py
@@ -73,7 +73,7 @@ class WhatsAppChatLoader(BaseChatLoader):
                 timestamp, sender, text = result.groups()
                 if not self._ignore_lines.match(text.strip()):
                     results.append(
-                        HumanMessage(
+                        HumanMessage(  # type: ignore[call-arg]
                             role=sender,
                             content=text,
                             additional_kwargs={
diff --git a/libs/community/langchain_community/chat_models/azureml_endpoint.py b/libs/community/langchain_community/chat_models/azureml_endpoint.py
index 91947f97ff..5f2a3f59c3 100644
--- a/libs/community/langchain_community/chat_models/azureml_endpoint.py
+++ b/libs/community/langchain_community/chat_models/azureml_endpoint.py
@@ -419,4 +419,4 @@ def _convert_delta_to_message_chunk(
     elif role or default_class == ChatMessageChunk:
         return ChatMessageChunk(content=content, role=role)
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
diff --git a/libs/community/langchain_community/chat_models/baichuan.py b/libs/community/langchain_community/chat_models/baichuan.py
index d6207f7faf..41e8de826c 100644
--- a/libs/community/langchain_community/chat_models/baichuan.py
+++ b/libs/community/langchain_community/chat_models/baichuan.py
@@ -66,9 +66,9 @@ def _convert_delta_to_message_chunk(
     elif role == "assistant" or default_class == AIMessageChunk:
         return AIMessageChunk(content=content)
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 class ChatBaichuan(BaseChatModel):
diff --git a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
index f25f44c933..95b5fc1639 100644
--- a/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
+++ b/libs/community/langchain_community/chat_models/baidu_qianfan_endpoint.py
@@ -383,7 +383,7 @@ class QianfanChatEndpoint(BaseChatModel):
             additional_kwargs = msg.additional_kwargs.get("function_call", {})
             chunk = ChatGenerationChunk(
                 text=res["result"],
-                message=AIMessageChunk(
+                message=AIMessageChunk(  # type: ignore[call-arg]
                     content=msg.content,
                     role="assistant",
                     additional_kwargs=additional_kwargs,
@@ -410,7 +410,7 @@ class QianfanChatEndpoint(BaseChatModel):
             additional_kwargs = msg.additional_kwargs.get("function_call", {})
             chunk = ChatGenerationChunk(
                 text=res["result"],
-                message=AIMessageChunk(
+                message=AIMessageChunk(  # type: ignore[call-arg]
                     content=msg.content,
                     role="assistant",
                     additional_kwargs=additional_kwargs,
@@ -552,7 +552,8 @@ class QianfanChatEndpoint(BaseChatModel):
         llm = self.bind_tools([schema])
         if is_pydantic_schema:
             output_parser: OutputParserLike = PydanticToolsParser(
-                tools=[schema], first_tool_only=True
+                tools=[schema],  # type: ignore[list-item]
+                first_tool_only=True,  # type: ignore[list-item]
             )
         else:
             key_name = convert_to_openai_tool(schema)["function"]["name"]
diff --git a/libs/community/langchain_community/chat_models/coze.py b/libs/community/langchain_community/chat_models/coze.py
index c7ea63f032..733719be13 100644
--- a/libs/community/langchain_community/chat_models/coze.py
+++ b/libs/community/langchain_community/chat_models/coze.py
@@ -69,7 +69,7 @@ def _convert_delta_to_message_chunk(_dict: Mapping[str, Any]) -> BaseMessageChun
     elif role == "assistant":
         return AIMessageChunk(content=content)
     else:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
 
 
 class ChatCoze(BaseChatModel):
diff --git a/libs/community/langchain_community/chat_models/deepinfra.py b/libs/community/langchain_community/chat_models/deepinfra.py
index 0886e52086..51df3b634b 100644
--- a/libs/community/langchain_community/chat_models/deepinfra.py
+++ b/libs/community/langchain_community/chat_models/deepinfra.py
@@ -118,9 +118,9 @@ def _convert_delta_to_message_chunk(
     elif role == "function" or default_class == FunctionMessageChunk:
         return FunctionMessageChunk(content=content, name=_dict["name"])
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 def _convert_message_to_dict(message: BaseMessage) -> dict:
diff --git a/libs/community/langchain_community/chat_models/fireworks.py b/libs/community/langchain_community/chat_models/fireworks.py
index 106ea7e921..2e434f1ae6 100644
--- a/libs/community/langchain_community/chat_models/fireworks.py
+++ b/libs/community/langchain_community/chat_models/fireworks.py
@@ -58,7 +58,7 @@ def _convert_delta_to_message_chunk(
     elif role or default_class == ChatMessageChunk:
         return ChatMessageChunk(content=content, role=role)
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 def convert_dict_to_message(_dict: Any) -> BaseMessage:
diff --git a/libs/community/langchain_community/chat_models/gigachat.py b/libs/community/langchain_community/chat_models/gigachat.py
index 76e22bb248..1e20fa84df 100644
--- a/libs/community/langchain_community/chat_models/gigachat.py
+++ b/libs/community/langchain_community/chat_models/gigachat.py
@@ -108,9 +108,9 @@ def _convert_delta_to_message_chunk(
     elif role == "function" or default_class == FunctionMessageChunk:
         return FunctionMessageChunk(content=content, name=_dict["name"])
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 class GigaChat(_BaseGigaChat, BaseChatModel):
diff --git a/libs/community/langchain_community/chat_models/hunyuan.py b/libs/community/langchain_community/chat_models/hunyuan.py
index 7eef887a5f..e83a48c6f7 100644
--- a/libs/community/langchain_community/chat_models/hunyuan.py
+++ b/libs/community/langchain_community/chat_models/hunyuan.py
@@ -72,9 +72,9 @@ def _convert_delta_to_message_chunk(
     elif role == "assistant" or default_class == AIMessageChunk:
         return AIMessageChunk(content=content)
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 # signature generation
diff --git a/libs/community/langchain_community/chat_models/jinachat.py b/libs/community/langchain_community/chat_models/jinachat.py
index 83f2e24959..75a373d515 100644
--- a/libs/community/langchain_community/chat_models/jinachat.py
+++ b/libs/community/langchain_community/chat_models/jinachat.py
@@ -103,9 +103,9 @@ def _convert_delta_to_message_chunk(
     elif role == "system" or default_class == SystemMessageChunk:
         return SystemMessageChunk(content=content)
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
diff --git a/libs/community/langchain_community/chat_models/litellm.py b/libs/community/langchain_community/chat_models/litellm.py
index 2f98033780..6d338f60db 100644
--- a/libs/community/langchain_community/chat_models/litellm.py
+++ b/libs/community/langchain_community/chat_models/litellm.py
@@ -131,9 +131,9 @@ def _convert_delta_to_message_chunk(
     elif role == "function" or default_class == FunctionMessageChunk:
         return FunctionMessageChunk(content=content, name=_dict["name"])
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 def _convert_message_to_dict(message: BaseMessage) -> dict:
diff --git a/libs/community/langchain_community/chat_models/llama_edge.py b/libs/community/langchain_community/chat_models/llama_edge.py
index 064fe5f97b..ae603af7de 100644
--- a/libs/community/langchain_community/chat_models/llama_edge.py
+++ b/libs/community/langchain_community/chat_models/llama_edge.py
@@ -64,9 +64,9 @@ def _convert_delta_to_message_chunk(
     elif role == "assistant" or default_class == AIMessageChunk:
         return AIMessageChunk(content=content)
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 class LlamaEdgeChatService(BaseChatModel):
diff --git a/libs/community/langchain_community/chat_models/minimax.py b/libs/community/langchain_community/chat_models/minimax.py
index a315aa1f53..2b8419b9d1 100644
--- a/libs/community/langchain_community/chat_models/minimax.py
+++ b/libs/community/langchain_community/chat_models/minimax.py
@@ -82,7 +82,7 @@ class MiniMaxChat(MinimaxCommon, BaseChatModel):
         # This is required since the stop are not enforced by
         # the model parameters
         text = text if stop is None else enforce_stop_tokens(text, stop)
-        return ChatResult(generations=[ChatGeneration(message=AIMessage(text))])
+        return ChatResult(generations=[ChatGeneration(message=AIMessage(text))])  # type: ignore[misc]
 
     async def _agenerate(
         self,
diff --git a/libs/community/langchain_community/chat_models/openai.py b/libs/community/langchain_community/chat_models/openai.py
index 694d2e0661..515570c3af 100644
--- a/libs/community/langchain_community/chat_models/openai.py
+++ b/libs/community/langchain_community/chat_models/openai.py
@@ -139,9 +139,9 @@ def _convert_delta_to_message_chunk(
     elif role == "tool" or default_class == ToolMessageChunk:
         return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 @deprecated(
diff --git a/libs/community/langchain_community/chat_models/perplexity.py b/libs/community/langchain_community/chat_models/perplexity.py
index 6c05c1a019..6538f7f3e0 100644
--- a/libs/community/langchain_community/chat_models/perplexity.py
+++ b/libs/community/langchain_community/chat_models/perplexity.py
@@ -198,9 +198,9 @@ class ChatPerplexity(BaseChatModel):
         elif role == "tool" or default_class == ToolMessageChunk:
             return ToolMessageChunk(content=content, tool_call_id=_dict["tool_call_id"])
         elif role or default_class == ChatMessageChunk:
-            return ChatMessageChunk(content=content, role=role)
+            return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
         else:
-            return default_class(content=content)
+            return default_class(content=content)  # type: ignore[call-arg]
 
     def _stream(
         self,
diff --git a/libs/community/langchain_community/chat_models/premai.py b/libs/community/langchain_community/chat_models/premai.py
index 5991506cd6..964a13fd99 100644
--- a/libs/community/langchain_community/chat_models/premai.py
+++ b/libs/community/langchain_community/chat_models/premai.py
@@ -136,7 +136,7 @@ def _convert_delta_response_to_message_chunk(
     elif role or default_class == ChatMessageChunk:
         return ChatMessageChunk(content=content, role=role), finish_reasons
     else:
-        return default_class(content=content), finish_reasons
+        return default_class(content=content), finish_reasons  # type: ignore[call-arg]
 
 
 def _messages_to_prompt_dict(
diff --git a/libs/community/langchain_community/chat_models/solar.py b/libs/community/langchain_community/chat_models/solar.py
index 312e4a992b..417880757b 100644
--- a/libs/community/langchain_community/chat_models/solar.py
+++ b/libs/community/langchain_community/chat_models/solar.py
@@ -10,7 +10,7 @@ from langchain_community.chat_models import ChatOpenAI
 from langchain_community.llms.solar import SOLAR_SERVICE_URL_BASE, SolarCommon
 
 
-@deprecated(
+@deprecated(  # type: ignore[arg-type]
     since="0.0.34", removal="0.3.0", alternative_import="langchain_upstage.ChatUpstage"
 )
 class SolarChat(SolarCommon, ChatOpenAI):
diff --git a/libs/community/langchain_community/chat_models/sparkllm.py b/libs/community/langchain_community/chat_models/sparkllm.py
index f173519746..e9efafd0c8 100644
--- a/libs/community/langchain_community/chat_models/sparkllm.py
+++ b/libs/community/langchain_community/chat_models/sparkllm.py
@@ -85,7 +85,7 @@ def _convert_delta_to_message_chunk(
     elif msg_role or default_class == ChatMessageChunk:
         return ChatMessageChunk(content=msg_content, role=msg_role)
     else:
-        return default_class(content=msg_content)
+        return default_class(content=msg_content)  # type: ignore[call-arg]
 
 
 class ChatSparkLLM(BaseChatModel):
@@ -382,10 +382,10 @@ class _SparkLLMClient:
             on_close=self.on_close,
             on_open=self.on_open,
         )
-        ws.messages = messages
-        ws.user_id = user_id
-        ws.model_kwargs = self.model_kwargs if model_kwargs is None else model_kwargs
-        ws.streaming = streaming
+        ws.messages = messages  # type: ignore[attr-defined]
+        ws.user_id = user_id  # type: ignore[attr-defined]
+        ws.model_kwargs = self.model_kwargs if model_kwargs is None else model_kwargs  # type: ignore[attr-defined]
+        ws.streaming = streaming  # type: ignore[attr-defined]
         ws.run_forever()
 
     def arun(
diff --git a/libs/community/langchain_community/chat_models/tongyi.py b/libs/community/langchain_community/chat_models/tongyi.py
index 943cace973..7c50cb064c 100644
--- a/libs/community/langchain_community/chat_models/tongyi.py
+++ b/libs/community/langchain_community/chat_models/tongyi.py
@@ -94,7 +94,7 @@ def convert_dict_to_message(
         else AIMessage(
             content=content,
             additional_kwargs=additional_kwargs,
-            tool_calls=tool_calls,
+            tool_calls=tool_calls,  # type: ignore[arg-type]
            invalid_tool_calls=invalid_tool_calls,
         )
     )
diff --git a/libs/community/langchain_community/chat_models/yuan2.py b/libs/community/langchain_community/chat_models/yuan2.py
index 9cb6942fd2..5c030f870b 100644
--- a/libs/community/langchain_community/chat_models/yuan2.py
+++ b/libs/community/langchain_community/chat_models/yuan2.py
@@ -437,9 +437,9 @@ def _convert_delta_to_message_chunk(
     elif role == "system" or default_class == SystemMessageChunk:
         return SystemMessageChunk(content=content)
     elif role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
     else:
-        return default_class(content=content)
+        return default_class(content=content)  # type: ignore[call-arg]
 
 
 def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
@@ -451,7 +451,7 @@ def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
     elif role == "system":
         return SystemMessage(content=_dict.get("content", ""))
     else:
-        return ChatMessage(content=_dict.get("content", ""), role=role)
+        return ChatMessage(content=_dict.get("content", ""), role=role)  # type: ignore[arg-type]
 
 
 def _convert_message_to_dict(message: BaseMessage) -> dict:
diff --git a/libs/community/langchain_community/chat_models/zhipuai.py b/libs/community/langchain_community/chat_models/zhipuai.py
index 5d9dd0eb88..797bd1cb54 100644
--- a/libs/community/langchain_community/chat_models/zhipuai.py
+++ b/libs/community/langchain_community/chat_models/zhipuai.py
@@ -101,7 +101,7 @@ def _convert_dict_to_message(dct: Dict[str, Any]) -> BaseMessage:
         if tool_calls is not None:
             additional_kwargs["tool_calls"] = tool_calls
         return AIMessage(content=content, additional_kwargs=additional_kwargs)
-    return ChatMessage(role=role, content=content)
+    return ChatMessage(role=role, content=content)  # type: ignore[arg-type]
 
 
 def _convert_message_to_dict(message: BaseMessage) -> Dict[str, Any]:
@@ -144,8 +144,8 @@ def _convert_delta_to_message_chunk(
     if role == "assistant" or default_class == AIMessageChunk:
         return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
     if role or default_class == ChatMessageChunk:
-        return ChatMessageChunk(content=content, role=role)
-    return default_class(content=content)
+        return ChatMessageChunk(content=content, role=role)  # type: ignore[arg-type]
+    return default_class(content=content)  # type: ignore[call-arg]
 
 
 def _truncate_params(payload: Dict[str, Any]) -> None:
diff --git a/libs/community/langchain_community/document_loaders/base_o365.py b/libs/community/langchain_community/document_loaders/base_o365.py
index 90dba6d29e..ddf95bdc76 100644
--- a/libs/community/langchain_community/document_loaders/base_o365.py
+++ b/libs/community/langchain_community/document_loaders/base_o365.py
@@ -70,7 +70,7 @@ def fetch_mime_types(file_types: Sequence[_FileType]) -> Dict[str, str]:
 class O365BaseLoader(BaseLoader, BaseModel):
     """Base class for all loaders that uses O365 Package"""
 
-    settings: _O365Settings = Field(default_factory=_O365Settings)
+    settings: _O365Settings = Field(default_factory=_O365Settings)  # type: ignore[arg-type]
     """Settings for the Office365 API client."""
     auth_with_token: bool = False
     """Whether to authenticate with a token or not.
     Defaults to False."""
diff --git a/libs/community/langchain_community/document_loaders/kinetica_loader.py b/libs/community/langchain_community/document_loaders/kinetica_loader.py
index d5cb1296e0..a6a15db4de 100644
--- a/libs/community/langchain_community/document_loaders/kinetica_loader.py
+++ b/libs/community/langchain_community/document_loaders/kinetica_loader.py
@@ -86,7 +86,7 @@ class KineticaLoader(BaseLoader):
         query_result = self._execute_query()
         if isinstance(query_result, Exception):
             print(f"An error occurred during the query: {query_result}")  # noqa: T201
-            return []
+            return []  # type: ignore[return-value]
         page_content_columns, metadata_columns = self._get_columns(query_result)
         if "*" in page_content_columns:
             page_content_columns = list(query_result[0].keys())
diff --git a/libs/community/langchain_community/document_loaders/mhtml.py b/libs/community/langchain_community/document_loaders/mhtml.py
index 95edc76d32..809f46c031 100644
--- a/libs/community/langchain_community/document_loaders/mhtml.py
+++ b/libs/community/langchain_community/document_loaders/mhtml.py
@@ -58,8 +58,8 @@ class MHTMLLoader(BaseLoader):
                 parts = [message]
 
             for part in parts:
-                if part.get_content_type() == "text/html":
-                    html = part.get_payload(decode=True).decode()
+                if part.get_content_type() == "text/html":  # type: ignore[union-attr]
+                    html = part.get_payload(decode=True).decode()  # type: ignore[union-attr]
 
                     soup = BeautifulSoup(html, **self.bs_kwargs)
                     text = soup.get_text(self.get_text_separator)
diff --git a/libs/community/langchain_community/document_loaders/onenote.py b/libs/community/langchain_community/document_loaders/onenote.py
index 87c5d78daf..0ecc1e1da7 100644
--- a/libs/community/langchain_community/document_loaders/onenote.py
+++ b/libs/community/langchain_community/document_loaders/onenote.py
@@ -31,7 +31,7 @@ class _OneNoteGraphSettings(BaseSettings):
 class OneNoteLoader(BaseLoader, BaseModel):
     """Load pages from OneNote notebooks."""
 
-    settings: _OneNoteGraphSettings = Field(default_factory=_OneNoteGraphSettings)
+    settings: _OneNoteGraphSettings = Field(default_factory=_OneNoteGraphSettings)  # type: ignore[arg-type]
     """Settings for the Microsoft Graph API client."""
     auth_with_token: bool = False
     """Whether to authenticate with a token or not.
     Defaults to False."""
diff --git a/libs/community/langchain_community/document_loaders/pdf.py b/libs/community/langchain_community/document_loaders/pdf.py
index bbddd8a79b..17063eed0d 100644
--- a/libs/community/langchain_community/document_loaders/pdf.py
+++ b/libs/community/langchain_community/document_loaders/pdf.py
@@ -691,7 +691,7 @@ class AmazonTextractPDFLoader(BasePDFLoader):
         # raises ValueError when multi-page and not on S3"""
 
         if self.web_path and self._is_s3_url(self.web_path):
-            blob = Blob(path=self.web_path)  # type: ignore[misc]
+            blob = Blob(path=self.web_path)  # type: ignore[call-arg] # type: ignore[misc]
         else:
             blob = Blob.from_path(self.file_path)  # type: ignore[attr-defined]
         if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
diff --git a/libs/community/langchain_community/document_loaders/pubmed.py b/libs/community/langchain_community/document_loaders/pubmed.py
index 892d93affe..78d89977b5 100644
--- a/libs/community/langchain_community/document_loaders/pubmed.py
+++ b/libs/community/langchain_community/document_loaders/pubmed.py
@@ -28,8 +28,8 @@ class PubMedLoader(BaseLoader):
         """
         self.query = query
         self.load_max_docs = load_max_docs
-        self._client = PubMedAPIWrapper(
-            top_k_results=load_max_docs,
+        self._client = PubMedAPIWrapper(  # type: ignore[call-arg]
+            top_k_results=load_max_docs,  # type: ignore[arg-type]
         )
 
     def lazy_load(self) -> Iterator[Document]:
diff --git a/libs/community/langchain_community/document_loaders/snowflake_loader.py b/libs/community/langchain_community/document_loaders/snowflake_loader.py
index 3081a7b716..8a07287580 100644
--- a/libs/community/langchain_community/document_loaders/snowflake_loader.py
+++ b/libs/community/langchain_community/document_loaders/snowflake_loader.py
@@ -111,7 +111,7 @@ class SnowflakeLoader(BaseLoader):
         query_result = self._execute_query()
         if isinstance(query_result, Exception):
             print(f"An error occurred during the query: {query_result}")  # noqa: T201
-            return []
+            return []  # type: ignore[return-value]
         page_content_columns, metadata_columns = self._get_columns(query_result)
         if "*" in page_content_columns:
             page_content_columns = list(query_result[0].keys())
diff --git a/libs/community/langchain_community/document_loaders/tensorflow_datasets.py b/libs/community/langchain_community/document_loaders/tensorflow_datasets.py
index 07f6510fad..7beb292139 100644
--- a/libs/community/langchain_community/document_loaders/tensorflow_datasets.py
+++ b/libs/community/langchain_community/document_loaders/tensorflow_datasets.py
@@ -66,10 +66,10 @@ class TensorflowDatasetLoader(BaseLoader):
         ] = sample_to_document_function
         """Custom function that transform a dataset sample into a Document."""
 
-        self._tfds_client = TensorflowDatasets(
+        self._tfds_client = TensorflowDatasets(  # type: ignore[call-arg]
             dataset_name=self.dataset_name,
             split_name=self.split_name,
-            load_max_docs=self.load_max_docs,
+            load_max_docs=self.load_max_docs,  # type: ignore[arg-type]
             sample_to_document_function=self.sample_to_document_function,
         )
diff --git a/libs/community/langchain_community/document_loaders/weather.py b/libs/community/langchain_community/document_loaders/weather.py
index 51b1f27cd3..c058e68983 100644
--- a/libs/community/langchain_community/document_loaders/weather.py
+++ b/libs/community/langchain_community/document_loaders/weather.py
@@ -32,7 +32,7 @@ class WeatherDataLoader(BaseLoader):
     def from_params(
         cls, places: Sequence[str], *, openweathermap_api_key: Optional[str] = None
     ) -> WeatherDataLoader:
-        client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)
+        client = OpenWeatherMapAPIWrapper(openweathermap_api_key=openweathermap_api_key)  # type: ignore[call-arg]
         return cls(client, places)
 
     def lazy_load(
diff --git a/libs/community/langchain_community/document_loaders/wikipedia.py b/libs/community/langchain_community/document_loaders/wikipedia.py
index 2b67c51b25..582a109489 100644
--- a/libs/community/langchain_community/document_loaders/wikipedia.py
+++ b/libs/community/langchain_community/document_loaders/wikipedia.py
@@ -50,10 +50,10 @@ class WikipediaLoader(BaseLoader):
             A list of Document objects representing the loaded Wikipedia pages.
         """
-        client = WikipediaAPIWrapper(
+        client = WikipediaAPIWrapper(  # type: ignore[call-arg]
             lang=self.lang,
-            top_k_results=self.load_max_docs,
-            load_all_available_meta=self.load_all_available_meta,
-            doc_content_chars_max=self.doc_content_chars_max,
+            top_k_results=self.load_max_docs,  # type: ignore[arg-type]
+            load_all_available_meta=self.load_all_available_meta,  # type: ignore[arg-type]
+            doc_content_chars_max=self.doc_content_chars_max,  # type: ignore[arg-type]
         )
         yield from client.load(self.query)
diff --git a/libs/community/langchain_community/indexes/_sql_record_manager.py b/libs/community/langchain_community/indexes/_sql_record_manager.py
index f70a1dc4f4..22e56885ae 100644
--- a/libs/community/langchain_community/indexes/_sql_record_manager.py
+++ b/libs/community/langchain_community/indexes/_sql_record_manager.py
@@ -312,7 +312,7 @@ class SQLRecordManager(RecordManager):
             # Note: uses SQLite insert to make on_conflict_do_update work.
             # This code needs to be generalized a bit to work with more dialects.
-            insert_stmt = pg_insert(UpsertionRecord).values(records_to_upsert)
+            insert_stmt = pg_insert(UpsertionRecord).values(records_to_upsert)  # type: ignore[assignment]
             stmt = insert_stmt.on_conflict_do_update(  # type: ignore[attr-defined]
                 "uix_key_namespace",  # Name of constraint
                 set_=dict(
@@ -387,7 +387,7 @@ class SQLRecordManager(RecordManager):
             # Note: uses SQLite insert to make on_conflict_do_update work.
             # This code needs to be generalized a bit to work with more dialects.
-            insert_stmt = pg_insert(UpsertionRecord).values(records_to_upsert)
+            insert_stmt = pg_insert(UpsertionRecord).values(records_to_upsert)  # type: ignore[assignment]
             stmt = insert_stmt.on_conflict_do_update(  # type: ignore[attr-defined]
                 "uix_key_namespace",  # Name of constraint
                 set_=dict(
@@ -470,7 +470,7 @@ class SQLRecordManager(RecordManager):
         if limit:
             query = query.limit(limit)  # type: ignore[attr-defined]
         records = query.all()  # type: ignore[attr-defined]
-        return [r.key for r in records]
+        return [r.key for r in records]  # type: ignore[misc]
 
     async def alist_keys(
         self,
diff --git a/libs/community/langchain_community/llms/aleph_alpha.py b/libs/community/langchain_community/llms/aleph_alpha.py
index 48ea552c66..d2e56ac35d 100644
--- a/libs/community/langchain_community/llms/aleph_alpha.py
+++ b/libs/community/langchain_community/llms/aleph_alpha.py
@@ -282,6 +282,6 @@ class AlephAlpha(LLM):
 
 if __name__ == "__main__":
-    aa = AlephAlpha()
+    aa = AlephAlpha()  # type: ignore[call-arg]
 
     print(aa.invoke("How are you?"))  # noqa: T201
diff --git a/libs/community/langchain_community/llms/databricks.py b/libs/community/langchain_community/llms/databricks.py
index 06da23183e..8da2f1db83 100644
--- a/libs/community/langchain_community/llms/databricks.py
+++ b/libs/community/langchain_community/llms/databricks.py
@@ -490,7 +490,7 @@ class Databricks(LLM):
                 task=self.task,
             )
         elif self.cluster_id and self.cluster_driver_port:
-            self._client = _DatabricksClusterDriverProxyClient(
+            self._client = _DatabricksClusterDriverProxyClient(  # type: ignore[call-arg]
                 host=self.host,
                 api_token=self.api_token,
                 cluster_id=self.cluster_id,
diff --git a/libs/community/langchain_community/llms/minimax.py b/libs/community/langchain_community/llms/minimax.py
index ea38c36fb4..316eb67652 100644
--- a/libs/community/langchain_community/llms/minimax.py
+++ b/libs/community/langchain_community/llms/minimax.py
@@ -87,7 +87,7 @@ class MinimaxCommon(BaseModel):
             "MINIMAX_API_HOST",
             default="https://api.minimax.chat",
         )
-        values["_client"] = _MinimaxEndpointClient(
+        values["_client"] = _MinimaxEndpointClient(  # type: ignore[call-arg]
             host=values["minimax_api_host"],
             api_key=values["minimax_api_key"],
             group_id=values["minimax_group_id"],
diff --git a/libs/community/langchain_community/llms/ollama.py b/libs/community/langchain_community/llms/ollama.py
index f08fab8218..4a81871efb 100644
--- a/libs/community/langchain_community/llms/ollama.py
+++ b/libs/community/langchain_community/llms/ollama.py
@@ -423,7 +423,7 @@ class Ollama(BaseLLM, _OllamaCommon):
                 **kwargs,
             )
             generations.append([final_chunk])
-        return LLMResult(generations=generations)
+        return LLMResult(generations=generations)  # type: ignore[arg-type]
 
     async def _agenerate(  # type: ignore[override]
         self,
@@ -459,7 +459,7 @@ class Ollama(BaseLLM, _OllamaCommon):
                 **kwargs,
             )
             generations.append([final_chunk])
-        return LLMResult(generations=generations)
+        return LLMResult(generations=generations)  # type: ignore[arg-type]
 
     def _stream(
         self,
diff --git a/libs/community/langchain_community/llms/openllm.py b/libs/community/langchain_community/llms/openllm.py
index 746e12b64e..612c4d4e56 100644
--- a/libs/community/langchain_community/llms/openllm.py
+++ b/libs/community/langchain_community/llms/openllm.py
@@ -155,7 +155,7 @@ class OpenLLM(LLM):
             client = client_cls(server_url, timeout)
 
             super().__init__(
-                **{
+                **{  # type: ignore[arg-type]
                     "server_url": server_url,
                     "timeout": timeout,
                     "server_type": server_type,
@@ -180,7 +180,7 @@ class OpenLLM(LLM):
                 **llm_kwargs,
             )
             super().__init__(
-                **{
+                **{  # type: ignore[arg-type]
                     "model_name": model_name,
                     "model_id": model_id,
                     "embedded": embedded,
diff --git a/libs/community/langchain_community/llms/sparkllm.py b/libs/community/langchain_community/llms/sparkllm.py
index 467952f35c..c74abf21fd 100644
--- a/libs/community/langchain_community/llms/sparkllm.py
+++ b/libs/community/langchain_community/llms/sparkllm.py
@@ -274,10 +274,10 @@ class _SparkLLMClient:
             on_close=self.on_close,
             on_open=self.on_open,
         )
-        ws.messages = messages
-        ws.user_id = user_id
-        ws.model_kwargs = self.model_kwargs if model_kwargs is None else model_kwargs
-        ws.streaming = streaming
+        ws.messages = messages  # type: ignore[attr-defined]
+        ws.user_id = user_id  # type: ignore[attr-defined]
+        ws.model_kwargs = self.model_kwargs if model_kwargs is None else model_kwargs  # type: ignore[attr-defined]
+        ws.streaming = streaming  # type: ignore[attr-defined]
         ws.run_forever()
 
     def arun(
diff --git a/libs/community/langchain_community/llms/textgen.py b/libs/community/langchain_community/llms/textgen.py
index 9a752f844a..aa3a490fa2 100644
--- a/libs/community/langchain_community/llms/textgen.py
+++ b/libs/community/langchain_community/llms/textgen.py
@@ -330,13 +330,13 @@ class TextGen(LLM):
             result = websocket_client.recv()
             result = json.loads(result)
 
-            if result["event"] == "text_stream":
+            if result["event"] == "text_stream":  # type: ignore[call-overload, index]
                 chunk = GenerationChunk(
-                    text=result["text"],
+                    text=result["text"],  # type: ignore[call-overload, index]
                     generation_info=None,
                 )
                 yield chunk
-            elif result["event"] == "stream_end":
+            elif result["event"] == "stream_end":  # type: ignore[call-overload, index]
                 websocket_client.close()
                 return
 
@@ -403,13 +403,13 @@ class TextGen(LLM):
             result = websocket_client.recv()
             result = json.loads(result)
 
-            if result["event"] == "text_stream":
+            if result["event"] == "text_stream":  # type: ignore[call-overload, index]
                 chunk = GenerationChunk(
-                    text=result["text"],
+                    text=result["text"],  # type: ignore[call-overload, index]
                     generation_info=None,
                 )
                 yield chunk
-            elif result["event"] == "stream_end":
+            elif result["event"] == "stream_end":  # type: ignore[call-overload, index]
                 websocket_client.close()
                 return
diff --git a/libs/community/langchain_community/llms/titan_takeoff.py b/libs/community/langchain_community/llms/titan_takeoff.py
index 6df0d0a37e..5ff1f20a56 100644
--- a/libs/community/langchain_community/llms/titan_takeoff.py
+++ b/libs/community/langchain_community/llms/titan_takeoff.py
@@ -137,7 +137,7 @@ class TitanTakeoff(LLM):
             ImportError: If you haven't installed takeoff-client, you will get an ImportError.
                 To remedy run `pip install 'takeoff-client==0.4.0'`
         """
-        super().__init__(
+        super().__init__(  # type: ignore[call-arg]
             base_url=base_url, port=port, mgmt_port=mgmt_port, streaming=streaming
         )
         try:
diff --git a/libs/community/langchain_community/llms/vertexai.py b/libs/community/langchain_community/llms/vertexai.py
index 31fd77600f..0b027a195e 100644
--- a/libs/community/langchain_community/llms/vertexai.py
+++ b/libs/community/langchain_community/llms/vertexai.py
@@ -363,7 +363,7 @@ class VertexAI(_VertexAICommon, BaseLLM):
             generations.append(
                 [self._response_to_generation(r) for r in res.candidates]
             )
-        return LLMResult(generations=generations)
+        return LLMResult(generations=generations)  # type: ignore[arg-type]
 
     def _stream(
         self,
diff --git a/libs/community/langchain_community/llms/xinference.py b/libs/community/langchain_community/llms/xinference.py
index 4828132e6a..3e38fb5fd0 100644
--- a/libs/community/langchain_community/llms/xinference.py
+++ b/libs/community/langchain_community/llms/xinference.py
@@ -100,7 +100,7 @@ class Xinference(LLM):
         model_kwargs = model_kwargs or {}
 
         super().__init__(
-            **{
+            **{  # type: ignore[arg-type]
                 "server_url": server_url,
                 "model_uid": model_uid,
                 "model_kwargs": model_kwargs,
diff --git a/libs/community/langchain_community/retrievers/breebs.py b/libs/community/langchain_community/retrievers/breebs.py
index ed42fc8fd4..b9238db5b1 100644
--- a/libs/community/langchain_community/retrievers/breebs.py
+++ b/libs/community/langchain_community/retrievers/breebs.py
@@ -21,7 +21,7 @@ class BreebsRetriever(BaseRetriever):
     url = "https://breebs.promptbreeders.com/knowledge"
 
     def __init__(self, breeb_key: str):
-        super().__init__(breeb_key=breeb_key)
+        super().__init__(breeb_key=breeb_key)  # type: ignore[call-arg]
         self.breeb_key = breeb_key
 
     def _get_relevant_documents(
diff --git a/libs/community/langchain_community/retrievers/dria_index.py b/libs/community/langchain_community/retrievers/dria_index.py
index 5da93a804e..8f3e287d8e 100644
--- a/libs/community/langchain_community/retrievers/dria_index.py
+++ b/libs/community/langchain_community/retrievers/dria_index.py
@@ -23,7 +23,7 @@ class DriaRetriever(BaseRetriever):
             contract_id: The contract ID of the knowledge base to interact with.
""" api_wrapper = DriaAPIWrapper(api_key=api_key, contract_id=contract_id) - super().__init__(api_wrapper=api_wrapper, **kwargs) + super().__init__(api_wrapper=api_wrapper, **kwargs) # type: ignore[call-arg] def create_knowledge_base( self, diff --git a/libs/community/langchain_community/retrievers/thirdai_neuraldb.py b/libs/community/langchain_community/retrievers/thirdai_neuraldb.py index 83592a550b..e4a25aea4a 100644 --- a/libs/community/langchain_community/retrievers/thirdai_neuraldb.py +++ b/libs/community/langchain_community/retrievers/thirdai_neuraldb.py @@ -73,7 +73,7 @@ class NeuralDBRetriever(BaseRetriever): NeuralDBRetriever._verify_thirdai_library(thirdai_key) from thirdai import neural_db as ndb - return cls(thirdai_key=thirdai_key, db=ndb.NeuralDB(**model_kwargs)) + return cls(thirdai_key=thirdai_key, db=ndb.NeuralDB(**model_kwargs)) # type: ignore[arg-type] @classmethod def from_checkpoint( @@ -108,7 +108,7 @@ class NeuralDBRetriever(BaseRetriever): NeuralDBRetriever._verify_thirdai_library(thirdai_key) from thirdai import neural_db as ndb - return cls(thirdai_key=thirdai_key, db=ndb.NeuralDB.from_checkpoint(checkpoint)) + return cls(thirdai_key=thirdai_key, db=ndb.NeuralDB.from_checkpoint(checkpoint)) # type: ignore[arg-type] @root_validator() def validate_environments(cls, values: Dict) -> Dict: diff --git a/libs/community/langchain_community/tools/arxiv/tool.py b/libs/community/langchain_community/tools/arxiv/tool.py index 423b3f5cfc..5c8ca77f1b 100644 --- a/libs/community/langchain_community/tools/arxiv/tool.py +++ b/libs/community/langchain_community/tools/arxiv/tool.py @@ -27,7 +27,7 @@ class ArxivQueryRun(BaseTool): "from scientific articles on arxiv.org. " "Input should be a search query." ) - api_wrapper: ArxivAPIWrapper = Field(default_factory=ArxivAPIWrapper) + api_wrapper: ArxivAPIWrapper = Field(default_factory=ArxivAPIWrapper) # type: ignore[arg-type] args_schema: Type[BaseModel] = ArxivInput def _run( diff --git a/libs/community/langchain_community/tools/audio/huggingface_text_to_speech_inference.py b/libs/community/langchain_community/tools/audio/huggingface_text_to_speech_inference.py index c728f8ad22..9c16b08d1b 100644 --- a/libs/community/langchain_community/tools/audio/huggingface_text_to_speech_inference.py +++ b/libs/community/langchain_community/tools/audio/huggingface_text_to_speech_inference.py @@ -72,7 +72,7 @@ class HuggingFaceTextToSpeechModelInference(BaseTool): f"Invalid value for 'file_naming_func': {file_naming_func}" ) - super().__init__( + super().__init__( # type: ignore[call-arg] model=model, file_extension=file_extension, api_url=f"{self._HUGGINGFACE_API_URL_ROOT}/{model}", diff --git a/libs/community/langchain_community/tools/github/tool.py b/libs/community/langchain_community/tools/github/tool.py index 8925318363..ec9c3c6481 100644 --- a/libs/community/langchain_community/tools/github/tool.py +++ b/libs/community/langchain_community/tools/github/tool.py @@ -19,7 +19,7 @@ from langchain_community.utilities.github import GitHubAPIWrapper class GitHubAction(BaseTool): """Tool for interacting with the GitHub API.""" - api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper) + api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper) # type: ignore[arg-type] mode: str name: str = "" description: str = "" diff --git a/libs/community/langchain_community/tools/gitlab/tool.py b/libs/community/langchain_community/tools/gitlab/tool.py index 92ea8b98bf..2a2dd686b5 100644 --- 
a/libs/community/langchain_community/tools/gitlab/tool.py +++ b/libs/community/langchain_community/tools/gitlab/tool.py @@ -19,7 +19,7 @@ from langchain_community.utilities.gitlab import GitLabAPIWrapper class GitLabAction(BaseTool): """Tool for interacting with the GitLab API.""" - api_wrapper: GitLabAPIWrapper = Field(default_factory=GitLabAPIWrapper) + api_wrapper: GitLabAPIWrapper = Field(default_factory=GitLabAPIWrapper) # type: ignore[arg-type] mode: str name: str = "" description: str = "" diff --git a/libs/community/langchain_community/tools/gmail/base.py b/libs/community/langchain_community/tools/gmail/base.py index b96e16117f..12ea5a88a3 100644 --- a/libs/community/langchain_community/tools/gmail/base.py +++ b/libs/community/langchain_community/tools/gmail/base.py @@ -34,4 +34,4 @@ class GmailBaseTool(BaseTool): Returns: A tool. """ - return cls(service=api_resource) + return cls(service=api_resource) # type: ignore[call-arg] diff --git a/libs/community/langchain_community/tools/gmail/get_message.py b/libs/community/langchain_community/tools/gmail/get_message.py index 79b963453e..3be2db0853 100644 --- a/libs/community/langchain_community/tools/gmail/get_message.py +++ b/libs/community/langchain_community/tools/gmail/get_message.py @@ -53,10 +53,10 @@ class GmailGetMessage(GmailBaseTool): ctype = part.get_content_type() cdispo = str(part.get("Content-Disposition")) if ctype == "text/plain" and "attachment" not in cdispo: - message_body = part.get_payload(decode=True).decode("utf-8") + message_body = part.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr] break else: - message_body = email_msg.get_payload(decode=True).decode("utf-8") + message_body = email_msg.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr] body = clean_email_body(message_body) diff --git a/libs/community/langchain_community/tools/gmail/search.py b/libs/community/langchain_community/tools/gmail/search.py index 8c49db5e86..7cc93b6319 100644 --- a/libs/community/langchain_community/tools/gmail/search.py +++ b/libs/community/langchain_community/tools/gmail/search.py @@ -99,14 +99,14 @@ class GmailSearch(GmailBaseTool): cdispo = str(part.get("Content-Disposition")) if ctype == "text/plain" and "attachment" not in cdispo: try: - message_body = part.get_payload(decode=True).decode("utf-8") + message_body = part.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr] except UnicodeDecodeError: - message_body = part.get_payload(decode=True).decode( + message_body = part.get_payload(decode=True).decode( # type: ignore[union-attr] "latin-1" ) break else: - message_body = email_msg.get_payload(decode=True).decode("utf-8") + message_body = email_msg.get_payload(decode=True).decode("utf-8") # type: ignore[union-attr] body = clean_email_body(message_body) diff --git a/libs/community/langchain_community/tools/google_places/tool.py b/libs/community/langchain_community/tools/google_places/tool.py index 729d11a4f9..5c7eb3dcf5 100644 --- a/libs/community/langchain_community/tools/google_places/tool.py +++ b/libs/community/langchain_community/tools/google_places/tool.py @@ -31,7 +31,7 @@ class GooglePlacesTool(BaseTool): "discover addressed from ambiguous text. " "Input should be a search query." 
) - api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper) + api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper) # type: ignore[arg-type] args_schema: Type[BaseModel] = GooglePlacesSchema def _run( diff --git a/libs/community/langchain_community/tools/jira/tool.py b/libs/community/langchain_community/tools/jira/tool.py index dc57b13dc2..267c9944c5 100644 --- a/libs/community/langchain_community/tools/jira/tool.py +++ b/libs/community/langchain_community/tools/jira/tool.py @@ -30,7 +30,7 @@ from langchain_community.utilities.jira import JiraAPIWrapper class JiraAction(BaseTool): """Tool that queries the Atlassian Jira API.""" - api_wrapper: JiraAPIWrapper = Field(default_factory=JiraAPIWrapper) + api_wrapper: JiraAPIWrapper = Field(default_factory=JiraAPIWrapper) # type: ignore[arg-type] mode: str name: str = "" description: str = "" diff --git a/libs/community/langchain_community/tools/nuclia/tool.py b/libs/community/langchain_community/tools/nuclia/tool.py index 2f8bae73ff..dce32f5089 100644 --- a/libs/community/langchain_community/tools/nuclia/tool.py +++ b/libs/community/langchain_community/tools/nuclia/tool.py @@ -75,7 +75,7 @@ class NucliaUnderstandingAPI(BaseTool): else: self._config["NUA_KEY"] = key self._config["enable_ml"] = enable_ml - super().__init__() + super().__init__() # type: ignore[call-arg] def _run( self, diff --git a/libs/community/langchain_community/tools/openapi/utils/api_models.py b/libs/community/langchain_community/tools/openapi/utils/api_models.py index 38ae4afca1..968f85dfa6 100644 --- a/libs/community/langchain_community/tools/openapi/utils/api_models.py +++ b/libs/community/langchain_community/tools/openapi/utils/api_models.py @@ -530,7 +530,7 @@ class APIOperation(BaseModel): description=description or "", base_url=spec.base_url, path=path, - method=method, + method=method, # type: ignore[arg-type] properties=properties, request_body=api_request_body, ) diff --git a/libs/community/langchain_community/tools/openweathermap/tool.py b/libs/community/langchain_community/tools/openweathermap/tool.py index 0cd5b94001..d716b8098d 100644 --- a/libs/community/langchain_community/tools/openweathermap/tool.py +++ b/libs/community/langchain_community/tools/openweathermap/tool.py @@ -13,7 +13,7 @@ class OpenWeatherMapQueryRun(BaseTool): """Tool that queries the OpenWeatherMap API.""" api_wrapper: OpenWeatherMapAPIWrapper = Field( - default_factory=OpenWeatherMapAPIWrapper + default_factory=OpenWeatherMapAPIWrapper # type: ignore[arg-type] ) name: str = "open_weather_map" diff --git a/libs/community/langchain_community/tools/playwright/base.py b/libs/community/langchain_community/tools/playwright/base.py index 716e82907d..5fab5a2902 100644 --- a/libs/community/langchain_community/tools/playwright/base.py +++ b/libs/community/langchain_community/tools/playwright/base.py @@ -54,4 +54,4 @@ class BaseBrowserTool(BaseTool): ) -> BaseBrowserTool: """Instantiate the tool.""" lazy_import_playwright_browsers() - return cls(sync_browser=sync_browser, async_browser=async_browser) + return cls(sync_browser=sync_browser, async_browser=async_browser) # type: ignore[call-arg] diff --git a/libs/community/langchain_community/tools/pubmed/tool.py b/libs/community/langchain_community/tools/pubmed/tool.py index 44242379dd..7eda0fd3a5 100644 --- a/libs/community/langchain_community/tools/pubmed/tool.py +++ b/libs/community/langchain_community/tools/pubmed/tool.py @@ -18,7 +18,7 @@ class PubmedQueryRun(BaseTool): "from biomedical 
literature, MEDLINE, life science journals, and online books. " "Input should be a search query." ) - api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper) + api_wrapper: PubMedAPIWrapper = Field(default_factory=PubMedAPIWrapper) # type: ignore[arg-type] def _run( self, diff --git a/libs/community/langchain_community/tools/reddit_search/tool.py b/libs/community/langchain_community/tools/reddit_search/tool.py index 7c0da2a01e..e245286e84 100644 --- a/libs/community/langchain_community/tools/reddit_search/tool.py +++ b/libs/community/langchain_community/tools/reddit_search/tool.py @@ -42,7 +42,7 @@ class RedditSearchRun(BaseTool): "A tool that searches for posts on Reddit." "Useful when you need to know post information on a subreddit." ) - api_wrapper: RedditSearchAPIWrapper = Field(default_factory=RedditSearchAPIWrapper) + api_wrapper: RedditSearchAPIWrapper = Field(default_factory=RedditSearchAPIWrapper) # type: ignore[arg-type] args_schema: Type[BaseModel] = RedditSearchSchema def _run( diff --git a/libs/community/langchain_community/tools/scenexplain/tool.py b/libs/community/langchain_community/tools/scenexplain/tool.py index 5806cd7bb2..3e3639c4cf 100644 --- a/libs/community/langchain_community/tools/scenexplain/tool.py +++ b/libs/community/langchain_community/tools/scenexplain/tool.py @@ -23,7 +23,7 @@ class SceneXplainTool(BaseTool): "for an image. The input can be an image file of any format, and " "the output will be a text description that covers every detail of the image." ) - api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper) + api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper) # type: ignore[arg-type] def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None diff --git a/libs/community/langchain_community/tools/semanticscholar/tool.py b/libs/community/langchain_community/tools/semanticscholar/tool.py index 2faee2f77f..d9bf773ea9 100644 --- a/libs/community/langchain_community/tools/semanticscholar/tool.py +++ b/libs/community/langchain_community/tools/semanticscholar/tool.py @@ -26,7 +26,7 @@ class SemanticScholarQueryRun(BaseTool): "Input should be a search query." 
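A second pattern recurs throughout the tool modules above: `api_wrapper: SomeAPIWrapper = Field(default_factory=SomeAPIWrapper)` now needs `# type: ignore[arg-type]`. Pydantic declares `default_factory` as a zero-argument callable, while wrappers such as GitLabAPIWrapper and PubMedAPIWrapper declare required fields (API keys and endpoints) that are only filled in at runtime by validators reading environment variables. A self-contained sketch reproducing the shape, assuming a mypy setup that synthesizes pydantic `__init__` signatures (as the pydantic mypy plugin does, which the volume of call-arg errors in this diff implies); `ExampleAPIWrapper` and `ExampleTool` are invented names:

    from typing import Any, Dict
    from pydantic import BaseModel, Field, root_validator

    class ExampleAPIWrapper(BaseModel):
        api_key: str  # required as far as the type checker knows

        @root_validator(pre=True)
        def _resolve_key(cls, values: Dict[str, Any]) -> Dict[str, Any]:
            # At runtime the "required" field is defaulted from the
            # environment, so a zero-argument call succeeds.
            values.setdefault("api_key", "from-environment")
            return values

    class ExampleTool(BaseModel):
        # mypy sees a callable requiring `api_key` where a zero-argument
        # factory is expected, hence the suppression used in this diff.
        api_wrapper: ExampleAPIWrapper = Field(
            default_factory=ExampleAPIWrapper  # type: ignore[arg-type]
        )

Suppressing rather than loosening the wrapper types keeps the real constraint visible: the zero-argument call is only valid when the relevant environment variables are set.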
) api_wrapper: SemanticScholarAPIWrapper = Field( - default_factory=SemanticScholarAPIWrapper + default_factory=SemanticScholarAPIWrapper # type: ignore[arg-type] ) args_schema: Type[BaseModel] = SemantscholarInput diff --git a/libs/community/langchain_community/tools/spark_sql/tool.py b/libs/community/langchain_community/tools/spark_sql/tool.py index 9110d543fd..dacf36aa3b 100644 --- a/libs/community/langchain_community/tools/spark_sql/tool.py +++ b/libs/community/langchain_community/tools/spark_sql/tool.py @@ -97,7 +97,7 @@ class QueryCheckerTool(BaseSparkSQLTool, BaseTool): from langchain.chains.llm import LLMChain values["llm_chain"] = LLMChain( - llm=values.get("llm"), + llm=values.get("llm"), # type: ignore[arg-type] prompt=PromptTemplate( template=QUERY_CHECKER, input_variables=["query"] ), diff --git a/libs/community/langchain_community/tools/sql_database/tool.py b/libs/community/langchain_community/tools/sql_database/tool.py index fa055a5a14..f3e4ff0a75 100644 --- a/libs/community/langchain_community/tools/sql_database/tool.py +++ b/libs/community/langchain_community/tools/sql_database/tool.py @@ -122,7 +122,7 @@ class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool): from langchain.chains.llm import LLMChain values["llm_chain"] = LLMChain( - llm=values.get("llm"), + llm=values.get("llm"), # type: ignore[arg-type] prompt=PromptTemplate( template=QUERY_CHECKER, input_variables=["dialect", "query"] ), diff --git a/libs/community/langchain_community/tools/tavily_search/tool.py b/libs/community/langchain_community/tools/tavily_search/tool.py index 4eb1ff4b45..1d05387927 100644 --- a/libs/community/langchain_community/tools/tavily_search/tool.py +++ b/libs/community/langchain_community/tools/tavily_search/tool.py @@ -27,7 +27,7 @@ class TavilySearchResults(BaseTool): "Useful for when you need to answer questions about current events. " "Input should be a search query." ) - api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper) + api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper) # type: ignore[arg-type] max_results: int = 5 args_schema: Type[BaseModel] = TavilyInput @@ -70,7 +70,7 @@ class TavilyAnswer(BaseTool): "Input should be a search query. " "This returns only the answer - not the original source data." ) - api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper) + api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper) # type: ignore[arg-type] args_schema: Type[BaseModel] = TavilyInput def _run( diff --git a/libs/community/langchain_community/tools/zapier/tool.py b/libs/community/langchain_community/tools/zapier/tool.py index 22a25b13a9..7330d24579 100644 --- a/libs/community/langchain_community/tools/zapier/tool.py +++ b/libs/community/langchain_community/tools/zapier/tool.py @@ -95,7 +95,7 @@ class ZapierNLARunAction(BaseTool): """ - api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) + api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) # type: ignore[arg-type] action_id: str params: Optional[dict] = None base_prompt: str = BASE_ZAPIER_TOOL_PROMPT @@ -174,7 +174,7 @@ class ZapierNLAListActions(BaseTool): description: str = BASE_ZAPIER_TOOL_PROMPT + ( "This tool returns a list of the user's exposed actions." 
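The dependency changes below are the root cause of all these suppressions: pyproject.toml moves `mypy` from `^0.991` to `^1`, which poetry.lock resolves to mypy 1.10.0 along with its newer `mypy-extensions` (>=1.0.0) and `typing-extensions` (>=4.1.0) requirements, and mypy 1.x reports stricter diagnostics than 0.991 did. For files that accumulate many identical suppressions, mypy also offers a file-scoped alternative; this is a generic mypy feature shown for contrast, not something this PR adopts:

    # mypy: disable-error-code="call-arg, arg-type"
    # Placed at the top of a module, the line above disables the listed
    # error codes for the entire file. The PR instead suppresses
    # line-by-line, which keeps checking intact for code added later.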
) - api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) + api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) # type: ignore[arg-type] def _run( self, diff --git a/libs/community/langchain_community/utilities/redis.py b/libs/community/langchain_community/utilities/redis.py index 5b613667b6..3840c7a63d 100644 --- a/libs/community/langchain_community/utilities/redis.py +++ b/libs/community/langchain_community/utilities/redis.py @@ -214,4 +214,4 @@ def _check_for_cluster(redis_client: RedisType) -> bool: def _redis_cluster_client(redis_url: str, **kwargs: Any) -> RedisType: from redis.cluster import RedisCluster - return RedisCluster.from_url(redis_url, **kwargs) + return RedisCluster.from_url(redis_url, **kwargs) # type: ignore[return-value] diff --git a/libs/community/poetry.lock b/libs/community/poetry.lock index 8a556b044e..c9d11c44e9 100644 --- a/libs/community/poetry.lock +++ b/libs/community/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "aenum" @@ -3455,6 +3455,7 @@ files = [ {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:227b178b22a7f91ae88525810441791b1ca1fc71c86f03190911793be15cec3d"}, {file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:780eb6383fbae12afa819ef676fc93e1548ae4b076c004a393af26a04b460742"}, {file = "jq-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08ded6467f4ef89fec35b2bf310f210f8cd13fbd9d80e521500889edf8d22441"}, + {file = "jq-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49e44ed677713f4115bd5bf2dbae23baa4cd503be350e12a1c1f506b0687848f"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:984f33862af285ad3e41e23179ac4795f1701822473e1a26bf87ff023e5a89ea"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42264fafc6166efb5611b5d4cb01058887d050a6c19334f6a3f8a13bb369df5"}, {file = "jq-1.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a67154f150aaf76cc1294032ed588436eb002097dd4fd1e283824bf753a05080"}, @@ -4740,52 +4741,49 @@ para = ">=0.0.1" [[package]] name = "mypy" -version = "0.991" +version = "1.10.0" description = "Optional static typing for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"}, - {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"}, - {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"}, - {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"}, - {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"}, - {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"}, - {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"}, - {file = 
"mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"}, - {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"}, - {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"}, - {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"}, - {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"}, - {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"}, - {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"}, - {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"}, - {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"}, - {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"}, - {file = "mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"}, - {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"}, - {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"}, - {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"}, - {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"}, - {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"}, - {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"}, - {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"}, - {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"}, - {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"}, - {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"}, - {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"}, - {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"}, -] - -[package.dependencies] -mypy-extensions = ">=0.4.3" + {file = "mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2"}, + {file = "mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99"}, + {file = "mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2"}, + {file = "mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9"}, + {file = "mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1"}, + {file = "mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee"}, + {file = "mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de"}, + {file = "mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7"}, + {file = "mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b"}, + {file = "mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30"}, + {file = "mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e"}, + {file = "mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5"}, + {file = "mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9fd50226364cd2737351c79807775136b0abe084433b55b2e29181a4c3c878c0"}, + {file = "mypy-1.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f90cff89eea89273727d8783fef5d4a934be2fdca11b47def50cf5d311aff727"}, + {file = "mypy-1.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fcfc70599efde5c67862a07a1aaf50e55bce629ace26bb19dc17cece5dd31ca4"}, + {file = "mypy-1.10.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:075cbf81f3e134eadaf247de187bd604748171d6b79736fa9b6c9685b4083061"}, + {file = "mypy-1.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:3f298531bca95ff615b6e9f2fc0333aae27fa48052903a0ac90215021cdcfa4f"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa7ef5244615a2523b56c034becde4e9e3f9b034854c93639adb667ec9ec2976"}, + {file = "mypy-1.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3236a4c8f535a0631f85f5fcdffba71c7feeef76a6002fcba7c1a8e57c8be1ec"}, + {file = "mypy-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a2b5cdbb5dd35aa08ea9114436e0d79aceb2f38e32c21684dcf8e24e1e92821"}, + {file = "mypy-1.10.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92f93b21c0fe73dc00abf91022234c79d793318b8a96faac147cd579c1671746"}, + {file = "mypy-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:28d0e038361b45f099cc086d9dd99c15ff14d0188f44ac883010e172ce86c38a"}, + {file = "mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee"}, + 
{file = "mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=3.10" +typing-extensions = ">=4.1.0" [package.extras] dmypy = ["psutil (>=4.0)"] install-types = ["pip"] -python2 = ["typed-ast (>=1.4.0,<2)"] +mypyc = ["setuptools (>=50)"] reports = ["lxml"] [[package]] @@ -6107,8 +6105,6 @@ files = [ {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, @@ -6151,7 +6147,6 @@ files = [ {file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"}, {file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"}, @@ -6160,8 +6155,6 @@ files = [ {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"}, {file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"}, - {file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"}, {file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"}, @@ -7159,7 +7152,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -10092,4 +10084,4 @@ extended-testing = ["aiosqlite", "aleph-alpha-client", "anthropic", "arxiv", "as [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "657797396fff40d21216962c43171ba5605725e1047339443d7f346e2f5f0b80" +content-hash = "66b00eea10e05312fcafa5f68e4b863942c344051bdd93b575b0b26fce9fce21" diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index b1ba5b4f7b..00328f63f6 100644 --- a/libs/community/pyproject.toml +++ b/libs/community/pyproject.toml @@ -174,7 +174,7 @@ optional = true ruff = "^0.1.5" [tool.poetry.group.typing.dependencies] -mypy = "^0.991" +mypy = "^1" types-pyyaml = "^6.0.12.2" types-requests = "^2.28.11.5" types-toml = "^0.10.8.1" diff --git a/libs/community/tests/integration_tests/chat_models/test_anthropic.py b/libs/community/tests/integration_tests/chat_models/test_anthropic.py index c01b5ff298..bbee040370 100644 --- a/libs/community/tests/integration_tests/chat_models/test_anthropic.py +++ b/libs/community/tests/integration_tests/chat_models/test_anthropic.py @@ -15,7 +15,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler @pytest.mark.scheduled def test_anthropic_call() -> None: """Test valid call to anthropic.""" - chat = ChatAnthropic(model="test") + chat = ChatAnthropic(model="test") # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, AIMessage) @@ -25,7 +25,7 @@ def test_anthropic_call() -> None: @pytest.mark.scheduled def test_anthropic_generate() -> None: """Test generate method of anthropic.""" - chat = ChatAnthropic(model="test") + chat = ChatAnthropic(model="test") # type: ignore[call-arg] chat_messages: List[List[BaseMessage]] = [ [HumanMessage(content="How many toes do dogs have?")] ] @@ -42,7 +42,7 @@ def test_anthropic_generate() -> None: @pytest.mark.scheduled def test_anthropic_streaming() -> None: """Test streaming tokens from anthropic.""" - chat = ChatAnthropic(model="test", streaming=True) + chat = ChatAnthropic(model="test", 
streaming=True) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, AIMessage) @@ -54,7 +54,7 @@ def test_anthropic_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatAnthropic( + chat = ChatAnthropic( # type: ignore[call-arg] model="test", streaming=True, callback_manager=callback_manager, @@ -70,7 +70,7 @@ async def test_anthropic_async_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatAnthropic( + chat = ChatAnthropic( # type: ignore[call-arg] model="test", streaming=True, callback_manager=callback_manager, diff --git a/libs/community/tests/integration_tests/chat_models/test_azure_openai.py b/libs/community/tests/integration_tests/chat_models/test_azure_openai.py index 7d3c3cfe8c..03362273aa 100644 --- a/libs/community/tests/integration_tests/chat_models/test_azure_openai.py +++ b/libs/community/tests/integration_tests/chat_models/test_azure_openai.py @@ -20,7 +20,7 @@ DEPLOYMENT_NAME = os.environ.get( def _get_llm(**kwargs: Any) -> AzureChatOpenAI: - return AzureChatOpenAI( + return AzureChatOpenAI( # type: ignore[call-arg] deployment_name=DEPLOYMENT_NAME, openai_api_version=OPENAI_API_VERSION, azure_endpoint=OPENAI_API_BASE, diff --git a/libs/community/tests/integration_tests/chat_models/test_baichuan.py b/libs/community/tests/integration_tests/chat_models/test_baichuan.py index a44e0c94db..62008a8ab2 100644 --- a/libs/community/tests/integration_tests/chat_models/test_baichuan.py +++ b/libs/community/tests/integration_tests/chat_models/test_baichuan.py @@ -23,7 +23,7 @@ def test_chat_baichuan_default_non_streaming() -> None: def test_chat_baichuan_turbo() -> None: - chat = ChatBaichuan(model="Baichuan2-Turbo", streaming=True) + chat = ChatBaichuan(model="Baichuan2-Turbo", streaming=True) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, AIMessage) @@ -31,7 +31,7 @@ def test_chat_baichuan_turbo() -> None: def test_chat_baichuan_turbo_non_streaming() -> None: - chat = ChatBaichuan(model="Baichuan2-Turbo") + chat = ChatBaichuan(model="Baichuan2-Turbo") # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, AIMessage) diff --git a/libs/community/tests/integration_tests/chat_models/test_bedrock.py b/libs/community/tests/integration_tests/chat_models/test_bedrock.py index d1d5399bb8..54a000668e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_bedrock.py +++ b/libs/community/tests/integration_tests/chat_models/test_bedrock.py @@ -17,7 +17,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler @pytest.fixture def chat() -> BedrockChat: - return BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0}) + return BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0}) # type: ignore[call-arg] @pytest.mark.scheduled @@ -63,7 +63,7 @@ def test_chat_bedrock_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = BedrockChat( + chat = 
BedrockChat( # type: ignore[call-arg] model_id="anthropic.claude-v2", streaming=True, callback_manager=callback_manager, @@ -92,7 +92,7 @@ def test_chat_bedrock_streaming_generation_info() -> None: callback = _FakeCallback() callback_manager = CallbackManager([callback]) - chat = BedrockChat( + chat = BedrockChat( # type: ignore[call-arg] model_id="anthropic.claude-v2", callback_manager=callback_manager, ) diff --git a/libs/community/tests/integration_tests/chat_models/test_coze.py b/libs/community/tests/integration_tests/chat_models/test_coze.py index 6ce07bce7d..7eaaa86d5e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_coze.py +++ b/libs/community/tests/integration_tests/chat_models/test_coze.py @@ -9,7 +9,7 @@ from langchain_community.chat_models.coze import ChatCoze def test_chat_coze_default() -> None: chat = ChatCoze( coze_api_base="https://api.coze.com", - coze_api_key="pat_...", + coze_api_key="pat_...", # type: ignore[arg-type] bot_id="7....", user="123", conversation_id="", @@ -24,7 +24,7 @@ def test_chat_coze_default() -> None: def test_chat_coze_default_non_streaming() -> None: chat = ChatCoze( coze_api_base="https://api.coze.com", - coze_api_key="pat_...", + coze_api_key="pat_...", # type: ignore[arg-type] bot_id="7....", user="123", conversation_id="", diff --git a/libs/community/tests/integration_tests/chat_models/test_dappier.py b/libs/community/tests/integration_tests/chat_models/test_dappier.py index 2fcfe1f68e..2a0c8527a7 100644 --- a/libs/community/tests/integration_tests/chat_models/test_dappier.py +++ b/libs/community/tests/integration_tests/chat_models/test_dappier.py @@ -12,7 +12,7 @@ from langchain_community.chat_models.dappier import ( @pytest.mark.scheduled def test_dappier_chat() -> None: """Test ChatDappierAI wrapper.""" - chat = ChatDappierAI( + chat = ChatDappierAI( # type: ignore[call-arg] dappier_endpoint="https://api.dappier.com/app/datamodelconversation", dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt", ) @@ -25,7 +25,7 @@ def test_dappier_chat() -> None: @pytest.mark.scheduled def test_dappier_generate() -> None: """Test generate method of Dappier AI.""" - chat = ChatDappierAI( + chat = ChatDappierAI( # type: ignore[call-arg] dappier_endpoint="https://api.dappier.com/app/datamodelconversation", dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt", ) @@ -45,7 +45,7 @@ def test_dappier_generate() -> None: @pytest.mark.scheduled async def test_dappier_agenerate() -> None: """Test async generation.""" - chat = ChatDappierAI( + chat = ChatDappierAI( # type: ignore[call-arg] dappier_endpoint="https://api.dappier.com/app/datamodelconversation", dappier_model="dm_01hpsxyfm2fwdt2zet9cg6fdxt", ) diff --git a/libs/community/tests/integration_tests/chat_models/test_edenai.py b/libs/community/tests/integration_tests/chat_models/test_edenai.py index e699da6ef2..52f51d022b 100644 --- a/libs/community/tests/integration_tests/chat_models/test_edenai.py +++ b/libs/community/tests/integration_tests/chat_models/test_edenai.py @@ -13,7 +13,7 @@ from langchain_community.chat_models.edenai import ( @pytest.mark.scheduled def test_chat_edenai() -> None: """Test ChatEdenAI wrapper.""" - chat = ChatEdenAI( + chat = ChatEdenAI( # type: ignore[call-arg] provider="openai", model="gpt-3.5-turbo", temperature=0, max_tokens=1000 ) message = HumanMessage(content="Who are you ?") @@ -25,7 +25,7 @@ def test_chat_edenai() -> None: @pytest.mark.scheduled def test_edenai_generate() -> None: """Test generate method of edenai.""" - chat = ChatEdenAI(provider="google") + chat = 
ChatEdenAI(provider="google") # type: ignore[call-arg] chat_messages: List[List[BaseMessage]] = [ [HumanMessage(content="What is the meaning of life?")] ] @@ -42,7 +42,7 @@ def test_edenai_generate() -> None: @pytest.mark.scheduled async def test_edenai_async_generate() -> None: """Test async generation.""" - chat = ChatEdenAI(provider="google", max_tokens=50) + chat = ChatEdenAI(provider="google", max_tokens=50) # type: ignore[call-arg] message = HumanMessage(content="Hello") result: LLMResult = await chat.agenerate([[message], [message]]) assert isinstance(result, LLMResult) @@ -55,7 +55,7 @@ async def test_edenai_async_generate() -> None: @pytest.mark.scheduled def test_edenai_streaming() -> None: """Test streaming EdenAI chat.""" - llm = ChatEdenAI(provider="openai", max_tokens=50) + llm = ChatEdenAI(provider="openai", max_tokens=50) # type: ignore[call-arg] for chunk in llm.stream("Generate a high fantasy story."): assert isinstance(chunk.content, str) @@ -64,7 +64,7 @@ def test_edenai_streaming() -> None: @pytest.mark.scheduled async def test_edenai_astream() -> None: """Test streaming from EdenAI.""" - llm = ChatEdenAI(provider="openai", max_tokens=50) + llm = ChatEdenAI(provider="openai", max_tokens=50) # type: ignore[call-arg] async for token in llm.astream("Generate a high fantasy story."): assert isinstance(token.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_google_palm.py b/libs/community/tests/integration_tests/chat_models/test_google_palm.py index 11cf35877c..e0a879bbd0 100644 --- a/libs/community/tests/integration_tests/chat_models/test_google_palm.py +++ b/libs/community/tests/integration_tests/chat_models/test_google_palm.py @@ -12,7 +12,7 @@ from langchain_community.chat_models import ChatGooglePalm def test_chat_google_palm() -> None: """Test Google PaLM Chat API wrapper.""" - chat = ChatGooglePalm() + chat = ChatGooglePalm() # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, BaseMessage) @@ -21,7 +21,7 @@ def test_chat_google_palm() -> None: def test_chat_google_palm_system_message() -> None: """Test Google PaLM Chat API wrapper with system message.""" - chat = ChatGooglePalm() + chat = ChatGooglePalm() # type: ignore[call-arg] system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat.invoke([system_message, human_message]) @@ -31,7 +31,7 @@ def test_chat_google_palm_system_message() -> None: def test_chat_google_palm_generate() -> None: """Test Google PaLM Chat API wrapper with generate.""" - chat = ChatGooglePalm(n=2, temperature=1.0) + chat = ChatGooglePalm(n=2, temperature=1.0) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) @@ -48,7 +48,7 @@ def test_chat_google_palm_multiple_completions() -> None: """Test Google PaLM Chat API wrapper with multiple completions.""" # The API de-dupes duplicate responses, so set temperature higher. This # could be a flakey test though... 
- chat = ChatGooglePalm(n=5, temperature=1.0) + chat = ChatGooglePalm(n=5, temperature=1.0) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat._generate([message]) assert isinstance(response, ChatResult) @@ -60,7 +60,7 @@ def test_chat_google_palm_multiple_completions() -> None: async def test_async_chat_google_palm() -> None: """Test async generation.""" - chat = ChatGooglePalm(n=2, temperature=1.0) + chat = ChatGooglePalm(n=2, temperature=1.0) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) diff --git a/libs/community/tests/integration_tests/chat_models/test_gpt_router.py b/libs/community/tests/integration_tests/chat_models/test_gpt_router.py index d98ba6fe4e..0b5230e98c 100644 --- a/libs/community/tests/integration_tests/chat_models/test_gpt_router.py +++ b/libs/community/tests/integration_tests/chat_models/test_gpt_router.py @@ -16,9 +16,9 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_api_key_is_string() -> None: - gpt_router = GPTRouter( + gpt_router = GPTRouter( # type: ignore[call-arg] gpt_router_api_base="https://example.com", - gpt_router_api_key="secret-api-key", + gpt_router_api_key="secret-api-key", # type: ignore[arg-type] ) assert isinstance(gpt_router.gpt_router_api_key, SecretStr) @@ -26,9 +26,9 @@ def test_api_key_is_string() -> None: def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: - gpt_router = GPTRouter( + gpt_router = GPTRouter( # type: ignore[call-arg] gpt_router_api_base="https://example.com", - gpt_router_api_key="secret-api-key", + gpt_router_api_key="secret-api-key", # type: ignore[arg-type] ) print(gpt_router.gpt_router_api_key, end="") # noqa: T201 captured = capsys.readouterr() diff --git a/libs/community/tests/integration_tests/chat_models/test_jinachat.py b/libs/community/tests/integration_tests/chat_models/test_jinachat.py index 0865cfb0f1..a43b955d7e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_jinachat.py +++ b/libs/community/tests/integration_tests/chat_models/test_jinachat.py @@ -14,7 +14,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_jinachat_api_key_is_secret_string() -> None: - llm = JinaChat(jinachat_api_key="secret-api-key") + llm = JinaChat(jinachat_api_key="secret-api-key") # type: ignore[arg-type, call-arg] assert isinstance(llm.jinachat_api_key, SecretStr) @@ -23,7 +23,7 @@ def test_jinachat_api_key_masked_when_passed_from_env( ) -> None: """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("JINACHAT_API_KEY", "secret-api-key") - llm = JinaChat() + llm = JinaChat() # type: ignore[call-arg] print(llm.jinachat_api_key, end="") # noqa: T201 captured = capsys.readouterr() @@ -34,7 +34,7 @@ def test_jinachat_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: """Test initialization with an API key provided via the initializer""" - llm = JinaChat(jinachat_api_key="secret-api-key") + llm = JinaChat(jinachat_api_key="secret-api-key") # type: ignore[arg-type, call-arg] print(llm.jinachat_api_key, end="") # noqa: T201 captured = capsys.readouterr() @@ -43,13 +43,13 @@ def test_jinachat_api_key_masked_when_passed_via_constructor( def test_uses_actual_secret_value_from_secretstr() -> None: """Test that actual secret is retrieved using `.get_secret_value()`.""" - llm = 
JinaChat(jinachat_api_key="secret-api-key") + llm = JinaChat(jinachat_api_key="secret-api-key") # type: ignore[arg-type, call-arg] assert cast(SecretStr, llm.jinachat_api_key).get_secret_value() == "secret-api-key" def test_jinachat() -> None: """Test JinaChat wrapper.""" - chat = JinaChat(max_tokens=10) + chat = JinaChat(max_tokens=10) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, BaseMessage) @@ -58,7 +58,7 @@ def test_jinachat() -> None: def test_jinachat_system_message() -> None: """Test JinaChat wrapper with system message.""" - chat = JinaChat(max_tokens=10) + chat = JinaChat(max_tokens=10) # type: ignore[call-arg] system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat.invoke([system_message, human_message]) @@ -68,7 +68,7 @@ def test_jinachat_system_message() -> None: def test_jinachat_generate() -> None: """Test JinaChat wrapper with generate.""" - chat = JinaChat(max_tokens=10) + chat = JinaChat(max_tokens=10) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) @@ -85,7 +85,7 @@ def test_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = JinaChat( + chat = JinaChat( # type: ignore[call-arg] max_tokens=10, streaming=True, temperature=0, @@ -100,7 +100,7 @@ def test_jinachat_streaming() -> None: async def test_async_jinachat() -> None: """Test async generation.""" - chat = JinaChat(max_tokens=102) + chat = JinaChat(max_tokens=102) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) @@ -117,7 +117,7 @@ async def test_async_jinachat_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = JinaChat( + chat = JinaChat( # type: ignore[call-arg] max_tokens=10, streaming=True, temperature=0, @@ -140,18 +140,18 @@ async def test_async_jinachat_streaming() -> None: def test_jinachat_extra_kwargs() -> None: """Test extra kwargs to chat openai.""" # Check that foo is saved in extra_kwargs. - llm = JinaChat(foo=3, max_tokens=10) + llm = JinaChat(foo=3, max_tokens=10) # type: ignore[call-arg] assert llm.max_tokens == 10 assert llm.model_kwargs == {"foo": 3} # Test that if extra_kwargs are provided, they are added to it. 
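Two patterns account for most of the test-suite suppressions in this region. First, fields declared as `SecretStr` accept plain strings at runtime because pydantic coerces them, but the declared type makes a string literal an `arg-type` error. Second, the "extra kwargs" tests deliberately pass unknown keyword arguments that a pre-validator reroutes into `model_kwargs`, which a synthesized `__init__` rejects as `call-arg`. A sketch of both behaviors under the same assumptions as above (pydantic v1 API, pydantic mypy plugin); `ExampleChat` is an invented class:

    from typing import Any, Dict
    from pydantic import BaseModel, SecretStr, root_validator

    class ExampleChat(BaseModel):
        api_key: SecretStr
        max_tokens: int = 256
        model_kwargs: Dict[str, Any] = {}

        @root_validator(pre=True)
        def _collect_extra_kwargs(cls, values: Dict[str, Any]) -> Dict[str, Any]:
            # Move anything that is not a declared field into model_kwargs,
            # mirroring the behavior the JinaChat/ChatOpenAI tests assert.
            extras = {k: values.pop(k) for k in list(values) if k not in cls.__fields__}
            values["model_kwargs"] = {**values.get("model_kwargs", {}), **extras}
            return values

    # Runtime: the plain string is coerced to SecretStr and `foo` lands in
    # model_kwargs; mypy: arg-type for the string, call-arg for `foo`.
    llm = ExampleChat(api_key="secret-api-key", foo=3, max_tokens=10)  # type: ignore[arg-type, call-arg]
    assert llm.api_key.get_secret_value() == "secret-api-key"
    assert llm.model_kwargs == {"foo": 3}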
- llm = JinaChat(foo=3, model_kwargs={"bar": 2}) + llm = JinaChat(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg] assert llm.model_kwargs == {"foo": 3, "bar": 2} # Test that if provided twice it errors with pytest.raises(ValueError): - JinaChat(foo=3, model_kwargs={"foo": 2}) + JinaChat(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg] # Test that if explicit param is specified in kwargs it errors with pytest.raises(ValueError): - JinaChat(model_kwargs={"temperature": 0.2}) + JinaChat(model_kwargs={"temperature": 0.2}) # type: ignore[call-arg] diff --git a/libs/community/tests/integration_tests/chat_models/test_kinetica.py b/libs/community/tests/integration_tests/chat_models/test_kinetica.py index 05f55e7a6b..8c8466bd7e 100644 --- a/libs/community/tests/integration_tests/chat_models/test_kinetica.py +++ b/libs/community/tests/integration_tests/chat_models/test_kinetica.py @@ -74,7 +74,7 @@ class TestChatKinetica: """Create an LLM instance.""" import gpudb - kinetica_llm = ChatKinetica() + kinetica_llm = ChatKinetica() # type: ignore[call-arg] LOG.info(kinetica_llm._identifying_params) assert isinstance(kinetica_llm.kdbc, gpudb.GPUdb) @@ -83,7 +83,7 @@ class TestChatKinetica: @pytest.mark.vcr() def test_load_context(self) -> None: """Load the LLM context from the DB.""" - kinetica_llm = ChatKinetica() + kinetica_llm = ChatKinetica() # type: ignore[call-arg] ctx_messages = kinetica_llm.load_messages_from_context(self.context_name) system_message = ctx_messages[0] @@ -96,7 +96,7 @@ class TestChatKinetica: @pytest.mark.vcr() def test_generate(self) -> None: """Generate SQL from a chain.""" - kinetica_llm = ChatKinetica() + kinetica_llm = ChatKinetica() # type: ignore[call-arg] # create chain ctx_messages = kinetica_llm.load_messages_from_context(self.context_name) @@ -113,7 +113,7 @@ class TestChatKinetica: @pytest.mark.vcr() def test_full_chain(self) -> None: """Generate SQL from a chain and execute the query.""" - kinetica_llm = ChatKinetica() + kinetica_llm = ChatKinetica() # type: ignore[call-arg] # create chain ctx_messages = kinetica_llm.load_messages_from_context(self.context_name) diff --git a/libs/community/tests/integration_tests/chat_models/test_konko.py b/libs/community/tests/integration_tests/chat_models/test_konko.py index 94f1b652b3..a4b9977de3 100644 --- a/libs/community/tests/integration_tests/chat_models/test_konko.py +++ b/libs/community/tests/integration_tests/chat_models/test_konko.py @@ -192,15 +192,15 @@ def test_konko_streaming_param_validation_test() -> None: def test_konko_additional_args_test() -> None: """Evaluate extra arguments for ChatKonko.""" - chat_instance = ChatKonko(extra=3, max_tokens=10) + chat_instance = ChatKonko(extra=3, max_tokens=10) # type: ignore[call-arg] assert chat_instance.max_tokens == 10 assert chat_instance.model_kwargs == {"extra": 3} - chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) + chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) # type: ignore[call-arg] assert chat_instance.model_kwargs == {"extra": 3, "addition": 2} with pytest.raises(ValueError): - ChatKonko(extra=3, model_kwargs={"extra": 2}) + ChatKonko(extra=3, model_kwargs={"extra": 2}) # type: ignore[call-arg] with pytest.raises(ValueError): ChatKonko(model_kwargs={"temperature": 0.2}) diff --git a/libs/community/tests/integration_tests/chat_models/test_litellm.py b/libs/community/tests/integration_tests/chat_models/test_litellm.py index 1d29c5c7ed..e7c4cbd85e 100644 --- 
a/libs/community/tests/integration_tests/chat_models/test_litellm.py +++ b/libs/community/tests/integration_tests/chat_models/test_litellm.py @@ -13,7 +13,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_litellm_call() -> None: """Test valid call to litellm.""" - chat = ChatLiteLLM( + chat = ChatLiteLLM( # type: ignore[call-arg] model="test", ) message = HumanMessage(content="Hello") @@ -24,7 +24,7 @@ def test_litellm_call() -> None: def test_litellm_generate() -> None: """Test generate method of anthropic.""" - chat = ChatLiteLLM(model="test") + chat = ChatLiteLLM(model="test") # type: ignore[call-arg] chat_messages: List[List[BaseMessage]] = [ [HumanMessage(content="How many toes do dogs have?")] ] @@ -40,7 +40,7 @@ def test_litellm_generate() -> None: def test_litellm_streaming() -> None: """Test streaming tokens from anthropic.""" - chat = ChatLiteLLM(model="test", streaming=True) + chat = ChatLiteLLM(model="test", streaming=True) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, AIMessage) @@ -51,7 +51,7 @@ def test_litellm_streaming_callback() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatLiteLLM( + chat = ChatLiteLLM( # type: ignore[call-arg] model="test", streaming=True, callback_manager=callback_manager, diff --git a/libs/community/tests/integration_tests/chat_models/test_openai.py b/libs/community/tests/integration_tests/chat_models/test_openai.py index dc5c6639ee..da6d382361 100644 --- a/libs/community/tests/integration_tests/chat_models/test_openai.py +++ b/libs/community/tests/integration_tests/chat_models/test_openai.py @@ -42,7 +42,7 @@ def test_chat_openai_model() -> None: """Test ChatOpenAI wrapper handles model_name.""" chat = ChatOpenAI(model="foo") assert chat.model_name == "foo" - chat = ChatOpenAI(model_name="bar") + chat = ChatOpenAI(model_name="bar") # type: ignore[call-arg] assert chat.model_name == "bar" @@ -243,17 +243,17 @@ async def test_async_chat_openai_bind_functions() -> None: def test_chat_openai_extra_kwargs() -> None: """Test extra kwargs to chat openai.""" # Check that foo is saved in extra_kwargs. - llm = ChatOpenAI(foo=3, max_tokens=10) + llm = ChatOpenAI(foo=3, max_tokens=10) # type: ignore[call-arg] assert llm.max_tokens == 10 assert llm.model_kwargs == {"foo": 3} # Test that if extra_kwargs are provided, they are added to it. 
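The PAI-EAS tests below pass `os.getenv(...)` results straight into string-typed constructor fields; `os.getenv` returns `Optional[str]`, hence the `arg-type` ignores on `eas_service_url` and `eas_service_token`. A hypothetical helper that narrows the optional instead (invented name, not something the PR introduces):

    import os

    def require_env(name: str) -> str:
        # Narrow Optional[str] to str, failing fast when the variable
        # is missing instead of passing None into the constructor.
        value = os.getenv(name)
        if value is None:
            raise RuntimeError(f"{name} is not set")
        return value

The per-line ignores keep the tests' existing behavior: with an unset variable they still construct the endpoint with None and fail later, at call time.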
- llm = ChatOpenAI(foo=3, model_kwargs={"bar": 2}) + llm = ChatOpenAI(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg] assert llm.model_kwargs == {"foo": 3, "bar": 2} # Test that if provided twice it errors with pytest.raises(ValueError): - ChatOpenAI(foo=3, model_kwargs={"foo": 2}) + ChatOpenAI(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg] # Test that if explicit param is specified in kwargs it errors with pytest.raises(ValueError): diff --git a/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py b/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py index dc114b6be7..8371503931 100644 --- a/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py +++ b/libs/community/tests/integration_tests/chat_models/test_pai_eas_chat_endpoint.py @@ -11,8 +11,8 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_pai_eas_call() -> None: chat = PaiEasChatEndpoint( - eas_service_url=os.getenv("EAS_SERVICE_URL"), - eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), + eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type] + eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type] ) response = chat.invoke([HumanMessage(content="Say foo:")]) assert isinstance(response, BaseMessage) @@ -22,8 +22,8 @@ def test_pai_eas_call() -> None: def test_multiple_history() -> None: """Tests multiple history works.""" chat = PaiEasChatEndpoint( - eas_service_url=os.getenv("EAS_SERVICE_URL"), - eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), + eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type] + eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type] ) response = chat.invoke( @@ -40,8 +40,8 @@ def test_multiple_history() -> None: def test_stream() -> None: """Test that stream works.""" chat = PaiEasChatEndpoint( - eas_service_url=os.getenv("EAS_SERVICE_URL"), - eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), + eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type] + eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type] streaming=True, ) callback_handler = FakeCallbackHandler() @@ -62,8 +62,8 @@ def test_stream() -> None: def test_multiple_messages() -> None: """Tests multiple messages works.""" chat = PaiEasChatEndpoint( - eas_service_url=os.getenv("EAS_SERVICE_URL"), - eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), + eas_service_url=os.getenv("EAS_SERVICE_URL"), # type: ignore[arg-type] + eas_service_token=os.getenv("EAS_SERVICE_TOKEN"), # type: ignore[arg-type] ) message = HumanMessage(content="Hi, how are you.") response = chat.generate([[message], [message]]) diff --git a/libs/community/tests/integration_tests/chat_models/test_premai.py b/libs/community/tests/integration_tests/chat_models/test_premai.py index 36b96dd8d5..27b2e9d661 100644 --- a/libs/community/tests/integration_tests/chat_models/test_premai.py +++ b/libs/community/tests/integration_tests/chat_models/test_premai.py @@ -14,12 +14,12 @@ from langchain_community.chat_models import ChatPremAI @pytest.fixture def chat() -> ChatPremAI: - return ChatPremAI(project_id=8) + return ChatPremAI(project_id=8) # type: ignore[call-arg] def test_chat_premai() -> None: """Test ChatPremAI wrapper.""" - chat = ChatPremAI(project_id=8) + chat = ChatPremAI(project_id=8) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, BaseMessage) @@ -28,7 +28,7 @@ 
def test_chat_premai() -> None: def test_chat_prem_system_message() -> None: """Test ChatPremAI wrapper for system message""" - chat = ChatPremAI(project_id=8) + chat = ChatPremAI(project_id=8) # type: ignore[call-arg] system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat.invoke([system_message, human_message]) @@ -38,13 +38,13 @@ def test_chat_prem_system_message() -> None: def test_chat_prem_model() -> None: """Test ChatPremAI wrapper handles model_name.""" - chat = ChatPremAI(model="foo", project_id=8) + chat = ChatPremAI(model="foo", project_id=8) # type: ignore[call-arg] assert chat.model == "foo" def test_chat_prem_generate() -> None: """Test ChatPremAI wrapper with generate.""" - chat = ChatPremAI(project_id=8) + chat = ChatPremAI(project_id=8) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) @@ -64,7 +64,7 @@ async def test_prem_invoke(chat: ChatPremAI) -> None: def test_prem_streaming() -> None: """Test streaming tokens from Prem.""" - chat = ChatPremAI(project_id=8, streaming=True) + chat = ChatPremAI(project_id=8, streaming=True) # type: ignore[call-arg] for token in chat.stream("I'm Pickle Rick"): assert isinstance(token.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py b/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py index 455c2876dd..3df4d370c3 100644 --- a/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py +++ b/libs/community/tests/integration_tests/chat_models/test_promptlayer_openai.py @@ -11,7 +11,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler def test_promptlayer_chat_openai() -> None: """Test PromptLayerChatOpenAI wrapper.""" - chat = PromptLayerChatOpenAI(max_tokens=10) + chat = PromptLayerChatOpenAI(max_tokens=10) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.invoke([message]) assert isinstance(response, BaseMessage) @@ -20,7 +20,7 @@ def test_promptlayer_chat_openai() -> None: def test_promptlayer_chat_openai_system_message() -> None: """Test PromptLayerChatOpenAI wrapper with system message.""" - chat = PromptLayerChatOpenAI(max_tokens=10) + chat = PromptLayerChatOpenAI(max_tokens=10) # type: ignore[call-arg] system_message = SystemMessage(content="You are to chat with the user.") human_message = HumanMessage(content="Hello") response = chat.invoke([system_message, human_message]) @@ -30,7 +30,7 @@ def test_promptlayer_chat_openai_system_message() -> None: def test_promptlayer_chat_openai_generate() -> None: """Test PromptLayerChatOpenAI wrapper with generate.""" - chat = PromptLayerChatOpenAI(max_tokens=10, n=2) + chat = PromptLayerChatOpenAI(max_tokens=10, n=2) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) @@ -45,7 +45,7 @@ def test_promptlayer_chat_openai_generate() -> None: def test_promptlayer_chat_openai_multiple_completions() -> None: """Test PromptLayerChatOpenAI wrapper with multiple completions.""" - chat = PromptLayerChatOpenAI(max_tokens=10, n=5) + chat = PromptLayerChatOpenAI(max_tokens=10, n=5) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = chat._generate([message]) assert isinstance(response, ChatResult) @@ -59,7 +59,7 @@ def 
test_promptlayer_chat_openai_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = PromptLayerChatOpenAI( + chat = PromptLayerChatOpenAI( # type: ignore[call-arg] max_tokens=10, streaming=True, temperature=0, @@ -75,7 +75,7 @@ def test_promptlayer_chat_openai_streaming() -> None: def test_promptlayer_chat_openai_invalid_streaming_params() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" with pytest.raises(ValueError): - PromptLayerChatOpenAI( + PromptLayerChatOpenAI( # type: ignore[call-arg] max_tokens=10, streaming=True, temperature=0, @@ -85,7 +85,7 @@ def test_promptlayer_chat_openai_invalid_streaming_params() -> None: async def test_async_promptlayer_chat_openai() -> None: """Test async generation.""" - chat = PromptLayerChatOpenAI(max_tokens=10, n=2) + chat = PromptLayerChatOpenAI(max_tokens=10, n=2) # type: ignore[call-arg] message = HumanMessage(content="Hello") response = await chat.agenerate([[message], [message]]) assert isinstance(response, LLMResult) @@ -102,7 +102,7 @@ async def test_async_promptlayer_chat_openai_streaming() -> None: """Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = PromptLayerChatOpenAI( + chat = PromptLayerChatOpenAI( # type: ignore[call-arg] max_tokens=10, streaming=True, temperature=0, diff --git a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py index c57a77495e..82e568123d 100644 --- a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py +++ b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py @@ -90,8 +90,8 @@ def test_initialization() -> None: """Test chat model initialization.""" for model in [ - QianfanChatEndpoint(model="BLOOMZ-7B", timeout=40), - QianfanChatEndpoint(model="BLOOMZ-7B", request_timeout=40), + QianfanChatEndpoint(model="BLOOMZ-7B", timeout=40), # type: ignore[call-arg] + QianfanChatEndpoint(model="BLOOMZ-7B", request_timeout=40), # type: ignore[call-arg] ]: assert model.model == "BLOOMZ-7B" assert model.request_timeout == 40 @@ -99,7 +99,7 @@ def test_initialization() -> None: def test_default_call() -> None: """Test default model.invoke(`ERNIE-Bot`) call.""" - chat = QianfanChatEndpoint() + chat = QianfanChatEndpoint() # type: ignore[call-arg] response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -107,7 +107,7 @@ def test_default_call() -> None: def test_model() -> None: """Test model kwarg works.""" - chat = QianfanChatEndpoint(model="BLOOMZ-7B") + chat = QianfanChatEndpoint(model="BLOOMZ-7B") # type: ignore[call-arg] response = chat.invoke([HumanMessage(content="Hello")]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -115,7 +115,7 @@ def test_model() -> None: def test_model_param() -> None: """Test model params works.""" - chat = QianfanChatEndpoint() + chat = QianfanChatEndpoint() # type: ignore[call-arg] response = chat.invoke([HumanMessage(content="Hello")], model="BLOOMZ-7B") assert isinstance(response, BaseMessage) assert isinstance(response.content, str) @@ -123,7 +123,7 @@ def test_model_param() -> None: def test_endpoint() -> None: """Test user custom 
model deployments like some open source models."""
-    chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed")
+    chat = QianfanChatEndpoint(endpoint="qianfan_bloomz_7b_compressed")  # type: ignore[call-arg]
     response = chat.invoke([HumanMessage(content="Hello")])
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)
@@ -131,9 +131,9 @@ def test_endpoint() -> None:
 
 def test_endpoint_param() -> None:
     """Test user custom model deployments like some open source models."""
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
     response = chat.invoke(
-        [HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello")]
+        [HumanMessage(endpoint="qianfan_bloomz_7b_compressed", content="Hello")]  # type: ignore[call-arg]
     )
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)
@@ -141,7 +141,7 @@ def test_endpoint_param() -> None:
 
 def test_multiple_history() -> None:
     """Tests multiple history works."""
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
 
     response = chat.invoke(
         [
@@ -156,7 +156,7 @@ def test_multiple_history() -> None:
 
 def test_chat_generate() -> None:
     """Tests chat generate works."""
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
     response = chat.generate(
         [
             [
@@ -175,7 +175,7 @@ def test_chat_generate() -> None:
 
 def test_stream() -> None:
     """Test that stream works."""
-    chat = QianfanChatEndpoint(streaming=True)
+    chat = QianfanChatEndpoint(streaming=True)  # type: ignore[call-arg]
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     response = chat.invoke(
@@ -203,7 +203,7 @@ def test_stream() -> None:
 
 @pytest.mark.asyncio
 async def test_async_invoke() -> None:
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
     res = await chat.ainvoke([HumanMessage(content="Hello")])
     assert isinstance(res, BaseMessage)
     assert res.content != ""
@@ -212,7 +212,7 @@ async def test_async_invoke() -> None:
 @pytest.mark.asyncio
 async def test_async_generate() -> None:
     """Tests chat agenerate works."""
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
     response = await chat.agenerate(
         [
             [
@@ -231,7 +231,7 @@ async def test_async_generate() -> None:
 
 @pytest.mark.asyncio
 async def test_async_stream() -> None:
-    chat = QianfanChatEndpoint(streaming=True)
+    chat = QianfanChatEndpoint(streaming=True)  # type: ignore[call-arg]
    async for token in chat.astream(
         [
             HumanMessage(content="Hello."),
@@ -244,7 +244,7 @@ async def test_async_stream() -> None:
 
 def test_multiple_messages() -> None:
     """Tests multiple messages works."""
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
     message = HumanMessage(content="Hi, how are you.")
     response = chat.generate([[message], [message]])
 
@@ -259,13 +259,13 @@ def test_multiple_messages() -> None:
 
 def test_functions_call_thoughts() -> None:
-    chat = QianfanChatEndpoint(model="ERNIE-Bot")
+    chat = QianfanChatEndpoint(model="ERNIE-Bot")  # type: ignore[call-arg]
 
     prompt_tmpl = "Use the given functions to answer following question: {input}"
     prompt_msgs = [
         HumanMessagePromptTemplate.from_template(prompt_tmpl),
     ]
-    prompt = ChatPromptTemplate(messages=prompt_msgs)
+    prompt = ChatPromptTemplate(messages=prompt_msgs)  # type: ignore[arg-type, call-arg]
 
     chain = prompt | chat.bind(functions=_FUNCTIONS)
 
@@ -276,9 +276,9 @@ def test_functions_call_thoughts() -> None:
 
 def test_functions_call() -> None:
-    chat = QianfanChatEndpoint(model="ERNIE-Bot")
+    chat = QianfanChatEndpoint(model="ERNIE-Bot")  # type: ignore[call-arg]
 
-    prompt = ChatPromptTemplate(
+    prompt = ChatPromptTemplate(  # type: ignore[call-arg]
         messages=[
             HumanMessage(content="What's the temperature in Shanghai today?"),
             AIMessage(
@@ -305,7 +305,7 @@ def test_functions_call() -> None:
 
 def test_rate_limit() -> None:
-    chat = QianfanChatEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2})
+    chat = QianfanChatEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2})  # type: ignore[call-arg]
     assert chat.client._client._rate_limiter._sync_limiter._query_per_second == 2
     responses = chat.batch(
         [
@@ -326,7 +326,7 @@ def test_qianfan_key_masked_when_passed_from_env(
     monkeypatch.setenv("QIANFAN_AK", "test-api-key")
     monkeypatch.setenv("QIANFAN_SK", "test-secret-key")
 
-    chat = QianfanChatEndpoint()
+    chat = QianfanChatEndpoint()  # type: ignore[call-arg]
     print(chat.qianfan_ak, end="")  # noqa: T201
     captured = capsys.readouterr()
     assert captured.out == "**********"
@@ -340,9 +340,9 @@ def test_qianfan_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
     """Test initialization with an API key provided via the initializer"""
-    chat = QianfanChatEndpoint(
-        qianfan_ak="test-api-key",
-        qianfan_sk="test-secret-key",
+    chat = QianfanChatEndpoint(  # type: ignore[call-arg]
+        qianfan_ak="test-api-key",  # type: ignore[arg-type]
+        qianfan_sk="test-secret-key",  # type: ignore[arg-type]
     )
     print(chat.qianfan_ak, end="")  # noqa: T201
     captured = capsys.readouterr()
@@ -356,9 +356,9 @@ def test_qianfan_key_masked_when_passed_via_constructor(
 
 def test_uses_actual_secret_value_from_secret_str() -> None:
     """Test that actual secret is retrieved using `.get_secret_value()`."""
-    chat = QianfanChatEndpoint(
-        qianfan_ak="test-api-key",
-        qianfan_sk="test-secret-key",
+    chat = QianfanChatEndpoint(  # type: ignore[call-arg]
+        qianfan_ak="test-api-key",  # type: ignore[arg-type]
+        qianfan_sk="test-secret-key",  # type: ignore[arg-type]
     )
     assert cast(SecretStr, chat.qianfan_ak).get_secret_value() == "test-api-key"
     assert cast(SecretStr, chat.qianfan_sk).get_secret_value() == "test-secret-key"
diff --git a/libs/community/tests/integration_tests/chat_models/test_sparkllm.py b/libs/community/tests/integration_tests/chat_models/test_sparkllm.py
index df5357d8e7..1a94af7eb2 100644
--- a/libs/community/tests/integration_tests/chat_models/test_sparkllm.py
+++ b/libs/community/tests/integration_tests/chat_models/test_sparkllm.py
@@ -7,13 +7,13 @@ def test_initialization() -> None:
     """Test chat model initialization."""
     for model in [
         ChatSparkLLM(timeout=30),
-        ChatSparkLLM(request_timeout=30),
+        ChatSparkLLM(request_timeout=30),  # type: ignore[call-arg]
     ]:
         assert model.request_timeout == 30
 
 
 def test_chat_spark_llm() -> None:
-    chat = ChatSparkLLM()
+    chat = ChatSparkLLM()  # type: ignore[call-arg]
     message = HumanMessage(content="Hello")
     response = chat.invoke([message])
     assert isinstance(response, AIMessage)
@@ -21,14 +21,14 @@ def test_chat_spark_llm() -> None:
 
 
 def test_chat_spark_llm_streaming() -> None:
-    chat = ChatSparkLLM(streaming=True)
+    chat = ChatSparkLLM(streaming=True)  # type: ignore[call-arg]
     for chunk in chat.stream("Hello!"):
         assert isinstance(chunk, AIMessageChunk)
         assert isinstance(chunk.content, str)
 
 
 def test_chat_spark_llm_with_domain() -> None:
-    chat = ChatSparkLLM(spark_llm_domain="generalv3")
+    chat = ChatSparkLLM(spark_llm_domain="generalv3")  # type: ignore[call-arg]
     message = HumanMessage(content="Hello")
     response = chat.invoke([message])
     print(response)  # noqa: T201
@@ -37,7 +37,7 @@ def test_chat_spark_llm_with_domain() -> None:
 
 
 def test_chat_spark_llm_with_temperature() -> None:
-    chat = ChatSparkLLM(temperature=0.9, top_k=2)
+    chat = ChatSparkLLM(temperature=0.9, top_k=2)  # type: ignore[call-arg]
     message = HumanMessage(content="Hello")
     response = chat.invoke([message])
     print(response)  # noqa: T201
diff --git a/libs/community/tests/integration_tests/chat_models/test_tongyi.py b/libs/community/tests/integration_tests/chat_models/test_tongyi.py
index 3e0a8f9442..8961821fea 100644
--- a/libs/community/tests/integration_tests/chat_models/test_tongyi.py
+++ b/libs/community/tests/integration_tests/chat_models/test_tongyi.py
@@ -36,22 +36,22 @@ _FUNCTIONS: Any = [
 def test_initialization() -> None:
     """Test chat model initialization."""
     for model in [
-        ChatTongyi(model_name="qwen-turbo", api_key="xyz"),
-        ChatTongyi(model="qwen-turbo", dashscope_api_key="xyz"),
+        ChatTongyi(model_name="qwen-turbo", api_key="xyz"),  # type: ignore[arg-type, call-arg]
+        ChatTongyi(model="qwen-turbo", dashscope_api_key="xyz"),  # type: ignore[call-arg]
     ]:
         assert model.model_name == "qwen-turbo"
         assert cast(SecretStr, model.dashscope_api_key).get_secret_value() == "xyz"
 
 
 def test_api_key_is_string() -> None:
-    llm = ChatTongyi(dashscope_api_key="secret-api-key")
+    llm = ChatTongyi(dashscope_api_key="secret-api-key")  # type: ignore[call-arg]
     assert isinstance(llm.dashscope_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = ChatTongyi(dashscope_api_key="secret-api-key")
+    llm = ChatTongyi(dashscope_api_key="secret-api-key")  # type: ignore[call-arg]
     print(llm.dashscope_api_key, end="")  # noqa: T201
     captured = capsys.readouterr()
 
@@ -60,7 +60,7 @@ def test_api_key_masked_when_passed_via_constructor(
 
 def test_default_call() -> None:
     """Test default model call."""
-    chat = ChatTongyi()
+    chat = ChatTongyi()  # type: ignore[call-arg]
     response = chat.invoke([HumanMessage(content="Hello")])
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)
@@ -68,20 +68,20 @@ def test_default_call() -> None:
 
 def test_model() -> None:
     """Test model kwarg works."""
-    chat = ChatTongyi(model="qwen-plus")
+    chat = ChatTongyi(model="qwen-plus")  # type: ignore[call-arg]
     response = chat.invoke([HumanMessage(content="Hello")])
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)
 
 
 def test_functions_call_thoughts() -> None:
-    chat = ChatTongyi(model="qwen-plus")
+    chat = ChatTongyi(model="qwen-plus")  # type: ignore[call-arg]
 
     prompt_tmpl = "Use the given functions to answer following question: {input}"
     prompt_msgs = [
         HumanMessagePromptTemplate.from_template(prompt_tmpl),
     ]
-    prompt = ChatPromptTemplate(messages=prompt_msgs)
+    prompt = ChatPromptTemplate(messages=prompt_msgs)  # type: ignore[arg-type, call-arg]
 
     chain = prompt | chat.bind(functions=_FUNCTIONS)
 
@@ -93,7 +93,7 @@ def test_functions_call_thoughts() -> None:
 
 def test_multiple_history() -> None:
     """Tests multiple history works."""
-    chat = ChatTongyi()
+    chat = ChatTongyi()  # type: ignore[call-arg]
 
     response = chat.invoke(
         [
@@ -108,7 +108,7 @@ def test_multiple_history() -> None:
 
 def test_stream() -> None:
     """Test that stream works."""
-    chat = ChatTongyi(streaming=True)
+    chat = ChatTongyi(streaming=True)  # type: ignore[call-arg]
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     response = chat.invoke(
@@ -126,7 +126,7 @@ def test_stream() -> None:
 
 def test_multiple_messages() -> None:
     """Tests multiple messages works."""
-    chat = ChatTongyi()
+    chat = ChatTongyi()  # type: ignore[call-arg]
     message = HumanMessage(content="Hi, how are you.")
     response = chat.generate([[message], [message]])
 
diff --git a/libs/community/tests/integration_tests/chat_models/test_vertexai.py b/libs/community/tests/integration_tests/chat_models/test_vertexai.py
index 235d3cfa8f..cca4349ea9 100644
--- a/libs/community/tests/integration_tests/chat_models/test_vertexai.py
+++ b/libs/community/tests/integration_tests/chat_models/test_vertexai.py
@@ -248,7 +248,7 @@ def test_vertexai_args_passed(stop: Optional[str]) -> None:
         mock_send_message = MagicMock(return_value=mock_response)
         mock_chat.send_message = mock_send_message
 
-        model = ChatVertexAI(**prompt_params)
+        model = ChatVertexAI(**prompt_params)  # type: ignore[arg-type]
         message = HumanMessage(content=user_prompt)
         if stop:
             response = model.invoke([message], stop=[stop])
diff --git a/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py b/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py
index 24002ec617..d236ae2251 100644
--- a/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py
+++ b/libs/community/tests/integration_tests/chat_models/test_volcengine_maas.py
@@ -10,7 +10,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
 
 def test_default_call() -> None:
     """Test valid chat call to volc engine."""
-    chat = VolcEngineMaasChat()
+    chat = VolcEngineMaasChat()  # type: ignore[call-arg]
     response = chat.invoke([HumanMessage(content="Hello")])
     assert isinstance(response, BaseMessage)
     assert isinstance(response.content, str)
@@ -18,7 +18,7 @@ def test_default_call() -> None:
 
 def test_multiple_history() -> None:
     """Tests multiple history works."""
-    chat = VolcEngineMaasChat()
+    chat = VolcEngineMaasChat()  # type: ignore[call-arg]
 
     response = chat.invoke(
         [
@@ -33,7 +33,7 @@ def test_multiple_history() -> None:
 
 def test_stream() -> None:
     """Test that stream works."""
-    chat = VolcEngineMaasChat(streaming=True)
+    chat = VolcEngineMaasChat(streaming=True)  # type: ignore[call-arg]
     callback_handler = FakeCallbackHandler()
     callback_manager = CallbackManager([callback_handler])
     response = chat.invoke(
@@ -51,7 +51,7 @@ def test_stream() -> None:
 
 def test_stop() -> None:
     """Test that stop works."""
-    chat = VolcEngineMaasChat(
+    chat = VolcEngineMaasChat(  # type: ignore[call-arg]
         model="skylark2-pro-4k", model_version="1.2", streaming=True
     )
     callback_handler = FakeCallbackHandler()
@@ -73,7 +73,7 @@ def test_stop() -> None:
 
 def test_multiple_messages() -> None:
     """Tests multiple messages works."""
-    chat = VolcEngineMaasChat()
+    chat = VolcEngineMaasChat()  # type: ignore[call-arg]
     message = HumanMessage(content="Hi, how are you?")
     response = chat.generate([[message], [message]])
 
diff --git a/libs/community/tests/integration_tests/chat_models/test_yuan2.py b/libs/community/tests/integration_tests/chat_models/test_yuan2.py
index 53678016e1..07329ea409 100644
--- a/libs/community/tests/integration_tests/chat_models/test_yuan2.py
+++ b/libs/community/tests/integration_tests/chat_models/test_yuan2.py
@@ -16,7 +16,7 @@ from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
 @pytest.mark.scheduled
 def test_chat_yuan2() -> None:
     """Test ChatYuan2 wrapper."""
-    chat = ChatYuan2(
+    chat = ChatYuan2(  # type: ignore[call-arg]
         yuan2_api_key="EMPTY",
yuan2_api_base="http://127.0.0.1:8001/v1", temperature=1.0, @@ -34,7 +34,7 @@ def test_chat_yuan2() -> None: def test_chat_yuan2_system_message() -> None: """Test ChatYuan2 wrapper with system message.""" - chat = ChatYuan2( + chat = ChatYuan2( # type: ignore[call-arg] yuan2_api_key="EMPTY", yuan2_api_base="http://127.0.0.1:8001/v1", temperature=1.0, @@ -54,7 +54,7 @@ def test_chat_yuan2_system_message() -> None: @pytest.mark.scheduled def test_chat_yuan2_generate() -> None: """Test ChatYuan2 wrapper with generate.""" - chat = ChatYuan2( + chat = ChatYuan2( # type: ignore[call-arg] yuan2_api_key="EMPTY", yuan2_api_base="http://127.0.0.1:8001/v1", temperature=1.0, @@ -82,7 +82,7 @@ def test_chat_yuan2_streaming() -> None: callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatYuan2( + chat = ChatYuan2( # type: ignore[call-arg] yuan2_api_key="EMPTY", yuan2_api_base="http://127.0.0.1:8001/v1", temperature=1.0, @@ -102,7 +102,7 @@ def test_chat_yuan2_streaming() -> None: @pytest.mark.asyncio async def test_async_chat_yuan2() -> None: """Test async generation.""" - chat = ChatYuan2( + chat = ChatYuan2( # type: ignore[call-arg] yuan2_api_key="EMPTY", yuan2_api_base="http://127.0.0.1:8001/v1", temperature=1.0, @@ -129,7 +129,7 @@ async def test_async_chat_yuan2_streaming() -> None: callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) - chat = ChatYuan2( + chat = ChatYuan2( # type: ignore[call-arg] yuan2_api_key="EMPTY", yuan2_api_base="http://127.0.0.1:8001/v1", temperature=1.0, diff --git a/libs/community/tests/integration_tests/embeddings/test_awa.py b/libs/community/tests/integration_tests/embeddings/test_awa.py index 68a71085cd..68b044baba 100644 --- a/libs/community/tests/integration_tests/embeddings/test_awa.py +++ b/libs/community/tests/integration_tests/embeddings/test_awa.py @@ -5,7 +5,7 @@ from langchain_community.embeddings.awa import AwaEmbeddings def test_awa_embedding_documents() -> None: """Test Awa embeddings for documents.""" documents = ["foo bar", "test document"] - embedding = AwaEmbeddings() + embedding = AwaEmbeddings() # type: ignore[call-arg] output = embedding.embed_documents(documents) assert len(output) == 2 assert len(output[0]) == 768 @@ -14,6 +14,6 @@ def test_awa_embedding_documents() -> None: def test_awa_embedding_query() -> None: """Test Awa embeddings for query.""" document = "foo bar" - embedding = AwaEmbeddings() + embedding = AwaEmbeddings() # type: ignore[call-arg] output = embedding.embed_query(document) assert len(output) == 768 diff --git a/libs/community/tests/integration_tests/embeddings/test_azure_openai.py b/libs/community/tests/integration_tests/embeddings/test_azure_openai.py index ec1e5f4720..43deadf40e 100644 --- a/libs/community/tests/integration_tests/embeddings/test_azure_openai.py +++ b/libs/community/tests/integration_tests/embeddings/test_azure_openai.py @@ -17,7 +17,7 @@ DEPLOYMENT_NAME = os.environ.get( def _get_embeddings(**kwargs: Any) -> AzureOpenAIEmbeddings: - return AzureOpenAIEmbeddings( + return AzureOpenAIEmbeddings( # type: ignore[call-arg] azure_deployment=DEPLOYMENT_NAME, api_version=OPENAI_API_VERSION, openai_api_base=OPENAI_API_BASE, diff --git a/libs/community/tests/integration_tests/embeddings/test_baichuan.py b/libs/community/tests/integration_tests/embeddings/test_baichuan.py index 7512b06058..b8f8e68bff 100644 --- a/libs/community/tests/integration_tests/embeddings/test_baichuan.py +++ 
b/libs/community/tests/integration_tests/embeddings/test_baichuan.py @@ -5,7 +5,7 @@ from langchain_community.embeddings.baichuan import BaichuanTextEmbeddings def test_baichuan_embedding_documents() -> None: """Test Baichuan Text Embedding for documents.""" documents = ["今天天气不错", "今天阳光灿烂"] - embedding = BaichuanTextEmbeddings() + embedding = BaichuanTextEmbeddings() # type: ignore[call-arg] output = embedding.embed_documents(documents) assert len(output) == 2 # type: ignore[arg-type] assert len(output[0]) == 1024 # type: ignore[index] @@ -14,6 +14,6 @@ def test_baichuan_embedding_documents() -> None: def test_baichuan_embedding_query() -> None: """Test Baichuan Text Embedding for query.""" document = "所有的小学生都会学过只因兔同笼问题。" - embedding = BaichuanTextEmbeddings() + embedding = BaichuanTextEmbeddings() # type: ignore[call-arg] output = embedding.embed_query(document) assert len(output) == 1024 # type: ignore[arg-type] diff --git a/libs/community/tests/integration_tests/embeddings/test_cohere.py b/libs/community/tests/integration_tests/embeddings/test_cohere.py index 3ca7fd6f2c..d8e7e3d039 100644 --- a/libs/community/tests/integration_tests/embeddings/test_cohere.py +++ b/libs/community/tests/integration_tests/embeddings/test_cohere.py @@ -5,7 +5,7 @@ from langchain_community.embeddings.cohere import CohereEmbeddings def test_cohere_embedding_documents() -> None: """Test cohere embeddings.""" documents = ["foo bar"] - embedding = CohereEmbeddings() + embedding = CohereEmbeddings() # type: ignore[call-arg] output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 2048 @@ -14,6 +14,6 @@ def test_cohere_embedding_documents() -> None: def test_cohere_embedding_query() -> None: """Test cohere embeddings.""" document = "foo bar" - embedding = CohereEmbeddings() + embedding = CohereEmbeddings() # type: ignore[call-arg] output = embedding.embed_query(document) assert len(output) == 2048 diff --git a/libs/community/tests/integration_tests/embeddings/test_dashscope.py b/libs/community/tests/integration_tests/embeddings/test_dashscope.py index 82e507c863..869f91ead8 100644 --- a/libs/community/tests/integration_tests/embeddings/test_dashscope.py +++ b/libs/community/tests/integration_tests/embeddings/test_dashscope.py @@ -7,7 +7,7 @@ from langchain_community.embeddings.dashscope import DashScopeEmbeddings def test_dashscope_embedding_documents() -> None: """Test dashscope embeddings.""" documents = ["foo bar"] - embedding = DashScopeEmbeddings(model="text-embedding-v1") + embedding = DashScopeEmbeddings(model="text-embedding-v1") # type: ignore[call-arg] output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1536 @@ -45,7 +45,7 @@ def test_dashscope_embedding_documents_multiple() -> None: "foo23", "foo24", ] - embedding = DashScopeEmbeddings(model="text-embedding-v1") + embedding = DashScopeEmbeddings(model="text-embedding-v1") # type: ignore[call-arg] output = embedding.embed_documents(documents) assert len(output) == 28 assert len(output[0]) == 1536 @@ -56,7 +56,7 @@ def test_dashscope_embedding_documents_multiple() -> None: def test_dashscope_embedding_query() -> None: """Test dashscope embeddings.""" document = "foo bar" - embedding = DashScopeEmbeddings(model="text-embedding-v1") + embedding = DashScopeEmbeddings(model="text-embedding-v1") # type: ignore[call-arg] output = embedding.embed_query(document) assert len(output) == 1536 @@ -66,7 +66,7 @@ def test_dashscope_embedding_with_empty_string() -> None: import dashscope 
 
     document = ["", "abc"]
-    embedding = DashScopeEmbeddings(model="text-embedding-v1")
+    embedding = DashScopeEmbeddings(model="text-embedding-v1")  # type: ignore[call-arg]
     output = embedding.embed_documents(document)
     assert len(output) == 2
     assert len(output[0]) == 1536
diff --git a/libs/community/tests/integration_tests/embeddings/test_edenai.py b/libs/community/tests/integration_tests/embeddings/test_edenai.py
index f200e30d35..0b5bb1f835 100644
--- a/libs/community/tests/integration_tests/embeddings/test_edenai.py
+++ b/libs/community/tests/integration_tests/embeddings/test_edenai.py
@@ -6,7 +6,7 @@ from langchain_community.embeddings.edenai import EdenAiEmbeddings
 def test_edenai_embedding_documents() -> None:
     """Test edenai embeddings with openai."""
     documents = ["foo bar", "test text"]
-    embedding = EdenAiEmbeddings(provider="openai")
+    embedding = EdenAiEmbeddings(provider="openai")  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 2
     assert len(output[0]) == 1536
@@ -16,6 +16,6 @@ def test_edenai_embedding_documents() -> None:
 def test_edenai_embedding_query() -> None:
     """Test eden ai embeddings with google."""
     document = "foo bar"
-    embedding = EdenAiEmbeddings(provider="google")
+    embedding = EdenAiEmbeddings(provider="google")  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 768
diff --git a/libs/community/tests/integration_tests/embeddings/test_fastembed.py b/libs/community/tests/integration_tests/embeddings/test_fastembed.py
index 80215e9056..48595f7ee3 100644
--- a/libs/community/tests/integration_tests/embeddings/test_fastembed.py
+++ b/libs/community/tests/integration_tests/embeddings/test_fastembed.py
@@ -15,10 +15,10 @@ def test_fastembed_embedding_documents(
 ) -> None:
     """Test fastembed embeddings for documents."""
     documents = ["foo bar", "bar foo"]
-    embedding = FastEmbedEmbeddings(
+    embedding = FastEmbedEmbeddings(  # type: ignore[call-arg]
         model_name=model_name,
         max_length=max_length,
-        doc_embed_type=doc_embed_type,
+        doc_embed_type=doc_embed_type,  # type: ignore[arg-type]
         threads=threads,
     )
     output = embedding.embed_documents(documents)
@@ -33,7 +33,7 @@ def test_fastembed_embedding_documents(
 def test_fastembed_embedding_query(model_name: str, max_length: int) -> None:
     """Test fastembed embeddings for query."""
     document = "foo bar"
-    embedding = FastEmbedEmbeddings(model_name=model_name, max_length=max_length)
+    embedding = FastEmbedEmbeddings(model_name=model_name, max_length=max_length)  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 384
 
@@ -49,10 +49,10 @@ async def test_fastembed_async_embedding_documents(
 ) -> None:
     """Test fastembed embeddings for documents."""
     documents = ["foo bar", "bar foo"]
-    embedding = FastEmbedEmbeddings(
+    embedding = FastEmbedEmbeddings(  # type: ignore[call-arg]
         model_name=model_name,
         max_length=max_length,
-        doc_embed_type=doc_embed_type,
+        doc_embed_type=doc_embed_type,  # type: ignore[arg-type]
         threads=threads,
     )
     output = await embedding.aembed_documents(documents)
@@ -69,6 +69,6 @@ async def test_fastembed_async_embedding_query(
 ) -> None:
     """Test fastembed embeddings for query."""
     document = "foo bar"
-    embedding = FastEmbedEmbeddings(model_name=model_name, max_length=max_length)
+    embedding = FastEmbedEmbeddings(model_name=model_name, max_length=max_length)  # type: ignore[call-arg]
     output = await embedding.aembed_query(document)
     assert len(output) == 384
diff --git a/libs/community/tests/integration_tests/embeddings/test_google_palm.py b/libs/community/tests/integration_tests/embeddings/test_google_palm.py
index 047edc76e1..3481c0cfdc 100644
--- a/libs/community/tests/integration_tests/embeddings/test_google_palm.py
+++ b/libs/community/tests/integration_tests/embeddings/test_google_palm.py
@@ -9,7 +9,7 @@ from langchain_community.embeddings.google_palm import GooglePalmEmbeddings
 def test_google_palm_embedding_documents() -> None:
     """Test Google PaLM embeddings."""
     documents = ["foo bar"]
-    embedding = GooglePalmEmbeddings()
+    embedding = GooglePalmEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 1
     assert len(output[0]) == 768
@@ -18,7 +18,7 @@ def test_google_palm_embedding_documents() -> None:
 def test_google_palm_embedding_documents_multiple() -> None:
     """Test Google PaLM embeddings."""
     documents = ["foo bar", "bar foo", "foo"]
-    embedding = GooglePalmEmbeddings()
+    embedding = GooglePalmEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 3
     assert len(output[0]) == 768
@@ -29,6 +29,6 @@ def test_google_palm_embedding_documents_multiple() -> None:
 def test_google_palm_embedding_query() -> None:
     """Test Google PaLM embeddings."""
     document = "foo bar"
-    embedding = GooglePalmEmbeddings()
+    embedding = GooglePalmEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 768
diff --git a/libs/community/tests/integration_tests/embeddings/test_huggingface_hub.py b/libs/community/tests/integration_tests/embeddings/test_huggingface_hub.py
index 0a47d97f53..b6e3dc9e07 100644
--- a/libs/community/tests/integration_tests/embeddings/test_huggingface_hub.py
+++ b/libs/community/tests/integration_tests/embeddings/test_huggingface_hub.py
@@ -7,7 +7,7 @@ from langchain_community.embeddings import HuggingFaceHubEmbeddings
 def test_huggingfacehub_embedding_documents() -> None:
     """Test huggingfacehub embeddings."""
     documents = ["foo bar"]
-    embedding = HuggingFaceHubEmbeddings()
+    embedding = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 1
     assert len(output[0]) == 768
@@ -16,7 +16,7 @@ def test_huggingfacehub_embedding_documents() -> None:
 async def test_huggingfacehub_embedding_async_documents() -> None:
     """Test huggingfacehub embeddings."""
     documents = ["foo bar"]
-    embedding = HuggingFaceHubEmbeddings()
+    embedding = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
     output = await embedding.aembed_documents(documents)
     assert len(output) == 1
     assert len(output[0]) == 768
@@ -25,7 +25,7 @@ async def test_huggingfacehub_embedding_async_documents() -> None:
 def test_huggingfacehub_embedding_query() -> None:
     """Test huggingfacehub embeddings."""
     document = "foo bar"
-    embedding = HuggingFaceHubEmbeddings()
+    embedding = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 768
@@ -33,7 +33,7 @@ def test_huggingfacehub_embedding_query() -> None:
 async def test_huggingfacehub_embedding_async_query() -> None:
     """Test huggingfacehub embeddings."""
     document = "foo bar"
-    embedding = HuggingFaceHubEmbeddings()
+    embedding = HuggingFaceHubEmbeddings()  # type: ignore[call-arg]
     output = await embedding.aembed_query(document)
     assert len(output) == 768
 
@@ -42,4 +42,4 @@ def test_huggingfacehub_embedding_invalid_repo() -> None:
     """Test huggingfacehub embedding repo id validation."""
     # Only sentence-transformers models are currently supported.
     with pytest.raises(ValueError):
-        HuggingFaceHubEmbeddings(repo_id="allenai/specter")
+        HuggingFaceHubEmbeddings(repo_id="allenai/specter")  # type: ignore[call-arg]
diff --git a/libs/community/tests/integration_tests/embeddings/test_jina.py b/libs/community/tests/integration_tests/embeddings/test_jina.py
index 4c1dcca9b1..7d5633ca1c 100644
--- a/libs/community/tests/integration_tests/embeddings/test_jina.py
+++ b/libs/community/tests/integration_tests/embeddings/test_jina.py
@@ -5,7 +5,7 @@ from langchain_community.embeddings.jina import JinaEmbeddings
 def test_jina_embedding_documents() -> None:
     """Test jina embeddings for documents."""
     documents = ["foo bar", "bar foo"]
-    embedding = JinaEmbeddings()
+    embedding = JinaEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 2
     assert len(output[0]) == 512
@@ -14,6 +14,6 @@ def test_jina_embedding_documents() -> None:
 def test_jina_embedding_query() -> None:
     """Test jina embeddings for query."""
     document = "foo bar"
-    embedding = JinaEmbeddings()
+    embedding = JinaEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 512
diff --git a/libs/community/tests/integration_tests/embeddings/test_laser.py b/libs/community/tests/integration_tests/embeddings/test_laser.py
index b8bf9ab335..d5c20ab3cb 100644
--- a/libs/community/tests/integration_tests/embeddings/test_laser.py
+++ b/libs/community/tests/integration_tests/embeddings/test_laser.py
@@ -11,7 +11,7 @@ def test_laser_embedding_documents(lang: str) -> None:
     User warning is returned by LASER library implementation
     so will ignore in testing."""
     documents = ["hello", "world"]
-    embedding = LaserEmbeddings(lang=lang)
+    embedding = LaserEmbeddings(lang=lang)  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 2  # type: ignore[arg-type]
     assert len(output[0]) == 1024  # type: ignore[index]
@@ -24,6 +24,6 @@ def test_laser_embedding_query(lang: str) -> None:
     User warning is returned by LASER library implementation
     so will ignore in testing."""
     query = "hello world"
-    embedding = LaserEmbeddings(lang=lang)
+    embedding = LaserEmbeddings(lang=lang)  # type: ignore[call-arg]
     output = embedding.embed_query(query)
     assert len(output) == 1024
diff --git a/libs/community/tests/integration_tests/embeddings/test_llamacpp.py b/libs/community/tests/integration_tests/embeddings/test_llamacpp.py
index d604f8aa10..321a640756 100644
--- a/libs/community/tests/integration_tests/embeddings/test_llamacpp.py
+++ b/libs/community/tests/integration_tests/embeddings/test_llamacpp.py
@@ -31,7 +31,7 @@ def test_llamacpp_embedding_documents() -> None:
     """Test llamacpp embeddings."""
     documents = ["foo bar"]
     model_path = get_model()
-    embedding = LlamaCppEmbeddings(model_path=model_path)
+    embedding = LlamaCppEmbeddings(model_path=model_path)  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 1
     assert len(output[0]) == 512
@@ -41,6 +41,6 @@ def test_llamacpp_embedding_query() -> None:
     """Test llamacpp embeddings."""
     document = "foo bar"
     model_path = get_model()
-    embedding = LlamaCppEmbeddings(model_path=model_path)
+    embedding = LlamaCppEmbeddings(model_path=model_path)  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 512
diff --git a/libs/community/tests/integration_tests/embeddings/test_premai.py b/libs/community/tests/integration_tests/embeddings/test_premai.py
index f0848760bf..96218673e4 100644
--- a/libs/community/tests/integration_tests/embeddings/test_premai.py
+++ b/libs/community/tests/integration_tests/embeddings/test_premai.py
@@ -12,7 +12,7 @@ from langchain_community.embeddings.premai import PremAIEmbeddings
 
 @pytest.fixture
 def embedder() -> PremAIEmbeddings:
-    return PremAIEmbeddings(project_id=8, model="text-embedding-3-small")
+    return PremAIEmbeddings(project_id=8, model="text-embedding-3-small")  # type: ignore[call-arg]
 
 
 def test_prem_embedding_documents(embedder: PremAIEmbeddings) -> None:
diff --git a/libs/community/tests/integration_tests/embeddings/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/embeddings/test_qianfan_endpoint.py
index c575f8475c..6361939f4b 100644
--- a/libs/community/tests/integration_tests/embeddings/test_qianfan_endpoint.py
+++ b/libs/community/tests/integration_tests/embeddings/test_qianfan_endpoint.py
@@ -6,7 +6,7 @@ from langchain_community.embeddings.baidu_qianfan_endpoint import (
 
 def test_embedding_multiple_documents() -> None:
     documents = ["foo", "bar"]
-    embedding = QianfanEmbeddingsEndpoint()
+    embedding = QianfanEmbeddingsEndpoint()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 2
     assert len(output[0]) == 384
@@ -15,20 +15,20 @@ def test_embedding_multiple_documents() -> None:
 
 def test_embedding_query() -> None:
     query = "foo"
-    embedding = QianfanEmbeddingsEndpoint()
+    embedding = QianfanEmbeddingsEndpoint()  # type: ignore[call-arg]
     output = embedding.embed_query(query)
     assert len(output) == 384
 
 
 def test_model() -> None:
     documents = ["hi", "qianfan"]
-    embedding = QianfanEmbeddingsEndpoint(model="Embedding-V1")
+    embedding = QianfanEmbeddingsEndpoint(model="Embedding-V1")  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 2
 
 
 def test_rate_limit() -> None:
-    llm = QianfanEmbeddingsEndpoint(
+    llm = QianfanEmbeddingsEndpoint(  # type: ignore[call-arg]
         model="Embedding-V1", init_kwargs={"query_per_second": 2}
     )
     assert llm.client._client._rate_limiter._sync_limiter._query_per_second == 2
diff --git a/libs/community/tests/integration_tests/embeddings/test_self_hosted.py b/libs/community/tests/integration_tests/embeddings/test_self_hosted.py
index a270c2ce6b..4f84e8d877 100644
--- a/libs/community/tests/integration_tests/embeddings/test_self_hosted.py
+++ b/libs/community/tests/integration_tests/embeddings/test_self_hosted.py
@@ -77,7 +77,7 @@ def test_self_hosted_embedding_documents() -> None:
     """Test self-hosted huggingface instruct embeddings."""
     documents = ["foo bar"] * 2
     gpu = get_remote_instance()
-    embedding = SelfHostedEmbeddings(
+    embedding = SelfHostedEmbeddings(  # type: ignore[call-arg]
         model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
     )
     output = embedding.embed_documents(documents)
@@ -89,7 +89,7 @@ def test_self_hosted_embedding_query() -> None:
     """Test self-hosted custom embeddings."""
     query = "foo bar"
     gpu = get_remote_instance()
-    embedding = SelfHostedEmbeddings(
+    embedding = SelfHostedEmbeddings(  # type: ignore[call-arg]
         model_load_fn=get_pipeline, hardware=gpu, inference_fn=inference_fn
     )
     output = embedding.embed_query(query)
diff --git a/libs/community/tests/integration_tests/embeddings/test_sparkllm.py b/libs/community/tests/integration_tests/embeddings/test_sparkllm.py
index b934e2e008..722b6ea8a7 100644
--- a/libs/community/tests/integration_tests/embeddings/test_sparkllm.py
+++ b/libs/community/tests/integration_tests/embeddings/test_sparkllm.py
@@ -17,7 +17,7 @@ def test_baichuan_embedding_documents() -> None:
         "understand and think, "
         "creating a better world with artificial intelligence."
     ]
-    embedding = SparkLLMTextEmbeddings()
+    embedding = SparkLLMTextEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 1  # type: ignore[arg-type]
     assert len(output[0]) == 2560  # type: ignore[index]
@@ -30,6 +30,6 @@ def test_baichuan_embedding_query() -> None:
         "first Artificial Intelligence open platform for Mobile Internet "
         "and intelligent hardware developers"
     )
-    embedding = SparkLLMTextEmbeddings()
+    embedding = SparkLLMTextEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 2560  # type: ignore[arg-type]
diff --git a/libs/community/tests/integration_tests/embeddings/test_volcano.py b/libs/community/tests/integration_tests/embeddings/test_volcano.py
index 7ef7ac33fa..3411681229 100644
--- a/libs/community/tests/integration_tests/embeddings/test_volcano.py
+++ b/libs/community/tests/integration_tests/embeddings/test_volcano.py
@@ -5,7 +5,7 @@ from langchain_community.embeddings import VolcanoEmbeddings
 def test_embedding_documents() -> None:
     """Test embeddings for documents."""
     documents = ["foo", "bar"]
-    embedding = VolcanoEmbeddings()
+    embedding = VolcanoEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 2
     assert len(output[0]) == 1024
@@ -14,6 +14,6 @@ def test_embedding_documents() -> None:
 def test_embedding_query() -> None:
     """Test embeddings for query."""
     document = "foo bar"
-    embedding = VolcanoEmbeddings()
+    embedding = VolcanoEmbeddings()  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 1024
diff --git a/libs/community/tests/integration_tests/embeddings/test_voyageai.py b/libs/community/tests/integration_tests/embeddings/test_voyageai.py
index c14c08c5db..8f48243188 100644
--- a/libs/community/tests/integration_tests/embeddings/test_voyageai.py
+++ b/libs/community/tests/integration_tests/embeddings/test_voyageai.py
@@ -8,7 +8,7 @@ MODEL = "voyage-2"
 def test_voyagi_embedding_documents() -> None:
     """Test voyage embeddings."""
     documents = ["foo bar"]
-    embedding = VoyageEmbeddings(model=MODEL)
+    embedding = VoyageEmbeddings(model=MODEL)  # type: ignore[call-arg]
     output = embedding.embed_documents(documents)
     assert len(output) == 1
     assert len(output[0]) == 1024
@@ -16,7 +16,7 @@ def test_voyagi_embedding_documents() -> None:
 
 def test_voyagi_with_default_model() -> None:
     """Test voyage embeddings."""
-    embedding = VoyageEmbeddings()
+    embedding = VoyageEmbeddings()  # type: ignore[call-arg]
     assert embedding.model == "voyage-01"
     assert embedding.batch_size == 7
     documents = [f"foo bar {i}" for i in range(72)]
@@ -40,6 +40,6 @@ def test_voyage_embedding_documents_multiple() -> None:
 def test_voyage_embedding_query() -> None:
     """Test voyage embeddings."""
     document = "foo bar"
-    embedding = VoyageEmbeddings(model=MODEL)
+    embedding = VoyageEmbeddings(model=MODEL)  # type: ignore[call-arg]
     output = embedding.embed_query(document)
     assert len(output) == 1024
diff --git a/libs/community/tests/integration_tests/llms/test_aleph_alpha.py b/libs/community/tests/integration_tests/llms/test_aleph_alpha.py
index 56d6aad2af..3ccafb99bb 100644
--- a/libs/community/tests/integration_tests/llms/test_aleph_alpha.py
+++ b/libs/community/tests/integration_tests/llms/test_aleph_alpha.py
@@ -5,6 +5,6 @@ from langchain_community.llms.aleph_alpha import AlephAlpha
 
 def test_aleph_alpha_call() -> None:
     """Test valid call to Aleph Alpha."""
-    llm = AlephAlpha(maximum_tokens=10)
+    llm = AlephAlpha(maximum_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_anthropic.py b/libs/community/tests/integration_tests/llms/test_anthropic.py
index d0e42eec5e..23dac7f9f4 100644
--- a/libs/community/tests/integration_tests/llms/test_anthropic.py
+++ b/libs/community/tests/integration_tests/llms/test_anthropic.py
@@ -17,20 +17,20 @@ def test_anthropic_model_name_param() -> None:
 
 @pytest.mark.requires("anthropic")
 def test_anthropic_model_param() -> None:
-    llm = Anthropic(model="foo")
+    llm = Anthropic(model="foo")  # type: ignore[call-arg]
     assert llm.model == "foo"
 
 
 def test_anthropic_call() -> None:
     """Test valid call to anthropic."""
-    llm = Anthropic(model="claude-instant-1")
+    llm = Anthropic(model="claude-instant-1")  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
 def test_anthropic_streaming() -> None:
     """Test streaming tokens from anthropic."""
-    llm = Anthropic(model="claude-instant-1")
+    llm = Anthropic(model="claude-instant-1")  # type: ignore[call-arg]
     generator = llm.stream("I'm Pickle Rick")
 
     assert isinstance(generator, Generator)
diff --git a/libs/community/tests/integration_tests/llms/test_azureml_endpoint.py b/libs/community/tests/integration_tests/llms/test_azureml_endpoint.py
index 3c4e61eb44..d6c0a9993d 100644
--- a/libs/community/tests/integration_tests/llms/test_azureml_endpoint.py
+++ b/libs/community/tests/integration_tests/llms/test_azureml_endpoint.py
@@ -22,9 +22,9 @@ from langchain_community.llms.loading import load_llm
 def test_gpt2_call() -> None:
     """Test valid call to GPT2."""
     llm = AzureMLOnlineEndpoint(
-        endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),
-        endpoint_url=os.getenv("OSS_ENDPOINT_URL"),
-        deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),
+        endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+        endpoint_url=os.getenv("OSS_ENDPOINT_URL"),  # type: ignore[arg-type]
+        deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
         content_formatter=OSSContentFormatter(),
     )
     output = llm.invoke("Foo")
@@ -34,9 +34,9 @@ def test_gpt2_call() -> None:
 def test_hf_call() -> None:
     """Test valid call to HuggingFace Foundation Model."""
     llm = AzureMLOnlineEndpoint(
-        endpoint_api_key=os.getenv("HF_ENDPOINT_API_KEY"),
-        endpoint_url=os.getenv("HF_ENDPOINT_URL"),
-        deployment_name=os.getenv("HF_DEPLOYMENT_NAME"),
+        endpoint_api_key=os.getenv("HF_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+        endpoint_url=os.getenv("HF_ENDPOINT_URL"),  # type: ignore[arg-type]
+        deployment_name=os.getenv("HF_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
         content_formatter=HFContentFormatter(),
     )
     output = llm.invoke("Foo")
@@ -46,9 +46,9 @@ def test_hf_call() -> None:
 def test_dolly_call() -> None:
     """Test valid call to dolly-v2."""
     llm = AzureMLOnlineEndpoint(
-        endpoint_api_key=os.getenv("DOLLY_ENDPOINT_API_KEY"),
-        endpoint_url=os.getenv("DOLLY_ENDPOINT_URL"),
-        deployment_name=os.getenv("DOLLY_DEPLOYMENT_NAME"),
+        endpoint_api_key=os.getenv("DOLLY_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+        endpoint_url=os.getenv("DOLLY_ENDPOINT_URL"),  # type: ignore[arg-type]
+        deployment_name=os.getenv("DOLLY_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
         content_formatter=DollyContentFormatter(),
     )
     output = llm.invoke("Foo")
@@ -77,9 +77,9 @@ def test_custom_formatter() -> None:
             return response_json[0]["summary_text"]
 
     llm = AzureMLOnlineEndpoint(
-        endpoint_api_key=os.getenv("BART_ENDPOINT_API_KEY"),
-        endpoint_url=os.getenv("BART_ENDPOINT_URL"),
-        deployment_name=os.getenv("BART_DEPLOYMENT_NAME"),
+        endpoint_api_key=os.getenv("BART_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+        endpoint_url=os.getenv("BART_ENDPOINT_URL"),  # type: ignore[arg-type]
+        deployment_name=os.getenv("BART_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
         content_formatter=CustomFormatter(),
     )
     output = llm.invoke("Foo")
@@ -90,9 +90,9 @@ def test_missing_content_formatter() -> None:
     """Test AzureML LLM without a content_formatter attribute"""
     with pytest.raises(AttributeError):
         llm = AzureMLOnlineEndpoint(
-            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),
-            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),
-            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),
+            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),  # type: ignore[arg-type]
+            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
         )
         llm.invoke("Foo")
@@ -119,9 +119,9 @@ def test_invalid_request_format() -> None:
 
     with pytest.raises(HTTPError):
         llm = AzureMLOnlineEndpoint(
-            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),
-            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),
-            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),
+            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),  # type: ignore[arg-type]
+            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
            content_formatter=CustomContentFormatter(),
         )
         llm.invoke("Foo")
@@ -131,9 +131,9 @@ def test_incorrect_url() -> None:
     """Testing AzureML Endpoint for an incorrect URL"""
     with pytest.raises(ValidationError):
         llm = AzureMLOnlineEndpoint(
-            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),
+            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
             endpoint_url="https://endpoint.inference.com",
-            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),
+            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
             content_formatter=OSSContentFormatter(),
         )
         llm.invoke("Foo")
@@ -142,10 +142,10 @@ def test_incorrect_url() -> None:
 def test_incorrect_api_type() -> None:
     with pytest.raises(ValidationError):
         llm = AzureMLOnlineEndpoint(
-            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),
-            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),
-            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),
-            endpoint_api_type="serverless",
+            endpoint_api_key=os.getenv("OSS_ENDPOINT_API_KEY"),  # type: ignore[arg-type]
+            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),  # type: ignore[arg-type]
+            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
+            endpoint_api_type="serverless",  # type: ignore[arg-type]
             content_formatter=OSSContentFormatter(),
         )
         llm.invoke("Foo")
@@ -155,9 +155,9 @@ def test_incorrect_key() -> None:
     """Testing AzureML Endpoint for incorrect key"""
     with pytest.raises(HTTPError):
         llm = AzureMLOnlineEndpoint(
-            endpoint_api_key="incorrect-key",
-            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),
-            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),
+            endpoint_api_key="incorrect-key",  # type: ignore[arg-type]
+            endpoint_url=os.getenv("OSS_ENDPOINT_URL"),  # type: ignore[arg-type]
+            deployment_name=os.getenv("OSS_DEPLOYMENT_NAME"),  # type: ignore[arg-type]
             content_formatter=OSSContentFormatter(),
         )
         llm.invoke("Foo")
diff --git a/libs/community/tests/integration_tests/llms/test_baseten.py b/libs/community/tests/integration_tests/llms/test_baseten.py
index 692e34790e..dc282eb859 100644
--- a/libs/community/tests/integration_tests/llms/test_baseten.py
+++ b/libs/community/tests/integration_tests/llms/test_baseten.py
@@ -8,6 +8,6 @@ from langchain_community.llms.baseten import Baseten
 
 def test_baseten_call() -> None:
     """Test valid call to Baseten."""
-    llm = Baseten(model=os.environ["BASETEN_MODEL_ID"])
+    llm = Baseten(model=os.environ["BASETEN_MODEL_ID"])  # type: ignore[call-arg]
     output = llm.invoke("Test prompt, please respond.")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_beam.py b/libs/community/tests/integration_tests/llms/test_beam.py
index 952c70ebe6..92a96770dd 100644
--- a/libs/community/tests/integration_tests/llms/test_beam.py
+++ b/libs/community/tests/integration_tests/llms/test_beam.py
@@ -8,7 +8,7 @@ def test_beam_call() -> None:
     llm = Beam(
         model_name="gpt2",
         name="langchain-gpt2",
-        cpu=8,
+        cpu=8,  # type: ignore[arg-type]
         memory="32Gi",
         gpu="A10G",
         python_version="python3.8",
diff --git a/libs/community/tests/integration_tests/llms/test_cerebriumai.py b/libs/community/tests/integration_tests/llms/test_cerebriumai.py
index 5a33141c0a..90c3829670 100644
--- a/libs/community/tests/integration_tests/llms/test_cerebriumai.py
+++ b/libs/community/tests/integration_tests/llms/test_cerebriumai.py
@@ -5,6 +5,6 @@ from langchain_community.llms.cerebriumai import CerebriumAI
 
 def test_cerebriumai_call() -> None:
     """Test valid call to cerebriumai."""
-    llm = CerebriumAI(max_length=10)
+    llm = CerebriumAI(max_length=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_cohere.py b/libs/community/tests/integration_tests/llms/test_cohere.py
index 2ccb86e770..5fad016491 100644
--- a/libs/community/tests/integration_tests/llms/test_cohere.py
+++ b/libs/community/tests/integration_tests/llms/test_cohere.py
@@ -12,7 +12,7 @@ from tests.integration_tests.llms.utils import assert_llm_equality
 
 def test_cohere_call() -> None:
     """Test valid call to cohere."""
-    llm = Cohere(max_tokens=10)
+    llm = Cohere(max_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
@@ -20,16 +20,16 @@ def test_cohere_call() -> None:
 def test_cohere_api_key(monkeypatch: MonkeyPatch) -> None:
     """Test that cohere api key is a secret key."""
     # test initialization from init
-    assert isinstance(Cohere(cohere_api_key="1").cohere_api_key, SecretStr)
+    assert isinstance(Cohere(cohere_api_key="1").cohere_api_key, SecretStr)  # type: ignore[arg-type, call-arg]
 
     # test initialization from env variable
     monkeypatch.setenv("COHERE_API_KEY", "secret-api-key")
-    assert isinstance(Cohere().cohere_api_key, SecretStr)
+    assert isinstance(Cohere().cohere_api_key, SecretStr)  # type: ignore[call-arg]
 
 
 def test_saving_loading_llm(tmp_path: Path) -> None:
     """Test saving/loading a Cohere LLM."""
-    llm = Cohere(max_tokens=10)
+    llm = Cohere(max_tokens=10)  # type: ignore[call-arg]
     llm.save(file_path=tmp_path / "cohere.yaml")
     loaded_llm = load_llm(tmp_path / "cohere.yaml")
     assert_llm_equality(llm, loaded_llm)
diff --git a/libs/community/tests/integration_tests/llms/test_forefrontai.py b/libs/community/tests/integration_tests/llms/test_forefrontai.py
index a71c726b2c..d99e194d85 100644
--- a/libs/community/tests/integration_tests/llms/test_forefrontai.py
+++ b/libs/community/tests/integration_tests/llms/test_forefrontai.py
@@ -5,6 +5,6 @@ from langchain_community.llms.forefrontai import ForefrontAI
 
 def test_forefrontai_call() -> None:
     """Test valid call to forefrontai."""
-    llm = ForefrontAI(length=10)
+    llm = ForefrontAI(length=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_google_palm.py b/libs/community/tests/integration_tests/llms/test_google_palm.py
index bd052d91ac..9fc2442f05 100644
--- a/libs/community/tests/integration_tests/llms/test_google_palm.py
+++ b/libs/community/tests/integration_tests/llms/test_google_palm.py
@@ -22,9 +22,9 @@ model_names = [None, "models/text-bison-001", "gemini-pro"]
 def test_google_generativeai_call(model_name: str) -> None:
     """Test valid call to Google GenerativeAI text API."""
     if model_name:
-        llm = GooglePalm(max_output_tokens=10, model_name=model_name)
+        llm = GooglePalm(max_output_tokens=10, model_name=model_name)  # type: ignore[call-arg]
     else:
-        llm = GooglePalm(max_output_tokens=10)
+        llm = GooglePalm(max_output_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
     assert llm._llm_type == "google_palm"
@@ -41,9 +41,9 @@ def test_google_generativeai_call(model_name: str) -> None:
 def test_google_generativeai_generate(model_name: str) -> None:
     n = 1 if model_name == "gemini-pro" else 2
     if model_name:
-        llm = GooglePalm(temperature=0.3, n=n, model_name=model_name)
+        llm = GooglePalm(temperature=0.3, n=n, model_name=model_name)  # type: ignore[call-arg]
     else:
-        llm = GooglePalm(temperature=0.3, n=n)
+        llm = GooglePalm(temperature=0.3, n=n)  # type: ignore[call-arg]
     output = llm.generate(["Say foo:"])
     assert isinstance(output, LLMResult)
     assert len(output.generations) == 1
@@ -51,26 +51,26 @@ def test_google_generativeai_generate(model_name: str) -> None:
 
 
 def test_google_generativeai_get_num_tokens() -> None:
-    llm = GooglePalm()
+    llm = GooglePalm()  # type: ignore[call-arg]
     output = llm.get_num_tokens("How are you?")
     assert output == 4
 
 
 async def test_google_generativeai_agenerate() -> None:
-    llm = GooglePalm(temperature=0, model_name="gemini-pro")
+    llm = GooglePalm(temperature=0, model_name="gemini-pro")  # type: ignore[call-arg]
     output = await llm.agenerate(["Please say foo:"])
     assert isinstance(output, LLMResult)
 
 
 def test_generativeai_stream() -> None:
-    llm = GooglePalm(temperature=0, model_name="gemini-pro")
+    llm = GooglePalm(temperature=0, model_name="gemini-pro")  # type: ignore[call-arg]
     outputs = list(llm.stream("Please say foo:"))
     assert isinstance(outputs[0], str)
 
 
 def test_saving_loading_llm(tmp_path: Path) -> None:
     """Test saving/loading a Google PaLM LLM."""
-    llm = GooglePalm(max_output_tokens=10)
+    llm = GooglePalm(max_output_tokens=10)  # type: ignore[call-arg]
     llm.save(file_path=tmp_path / "google_palm.yaml")
     loaded_llm = load_llm(tmp_path / "google_palm.yaml")
     assert loaded_llm == llm
diff --git a/libs/community/tests/integration_tests/llms/test_gooseai.py b/libs/community/tests/integration_tests/llms/test_gooseai.py
index 7c890f459a..13ee8b79d4 100644
--- a/libs/community/tests/integration_tests/llms/test_gooseai.py
+++ b/libs/community/tests/integration_tests/llms/test_gooseai.py
@@ -5,14 +5,14 @@ from langchain_community.llms.gooseai import GooseAI
 
 
 def test_gooseai_call() -> None:
     """Test valid call to gooseai."""
-    llm = GooseAI(max_tokens=10)
+    llm = GooseAI(max_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
 def test_gooseai_call_fairseq() -> None:
     """Test valid call to gooseai with fairseq model."""
-    llm = GooseAI(model_name="fairseq-1-3b", max_tokens=10)
+    llm = GooseAI(model_name="fairseq-1-3b", max_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
@@ -20,9 +20,9 @@ def test_gooseai_call_fairseq() -> None:
 def test_gooseai_stop_valid() -> None:
     """Test gooseai stop logic on valid configuration."""
     query = "write an ordered list of five items"
-    first_llm = GooseAI(stop="3", temperature=0)
+    first_llm = GooseAI(stop="3", temperature=0)  # type: ignore[call-arg]
     first_output = first_llm.invoke(query)
-    second_llm = GooseAI(temperature=0)
+    second_llm = GooseAI(temperature=0)  # type: ignore[call-arg]
     second_output = second_llm.invoke(query, stop=["3"])
     # Because it stops on new lines, shouldn't return anything
     assert first_output == second_output
diff --git a/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py b/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py
index 1945c271c8..6b409eaf14 100644
--- a/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py
+++ b/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py
@@ -11,14 +11,14 @@ from tests.integration_tests.llms.utils import assert_llm_equality
 
 def test_huggingface_endpoint_call_error() -> None:
     """Test valid call to HuggingFace that errors."""
-    llm = HuggingFaceEndpoint(endpoint_url="", model_kwargs={"max_new_tokens": -1})
+    llm = HuggingFaceEndpoint(endpoint_url="", model_kwargs={"max_new_tokens": -1})  # type: ignore[call-arg]
     with pytest.raises(ValueError):
         llm.invoke("Say foo:")
 
 
 def test_saving_loading_endpoint_llm(tmp_path: Path) -> None:
     """Test saving/loading a HuggingFaceEndpoint LLM."""
-    llm = HuggingFaceEndpoint(
+    llm = HuggingFaceEndpoint(  # type: ignore[call-arg]
         endpoint_url="", task="text-generation", model_kwargs={"max_new_tokens": 10}
     )
     llm.save(file_path=tmp_path / "hf.yaml")
@@ -28,7 +28,7 @@ def test_saving_loading_endpoint_llm(tmp_path: Path) -> None:
 
 def test_huggingface_text_generation() -> None:
     """Test valid call to HuggingFace text generation model."""
-    llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
+    llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     print(output)  # noqa: T201
     assert isinstance(output, str)
@@ -36,35 +36,35 @@ def test_huggingface_text_generation() -> None:
 
 def test_huggingface_text2text_generation() -> None:
     """Test valid call to HuggingFace text2text model."""
-    llm = HuggingFaceEndpoint(repo_id="google/flan-t5-xl")
+    llm = HuggingFaceEndpoint(repo_id="google/flan-t5-xl")  # type: ignore[call-arg]
     output = llm.invoke("The capital of New York is")
     assert output == "Albany"
 
 
 def test_huggingface_summarization() -> None:
     """Test valid call to HuggingFace summarization model."""
-    llm = HuggingFaceEndpoint(repo_id="facebook/bart-large-cnn")
+    llm = HuggingFaceEndpoint(repo_id="facebook/bart-large-cnn")  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
 def test_huggingface_call_error() -> None:
     """Test valid call to HuggingFace that errors."""
-    llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": -1})
+    llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": -1})  # type: ignore[call-arg]
     with pytest.raises(ValueError):
         llm.invoke("Say foo:")
 
 
 def test_saving_loading_llm(tmp_path: Path) -> None:
     """Test saving/loading a HuggingFaceEndpoint LLM."""
-    llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
+    llm = HuggingFaceEndpoint(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})  # type: ignore[call-arg]
     llm.save(file_path=tmp_path / "hf.yaml")
     loaded_llm = load_llm(tmp_path / "hf.yaml")
     assert_llm_equality(llm, loaded_llm)
 
 
 def test_invocation_params_stop_sequences() -> None:
-    llm = HuggingFaceEndpoint()
+    llm = HuggingFaceEndpoint()  # type: ignore[call-arg]
     assert llm._default_params["stop_sequences"] == []
 
     runtime_stop = None
@@ -75,7 +75,7 @@ def test_invocation_params_stop_sequences() -> None:
     assert llm._invocation_params(runtime_stop)["stop_sequences"] == ["stop"]
     assert llm._default_params["stop_sequences"] == []
 
-    llm = HuggingFaceEndpoint(stop_sequences=["."])
+    llm = HuggingFaceEndpoint(stop_sequences=["."])  # type: ignore[call-arg]
     runtime_stop = ["stop"]
     assert llm._invocation_params(runtime_stop)["stop_sequences"] == [".", "stop"]
     assert llm._default_params["stop_sequences"] == ["."]
diff --git a/libs/community/tests/integration_tests/llms/test_huggingface_hub.py b/libs/community/tests/integration_tests/llms/test_huggingface_hub.py
index 999f92ad41..678c9c7a80 100644
--- a/libs/community/tests/integration_tests/llms/test_huggingface_hub.py
+++ b/libs/community/tests/integration_tests/llms/test_huggingface_hub.py
@@ -11,35 +11,35 @@ from tests.integration_tests.llms.utils import assert_llm_equality
 
 def test_huggingface_text_generation() -> None:
     """Test valid call to HuggingFace text generation model."""
-    llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
+    llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
 def test_huggingface_text2text_generation() -> None:
     """Test valid call to HuggingFace text2text model."""
-    llm = HuggingFaceHub(repo_id="google/flan-t5-xl")
+    llm = HuggingFaceHub(repo_id="google/flan-t5-xl")  # type: ignore[call-arg]
     output = llm.invoke("The capital of New York is")
     assert output == "Albany"
 
 
 def test_huggingface_summarization() -> None:
     """Test valid call to HuggingFace summarization model."""
-    llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn")
+    llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn")  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
 def test_huggingface_call_error() -> None:
     """Test valid call to HuggingFace that errors."""
-    llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1})
+    llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1})  # type: ignore[call-arg]
     with pytest.raises(ValueError):
         llm.invoke("Say foo:")
 
 
 def test_saving_loading_llm(tmp_path: Path) -> None:
     """Test saving/loading a HuggingFaceHub LLM."""
-    llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
+    llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})  # type: ignore[call-arg]
     llm.save(file_path=tmp_path / "hf.yaml")
     loaded_llm = load_llm(tmp_path / "hf.yaml")
     assert_llm_equality(llm, loaded_llm)
diff --git a/libs/community/tests/integration_tests/llms/test_layerup_security.py b/libs/community/tests/integration_tests/llms/test_layerup_security.py
index 91176a13a8..d475d774c7 100644
--- a/libs/community/tests/integration_tests/llms/test_layerup_security.py
+++ b/libs/community/tests/integration_tests/llms/test_layerup_security.py
@@ -24,7 +24,7 @@ class MockLLM(LLM):
 
 def test_layerup_security_with_invalid_api_key() -> None:
     mock_llm = MockLLM()
-    layerup_security = LayerupSecurity(
+    layerup_security = LayerupSecurity(  # type: ignore[call-arg]
         llm=mock_llm,
         layerup_api_key="-- invalid API key --",
         layerup_api_base_url="https://api.uselayerup.com/v1",
diff --git a/libs/community/tests/integration_tests/llms/test_minimax.py b/libs/community/tests/integration_tests/llms/test_minimax.py
index cc7a33fb17..87bd8b7bc5 100644
--- a/libs/community/tests/integration_tests/llms/test_minimax.py
+++ b/libs/community/tests/integration_tests/llms/test_minimax.py
@@ -4,14 +4,14 @@ from langchain_community.llms.minimax import Minimax
 
 def test_minimax_call() -> None:
     """Test valid call to minimax."""
-    llm = Minimax(max_tokens=10)
+    llm = Minimax(max_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Hello world!")
     assert isinstance(output, str)
 
 
 def test_minimax_call_successful() -> None:
     """Test valid call to minimax."""
-    llm = Minimax()
+    llm = Minimax()  # type: ignore[call-arg]
     output = llm.invoke(
         "A chain is a serial assembly of connected pieces, called links, \
        typically made of metal, with an overall character similar to that\
diff --git a/libs/community/tests/integration_tests/llms/test_nlpcloud.py b/libs/community/tests/integration_tests/llms/test_nlpcloud.py
index 6a4635b254..85a67e3be6 100644
--- a/libs/community/tests/integration_tests/llms/test_nlpcloud.py
+++ b/libs/community/tests/integration_tests/llms/test_nlpcloud.py
@@ -13,14 +13,14 @@ from tests.integration_tests.llms.utils import assert_llm_equality
 
 def test_nlpcloud_call() -> None:
     """Test valid call to nlpcloud."""
-    llm = NLPCloud(max_length=10)
+    llm = NLPCloud(max_length=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
 
 def test_saving_loading_llm(tmp_path: Path) -> None:
     """Test saving/loading an NLPCloud LLM."""
-    llm = NLPCloud(max_length=10)
+    llm = NLPCloud(max_length=10)  # type: ignore[call-arg]
     llm.save(file_path=tmp_path / "nlpcloud.yaml")
     loaded_llm = load_llm(tmp_path / "nlpcloud.yaml")
     assert_llm_equality(llm, loaded_llm)
@@ -29,10 +29,10 @@ def test_saving_loading_llm(tmp_path: Path) -> None:
 def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture) -> None:
     """Test that nlpcloud api key is a secret key."""
     # test initialization from init
-    assert isinstance(NLPCloud(nlpcloud_api_key="1").nlpcloud_api_key, SecretStr)
+    assert isinstance(NLPCloud(nlpcloud_api_key="1").nlpcloud_api_key, SecretStr)  # type: ignore[arg-type, call-arg]
 
     monkeypatch.setenv("NLPCLOUD_API_KEY", "secret-api-key")
-    llm = NLPCloud()
+    llm = NLPCloud()  # type: ignore[call-arg]
     assert isinstance(llm.nlpcloud_api_key, SecretStr)
 
     assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value() == "secret-api-key"
diff --git a/libs/community/tests/integration_tests/llms/test_opaqueprompts.py b/libs/community/tests/integration_tests/llms/test_opaqueprompts.py
index 7749000aa2..e13bc33c36 100644
--- a/libs/community/tests/integration_tests/llms/test_opaqueprompts.py
+++ b/libs/community/tests/integration_tests/llms/test_opaqueprompts.py
@@ -43,7 +43,7 @@ Question: ```{question}```
 
 
 def test_opaqueprompts() -> None:
-    chain = PromptTemplate.from_template(prompt_template) | OpaquePrompts(llm=OpenAI())
+    chain = PromptTemplate.from_template(prompt_template) | OpaquePrompts(llm=OpenAI())  # type: ignore[call-arg]
     output = chain.invoke(
         {
             "question": "Write a text message to remind John to do password reset \
diff --git a/libs/community/tests/integration_tests/llms/test_openai.py b/libs/community/tests/integration_tests/llms/test_openai.py
index 1c8d1c37a4..4355805eec 100644
--- a/libs/community/tests/integration_tests/llms/test_openai.py
+++ b/libs/community/tests/integration_tests/llms/test_openai.py
@@ -33,7 +33,7 @@ def test_openai_llm_output_contains_model_name() -> None:
 def test_openai_stop_valid() -> None:
     """Test openai stop logic on valid configuration."""
     query = "write an ordered list of five items"
-    first_llm = OpenAI(stop="3", temperature=0)
+    first_llm = OpenAI(stop="3", temperature=0)  # type: ignore[call-arg]
     first_output = first_llm.invoke(query)
     second_llm = OpenAI(temperature=0)
     second_output = second_llm.invoke(query, stop=["3"])
@@ -43,7 +43,7 @@ def test_openai_stop_valid() -> None:
 
 def test_openai_stop_error() -> None:
     """Test openai stop logic on bad configuration."""
-    llm = OpenAI(stop="3", temperature=0)
+    llm = OpenAI(stop="3", temperature=0)  # type: ignore[call-arg]
     with pytest.raises(ValueError):
         llm.invoke("write an ordered list of five items", stop=["\n"])
diff --git a/libs/community/tests/integration_tests/llms/test_openlm.py b/libs/community/tests/integration_tests/llms/test_openlm.py
index a567a37440..d318185ddc 100644
--- a/libs/community/tests/integration_tests/llms/test_openlm.py
+++ b/libs/community/tests/integration_tests/llms/test_openlm.py
@@ -3,6 +3,6 @@ from langchain_community.llms.openlm import OpenLM
 
 def test_openlm_call() -> None:
     """Test valid call to openlm."""
-    llm = OpenLM(model_name="dolly-v2-7b", max_tokens=10)
+    llm = OpenLM(model_name="dolly-v2-7b", max_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py b/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py
index e8476bd2a1..1543e5ac56 100644
--- a/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py
+++ b/libs/community/tests/integration_tests/llms/test_pai_eas_endpoint.py
@@ -8,8 +8,8 @@ from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint
 def test_pai_eas_v1_call() -> None:
     """Test valid call to PAI-EAS Service."""
     llm = PaiEasEndpoint(
-        eas_service_url=os.getenv("EAS_SERVICE_URL"),
-        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+        eas_service_url=os.getenv("EAS_SERVICE_URL"),  # type: ignore[arg-type]
+        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),  # type: ignore[arg-type]
         version="1.0",
     )
     output = llm.invoke("Say foo:")
@@ -18,8 +18,8 @@ def test_pai_eas_v1_call() -> None:
 
 def test_pai_eas_v2_call() -> None:
     llm = PaiEasEndpoint(
-        eas_service_url=os.getenv("EAS_SERVICE_URL"),
-        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+        eas_service_url=os.getenv("EAS_SERVICE_URL"),  # type: ignore[arg-type]
+        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),  # type: ignore[arg-type]
         version="2.0",
     )
     output = llm.invoke("Say foo:")
@@ -29,8 +29,8 @@ def test_pai_eas_v2_call() -> None:
 def test_pai_eas_v1_streaming() -> None:
     """Test streaming call to PAI-EAS Service."""
     llm = PaiEasEndpoint(
-        eas_service_url=os.getenv("EAS_SERVICE_URL"),
-        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+        eas_service_url=os.getenv("EAS_SERVICE_URL"),  # type: ignore[arg-type]
+        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),  # type: ignore[arg-type]
         version="1.0",
     )
     generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
@@ -45,8 +45,8 @@ def test_pai_eas_v1_streaming() -> None:
 
 def test_pai_eas_v2_streaming() -> None:
     llm = PaiEasEndpoint(
-        eas_service_url=os.getenv("EAS_SERVICE_URL"),
-        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),
+        eas_service_url=os.getenv("EAS_SERVICE_URL"),  # type: ignore[arg-type]
+        eas_service_token=os.getenv("EAS_SERVICE_TOKEN"),  # type: ignore[arg-type]
         version="2.0",
     )
     generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["."])
diff --git a/libs/community/tests/integration_tests/llms/test_petals.py b/libs/community/tests/integration_tests/llms/test_petals.py
index d1af2aaed0..82a7857ec0 100644
--- a/libs/community/tests/integration_tests/llms/test_petals.py
+++ b/libs/community/tests/integration_tests/llms/test_petals.py
@@ -7,14 +7,14 @@ from langchain_community.llms.petals import Petals
 
 
 def test_api_key_is_string() -> None:
-    llm = Petals(huggingface_api_key="secret-api-key")
+    llm = Petals(huggingface_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     assert isinstance(llm.huggingface_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = Petals(huggingface_api_key="secret-api-key")
+    llm = Petals(huggingface_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     print(llm.huggingface_api_key, end="")  # noqa: T201
     captured = capsys.readouterr()
 
@@ -23,6 +23,6 @@ def test_api_key_masked_when_passed_via_constructor(
 
 def test_gooseai_call() -> None:
     """Test valid call to Petals."""
-    llm = Petals(max_new_tokens=10)
+    llm = Petals(max_new_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_predictionguard.py b/libs/community/tests/integration_tests/llms/test_predictionguard.py
index fbe47e7499..77db6645bc 100644
--- a/libs/community/tests/integration_tests/llms/test_predictionguard.py
+++ b/libs/community/tests/integration_tests/llms/test_predictionguard.py
@@ -5,6 +5,6 @@ from langchain_community.llms.predictionguard import PredictionGuard
 
 def test_predictionguard_call() -> None:
     """Test valid call to prediction guard."""
-    llm = PredictionGuard(model="OpenAI-text-davinci-003")
+    llm = PredictionGuard(model="OpenAI-text-davinci-003")  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
diff --git a/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py b/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py
index 430a899643..be99142d78 100644
--- a/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py
+++ b/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py
@@ -11,7 +11,7 @@ from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
 
 def test_promptlayer_openai_call() -> None:
     """Test valid call to promptlayer openai."""
-    llm = PromptLayerOpenAI(max_tokens=10)
+    llm = PromptLayerOpenAI(max_tokens=10)  # type: ignore[call-arg]
     output = llm.invoke("Say foo:")
     assert isinstance(output, str)
 
@@ -19,25 +19,25 @@ def test_promptlayer_openai_call() -> None:
 def test_promptlayer_openai_extra_kwargs() -> None:
     """Test extra kwargs to promptlayer openai."""
     # Check that foo is saved in extra_kwargs.
-    llm = PromptLayerOpenAI(foo=3, max_tokens=10)
+    llm = PromptLayerOpenAI(foo=3, max_tokens=10)  # type: ignore[call-arg]
     assert llm.max_tokens == 10
     assert llm.model_kwargs == {"foo": 3}
 
     # Test that if extra_kwargs are provided, they are added to it.
- llm = PromptLayerOpenAI(foo=3, model_kwargs={"bar": 2}) + llm = PromptLayerOpenAI(foo=3, model_kwargs={"bar": 2}) # type: ignore[call-arg] assert llm.model_kwargs == {"foo": 3, "bar": 2} # Test that if provided twice it errors with pytest.raises(ValueError): - PromptLayerOpenAI(foo=3, model_kwargs={"foo": 2}) + PromptLayerOpenAI(foo=3, model_kwargs={"foo": 2}) # type: ignore[call-arg] def test_promptlayer_openai_stop_valid() -> None: """Test promptlayer openai stop logic on valid configuration.""" query = "write an ordered list of five items" - first_llm = PromptLayerOpenAI(stop="3", temperature=0) + first_llm = PromptLayerOpenAI(stop="3", temperature=0) # type: ignore[call-arg] first_output = first_llm.invoke(query) - second_llm = PromptLayerOpenAI(temperature=0) + second_llm = PromptLayerOpenAI(temperature=0) # type: ignore[call-arg] second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output @@ -45,14 +45,14 @@ def test_promptlayer_openai_stop_valid() -> None: def test_promptlayer_openai_stop_error() -> None: """Test promptlayer openai stop logic on bad configuration.""" - llm = PromptLayerOpenAI(stop="3", temperature=0) + llm = PromptLayerOpenAI(stop="3", temperature=0) # type: ignore[call-arg] with pytest.raises(ValueError): llm.invoke("write an ordered list of five items", stop=["\n"]) def test_saving_loading_llm(tmp_path: Path) -> None: """Test saving/loading an promptlayer OpenAPI LLM.""" - llm = PromptLayerOpenAI(max_tokens=10) + llm = PromptLayerOpenAI(max_tokens=10) # type: ignore[call-arg] llm.save(file_path=tmp_path / "openai.yaml") loaded_llm = load_llm(tmp_path / "openai.yaml") assert loaded_llm == llm @@ -60,7 +60,7 @@ def test_saving_loading_llm(tmp_path: Path) -> None: def test_promptlayer_openai_streaming() -> None: """Test streaming tokens from promptalyer OpenAI.""" - llm = PromptLayerOpenAI(max_tokens=10) + llm = PromptLayerOpenAI(max_tokens=10) # type: ignore[call-arg] generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) diff --git a/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py b/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py index 4a1c91d276..2e3cbfe841 100644 --- a/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py +++ b/libs/community/tests/integration_tests/llms/test_propmptlayer_openai_chat.py @@ -10,7 +10,7 @@ from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat def test_promptlayer_openai_chat_call() -> None: """Test valid call to promptlayer openai.""" - llm = PromptLayerOpenAIChat(max_tokens=10) + llm = PromptLayerOpenAIChat(max_tokens=10) # type: ignore[call-arg] output = llm.invoke("Say foo:") assert isinstance(output, str) @@ -18,9 +18,9 @@ def test_promptlayer_openai_chat_call() -> None: def test_promptlayer_openai_chat_stop_valid() -> None: """Test promptlayer openai stop logic on valid configuration.""" query = "write an ordered list of five items" - first_llm = PromptLayerOpenAIChat(stop="3", temperature=0) + first_llm = PromptLayerOpenAIChat(stop="3", temperature=0) # type: ignore[call-arg] first_output = first_llm.invoke(query) - second_llm = PromptLayerOpenAIChat(temperature=0) + second_llm = PromptLayerOpenAIChat(temperature=0) # type: ignore[call-arg] second_output = second_llm.invoke(query, stop=["3"]) # Because it stops on new lines, shouldn't return anything assert first_output == second_output @@ -28,14 +28,14 @@ 
def test_promptlayer_openai_chat_stop_valid() -> None: def test_promptlayer_openai_chat_stop_error() -> None: """Test promptlayer openai stop logic on bad configuration.""" - llm = PromptLayerOpenAIChat(stop="3", temperature=0) + llm = PromptLayerOpenAIChat(stop="3", temperature=0) # type: ignore[call-arg] with pytest.raises(ValueError): llm.invoke("write an ordered list of five items", stop=["\n"]) def test_saving_loading_llm(tmp_path: Path) -> None: """Test saving/loading an promptlayer OpenAPI LLM.""" - llm = PromptLayerOpenAIChat(max_tokens=10) + llm = PromptLayerOpenAIChat(max_tokens=10) # type: ignore[call-arg] llm.save(file_path=tmp_path / "openai.yaml") loaded_llm = load_llm(tmp_path / "openai.yaml") assert loaded_llm == llm diff --git a/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py index 576c6ab9e4..fcc476fdec 100644 --- a/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_qianfan_endpoint.py @@ -8,14 +8,14 @@ from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint def test_call() -> None: """Test valid call to qianfan.""" - llm = QianfanLLMEndpoint() + llm = QianfanLLMEndpoint() # type: ignore[call-arg] output = llm.invoke("write a joke") assert isinstance(output, str) def test_generate() -> None: """Test valid call to qianfan.""" - llm = QianfanLLMEndpoint() + llm = QianfanLLMEndpoint() # type: ignore[call-arg] output = llm.generate(["write a joke"]) assert isinstance(output, LLMResult) assert isinstance(output.generations, list) @@ -23,20 +23,20 @@ def test_generate() -> None: def test_generate_stream() -> None: """Test valid call to qianfan.""" - llm = QianfanLLMEndpoint() + llm = QianfanLLMEndpoint() # type: ignore[call-arg] output = llm.stream("write a joke") assert isinstance(output, Generator) async def test_qianfan_aio() -> None: - llm = QianfanLLMEndpoint(streaming=True) + llm = QianfanLLMEndpoint(streaming=True) # type: ignore[call-arg] async for token in llm.astream("hi qianfan."): assert isinstance(token, str) def test_rate_limit() -> None: - llm = QianfanLLMEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2}) + llm = QianfanLLMEndpoint(model="ERNIE-Bot", init_kwargs={"query_per_second": 2}) # type: ignore[call-arg] assert llm.client._client._rate_limiter._sync_limiter._query_per_second == 2 output = llm.generate(["write a joke"]) assert isinstance(output, LLMResult) diff --git a/libs/community/tests/integration_tests/llms/test_replicate.py b/libs/community/tests/integration_tests/llms/test_replicate.py index 13a20ae9ee..fa58bd19c2 100644 --- a/libs/community/tests/integration_tests/llms/test_replicate.py +++ b/libs/community/tests/integration_tests/llms/test_replicate.py @@ -29,11 +29,11 @@ def test_replicate_streaming_call() -> None: def test_replicate_model_kwargs() -> None: """Test simple non-streaming call to Replicate.""" - llm = Replicate( + llm = Replicate( # type: ignore[call-arg] model=TEST_MODEL, model_kwargs={"max_length": 100, "temperature": 0.01} ) long_output = llm.invoke("What is LangChain") - llm = Replicate( + llm = Replicate( # type: ignore[call-arg] model=TEST_MODEL, model_kwargs={"max_length": 10, "temperature": 0.01} ) short_output = llm.invoke("What is LangChain") diff --git a/libs/community/tests/integration_tests/llms/test_symblai_nebula.py b/libs/community/tests/integration_tests/llms/test_symblai_nebula.py index b1b2eb3b53..ea0a9f8764 100644 
--- a/libs/community/tests/integration_tests/llms/test_symblai_nebula.py +++ b/libs/community/tests/integration_tests/llms/test_symblai_nebula.py @@ -34,7 +34,7 @@ Sam: Perfect! Let's keep the momentum going. Reach out if there are any sudden issues or support needed. Have a productive day! Alex: You too. Rhea: Thanks, bye!""" - llm = Nebula(nebula_api_key="") + llm = Nebula(nebula_api_key="") # type: ignore[arg-type] instruction = """Identify the main objectives mentioned in this conversation.""" diff --git a/libs/community/tests/integration_tests/llms/test_tongyi.py b/libs/community/tests/integration_tests/llms/test_tongyi.py index 04c6e2e997..923be81441 100644 --- a/libs/community/tests/integration_tests/llms/test_tongyi.py +++ b/libs/community/tests/integration_tests/llms/test_tongyi.py @@ -7,14 +7,14 @@ from langchain_community.llms.tongyi import Tongyi def test_tongyi_call() -> None: """Test valid call to tongyi.""" - llm = Tongyi() + llm = Tongyi() # type: ignore[call-arg] output = llm.invoke("who are you") assert isinstance(output, str) def test_tongyi_generate() -> None: """Test valid call to tongyi.""" - llm = Tongyi() + llm = Tongyi() # type: ignore[call-arg] output = llm.generate(["who are you"]) assert isinstance(output, LLMResult) assert isinstance(output.generations, list) @@ -22,7 +22,7 @@ def test_tongyi_generate() -> None: def test_tongyi_generate_stream() -> None: """Test valid call to tongyi.""" - llm = Tongyi(streaming=True) + llm = Tongyi(streaming=True) # type: ignore[call-arg] output = llm.generate(["who are you"]) print(output) # noqa: T201 assert isinstance(output, LLMResult) diff --git a/libs/community/tests/integration_tests/llms/test_volcengine_maas.py b/libs/community/tests/integration_tests/llms/test_volcengine_maas.py index b2af4e1d6f..7cf3e29081 100644 --- a/libs/community/tests/integration_tests/llms/test_volcengine_maas.py +++ b/libs/community/tests/integration_tests/llms/test_volcengine_maas.py @@ -13,9 +13,9 @@ from langchain_community.llms.volcengine_maas import ( def test_api_key_is_string() -> None: - llm = VolcEngineMaasBase( - volc_engine_maas_ak="secret-volc-ak", - volc_engine_maas_sk="secret-volc-sk", + llm = VolcEngineMaasBase( # type: ignore[call-arg] + volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type] + volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type] ) assert isinstance(llm.volc_engine_maas_ak, SecretStr) assert isinstance(llm.volc_engine_maas_sk, SecretStr) @@ -24,9 +24,9 @@ def test_api_key_is_string() -> None: def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: - llm = VolcEngineMaasBase( - volc_engine_maas_ak="secret-volc-ak", - volc_engine_maas_sk="secret-volc-sk", + llm = VolcEngineMaasBase( # type: ignore[call-arg] + volc_engine_maas_ak="secret-volc-ak", # type: ignore[arg-type] + volc_engine_maas_sk="secret-volc-sk", # type: ignore[arg-type] ) print(llm.volc_engine_maas_ak, end="") # noqa: T201 captured = capsys.readouterr() @@ -36,14 +36,14 @@ def test_api_key_masked_when_passed_via_constructor( def test_default_call() -> None: """Test valid call to volc engine.""" - llm = VolcEngineMaasLLM() + llm = VolcEngineMaasLLM() # type: ignore[call-arg] output = llm.invoke("tell me a joke") assert isinstance(output, str) def test_generate() -> None: """Test valid call to volc engine.""" - llm = VolcEngineMaasLLM() + llm = VolcEngineMaasLLM() # type: ignore[call-arg] output = llm.generate(["tell me a joke"]) assert isinstance(output, LLMResult) assert isinstance(output.generations, 
list) @@ -51,6 +51,6 @@ def test_generate() -> None: def test_generate_stream() -> None: """Test valid call to volc engine.""" - llm = VolcEngineMaasLLM(streaming=True) + llm = VolcEngineMaasLLM(streaming=True) # type: ignore[call-arg] output = llm.stream("tell me a joke") assert isinstance(output, Generator) diff --git a/libs/community/tests/integration_tests/retrievers/docarray/test_backends.py b/libs/community/tests/integration_tests/retrievers/docarray/test_backends.py index 2e41bfc44f..1747971d16 100644 --- a/libs/community/tests/integration_tests/retrievers/docarray/test_backends.py +++ b/libs/community/tests/integration_tests/retrievers/docarray/test_backends.py @@ -56,7 +56,7 @@ def test_backends(request: Any, backend: Any) -> None: index=index, embeddings=embeddings, search_field="title_embedding", - search_type="mmr", + search_type="mmr", # type: ignore[arg-type] content_field="title", filters=filter_query, ) diff --git a/libs/community/tests/integration_tests/retrievers/test_arxiv.py b/libs/community/tests/integration_tests/retrievers/test_arxiv.py index c2cda25aa0..3f885df431 100644 --- a/libs/community/tests/integration_tests/retrievers/test_arxiv.py +++ b/libs/community/tests/integration_tests/retrievers/test_arxiv.py @@ -9,7 +9,7 @@ from langchain_community.retrievers import ArxivRetriever @pytest.fixture def retriever() -> ArxivRetriever: - return ArxivRetriever() + return ArxivRetriever() # type: ignore[call-arg] def assert_docs(docs: List[Document], all_meta: bool = False) -> None: @@ -39,7 +39,7 @@ def test_load_success_all_meta(retriever: ArxivRetriever) -> None: def test_load_success_init_args() -> None: - retriever = ArxivRetriever(load_max_docs=1, load_all_available_meta=True) + retriever = ArxivRetriever(load_max_docs=1, load_all_available_meta=True) # type: ignore[call-arg] docs = retriever.invoke("ChatGPT") assert len(docs) == 1 assert_docs(docs, all_meta=True) diff --git a/libs/community/tests/integration_tests/retrievers/test_pubmed.py b/libs/community/tests/integration_tests/retrievers/test_pubmed.py index a13bba31e5..7d67c58481 100644 --- a/libs/community/tests/integration_tests/retrievers/test_pubmed.py +++ b/libs/community/tests/integration_tests/retrievers/test_pubmed.py @@ -9,7 +9,7 @@ from langchain_community.retrievers import PubMedRetriever @pytest.fixture def retriever() -> PubMedRetriever: - return PubMedRetriever() + return PubMedRetriever() # type: ignore[call-arg] def assert_docs(docs: List[Document]) -> None: diff --git a/libs/community/tests/integration_tests/retrievers/test_weaviate_hybrid_search.py b/libs/community/tests/integration_tests/retrievers/test_weaviate_hybrid_search.py index 2e80347f80..c59c8130b3 100644 --- a/libs/community/tests/integration_tests/retrievers/test_weaviate_hybrid_search.py +++ b/libs/community/tests/integration_tests/retrievers/test_weaviate_hybrid_search.py @@ -27,7 +27,7 @@ class TestWeaviateHybridSearchRetriever: raise ValueError("OPENAI_API_KEY environment variable is not set") @pytest.fixture(scope="class", autouse=True) - def weaviate_url(self) -> Union[str, Generator[str, None, None]]: + def weaviate_url(self) -> Union[str, Generator[str, None, None]]: # type: ignore[return] """Return the weaviate url.""" from weaviate import Client diff --git a/libs/community/tests/integration_tests/retrievers/test_wikipedia.py b/libs/community/tests/integration_tests/retrievers/test_wikipedia.py index 2388ad4fe5..4b3b1be761 100644 --- a/libs/community/tests/integration_tests/retrievers/test_wikipedia.py +++ 
b/libs/community/tests/integration_tests/retrievers/test_wikipedia.py @@ -9,7 +9,7 @@ from langchain_community.retrievers import WikipediaRetriever @pytest.fixture def retriever() -> WikipediaRetriever: - return WikipediaRetriever() + return WikipediaRetriever() # type: ignore[call-arg] def assert_docs(docs: List[Document], all_meta: bool = False) -> None: @@ -40,7 +40,7 @@ def test_load_success_all_meta(retriever: WikipediaRetriever) -> None: def test_load_success_init_args() -> None: - retriever = WikipediaRetriever( + retriever = WikipediaRetriever( # type: ignore[call-arg] lang="en", top_k_results=1, load_all_available_meta=True ) docs = retriever.invoke("HUNTER X HUNTER") @@ -49,7 +49,7 @@ def test_load_success_init_args() -> None: def test_load_success_init_args_more() -> None: - retriever = WikipediaRetriever( + retriever = WikipediaRetriever( # type: ignore[call-arg] lang="en", top_k_results=20, load_all_available_meta=False ) docs = retriever.invoke("HUNTER X HUNTER") diff --git a/libs/community/tests/integration_tests/retrievers/test_zep.py b/libs/community/tests/integration_tests/retrievers/test_zep.py index 195cfd31d2..a8ecd28032 100644 --- a/libs/community/tests/integration_tests/retrievers/test_zep.py +++ b/libs/community/tests/integration_tests/retrievers/test_zep.py @@ -67,7 +67,7 @@ def zep_retriever( mock_zep_client.memory.asearch_memory.return_value = copy.deepcopy( # type: ignore search_results ) - zep = ZepRetriever(session_id="123", url="http://localhost:8000") + zep = ZepRetriever(session_id="123", url="http://localhost:8000") # type: ignore[call-arg] zep.zep_client = mock_zep_client return zep diff --git a/libs/community/tests/integration_tests/tools/edenai/test_audio_speech_to_text.py b/libs/community/tests/integration_tests/tools/edenai/test_audio_speech_to_text.py index 000fc669cf..24c8e57727 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_audio_speech_to_text.py +++ b/libs/community/tests/integration_tests/tools/edenai/test_audio_speech_to_text.py @@ -13,7 +13,7 @@ from langchain_community.tools.edenai import EdenAiSpeechToTextTool def test_edenai_call() -> None: """Test simple call to edenai's speech to text endpoint.""" - speech2text = EdenAiSpeechToTextTool(providers=["amazon"]) + speech2text = EdenAiSpeechToTextTool(providers=["amazon"]) # type: ignore[call-arg] output = speech2text.invoke( "https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3" diff --git a/libs/community/tests/integration_tests/tools/edenai/test_audio_text_to_speech.py b/libs/community/tests/integration_tests/tools/edenai/test_audio_text_to_speech.py index 31c42ccf42..1052f9bab3 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_audio_text_to_speech.py +++ b/libs/community/tests/integration_tests/tools/edenai/test_audio_text_to_speech.py @@ -15,7 +15,7 @@ from langchain_community.tools.edenai import EdenAiTextToSpeechTool def test_edenai_call() -> None: """Test simple call to edenai's text to speech endpoint.""" - text2speech = EdenAiTextToSpeechTool( + text2speech = EdenAiTextToSpeechTool( # type: ignore[call-arg] providers=["amazon"], language="en", voice="MALE" ) diff --git a/libs/community/tests/integration_tests/tools/edenai/test_image_explicitcontent.py b/libs/community/tests/integration_tests/tools/edenai/test_image_explicitcontent.py index 9f76ebdc9b..11a16d261a 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_image_explicitcontent.py +++ 
b/libs/community/tests/integration_tests/tools/edenai/test_image_explicitcontent.py @@ -13,7 +13,7 @@ from langchain_community.tools.edenai import EdenAiExplicitImageTool def test_edenai_call() -> None: """Test simple call to edenai's image moderation endpoint.""" - image_moderation = EdenAiExplicitImageTool(providers=["amazon"]) + image_moderation = EdenAiExplicitImageTool(providers=["amazon"]) # type: ignore[call-arg] output = image_moderation.invoke("https://static.javatpoint.com/images/objects.jpg") diff --git a/libs/community/tests/integration_tests/tools/edenai/test_image_objectdetection.py b/libs/community/tests/integration_tests/tools/edenai/test_image_objectdetection.py index d68b019304..197fa9956f 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_image_objectdetection.py +++ b/libs/community/tests/integration_tests/tools/edenai/test_image_objectdetection.py @@ -13,7 +13,7 @@ from langchain_community.tools.edenai import EdenAiObjectDetectionTool def test_edenai_call() -> None: """Test simple call to edenai's object detection endpoint.""" - object_detection = EdenAiObjectDetectionTool(providers=["google"]) + object_detection = EdenAiObjectDetectionTool(providers=["google"]) # type: ignore[call-arg] output = object_detection.invoke("https://static.javatpoint.com/images/objects.jpg") diff --git a/libs/community/tests/integration_tests/tools/edenai/test_ocr_identityparser.py b/libs/community/tests/integration_tests/tools/edenai/test_ocr_identityparser.py index c0234a673f..30b03632d1 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_ocr_identityparser.py +++ b/libs/community/tests/integration_tests/tools/edenai/test_ocr_identityparser.py @@ -13,7 +13,7 @@ from langchain_community.tools.edenai import EdenAiParsingIDTool def test_edenai_call() -> None: """Test simple call to edenai's identity parser endpoint.""" - id_parser = EdenAiParsingIDTool(providers=["amazon"], language="en") + id_parser = EdenAiParsingIDTool(providers=["amazon"], language="en") # type: ignore[call-arg] output = id_parser.invoke( "https://www.citizencard.com/images/citizencard-uk-id-card-2023.jpg" diff --git a/libs/community/tests/integration_tests/tools/edenai/test_ocr_invoiceparser.py b/libs/community/tests/integration_tests/tools/edenai/test_ocr_invoiceparser.py index c8a62b8e61..daccfb03d5 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_ocr_invoiceparser.py +++ b/libs/community/tests/integration_tests/tools/edenai/test_ocr_invoiceparser.py @@ -13,7 +13,7 @@ from langchain_community.tools.edenai import EdenAiParsingInvoiceTool def test_edenai_call() -> None: """Test simple call to edenai's invoice parser endpoint.""" - invoice_parser = EdenAiParsingInvoiceTool(providers=["amazon"], language="en") + invoice_parser = EdenAiParsingInvoiceTool(providers=["amazon"], language="en") # type: ignore[call-arg] output = invoice_parser.invoke( "https://app.edenai.run/assets/img/data_1.72e3bdcc.png" diff --git a/libs/community/tests/integration_tests/tools/edenai/test_text_moderation.py b/libs/community/tests/integration_tests/tools/edenai/test_text_moderation.py index 0a6d2e9c28..6e59e211ba 100644 --- a/libs/community/tests/integration_tests/tools/edenai/test_text_moderation.py +++ b/libs/community/tests/integration_tests/tools/edenai/test_text_moderation.py @@ -14,7 +14,7 @@ from langchain_community.tools.edenai.text_moderation import EdenAiTextModeratio def test_edenai_call() -> None: """Test simple call to edenai's text moderation endpoint.""" - 
text_moderation = EdenAiTextModerationTool(providers=["openai"], language="en") + text_moderation = EdenAiTextModerationTool(providers=["openai"], language="en") # type: ignore[call-arg] output = text_moderation.invoke("i hate you") diff --git a/libs/community/tests/integration_tests/utilities/test_arxiv.py b/libs/community/tests/integration_tests/utilities/test_arxiv.py index c11ae32732..e4fa9e4aa5 100644 --- a/libs/community/tests/integration_tests/utilities/test_arxiv.py +++ b/libs/community/tests/integration_tests/utilities/test_arxiv.py @@ -12,7 +12,7 @@ from langchain_community.utilities import ArxivAPIWrapper @pytest.fixture def api_client() -> ArxivAPIWrapper: - return ArxivAPIWrapper() + return ArxivAPIWrapper() # type: ignore[call-arg] def test_run_success_paper_name(api_client: ArxivAPIWrapper) -> None: @@ -102,7 +102,7 @@ def test_load_returns_no_result(api_client: ArxivAPIWrapper) -> None: def test_load_returns_limited_docs() -> None: """Test that returns several docs""" expected_docs = 2 - api_client = ArxivAPIWrapper(load_max_docs=expected_docs) + api_client = ArxivAPIWrapper(load_max_docs=expected_docs) # type: ignore[call-arg] docs = api_client.load("ChatGPT") assert len(docs) == expected_docs assert_docs(docs) @@ -112,7 +112,7 @@ def test_load_returns_limited_doc_content_chars() -> None: """Test that returns limited doc_content_chars_max""" doc_content_chars_max = 100 - api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) + api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) # type: ignore[call-arg] docs = api_client.load("1605.08386") assert len(docs[0].page_content) == doc_content_chars_max @@ -121,14 +121,14 @@ def test_load_returns_unlimited_doc_content_chars() -> None: """Test that returns unlimited doc_content_chars_max""" doc_content_chars_max = None - api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) + api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) # type: ignore[call-arg] docs = api_client.load("1605.08386") assert len(docs[0].page_content) == pytest.approx(54338, rel=1e-2) def test_load_returns_full_set_of_metadata() -> None: """Test that returns several docs""" - api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True) + api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True) # type: ignore[call-arg] docs = api_client.load("ChatGPT") assert len(docs) == 1 for doc in docs: diff --git a/libs/community/tests/integration_tests/utilities/test_bing_search.py b/libs/community/tests/integration_tests/utilities/test_bing_search.py index 302d66dddb..a0f686a6e3 100644 --- a/libs/community/tests/integration_tests/utilities/test_bing_search.py +++ b/libs/community/tests/integration_tests/utilities/test_bing_search.py @@ -4,14 +4,14 @@ from langchain_community.utilities.bing_search import BingSearchAPIWrapper def test_call() -> None: """Test that call gives the correct answer.""" - search = BingSearchAPIWrapper() + search = BingSearchAPIWrapper() # type: ignore[call-arg] output = search.run("Obama's first name") assert "Barack Hussein Obama" in output def test_results() -> None: """Test that call gives the correct answer.""" - search = BingSearchAPIWrapper() + search = BingSearchAPIWrapper() # type: ignore[call-arg] results = search.results("Obama's first name", num_results=5) result_contents = "\n".join( f"{result['title']}: {result['snippet']}" for result in results diff --git 
a/libs/community/tests/integration_tests/utilities/test_dataherald_api.py b/libs/community/tests/integration_tests/utilities/test_dataherald_api.py index 8556dad408..9602bfb30c 100644 --- a/libs/community/tests/integration_tests/utilities/test_dataherald_api.py +++ b/libs/community/tests/integration_tests/utilities/test_dataherald_api.py @@ -4,6 +4,6 @@ from langchain_community.utilities.dataherald import DataheraldAPIWrapper def test_call() -> None: """Test that call gives the correct answer.""" - search = DataheraldAPIWrapper(db_connection_id="65fb766367dd22c99ce1a12d") + search = DataheraldAPIWrapper(db_connection_id="65fb766367dd22c99ce1a12d") # type: ignore[call-arg] output = search.run("How many employees are in the company?") assert "Answer: SELECT \n COUNT(*) FROM \n employees" in output diff --git a/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py b/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py index 220dc048a5..723827ccc9 100644 --- a/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py +++ b/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py @@ -28,7 +28,7 @@ def test_ddg_search_tool() -> None: @pytest.mark.skipif(not ddg_installed(), reason="requires duckduckgo-search package") def test_ddg_search_news_tool() -> None: keywords = "Tesla" - tool = DuckDuckGoSearchResults(source="news") + tool = DuckDuckGoSearchResults(source="news") # type: ignore[call-arg] result = tool.invoke(keywords) print(result) # noqa: T201 assert len(result.split()) > 20 diff --git a/libs/community/tests/integration_tests/utilities/test_github.py b/libs/community/tests/integration_tests/utilities/test_github.py index 77b87ec64f..1c5052bb40 100644 --- a/libs/community/tests/integration_tests/utilities/test_github.py +++ b/libs/community/tests/integration_tests/utilities/test_github.py @@ -12,7 +12,7 @@ from langchain_community.utilities.github import GitHubAPIWrapper @pytest.fixture def api_client() -> GitHubAPIWrapper: - return GitHubAPIWrapper() + return GitHubAPIWrapper() # type: ignore[call-arg] def test_get_open_issues(api_client: GitHubAPIWrapper) -> None: diff --git a/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py b/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py index b32d05e76b..8868bfc6bc 100644 --- a/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py +++ b/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py @@ -5,14 +5,14 @@ from langchain_community.utilities.google_search import GoogleSearchAPIWrapper def test_call() -> None: """Test that call gives the correct answer.""" - search = GoogleSearchAPIWrapper() + search = GoogleSearchAPIWrapper() # type: ignore[call-arg] output = search.run("What was Obama's first name?") assert "Barack Hussein Obama II" in output def test_no_result_call() -> None: """Test that call gives no result.""" - search = GoogleSearchAPIWrapper() + search = GoogleSearchAPIWrapper() # type: ignore[call-arg] output = search.run( "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL" ) @@ -22,7 +22,7 @@ def test_no_result_call() -> None: def test_result_with_params_call() -> None: """Test that call gives the correct answer with extra params.""" - search = GoogleSearchAPIWrapper() + search = GoogleSearchAPIWrapper() # type: ignore[call-arg] output = search.results( query="What was Obama's first name?", num_results=5, diff --git 
a/libs/community/tests/integration_tests/utilities/test_jira_api.py b/libs/community/tests/integration_tests/utilities/test_jira_api.py index c59721c73e..107cccee27 100644 --- a/libs/community/tests/integration_tests/utilities/test_jira_api.py +++ b/libs/community/tests/integration_tests/utilities/test_jira_api.py @@ -5,14 +5,14 @@ from langchain_community.utilities.jira import JiraAPIWrapper def test_search() -> None: """Test for Searching issues on JIRA""" jql = "project = TP" - jira = JiraAPIWrapper() + jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("jql", jql) assert "issues" in output def test_getprojects() -> None: """Test for getting projects on JIRA""" - jira = JiraAPIWrapper() + jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("get_projects", "") assert "projects" in output @@ -23,7 +23,7 @@ def test_create_ticket() -> None: '{"summary": "Test Summary", "description": "Test Description",' ' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}' ) - jira = JiraAPIWrapper() + jira = JiraAPIWrapper() # type: ignore[call-arg] output = jira.run("create_issue", issue_string) assert "id" in output assert "key" in output @@ -31,7 +31,7 @@ def test_create_ticket() -> None: def test_create_confluence_page() -> None: """Test for getting projects on JIRA""" - jira = JiraAPIWrapper() + jira = JiraAPIWrapper() # type: ignore[call-arg] create_page_dict = ( '{"space": "ROC", "title":"This is the title",' '"body":"This is the body. You can use ' @@ -45,7 +45,7 @@ def test_create_confluence_page() -> None: def test_other() -> None: """Non-exhaustive test for accessing other JIRA API methods""" - jira = JiraAPIWrapper() + jira = JiraAPIWrapper() # type: ignore[call-arg] issue_create_dict = """ { "function":"issue_create", diff --git a/libs/community/tests/integration_tests/utilities/test_openweathermap.py b/libs/community/tests/integration_tests/utilities/test_openweathermap.py index 9f6e1f2c0f..5a4415f8ff 100644 --- a/libs/community/tests/integration_tests/utilities/test_openweathermap.py +++ b/libs/community/tests/integration_tests/utilities/test_openweathermap.py @@ -4,7 +4,7 @@ from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrappe def test_openweathermap_api_wrapper() -> None: """Test that OpenWeatherMapAPIWrapper returns correct data for London, GB.""" - weather = OpenWeatherMapAPIWrapper() + weather = OpenWeatherMapAPIWrapper() # type: ignore[call-arg] weather_data = weather.run("London,GB") assert weather_data is not None diff --git a/libs/community/tests/integration_tests/utilities/test_pubmed.py b/libs/community/tests/integration_tests/utilities/test_pubmed.py index 67bd85ec89..e8541224c3 100644 --- a/libs/community/tests/integration_tests/utilities/test_pubmed.py +++ b/libs/community/tests/integration_tests/utilities/test_pubmed.py @@ -13,7 +13,7 @@ xmltodict = pytest.importorskip("xmltodict") @pytest.fixture def api_client() -> PubMedAPIWrapper: - return PubMedAPIWrapper() + return PubMedAPIWrapper() # type: ignore[call-arg] def test_run_success(api_client: PubMedAPIWrapper) -> None: @@ -115,7 +115,7 @@ def test_load_returns_no_result(api_client: PubMedAPIWrapper) -> None: def test_load_returns_limited_docs() -> None: """Test that returns several docs""" expected_docs = 2 - api_client = PubMedAPIWrapper(top_k_results=expected_docs) + api_client = PubMedAPIWrapper(top_k_results=expected_docs) # type: ignore[call-arg] docs = api_client.load_docs("ChatGPT") assert len(docs) == expected_docs assert_docs(docs) @@ -123,7 +123,7 
@@ def test_load_returns_limited_docs() -> None: def test_load_returns_full_set_of_metadata() -> None: """Test that returns several docs""" - api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True) + api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True) # type: ignore[call-arg] docs = api_client.load_docs("ChatGPT") assert len(docs) == 3 for doc in docs: diff --git a/libs/community/tests/integration_tests/utilities/test_reddit_search_api.py b/libs/community/tests/integration_tests/utilities/test_reddit_search_api.py index 939a647433..d80a177b86 100644 --- a/libs/community/tests/integration_tests/utilities/test_reddit_search_api.py +++ b/libs/community/tests/integration_tests/utilities/test_reddit_search_api.py @@ -5,7 +5,7 @@ from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper @pytest.fixture def api_client() -> RedditSearchAPIWrapper: - return RedditSearchAPIWrapper() + return RedditSearchAPIWrapper() # type: ignore[call-arg] def assert_results_exists(results: list) -> None: diff --git a/libs/community/tests/integration_tests/utilities/test_serpapi.py b/libs/community/tests/integration_tests/utilities/test_serpapi.py index f1a86680ce..5afdb22545 100644 --- a/libs/community/tests/integration_tests/utilities/test_serpapi.py +++ b/libs/community/tests/integration_tests/utilities/test_serpapi.py @@ -4,6 +4,6 @@ from langchain_community.utilities import SerpAPIWrapper def test_call() -> None: """Test that call gives the correct answer.""" - chain = SerpAPIWrapper() + chain = SerpAPIWrapper() # type: ignore[call-arg] output = chain.run("What was Obama's first name?") assert output == "Barack Hussein Obama II" diff --git a/libs/community/tests/integration_tests/utilities/test_stackexchange.py b/libs/community/tests/integration_tests/utilities/test_stackexchange.py index 96bb9dc4f5..5751801f53 100644 --- a/libs/community/tests/integration_tests/utilities/test_stackexchange.py +++ b/libs/community/tests/integration_tests/utilities/test_stackexchange.py @@ -4,20 +4,20 @@ from langchain_community.utilities import StackExchangeAPIWrapper def test_call() -> None: """Test that call runs.""" - stackexchange = StackExchangeAPIWrapper() + stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg] output = stackexchange.run("zsh: command not found: python") assert output != "hello" def test_failure() -> None: """Test that call that doesn't run.""" - stackexchange = StackExchangeAPIWrapper() + stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg] output = stackexchange.run("sjefbsmnf") assert output == "No relevant results found for 'sjefbsmnf' on Stack Overflow" def test_success() -> None: """Test that call that doesn't run.""" - stackexchange = StackExchangeAPIWrapper() + stackexchange = StackExchangeAPIWrapper() # type: ignore[call-arg] output = stackexchange.run("zsh: command not found: python") assert "zsh: command not found: python" in output diff --git a/libs/community/tests/integration_tests/utilities/test_steam_api.py b/libs/community/tests/integration_tests/utilities/test_steam_api.py index be61f5f1ab..96de47eeb4 100644 --- a/libs/community/tests/integration_tests/utilities/test_steam_api.py +++ b/libs/community/tests/integration_tests/utilities/test_steam_api.py @@ -5,7 +5,7 @@ from langchain_community.utilities.steam import SteamWebAPIWrapper def test_get_game_details() -> None: """Test for getting game details on Steam""" - steam = SteamWebAPIWrapper() + steam = SteamWebAPIWrapper() # type: 
ignore[call-arg] output = steam.run("get_game_details", "Terraria") assert "id" in output assert "link" in output @@ -16,7 +16,7 @@ def test_get_game_details() -> None: def test_get_recommended_games() -> None: """Test for getting recommended games on Steam""" - steam = SteamWebAPIWrapper() + steam = SteamWebAPIWrapper() # type: ignore[call-arg] output = steam.run("get_recommended_games", "76561198362745711") output = ast.literal_eval(output) assert len(output) == 5 diff --git a/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py b/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py index 59c5c661b0..01990bc2c4 100644 --- a/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py +++ b/libs/community/tests/integration_tests/utilities/test_tensorflow_datasets.py @@ -34,7 +34,7 @@ MAX_DOCS = 10 @pytest.fixture def tfds_client() -> TensorflowDatasets: - return TensorflowDatasets( + return TensorflowDatasets( # type: ignore[call-arg] dataset_name="mlqa/en", split_name="test", load_max_docs=MAX_DOCS, @@ -58,7 +58,7 @@ def test_load_success(tfds_client: TensorflowDatasets) -> None: def test_load_fail_wrong_dataset_name() -> None: """Test that fails to load""" with pytest.raises(ValidationError) as exc_info: - TensorflowDatasets( + TensorflowDatasets( # type: ignore[call-arg] dataset_name="wrong_dataset_name", split_name="test", load_max_docs=MAX_DOCS, @@ -70,7 +70,7 @@ def test_load_fail_wrong_dataset_name() -> None: def test_load_fail_wrong_split_name() -> None: """Test that fails to load""" with pytest.raises(ValidationError) as exc_info: - TensorflowDatasets( + TensorflowDatasets( # type: ignore[call-arg] dataset_name="mlqa/en", split_name="wrong_split_name", load_max_docs=MAX_DOCS, @@ -82,7 +82,7 @@ def test_load_fail_wrong_split_name() -> None: def test_load_fail_no_func() -> None: """Test that fails to load""" with pytest.raises(ValidationError) as exc_info: - TensorflowDatasets( + TensorflowDatasets( # type: ignore[call-arg] dataset_name="mlqa/en", split_name="test", load_max_docs=MAX_DOCS, diff --git a/libs/community/tests/integration_tests/utilities/test_twilio.py b/libs/community/tests/integration_tests/utilities/test_twilio.py index 5e46840270..a5b2bf535c 100644 --- a/libs/community/tests/integration_tests/utilities/test_twilio.py +++ b/libs/community/tests/integration_tests/utilities/test_twilio.py @@ -4,6 +4,6 @@ from langchain_community.utilities.twilio import TwilioAPIWrapper def test_call() -> None: """Test that call runs.""" - twilio = TwilioAPIWrapper() + twilio = TwilioAPIWrapper() # type: ignore[call-arg] output = twilio.run("Message", "+16162904619") assert output diff --git a/libs/community/tests/integration_tests/utilities/test_wikipedia_api.py b/libs/community/tests/integration_tests/utilities/test_wikipedia_api.py index f1d3cee538..9123d6bd6a 100644 --- a/libs/community/tests/integration_tests/utilities/test_wikipedia_api.py +++ b/libs/community/tests/integration_tests/utilities/test_wikipedia_api.py @@ -9,7 +9,7 @@ from langchain_community.utilities import WikipediaAPIWrapper @pytest.fixture def api_client() -> WikipediaAPIWrapper: - return WikipediaAPIWrapper() + return WikipediaAPIWrapper() # type: ignore[call-arg] def test_run_success(api_client: WikipediaAPIWrapper) -> None: @@ -53,7 +53,7 @@ def test_load_success_all_meta(api_client: WikipediaAPIWrapper) -> None: def test_load_more_docs_success(api_client: WikipediaAPIWrapper) -> None: top_k_results = 20 - api_client = 
WikipediaAPIWrapper(top_k_results=top_k_results) + api_client = WikipediaAPIWrapper(top_k_results=top_k_results) # type: ignore[call-arg] docs = api_client.load("HUNTER X HUNTER") assert len(docs) > 10 assert len(docs) <= top_k_results diff --git a/libs/community/tests/integration_tests/utilities/test_wolfram_alpha_api.py b/libs/community/tests/integration_tests/utilities/test_wolfram_alpha_api.py index f460e8c113..715e76963f 100644 --- a/libs/community/tests/integration_tests/utilities/test_wolfram_alpha_api.py +++ b/libs/community/tests/integration_tests/utilities/test_wolfram_alpha_api.py @@ -4,6 +4,6 @@ from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper def test_call() -> None: """Test that call gives the correct answer.""" - search = WolframAlphaAPIWrapper() + search = WolframAlphaAPIWrapper() # type: ignore[call-arg] output = search.run("what is 2x+18=x+5?") assert "x = -13" in output diff --git a/libs/community/tests/integration_tests/vectorstores/test_elastic_vector_search.py b/libs/community/tests/integration_tests/vectorstores/test_elastic_vector_search.py index ebaeb142be..3be8b824ff 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_elastic_vector_search.py +++ b/libs/community/tests/integration_tests/vectorstores/test_elastic_vector_search.py @@ -26,7 +26,7 @@ class TestElasticsearch: raise ValueError("OPENAI_API_KEY environment variable is not set") @pytest.fixture(scope="class", autouse=True) - def elasticsearch_url(self) -> Union[str, Generator[str, None, None]]: + def elasticsearch_url(self) -> Union[str, Generator[str, None, None]]: # type: ignore[return] """Return the elasticsearch url.""" from elasticsearch import Elasticsearch diff --git a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py index f9b1f04d8d..2842938986 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py +++ b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py @@ -49,7 +49,7 @@ class TestElasticsearch: raise ValueError("OPENAI_API_KEY environment variable is not set") @pytest.fixture(scope="class", autouse=True) - def elasticsearch_connection(self) -> Union[dict, Generator[dict, None, None]]: + def elasticsearch_connection(self) -> Union[dict, Generator[dict, None, None]]: # type: ignore[return] # Running this integration test with Elastic Cloud # Required for in-stack inference testing (ELSER + model_id) from elasticsearch import Elasticsearch diff --git a/libs/community/tests/integration_tests/vectorstores/test_vlite.py b/libs/community/tests/integration_tests/vectorstores/test_vlite.py index a0fc53f3c4..7acd2e02df 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_vlite.py +++ b/libs/community/tests/integration_tests/vectorstores/test_vlite.py @@ -9,7 +9,7 @@ from langchain_community.vectorstores import VLite def test_vlite() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] - docsearch = VLite.from_texts(texts=texts, embedding=FakeEmbeddings()) + docsearch = VLite.from_texts(texts=texts, embedding=FakeEmbeddings()) # type: ignore[call-arg] output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] @@ -19,7 +19,9 @@ def test_vlite_with_metadatas() -> None: texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = VLite.from_texts( - texts=texts, 
embedding=FakeEmbeddings(), metadatas=metadatas + texts=texts, + embedding=FakeEmbeddings(), # type: ignore[call-arg] + metadatas=metadatas, # type: ignore[call-arg] ) output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": "0"})] @@ -30,7 +32,9 @@ def test_vlite_with_metadatas_with_scores() -> None: texts = ["foo", "bar", "baz"] metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = VLite.from_texts( - texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas + texts=texts, + embedding=FakeEmbeddings(), # type: ignore[call-arg] + metadatas=metadatas, # type: ignore[call-arg] ) output = docsearch.similarity_search_with_score("foo", k=1) assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] @@ -40,7 +44,9 @@ def test_vlite_update_document() -> None: """Test updating a document.""" texts = ["foo", "bar", "baz"] docsearch = VLite.from_texts( - texts=texts, embedding=FakeEmbeddings(), ids=["1", "2", "3"] + texts=texts, + embedding=FakeEmbeddings(), # type: ignore[call-arg] + ids=["1", "2", "3"], # type: ignore[call-arg] ) docsearch.update_document("1", Document(page_content="updated_foo")) output = docsearch.similarity_search("updated_foo", k=1) @@ -51,7 +57,9 @@ def test_vlite_delete_document() -> None: """Test deleting a document.""" texts = ["foo", "bar", "baz"] docsearch = VLite.from_texts( - texts=texts, embedding=FakeEmbeddings(), ids=["1", "2", "3"] + texts=texts, + embedding=FakeEmbeddings(), # type: ignore[call-arg] + ids=["1", "2", "3"], # type: ignore[call-arg] ) docsearch.delete(["1"]) output = docsearch.similarity_search("foo", k=3) @@ -64,7 +72,7 @@ def test_vlite_get_documents() -> None: metadatas = [{"page": str(i)} for i in range(len(texts))] docsearch = VLite.from_texts( texts=texts, - embedding=FakeEmbeddings(), + embedding=FakeEmbeddings(), # type: ignore[call-arg] metadatas=metadatas, ids=["1", "2", "3"], ) @@ -79,10 +87,13 @@ def test_vlite_from_existing_index() -> None: """Test loading from an existing index.""" texts = ["foo", "bar", "baz"] VLite.from_texts( - texts=texts, embedding=FakeEmbeddings(), collection="test_collection" + texts=texts, + embedding=FakeEmbeddings(), # type: ignore[call-arg] + collection="test_collection", # type: ignore[call-arg] ) new_docsearch = VLite.from_existing_index( - collection="test_collection", embedding=FakeEmbeddings() + collection="test_collection", + embedding=FakeEmbeddings(), # type: ignore[call-arg] ) output = new_docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] diff --git a/libs/community/tests/integration_tests/vectorstores/test_weaviate.py b/libs/community/tests/integration_tests/vectorstores/test_weaviate.py index 38c879a0d2..9867728df0 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_weaviate.py +++ b/libs/community/tests/integration_tests/vectorstores/test_weaviate.py @@ -26,7 +26,7 @@ class TestWeaviate: raise ValueError("OPENAI_API_KEY environment variable is not set") @pytest.fixture(scope="class", autouse=True) - def weaviate_url(self) -> Union[str, Generator[str, None, None]]: + def weaviate_url(self) -> Union[str, Generator[str, None, None]]: # type: ignore[return] """Return the weaviate url.""" from weaviate import Client diff --git a/libs/community/tests/unit_tests/chat_models/konko.py b/libs/community/tests/unit_tests/chat_models/konko.py index 20b81d7527..88b4fe0357 100644 --- a/libs/community/tests/unit_tests/chat_models/konko.py +++ 
b/libs/community/tests/unit_tests/chat_models/konko.py @@ -149,15 +149,15 @@ def test_konko_streaming_param_validation_test() -> None: def test_konko_additional_args_test() -> None: """Evaluate extra arguments for ChatKonko.""" - chat_instance = ChatKonko(extra=3, max_tokens=10) + chat_instance = ChatKonko(extra=3, max_tokens=10) # type: ignore[call-arg] assert chat_instance.max_tokens == 10 assert chat_instance.model_kwargs == {"extra": 3} - chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) + chat_instance = ChatKonko(extra=3, model_kwargs={"addition": 2}) # type: ignore[call-arg] assert chat_instance.model_kwargs == {"extra": 3, "addition": 2} with pytest.raises(ValueError): - ChatKonko(extra=3, model_kwargs={"extra": 2}) + ChatKonko(extra=3, model_kwargs={"extra": 2}) # type: ignore[call-arg] with pytest.raises(ValueError): ChatKonko(model_kwargs={"temperature": 0.2}) diff --git a/libs/community/tests/unit_tests/chat_models/test_anthropic.py b/libs/community/tests/unit_tests/chat_models/test_anthropic.py index e883b6a9b7..7b03a7a2e8 100644 --- a/libs/community/tests/unit_tests/chat_models/test_anthropic.py +++ b/libs/community/tests/unit_tests/chat_models/test_anthropic.py @@ -21,7 +21,7 @@ def test_anthropic_model_name_param() -> None: @pytest.mark.requires("anthropic") def test_anthropic_model_param() -> None: - llm = ChatAnthropic(model="foo") + llm = ChatAnthropic(model="foo") # type: ignore[call-arg] assert llm.model == "foo" @@ -40,7 +40,7 @@ def test_anthropic_invalid_model_kwargs() -> None: @pytest.mark.requires("anthropic") def test_anthropic_incorrect_field() -> None: with pytest.warns(match="not default parameter"): - llm = ChatAnthropic(foo="bar") + llm = ChatAnthropic(foo="bar") # type: ignore[call-arg] assert llm.model_kwargs == {"foo": "bar"} @@ -49,7 +49,7 @@ def test_anthropic_initialization() -> None: """Test anthropic initialization.""" # Verify that chat anthropic can be initialized using a secret key provided # as a parameter rather than an environment variable. 
-    ChatAnthropic(model="test", anthropic_api_key="test")
+    ChatAnthropic(model="test", anthropic_api_key="test")  # type: ignore[arg-type, call-arg]
 
 
 @pytest.mark.parametrize(
diff --git a/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py b/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py
index d5dbb716d2..bf01dcae2a 100644
--- a/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py
+++ b/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py
@@ -27,7 +27,7 @@ def api_passed_via_constructor_fixture() -> AzureMLChatOnlineEndpoint:
     with API key passed from constructor"""
     azure_chat = AzureMLChatOnlineEndpoint(
         endpoint_url="https://..inference.ml.azure.com/score",
-        endpoint_api_key="my-api-key",
+        endpoint_api_key="my-api-key",  # type: ignore[arg-type]
     )
     return azure_chat
diff --git a/libs/community/tests/unit_tests/chat_models/test_baichuan.py b/libs/community/tests/unit_tests/chat_models/test_baichuan.py
index 6ca14998af..e79efac726 100644
--- a/libs/community/tests/unit_tests/chat_models/test_baichuan.py
+++ b/libs/community/tests/unit_tests/chat_models/test_baichuan.py
@@ -25,8 +25,8 @@ def test_initialization() -> None:
     """Test chat model initialization."""
 
     for model in [
-        ChatBaichuan(model="Baichuan2-Turbo-192K", api_key="test-api-key", timeout=40),
-        ChatBaichuan(
+        ChatBaichuan(model="Baichuan2-Turbo-192K", api_key="test-api-key", timeout=40),  # type: ignore[arg-type, call-arg]
+        ChatBaichuan(  # type: ignore[call-arg]
             model="Baichuan2-Turbo-192K",
             baichuan_api_key="test-api-key",
             request_timeout=40,
@@ -117,7 +117,7 @@ def test_baichuan_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
     """Test initialization with an API key provided via the initializer"""
-    chat = ChatBaichuan(baichuan_api_key="test-api-key")
+    chat = ChatBaichuan(baichuan_api_key="test-api-key")  # type: ignore[call-arg]
     print(chat.baichuan_api_key, end="")  # noqa: T201
     captured = capsys.readouterr()
     assert captured.out == "**********"
@@ -125,9 +125,9 @@ def test_uses_actual_secret_value_from_secret_str() -> None:
     """Test that actual secret is retrieved using `.get_secret_value()`."""
-    chat = ChatBaichuan(
+    chat = ChatBaichuan(  # type: ignore[call-arg]
         baichuan_api_key="test-api-key",
-        baichuan_secret_key="test-secret-key",  # For backward compatibility
+        baichuan_secret_key="test-secret-key",  # type: ignore[arg-type] # For backward compatibility
     )
     assert cast(SecretStr, chat.baichuan_api_key).get_secret_value() == "test-api-key"
     assert (
diff --git a/libs/community/tests/unit_tests/chat_models/test_fireworks.py b/libs/community/tests/unit_tests/chat_models/test_fireworks.py
index 211721bc08..5b9c69b4d0 100644
--- a/libs/community/tests/unit_tests/chat_models/test_fireworks.py
+++ b/libs/community/tests/unit_tests/chat_models/test_fireworks.py
@@ -14,7 +14,7 @@ if sys.version_info < (3, 9):
 
 @pytest.mark.requires("fireworks")
 def test_api_key_is_string() -> None:
-    llm = ChatFireworks(fireworks_api_key="secret-api-key")
+    llm = ChatFireworks(fireworks_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.fireworks_api_key, SecretStr)
 
 
@@ -22,7 +22,7 @@ def test_api_key_is_string() -> None:
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = ChatFireworks(fireworks_api_key="secret-api-key")
+    llm = ChatFireworks(fireworks_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.fireworks_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/chat_models/test_friendli.py b/libs/community/tests/unit_tests/chat_models/test_friendli.py
index b337e185d3..e101533fb8 100644
--- a/libs/community/tests/unit_tests/chat_models/test_friendli.py
+++ b/libs/community/tests/unit_tests/chat_models/test_friendli.py
@@ -38,7 +38,7 @@ def chat_friendli(
 def test_friendli_token_is_secret_string(capsys: CaptureFixture) -> None:
     """Test if friendli token is stored as a SecretStr."""
     fake_token_value = "personal-access-token"
-    chat = ChatFriendli(friendli_token=fake_token_value)
+    chat = ChatFriendli(friendli_token=fake_token_value)  # type: ignore[arg-type]
     assert isinstance(chat.friendli_token, SecretStr)
     assert chat.friendli_token.get_secret_value() == fake_token_value
     print(chat.friendli_token, end="")  # noqa: T201
diff --git a/libs/community/tests/unit_tests/chat_models/test_google_palm.py b/libs/community/tests/unit_tests/chat_models/test_google_palm.py
index 53e201c5b3..dab998d051 100644
--- a/libs/community/tests/unit_tests/chat_models/test_google_palm.py
+++ b/libs/community/tests/unit_tests/chat_models/test_google_palm.py
@@ -92,19 +92,19 @@ def test_messages_to_prompt_dict_raises_with_example_after_real() -> None:
 
 def test_chat_google_raises_with_invalid_temperature() -> None:
     pytest.importorskip("google.generativeai")
     with pytest.raises(ValueError) as e:
-        ChatGooglePalm(google_api_key="fake", temperature=2.0)
+        ChatGooglePalm(google_api_key="fake", temperature=2.0)  # type: ignore[arg-type, call-arg]
 
     assert "must be in the range" in str(e)
 
 
 def test_chat_google_raises_with_invalid_top_p() -> None:
     pytest.importorskip("google.generativeai")
     with pytest.raises(ValueError) as e:
-        ChatGooglePalm(google_api_key="fake", top_p=2.0)
+        ChatGooglePalm(google_api_key="fake", top_p=2.0)  # type: ignore[arg-type, call-arg]
 
     assert "must be in the range" in str(e)
 
 
 def test_chat_google_raises_with_invalid_top_k() -> None:
     pytest.importorskip("google.generativeai")
     with pytest.raises(ValueError) as e:
-        ChatGooglePalm(google_api_key="fake", top_k=-5)
+        ChatGooglePalm(google_api_key="fake", top_k=-5)  # type: ignore[arg-type, call-arg]
 
     assert "must be positive" in str(e)
diff --git a/libs/community/tests/unit_tests/chat_models/test_kinetica.py b/libs/community/tests/unit_tests/chat_models/test_kinetica.py
index 8dd6239739..efc15dc6c1 100644
--- a/libs/community/tests/unit_tests/chat_models/test_kinetica.py
+++ b/libs/community/tests/unit_tests/chat_models/test_kinetica.py
@@ -55,7 +55,7 @@ class TestChatKinetica:
         monkeypatch.setattr(ChatKinetica, "_execute_sql", patch_execute_sql)
 
-        kinetica_llm = ChatKinetica()
+        kinetica_llm = ChatKinetica()  # type: ignore[call-arg]
 
         test_messages = kinetica_llm.load_messages_from_context("test")
         LOG.info(f"test_messages: {test_messages}")
diff --git a/libs/community/tests/unit_tests/chat_models/test_openai.py b/libs/community/tests/unit_tests/chat_models/test_openai.py
index 6a59bce98a..8315821ed3 100644
--- a/libs/community/tests/unit_tests/chat_models/test_openai.py
+++ b/libs/community/tests/unit_tests/chat_models/test_openai.py
@@ -17,9 +17,9 @@ from langchain_community.chat_models.openai import ChatOpenAI
 
 @pytest.mark.requires("openai")
 def test_openai_model_param() -> None:
-    llm = ChatOpenAI(model="foo", openai_api_key="foo")
+    llm = ChatOpenAI(model="foo", openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_name == "foo"
-    llm = ChatOpenAI(model_name="foo", openai_api_key="foo")
+    llm = ChatOpenAI(model_name="foo", openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_name == "foo"
 
 
@@ -81,7 +81,7 @@ def mock_completion() -> dict:
 
 @pytest.mark.requires("openai")
 def test_openai_predict(mock_completion: dict) -> None:
-    llm = ChatOpenAI(openai_api_key="foo")
+    llm = ChatOpenAI(openai_api_key="foo")  # type: ignore[call-arg]
     mock_client = MagicMock()
     completed = False
 
@@ -103,7 +103,7 @@ def test_openai_predict(mock_completion: dict) -> None:
 
 @pytest.mark.requires("openai")
 async def test_openai_apredict(mock_completion: dict) -> None:
-    llm = ChatOpenAI(openai_api_key="foo")
+    llm = ChatOpenAI(openai_api_key="foo")  # type: ignore[call-arg]
     mock_client = MagicMock()
     completed = False
diff --git a/libs/community/tests/unit_tests/chat_models/test_perplexity.py b/libs/community/tests/unit_tests/chat_models/test_perplexity.py
index 1ae6cd8b5f..024274f7e8 100644
--- a/libs/community/tests/unit_tests/chat_models/test_perplexity.py
+++ b/libs/community/tests/unit_tests/chat_models/test_perplexity.py
@@ -11,13 +11,13 @@ os.environ["PPLX_API_KEY"] = "foo"
 
 @pytest.mark.requires("openai")
 def test_perplexity_model_name_param() -> None:
-    llm = ChatPerplexity(model="foo")
+    llm = ChatPerplexity(model="foo")  # type: ignore[call-arg]
     assert llm.model == "foo"
 
 
 @pytest.mark.requires("openai")
 def test_perplexity_model_kwargs() -> None:
-    llm = ChatPerplexity(model="test", model_kwargs={"foo": "bar"})
+    llm = ChatPerplexity(model="test", model_kwargs={"foo": "bar"})  # type: ignore[call-arg]
     assert llm.model_kwargs == {"foo": "bar"}
 
 
@@ -27,10 +27,10 @@ def test_perplexity_initialization() -> None:
     # Verify that chat perplexity can be initialized using a secret key provided
     # as a parameter rather than an environment variable.
     for model in [
-        ChatPerplexity(
+        ChatPerplexity(  # type: ignore[call-arg]
             model="test", timeout=1, api_key="test", temperature=0.7, verbose=True
         ),
-        ChatPerplexity(
+        ChatPerplexity(  # type: ignore[call-arg]
             model="test",
             request_timeout=1,
             pplx_api_key="test",
diff --git a/libs/community/tests/unit_tests/chat_models/test_premai.py b/libs/community/tests/unit_tests/chat_models/test_premai.py
index ac5299f76f..8318fc890a 100644
--- a/libs/community/tests/unit_tests/chat_models/test_premai.py
+++ b/libs/community/tests/unit_tests/chat_models/test_premai.py
@@ -13,7 +13,7 @@ from langchain_community.chat_models.premai import _messages_to_prompt_dict
 
 @pytest.mark.requires("premai")
 def test_api_key_is_string() -> None:
-    llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8)
+    llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8)  # type: ignore[call-arg]
     assert isinstance(llm.premai_api_key, SecretStr)
 
 
@@ -21,7 +21,7 @@ def test_api_key_is_string() -> None:
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8)
+    llm = ChatPremAI(premai_api_key="secret-api-key", project_id=8)  # type: ignore[call-arg]
     print(llm.premai_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -52,8 +52,8 @@ def test_messages_to_prompt_dict_with_valid_messages() -> None:
 @pytest.mark.requires("premai")
 def test_premai_initialization() -> None:
     for model in [
-        ChatPremAI(model="prem-ai-model", premai_api_key="xyz", project_id=8),
-        ChatPremAI(model_name="prem-ai-model", api_key="xyz", project_id=8),
+        ChatPremAI(model="prem-ai-model", premai_api_key="xyz", project_id=8),  # type: ignore[call-arg]
+        ChatPremAI(model_name="prem-ai-model", api_key="xyz", project_id=8),  # type: ignore[arg-type, call-arg]
     ]:
         assert model.model == "prem-ai-model"
         assert cast(SecretStr, model.premai_api_key).get_secret_value() == "xyz"
diff --git a/libs/community/tests/unit_tests/chat_models/test_tongyi.py b/libs/community/tests/unit_tests/chat_models/test_tongyi.py
index 62421b3e61..8bb20abc6b 100644
--- a/libs/community/tests/unit_tests/chat_models/test_tongyi.py
+++ b/libs/community/tests/unit_tests/chat_models/test_tongyi.py
@@ -58,7 +58,7 @@ def test__convert_dict_to_message_function_call() -> None:
     expected_output = AIMessage(
         content="foo",
         additional_kwargs={"tool_calls": raw_function_calls},
-        tool_calls=tool_calls,
+        tool_calls=tool_calls,  # type: ignore[arg-type]
         invalid_tool_calls=[],
     )
     assert result == expected_output
diff --git a/libs/community/tests/unit_tests/chat_models/test_yuan2.py b/libs/community/tests/unit_tests/chat_models/test_yuan2.py
index c5253375d3..74b2fb84cf 100644
--- a/libs/community/tests/unit_tests/chat_models/test_yuan2.py
+++ b/libs/community/tests/unit_tests/chat_models/test_yuan2.py
@@ -16,9 +16,9 @@ from langchain_community.chat_models.yuan2 import (
 
 @pytest.mark.requires("openai")
 def test_yuan2_model_param() -> None:
-    chat = ChatYuan2(model="foo")
+    chat = ChatYuan2(model="foo")  # type: ignore[call-arg]
     assert chat.model_name == "foo"
-    chat = ChatYuan2(model_name="foo")
+    chat = ChatYuan2(model_name="foo")  # type: ignore[call-arg]
     assert chat.model_name == "foo"
diff --git a/libs/community/tests/unit_tests/chat_models/test_zhipuai.py b/libs/community/tests/unit_tests/chat_models/test_zhipuai.py
index 31d6412885..5295b6f340 100644
--- a/libs/community/tests/unit_tests/chat_models/test_zhipuai.py
+++ b/libs/community/tests/unit_tests/chat_models/test_zhipuai.py
@@ -9,5 +9,5 @@ from langchain_community.chat_models.zhipuai import ChatZhipuAI
 def test_zhipuai_model_param() -> None:
     llm = ChatZhipuAI(api_key="test", model="foo")
     assert llm.model_name == "foo"
-    llm = ChatZhipuAI(api_key="test", model_name="foo")
+    llm = ChatZhipuAI(api_key="test", model_name="foo")  # type: ignore[call-arg]
     assert llm.model_name == "foo"
diff --git a/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_schema.py b/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_schema.py
index 15ed29af43..8829b227cf 100644
--- a/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_schema.py
+++ b/libs/community/tests/unit_tests/document_loaders/blob_loaders/test_schema.py
@@ -120,10 +120,10 @@ def test_mime_type_inference(
 def test_blob_initialization_validator() -> None:
     """Test that blob initialization validates the arguments."""
     with pytest.raises(ValueError, match="Either data or path must be provided"):
-        Blob()
+        Blob()  # type: ignore[call-arg]
 
     assert Blob(data=b"Hello, World!") is not None
-    assert Blob(path="some_path") is not None
+    assert Blob(path="some_path") is not None  # type: ignore[call-arg]
 
 
 def test_blob_loader() -> None:
diff --git a/libs/community/tests/unit_tests/document_loaders/loaders/vendors/test_docugami.py b/libs/community/tests/unit_tests/document_loaders/loaders/vendors/test_docugami.py
index 17d0ee238a..d6c4118228 100644
--- a/libs/community/tests/unit_tests/document_loaders/loaders/vendors/test_docugami.py
+++ b/libs/community/tests/unit_tests/document_loaders/loaders/vendors/test_docugami.py
@@ -11,7 +11,7 @@ DOCUGAMI_XML_PATH = Path(__file__).parent / "test_data" / "docugami-example.xml"
 @pytest.mark.requires("dgml_utils")
 def test_docugami_loader_local() -> None:
     """Test DocugamiLoader."""
-    loader = DocugamiLoader(file_paths=[DOCUGAMI_XML_PATH])
+    loader = DocugamiLoader(file_paths=[DOCUGAMI_XML_PATH])  # type: ignore[call-arg]
     docs = loader.load()
     assert len(docs) == 25
 
@@ -24,4 +24,4 @@ def test_docugami_loader_local() -> None:
 
 def test_docugami_initialization() -> None:
     """Test correct initialization in remote mode."""
-    DocugamiLoader(access_token="test", docset_id="123")
+    DocugamiLoader(access_token="test", docset_id="123")  # type: ignore[call-arg]
diff --git a/libs/community/tests/unit_tests/document_loaders/test_github.py b/libs/community/tests/unit_tests/document_loaders/test_github.py
index bc00d37544..544681f0ae 100644
--- a/libs/community/tests/unit_tests/document_loaders/test_github.py
+++ b/libs/community/tests/unit_tests/document_loaders/test_github.py
@@ -38,19 +38,19 @@ def test_initialization_ghe() -> None:
 def test_invalid_initialization() -> None:
     # Invalid parameter
     with pytest.raises(ValueError):
-        GitHubIssuesLoader(invalid="parameter")
+        GitHubIssuesLoader(invalid="parameter")  # type: ignore[call-arg]
 
     # Invalid value for valid parameter
     with pytest.raises(ValueError):
-        GitHubIssuesLoader(state="invalid_state")
+        GitHubIssuesLoader(state="invalid_state")  # type: ignore[arg-type, call-arg]
 
     # Invalid type for labels
     with pytest.raises(ValueError):
-        GitHubIssuesLoader(labels="not_a_list")
+        GitHubIssuesLoader(labels="not_a_list")  # type: ignore[arg-type, call-arg]
 
     # Invalid date format for since
     with pytest.raises(ValueError):
-        GitHubIssuesLoader(since="not_a_date")
+        GitHubIssuesLoader(since="not_a_date")  # type: ignore[call-arg]
 
 
 def test_load_github_issue(mocker: MockerFixture) -> None:
@@ -171,7 +171,7 @@ def test_github_file_content_get_file_paths(mocker: MockerFixture) -> None:
     assert files[0]["path"] == "readme.md"
 
     # case2: didn't add file_filter
-    loader = GithubFileLoader(
+    loader = GithubFileLoader(  # type: ignore[call-arg]
         repo="shufanhao/langchain",
         access_token="access_token",
         github_api_url="https://github.com",
@@ -220,7 +220,7 @@ def test_github_file_content_loader(mocker: MockerFixture) -> None:
     mocker.patch("requests.get", side_effect=[file_path_res, file_content_res])
 
     # case1: file_extension=".md"
-    loader = GithubFileLoader(
+    loader = GithubFileLoader(  # type: ignore[call-arg]
         repo="shufanhao/langchain",
         access_token="access_token",
         github_api_url="https://github.com",
diff --git a/libs/community/tests/unit_tests/embeddings/test_edenai.py b/libs/community/tests/unit_tests/embeddings/test_edenai.py
index ba2e05c72e..d80c2279f8 100644
--- a/libs/community/tests/unit_tests/embeddings/test_edenai.py
+++ b/libs/community/tests/unit_tests/embeddings/test_edenai.py
@@ -7,14 +7,14 @@ from langchain_community.embeddings import EdenAiEmbeddings
 
 def test_api_key_is_string() -> None:
-    llm = EdenAiEmbeddings(edenai_api_key="secret-api-key")
+    llm = EdenAiEmbeddings(edenai_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.edenai_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = EdenAiEmbeddings(edenai_api_key="secret-api-key")
+    llm = EdenAiEmbeddings(edenai_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.edenai_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/embeddings/test_embaas.py b/libs/community/tests/unit_tests/embeddings/test_embaas.py
index b62092a2aa..d631be8036 100644
--- a/libs/community/tests/unit_tests/embeddings/test_embaas.py
+++ b/libs/community/tests/unit_tests/embeddings/test_embaas.py
@@ -7,14 +7,14 @@ from langchain_community.embeddings import EmbaasEmbeddings
 
 def test_api_key_is_string() -> None:
-    llm = EmbaasEmbeddings(embaas_api_key="secret-api-key")
+    llm = EmbaasEmbeddings(embaas_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.embaas_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = EmbaasEmbeddings(embaas_api_key="secret-api-key")
+    llm = EmbaasEmbeddings(embaas_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.embaas_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/embeddings/test_huggingface.py b/libs/community/tests/unit_tests/embeddings/test_huggingface.py
index 22f91acb8a..f22a104e82 100644
--- a/libs/community/tests/unit_tests/embeddings/test_huggingface.py
+++ b/libs/community/tests/unit_tests/embeddings/test_huggingface.py
@@ -3,5 +3,5 @@ from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEm
 
 def test_hugginggface_inferenceapi_embedding_documents_init() -> None:
     """Test huggingface embeddings."""
-    embedding = HuggingFaceInferenceAPIEmbeddings(api_key="abcd123")
+    embedding = HuggingFaceInferenceAPIEmbeddings(api_key="abcd123")  # type: ignore[arg-type]
     assert "abcd123" not in repr(embedding)
diff --git a/libs/community/tests/unit_tests/embeddings/test_llm_rails.py b/libs/community/tests/unit_tests/embeddings/test_llm_rails.py
index 0715efcb4c..ccc6ff0d77 100644
--- a/libs/community/tests/unit_tests/embeddings/test_llm_rails.py
+++ b/libs/community/tests/unit_tests/embeddings/test_llm_rails.py
@@ -7,14 +7,14 @@ from langchain_community.embeddings import LLMRailsEmbeddings
 
 def test_api_key_is_string() -> None:
-    llm = LLMRailsEmbeddings(api_key="secret-api-key")
+    llm = LLMRailsEmbeddings(api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = LLMRailsEmbeddings(api_key="secret-api-key")
+    llm = LLMRailsEmbeddings(api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py b/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py
index d43f3108b9..1262705c6f 100644
--- a/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py
+++ b/libs/community/tests/unit_tests/embeddings/test_oci_gen_ai_embedding.py
@@ -19,7 +19,7 @@ class MockResponseDict(dict):
 def test_embedding_call(monkeypatch: MonkeyPatch, test_model_id: str) -> None:
     """Test valid call to OCI Generative AI embedding service."""
     oci_gen_ai_client = MagicMock()
-    embeddings = OCIGenAIEmbeddings(
+    embeddings = OCIGenAIEmbeddings(  # type: ignore[call-arg]
         model_id=test_model_id,
         service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
         client=oci_gen_ai_client,
diff --git a/libs/community/tests/unit_tests/embeddings/test_openai.py b/libs/community/tests/unit_tests/embeddings/test_openai.py
index c9499722ff..e62bee5f94 100644
--- a/libs/community/tests/unit_tests/embeddings/test_openai.py
+++ b/libs/community/tests/unit_tests/embeddings/test_openai.py
@@ -12,5 +12,5 @@ def test_openai_invalid_model_kwargs() -> None:
 @pytest.mark.requires("openai")
 def test_openai_incorrect_field() -> None:
     with pytest.warns(match="not default parameter"):
-        llm = OpenAIEmbeddings(foo="bar", openai_api_key="foo")
+        llm = OpenAIEmbeddings(foo="bar", openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_kwargs == {"foo": "bar"}
diff --git a/libs/community/tests/unit_tests/embeddings/test_premai.py b/libs/community/tests/unit_tests/embeddings/test_premai.py
index 3b06b19026..3d1b2b7744 100644
--- a/libs/community/tests/unit_tests/embeddings/test_premai.py
+++ b/libs/community/tests/unit_tests/embeddings/test_premai.py
@@ -9,8 +9,10 @@ from langchain_community.embeddings import PremAIEmbeddings
 
 @pytest.mark.requires("premai")
 def test_api_key_is_string() -> None:
-    llm = PremAIEmbeddings(
-        premai_api_key="secret-api-key", project_id=8, model="fake-model"
+    llm = PremAIEmbeddings(  # type: ignore[call-arg]
+        premai_api_key="secret-api-key",  # type: ignore[arg-type]
+        project_id=8,
+        model="fake-model",  # type: ignore[arg-type]
     )
     assert isinstance(llm.premai_api_key, SecretStr)
 
@@ -19,8 +21,10 @@ def test_api_key_is_string() -> None:
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = PremAIEmbeddings(
-        premai_api_key="secret-api-key", project_id=8, model="fake-model"
+    llm = PremAIEmbeddings(  # type: ignore[call-arg]
+        premai_api_key="secret-api-key",  # type: ignore[arg-type]
+        project_id=8,
+        model="fake-model",  # type: ignore[arg-type]
     )
     print(llm.premai_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/embeddings/test_yandex.py b/libs/community/tests/unit_tests/embeddings/test_yandex.py
index 2593927799..c681af05a2 100644
--- a/libs/community/tests/unit_tests/embeddings/test_yandex.py
+++ b/libs/community/tests/unit_tests/embeddings/test_yandex.py
@@ -6,12 +6,12 @@ from langchain_community.embeddings import YandexGPTEmbeddings
 def test_init() -> None:
     os.environ["YC_API_KEY"] = "foo"
     models = [
-        YandexGPTEmbeddings(folder_id="bar"),
-        YandexGPTEmbeddings(
+        YandexGPTEmbeddings(folder_id="bar"),  # type: ignore[call-arg]
+        YandexGPTEmbeddings(  # type: ignore[call-arg]
            query_model_uri="emb://bar/text-search-query/latest",
             doc_model_uri="emb://bar/text-search-doc/latest",
         ),
-        YandexGPTEmbeddings(
+        YandexGPTEmbeddings(  # type: ignore[call-arg]
             folder_id="bar",
             query_model_name="text-search-query",
             doc_model_name="text-search-doc",
diff --git a/libs/community/tests/unit_tests/llms/test_ai21.py b/libs/community/tests/unit_tests/llms/test_ai21.py
index 4e111fe1ea..788364da0c 100644
--- a/libs/community/tests/unit_tests/llms/test_ai21.py
+++ b/libs/community/tests/unit_tests/llms/test_ai21.py
@@ -9,7 +9,7 @@ from langchain_community.llms.ai21 import AI21
 
 def test_api_key_is_secret_string() -> None:
-    llm = AI21(ai21_api_key="secret-api-key")
+    llm = AI21(ai21_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.ai21_api_key, SecretStr)
 
 
@@ -29,7 +29,7 @@ def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
     """Test initialization with an API key provided via the initializer"""
-    llm = AI21(ai21_api_key="secret-api-key")
+    llm = AI21(ai21_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.ai21_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -38,5 +38,5 @@ def test_api_key_masked_when_passed_via_constructor(
 
 def test_uses_actual_secret_value_from_secretstr() -> None:
     """Test that actual secret is retrieved using `.get_secret_value()`."""
-    llm = AI21(ai21_api_key="secret-api-key")
+    llm = AI21(ai21_api_key="secret-api-key")  # type: ignore[arg-type]
     assert cast(SecretStr, llm.ai21_api_key).get_secret_value() == "secret-api-key"
diff --git a/libs/community/tests/unit_tests/llms/test_aleph_alpha.py b/libs/community/tests/unit_tests/llms/test_aleph_alpha.py
index da3ac1f992..ae09ecd116 100644
--- a/libs/community/tests/unit_tests/llms/test_aleph_alpha.py
+++ b/libs/community/tests/unit_tests/llms/test_aleph_alpha.py
@@ -9,7 +9,7 @@ from langchain_community.llms.aleph_alpha import AlephAlpha
 
 @pytest.mark.requires("aleph_alpha_client")
 def test_api_key_is_secret_string() -> None:
-    llm = AlephAlpha(aleph_alpha_api_key="secret-api-key")
+    llm = AlephAlpha(aleph_alpha_api_key="secret-api-key")  # type: ignore[call-arg]
     assert isinstance(llm.aleph_alpha_api_key, SecretStr)
 
 
@@ -17,7 +17,7 @@ def test_api_key_is_secret_string() -> None:
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = AlephAlpha(aleph_alpha_api_key="secret-api-key")
+    llm = AlephAlpha(aleph_alpha_api_key="secret-api-key")  # type: ignore[call-arg]
     print(llm.aleph_alpha_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -29,7 +29,7 @@ def test_api_key_masked_when_passed_from_env(
     monkeypatch: MonkeyPatch, capsys: CaptureFixture
 ) -> None:
     monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key")
-    llm = AlephAlpha()
+    llm = AlephAlpha()  # type: ignore[call-arg]
     print(llm.aleph_alpha_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/llms/test_anyscale.py b/libs/community/tests/unit_tests/llms/test_anyscale.py
index 155c4e9c10..c5896f540c 100644
--- a/libs/community/tests/unit_tests/llms/test_anyscale.py
+++ b/libs/community/tests/unit_tests/llms/test_anyscale.py
@@ -9,7 +9,7 @@ from langchain_community.llms.anyscale import Anyscale
 
 @pytest.mark.requires("openai")
 def test_api_key_is_secret_string() -> None:
-    llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test")
+    llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test")  # type: ignore[arg-type]
     assert isinstance(llm.anyscale_api_key, SecretStr)
 
 
@@ -31,7 +31,7 @@ def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
     """Test initialization with an API key provided via the initializer"""
-    llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test")
+    llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test")  # type: ignore[arg-type]
     print(llm.anyscale_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/llms/test_bananadev.py b/libs/community/tests/unit_tests/llms/test_bananadev.py
index ea219cc815..163606a8a2 100644
--- a/libs/community/tests/unit_tests/llms/test_bananadev.py
+++ b/libs/community/tests/unit_tests/llms/test_bananadev.py
@@ -8,7 +8,7 @@ from langchain_community.llms.bananadev import Banana
 
 def test_api_key_is_secret_string() -> None:
-    llm = Banana(banana_api_key="secret-api-key")
+    llm = Banana(banana_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.banana_api_key, SecretStr)
 
 
@@ -28,7 +28,7 @@ def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
     """Test initialization with an API key provided via the initializer"""
-    llm = Banana(banana_api_key="secret-api-key")
+    llm = Banana(banana_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.banana_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -37,5 +37,5 @@ def test_api_key_masked_when_passed_via_constructor(
 
 def test_uses_actual_secret_value_from_secretstr() -> None:
     """Test that actual secret is retrieved using `.get_secret_value()`."""
-    llm = Banana(banana_api_key="secret-api-key")
+    llm = Banana(banana_api_key="secret-api-key")  # type: ignore[arg-type]
     assert cast(SecretStr, llm.banana_api_key).get_secret_value() == "secret-api-key"
diff --git a/libs/community/tests/unit_tests/llms/test_cerebriumai.py b/libs/community/tests/unit_tests/llms/test_cerebriumai.py
index 7462b612e5..87e0439870 100644
--- a/libs/community/tests/unit_tests/llms/test_cerebriumai.py
+++ b/libs/community/tests/unit_tests/llms/test_cerebriumai.py
@@ -7,12 +7,12 @@ from langchain_community.llms.cerebriumai import CerebriumAI
 
 def test_api_key_is_secret_string() -> None:
-    llm = CerebriumAI(cerebriumai_api_key="test-cerebriumai-api-key")
+    llm = CerebriumAI(cerebriumai_api_key="test-cerebriumai-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.cerebriumai_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None:
-    llm = CerebriumAI(cerebriumai_api_key="secret-api-key")
+    llm = CerebriumAI(cerebriumai_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.cerebriumai_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/llms/test_forefrontai.py b/libs/community/tests/unit_tests/llms/test_forefrontai.py
index e9cc97c0c5..711cb2186b 100644
--- a/libs/community/tests/unit_tests/llms/test_forefrontai.py
+++ b/libs/community/tests/unit_tests/llms/test_forefrontai.py
@@ -10,7 +10,7 @@ from langchain_community.llms.forefrontai import ForefrontAI
 
 def test_forefrontai_api_key_is_secret_string() -> None:
     """Test that the API key is stored as a SecretStr."""
-    llm = ForefrontAI(forefrontai_api_key="secret-api-key", temperature=0.2)
+    llm = ForefrontAI(forefrontai_api_key="secret-api-key", temperature=0.2)  # type: ignore[arg-type]
     assert isinstance(llm.forefrontai_api_key, SecretStr)
 
 
@@ -19,7 +19,7 @@ def test_forefrontai_api_key_masked_when_passed_from_env(
 ) -> None:
     """Test that the API key is masked when passed from an environment variable."""
     monkeypatch.setenv("FOREFRONTAI_API_KEY", "secret-api-key")
-    llm = ForefrontAI(temperature=0.2)
+    llm = ForefrontAI(temperature=0.2)  # type: ignore[call-arg]
     print(llm.forefrontai_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -31,7 +31,7 @@ def test_forefrontai_api_key_masked_when_passed_via_constructor(
 ) -> None:
     """Test that the API key is masked when passed via the constructor."""
     llm = ForefrontAI(
-        forefrontai_api_key="secret-api-key",
+        forefrontai_api_key="secret-api-key",  # type: ignore[arg-type]
         temperature=0.2,
     )
     print(llm.forefrontai_api_key, end="")  # noqa: T201
@@ -43,7 +43,7 @@ def test_forefrontai_api_key_masked_when_passed_via_constructor(
 def test_forefrontai_uses_actual_secret_value_from_secretstr() -> None:
     """Test that the actual secret value is correctly retrieved."""
     llm = ForefrontAI(
-        forefrontai_api_key="secret-api-key",
+        forefrontai_api_key="secret-api-key",  # type: ignore[arg-type]
         temperature=0.2,
     )
     assert (
diff --git a/libs/community/tests/unit_tests/llms/test_friendli.py b/libs/community/tests/unit_tests/llms/test_friendli.py
index dae9eadf89..d55d6d14fa 100644
--- a/libs/community/tests/unit_tests/llms/test_friendli.py
+++ b/libs/community/tests/unit_tests/llms/test_friendli.py
@@ -38,7 +38,7 @@ def friendli_llm(
 def test_friendli_token_is_secret_string(capsys: CaptureFixture) -> None:
     """Test if friendli token is stored as a SecretStr."""
     fake_token_value = "personal-access-token"
-    chat = Friendli(friendli_token=fake_token_value)
+    chat = Friendli(friendli_token=fake_token_value)  # type: ignore[arg-type]
     assert isinstance(chat.friendli_token, SecretStr)
     assert chat.friendli_token.get_secret_value() == fake_token_value
     print(chat.friendli_token, end="")  # noqa: T201
diff --git a/libs/community/tests/unit_tests/llms/test_gooseai.py b/libs/community/tests/unit_tests/llms/test_gooseai.py
index e87dd6f40d..be467b6175 100644
--- a/libs/community/tests/unit_tests/llms/test_gooseai.py
+++ b/libs/community/tests/unit_tests/llms/test_gooseai.py
@@ -17,7 +17,7 @@ def _openai_v1_installed() -> bool:
 
 @pytest.mark.requires("openai")
 def test_api_key_is_secret_string() -> None:
-    llm = GooseAI(gooseai_api_key="secret-api-key")
+    llm = GooseAI(gooseai_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     assert isinstance(llm.gooseai_api_key, SecretStr)
     assert llm.gooseai_api_key.get_secret_value() == "secret-api-key"
 
@@ -27,7 +27,7 @@ def test_api_key_is_secret_string() -> None:
 )
 @pytest.mark.requires("openai")
 def test_api_key_masked_when_passed_via_constructor() -> None:
-    llm = GooseAI(gooseai_api_key="secret-api-key")
+    llm = GooseAI(gooseai_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     assert str(llm.gooseai_api_key) == "**********"
     assert "secret-api-key" not in repr(llm.gooseai_api_key)
     assert "secret-api-key" not in repr(llm)
@@ -40,7 +40,7 @@ def test_api_key_masked_when_passed_via_constructor() -> None:
 def test_api_key_masked_when_passed_from_env() -> None:
     with MonkeyPatch.context() as mp:
         mp.setenv("GOOSEAI_API_KEY", "secret-api-key")
-        llm = GooseAI()
+        llm = GooseAI()  # type: ignore[call-arg]
         assert str(llm.gooseai_api_key) == "**********"
         assert "secret-api-key" not in repr(llm.gooseai_api_key)
         assert "secret-api-key" not in repr(llm)
diff --git a/libs/community/tests/unit_tests/llms/test_minimax.py b/libs/community/tests/unit_tests/llms/test_minimax.py
index f92a140239..ef128ea1d5 100644
--- a/libs/community/tests/unit_tests/llms/test_minimax.py
+++ b/libs/community/tests/unit_tests/llms/test_minimax.py
@@ -9,7 +9,7 @@ from langchain_community.llms.minimax import Minimax
 
 def test_api_key_is_secret_string() -> None:
-    llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")
+    llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")  # type: ignore[arg-type, call-arg]
     assert isinstance(llm.minimax_api_key, SecretStr)
 
 
@@ -19,7 +19,7 @@ def test_api_key_masked_when_passed_from_env(
     """Test initialization with an API key provided via an env variable"""
     monkeypatch.setenv("MINIMAX_API_KEY", "secret-api-key")
     monkeypatch.setenv("MINIMAX_GROUP_ID", "group_id")
-    llm = Minimax()
+    llm = Minimax()  # type: ignore[call-arg]
     print(llm.minimax_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -30,7 +30,7 @@ def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
     """Test initialization with an API key provided via the initializer"""
-    llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")
+    llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")  # type: ignore[arg-type, call-arg]
     print(llm.minimax_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -39,5 +39,5 @@ def test_api_key_masked_when_passed_via_constructor(
 
 def test_uses_actual_secret_value_from_secretstr() -> None:
     """Test that actual secret is retrieved using `.get_secret_value()`."""
-    llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")
+    llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id")  # type: ignore[arg-type, call-arg]
     assert cast(SecretStr, llm.minimax_api_key).get_secret_value() == "secret-api-key"
diff --git a/libs/community/tests/unit_tests/llms/test_moonshot.py b/libs/community/tests/unit_tests/llms/test_moonshot.py
index 182a61459f..fda1a5298f 100644
--- a/libs/community/tests/unit_tests/llms/test_moonshot.py
+++ b/libs/community/tests/unit_tests/llms/test_moonshot.py
@@ -9,7 +9,7 @@ os.environ["MOONSHOT_API_KEY"] = "key"
 
 @pytest.mark.requires("openai")
 def test_moonshot_model_param() -> None:
-    llm = Moonshot(model="foo")
+    llm = Moonshot(model="foo")  # type: ignore[call-arg]
     assert llm.model_name == "foo"
-    llm = Moonshot(model_name="bar")
+    llm = Moonshot(model_name="bar")  # type: ignore[call-arg]
     assert llm.model_name == "bar"
diff --git a/libs/community/tests/unit_tests/llms/test_openai.py b/libs/community/tests/unit_tests/llms/test_openai.py
index 73f6fad283..83b229d8c2 100644
--- a/libs/community/tests/unit_tests/llms/test_openai.py
+++ b/libs/community/tests/unit_tests/llms/test_openai.py
@@ -13,15 +13,15 @@ def _openai_v1_installed() -> bool:
 
 @pytest.mark.requires("openai")
 def test_openai_model_param() -> None:
-    llm = OpenAI(model="foo", openai_api_key="foo")
+    llm = OpenAI(model="foo", openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_name == "foo"
-    llm = OpenAI(model_name="foo", openai_api_key="foo")
+    llm = OpenAI(model_name="foo", openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_name == "foo"
 
 
 @pytest.mark.requires("openai")
 def test_openai_model_kwargs() -> None:
-    llm = OpenAI(model_kwargs={"foo": "bar"}, openai_api_key="foo")
+    llm = OpenAI(model_kwargs={"foo": "bar"}, openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_kwargs == {"foo": "bar"}
 
 
@@ -38,7 +38,7 @@ def test_openai_invalid_model_kwargs() -> None:
 @pytest.mark.requires("openai")
 def test_openai_incorrect_field() -> None:
     with pytest.warns(match="not default parameter"):
-        llm = OpenAI(foo="bar", openai_api_key="foo")
+        llm = OpenAI(foo="bar", openai_api_key="foo")  # type: ignore[call-arg]
     assert llm.model_kwargs == {"foo": "bar"}
diff --git a/libs/community/tests/unit_tests/llms/test_pipelineai.py b/libs/community/tests/unit_tests/llms/test_pipelineai.py
index afa91752ab..3a8513650f 100644
--- a/libs/community/tests/unit_tests/llms/test_pipelineai.py
+++ b/libs/community/tests/unit_tests/llms/test_pipelineai.py
@@ -5,14 +5,14 @@ from langchain_community.llms.pipelineai import PipelineAI
 
 def test_api_key_is_string() -> None:
-    llm = PipelineAI(pipeline_api_key="secret-api-key")
+    llm = PipelineAI(pipeline_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.pipeline_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = PipelineAI(pipeline_api_key="secret-api-key")
+    llm = PipelineAI(pipeline_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.pipeline_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/llms/test_predibase.py b/libs/community/tests/unit_tests/llms/test_predibase.py
index e2b2dc128d..bb8245f1ef 100644
--- a/libs/community/tests/unit_tests/llms/test_predibase.py
+++ b/libs/community/tests/unit_tests/llms/test_predibase.py
@@ -5,14 +5,14 @@ from langchain_community.llms.predibase import Predibase
 
 def test_api_key_is_string() -> None:
-    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")
+    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     assert isinstance(llm.predibase_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")
+    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     print(llm.predibase_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
@@ -20,36 +20,36 @@ def test_api_key_masked_when_passed_via_constructor(
 
 def test_specifying_predibase_sdk_version_argument() -> None:
-    llm = Predibase(
+    llm = Predibase(  # type: ignore[call-arg]
         model="my_llm",
-        predibase_api_key="secret-api-key",
+        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
     )
     assert not llm.predibase_sdk_version
 
     legacy_predibase_sdk_version = "2024.4.8"
-    llm = Predibase(
+    llm = Predibase(  # type: ignore[call-arg]
         model="my_llm",
-        predibase_api_key="secret-api-key",
+        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
         predibase_sdk_version=legacy_predibase_sdk_version,
     )
     assert llm.predibase_sdk_version == legacy_predibase_sdk_version
 
 
 def test_specifying_adapter_id_argument() -> None:
-    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")
+    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     assert not llm.adapter_id
 
-    llm = Predibase(
+    llm = Predibase(  # type: ignore[call-arg]
         model="my_llm",
-        predibase_api_key="secret-api-key",
+        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
         adapter_id="my-hf-adapter",
     )
     assert llm.adapter_id == "my-hf-adapter"
     assert llm.adapter_version is None
 
-    llm = Predibase(
+    llm = Predibase(  # type: ignore[call-arg]
         model="my_llm",
-        predibase_api_key="secret-api-key",
+        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
         adapter_id="my-other-hf-adapter",
     )
     assert llm.adapter_id == "my-other-hf-adapter"
@@ -57,21 +57,21 @@ def test_specifying_adapter_id_argument() -> None:
 
 def test_specifying_adapter_id_and_adapter_version_arguments() -> None:
-    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")
+    llm = Predibase(model="my_llm", predibase_api_key="secret-api-key")  # type: ignore[arg-type, call-arg]
     assert not llm.adapter_id
 
-    llm = Predibase(
+    llm = Predibase(  # type: ignore[call-arg]
         model="my_llm",
-        predibase_api_key="secret-api-key",
+        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
         adapter_id="my-hf-adapter",
         adapter_version=None,
     )
     assert llm.adapter_id == "my-hf-adapter"
     assert llm.adapter_version is None
 
-    llm = Predibase(
+    llm = Predibase(  # type: ignore[call-arg]
         model="my_llm",
-        predibase_api_key="secret-api-key",
+        predibase_api_key="secret-api-key",  # type: ignore[arg-type]
         adapter_id="my-other-hf-adapter",
         adapter_version=3,
     )
diff --git a/libs/community/tests/unit_tests/llms/test_stochasticai.py b/libs/community/tests/unit_tests/llms/test_stochasticai.py
index 1fa0cfd243..285d4368f2 100644
--- a/libs/community/tests/unit_tests/llms/test_stochasticai.py
+++ b/libs/community/tests/unit_tests/llms/test_stochasticai.py
@@ -5,14 +5,14 @@ from langchain_community.llms.stochasticai import StochasticAI
 
 def test_api_key_is_string() -> None:
-    llm = StochasticAI(stochasticai_api_key="secret-api-key")
+    llm = StochasticAI(stochasticai_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.stochasticai_api_key, SecretStr)
 
 
 def test_api_key_masked_when_passed_via_constructor(
     capsys: CaptureFixture,
 ) -> None:
-    llm = StochasticAI(stochasticai_api_key="secret-api-key")
+    llm = StochasticAI(stochasticai_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.stochasticai_api_key, end="")  # noqa: T201
 
     captured = capsys.readouterr()
diff --git a/libs/community/tests/unit_tests/llms/test_symblai_nebula.py b/libs/community/tests/unit_tests/llms/test_symblai_nebula.py
index 92ec7484ef..9109ee3c45 100644
--- a/libs/community/tests/unit_tests/llms/test_symblai_nebula.py
+++ b/libs/community/tests/unit_tests/llms/test_symblai_nebula.py
@@ -7,7 +7,7 @@ from langchain_community.llms.symblai_nebula import Nebula
 
 def test_api_key_is_secret_string() -> None:
-    llm = Nebula(nebula_api_key="secret-api-key")
+    llm = Nebula(nebula_api_key="secret-api-key")  # type: ignore[arg-type]
     assert isinstance(llm.nebula_api_key, SecretStr)
     assert llm.nebula_api_key.get_secret_value() == "secret-api-key"
 
@@ -23,7 +23,7 @@ def test_api_key_masked_when_passed_from_env(
 
 def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None:
-    llm = Nebula(nebula_api_key="secret-api-key")
+    llm = Nebula(nebula_api_key="secret-api-key")  # type: ignore[arg-type]
     print(llm.nebula_api_key, end="")  # noqa: T201
     captured = capsys.readouterr()
     assert captured.out == "**********"
diff --git a/libs/community/tests/unit_tests/llms/test_together.py b/libs/community/tests/unit_tests/llms/test_together.py
index 00da416b04..0d6cf97503 100644
--- a/libs/community/tests/unit_tests/llms/test_together.py
+++ b/libs/community/tests/unit_tests/llms/test_together.py
@@ -11,7 +11,7 @@ from langchain_community.llms.together import Together
 def test_together_api_key_is_secret_string() -> None:
     """Test that the API key is stored as a SecretStr."""
     llm = Together(
-        together_api_key="secret-api-key",
+        together_api_key="secret-api-key",  # type: ignore[arg-type]
         model="togethercomputer/RedPajama-INCITE-7B-Base",
         temperature=0.2,
         max_tokens=250,
@@ -24,7 +24,7 @@ def test_together_api_key_masked_when_passed_from_env(
 ) -> None:
     """Test that the API key is masked when passed from an environment variable."""
     monkeypatch.setenv("TOGETHER_API_KEY", "secret-api-key")
-    llm = Together(
+    llm = Together(  # type: ignore[call-arg]
         model="togethercomputer/RedPajama-INCITE-7B-Base",
         temperature=0.2,
         max_tokens=250,
@@ -40,7 +40,7 @@ def test_together_api_key_masked_when_passed_via_constructor(
 ) -> None:
     """Test that the API key is masked when passed via the constructor."""
     llm = Together(
-        together_api_key="secret-api-key",
+        together_api_key="secret-api-key",  # type: ignore[arg-type]
         model="togethercomputer/RedPajama-INCITE-7B-Base",
         temperature=0.2,
         max_tokens=250,
@@ -54,7 +54,7 @@ def test_together_api_key_masked_when_passed_via_constructor(
 def test_together_uses_actual_secret_value_from_secretstr() -> None:
     """Test that the actual secret value is correctly retrieved."""
     llm = Together(
-        together_api_key="secret-api-key",
+        together_api_key="secret-api-key",  # type: ignore[arg-type]
         model="togethercomputer/RedPajama-INCITE-7B-Base",
         temperature=0.2,
         max_tokens=250,
diff --git a/libs/community/tests/unit_tests/retrievers/test_bedrock.py b/libs/community/tests/unit_tests/retrievers/test_bedrock.py
index ad2da543d4..de954e6e19 100644
--- a/libs/community/tests/unit_tests/retrievers/test_bedrock.py
+++ b/libs/community/tests/unit_tests/retrievers/test_bedrock.py
@@ -23,7 +23,7 @@ def amazon_retriever(
 ) -> AmazonKnowledgeBasesRetriever:
     return AmazonKnowledgeBasesRetriever(
         knowledge_base_id="test_kb_id",
-        retrieval_config=mock_retriever_config,
+        retrieval_config=mock_retriever_config,  # type: ignore[arg-type]
         client=mock_client,
     )
diff --git a/libs/community/tests/unit_tests/tools/eden_ai/test_tools.py b/libs/community/tests/unit_tests/tools/eden_ai/test_tools.py
index 3a1b4ca86a..f3976ae8c0 100644
--- a/libs/community/tests/unit_tests/tools/eden_ai/test_tools.py
+++ b/libs/community/tests/unit_tests/tools/eden_ai/test_tools.py
@@ -5,7 +5,7 @@ import pytest
 
 from langchain_community.tools.edenai import EdenAiTextModerationTool
 
-tool = EdenAiTextModerationTool(
+tool = EdenAiTextModerationTool(  # type: ignore[call-arg]
     providers=["openai"], language="en", edenai_api_key="fake_key"
 )
diff --git a/libs/community/tests/unit_tests/tools/test_you.py b/libs/community/tests/unit_tests/tools/test_you.py
index acbf6154b0..08caf91077 100644
--- a/libs/community/tests/unit_tests/tools/test_you.py
+++ b/libs/community/tests/unit_tests/tools/test_you.py
@@ -23,7 +23,7 @@ class TestYouSearchTool:
             responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
         )
         query = "Test query text"
-        you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))
+        you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))  # type: ignore[call-arg]
         results = you_tool.invoke(query)
         expected_result = MOCK_PARSED_OUTPUT
         assert results == expected_result
@@ -34,7 +34,7 @@ class TestYouSearchTool:
             responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
         )
         query = "Test query text"
-        you_tool = YouSearchTool(
+        you_tool = YouSearchTool(  # type: ignore[call-arg]
             api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", k=2)
         )
         results = you_tool.invoke(query)
@@ -47,7 +47,7 @@ class TestYouSearchTool:
             responses.GET, f"{TEST_ENDPOINT}/search", json=MOCK_RESPONSE_RAW, status=200
         )
         query = "Test query text"
-        you_tool = YouSearchTool(
+        you_tool = YouSearchTool(  # type: ignore[call-arg]
             api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", n_snippets_per_hit=1)
         )
         results = you_tool.invoke(query)
@@ -61,7 +61,7 @@ class TestYouSearchTool:
         )
         query = "Test news text"
 
-        you_tool = YouSearchTool(
+        you_tool = YouSearchTool(  # type: ignore[call-arg]
             api_wrapper=YouSearchAPIWrapper(ydc_api_key="test", endpoint_type="news")
         )
         results = you_tool.invoke(query)
@@ -70,7 +70,7 @@ class TestYouSearchTool:
 
     @pytest.mark.asyncio
     async def test_ainvoke(self) -> None:
-        you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))
+        you_tool = YouSearchTool(api_wrapper=YouSearchAPIWrapper(ydc_api_key="test"))  # type: ignore[call-arg]
 
         # Mock response object to simulate aiohttp response
         mock_response = AsyncMock()
diff --git a/libs/community/tests/unit_tests/tools/test_zapier.py b/libs/community/tests/unit_tests/tools/test_zapier.py
index c10085ec73..7be1eacacb 100644
--- a/libs/community/tests/unit_tests/tools/test_zapier.py
+++ b/libs/community/tests/unit_tests/tools/test_zapier.py
@@ -15,7 +15,7 @@ def test_default_base_prompt() -> None:
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
     )
 
     # Test that the base prompt was successfully assigned to the default prompt
@@ -34,7 +34,7 @@ def test_custom_base_prompt() -> None:
         zapier_description="test",
         params_schema={"test": "test"},
         base_prompt=base_prompt,
-        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
     )
 
     # Test that the base prompt was successfully assigned to the default prompt
@@ -51,7 +51,7 @@ def test_custom_base_prompt_fail() -> None:
             zapier_description="test",
             params={"test": "test"},
            base_prompt=base_prompt,
-            api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+            api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
         )
 
 
@@ -61,7 +61,7 @@ def test_format_headers_api_key() -> None:
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
     )
     headers = tool.api_wrapper._format_headers()
     assert headers["Content-Type"] == "application/json"
@@ -75,7 +75,7 @@ def test_format_headers_access_token() -> None:
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(zapier_nla_oauth_access_token="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_oauth_access_token="test"),  # type: ignore[call-arg]
     )
     headers = tool.api_wrapper._format_headers()
     assert headers["Content-Type"] == "application/json"
@@ -89,7 +89,7 @@ def test_create_action_payload() -> None:
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
     )
 
     payload = tool.api_wrapper._create_action_payload("some instructions")
@@ -103,7 +103,7 @@ def test_create_action_payload_preview() -> None:
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
     )
 
     payload = tool.api_wrapper._create_action_payload(
@@ -120,7 +120,7 @@ def test_create_action_payload_with_params() -> None:
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),
+        api_wrapper=ZapierNLAWrapper(zapier_nla_api_key="test"),  # type: ignore[call-arg]
     )
 
     payload = tool.api_wrapper._create_action_payload(
@@ -139,7 +139,7 @@ async def test_apreview(mocker) -> None:  # type: ignore[no-untyped-def]
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(
+        api_wrapper=ZapierNLAWrapper(  # type: ignore[call-arg]
             zapier_nla_api_key="test",
             zapier_nla_api_base="http://localhost:8080/v1/",
         ),
@@ -167,7 +167,7 @@ async def test_arun(mocker) -> None:  # type: ignore[no-untyped-def]
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(
+        api_wrapper=ZapierNLAWrapper(  # type: ignore[call-arg]
             zapier_nla_api_key="test",
             zapier_nla_api_base="http://localhost:8080/v1/",
         ),
@@ -191,7 +191,7 @@ async def test_alist(mocker) -> None:  # type: ignore[no-untyped-def]
         action_id="test",
         zapier_description="test",
         params_schema={"test": "test"},
-        api_wrapper=ZapierNLAWrapper(
+        api_wrapper=ZapierNLAWrapper(  # type: ignore[call-arg]
             zapier_nla_api_key="test",
             zapier_nla_api_base="http://localhost:8080/v1/",
         ),
@@ -207,17 +207,17 @@ async def test_alist(mocker) -> None:  # type: ignore[no-untyped-def]
 
 def test_wrapper_fails_no_api_key_or_access_token_initialization() -> None:
     """Test Wrapper requires either an API Key or OAuth Access Token."""
     with pytest.raises(ValueError):
-        ZapierNLAWrapper()
+        ZapierNLAWrapper()  # type: ignore[call-arg]
 
 
 def test_wrapper_api_key_initialization() -> None:
     """Test Wrapper initializes with an API Key."""
-    ZapierNLAWrapper(zapier_nla_api_key="test")
+    ZapierNLAWrapper(zapier_nla_api_key="test")  # type: ignore[call-arg]
 
 
 def test_wrapper_access_token_initialization() -> None:
     """Test Wrapper initializes with an API Key."""
-    ZapierNLAWrapper(zapier_nla_oauth_access_token="test")
+    ZapierNLAWrapper(zapier_nla_oauth_access_token="test")  # type: ignore[call-arg]
 
 
 def test_list_raises_401_invalid_api_key() -> None:
@@ -233,7 +233,7 @@ def test_list_raises_401_invalid_api_key() -> None:
     mock_session.get.return_value = mock_response
 
     with patch("requests.Session", return_value=mock_session):
-        wrapper = ZapierNLAWrapper(zapier_nla_api_key="test")
+        wrapper = ZapierNLAWrapper(zapier_nla_api_key="test")  # type: ignore[call-arg]
         with pytest.raises(requests.HTTPError) as err:
             wrapper.list()
 
@@ -257,7 +257,7 @@ def test_list_raises_401_invalid_access_token() -> None:
     mock_session.get.return_value = mock_response
 
     with patch("requests.Session", return_value=mock_session):
-        wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token="test")
+        wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token="test")  # type: ignore[call-arg]
         with pytest.raises(requests.HTTPError) as err:
             wrapper.list()
 
@@ -280,7 +280,7 @@ def test_list_raises_other_error() -> None:
     mock_session.get.return_value = mock_response
 
     with patch("requests.Session", return_value=mock_session):
-        wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token="test")
+        wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token="test")  # type: ignore[call-arg]
         with pytest.raises(requests.HTTPError) as err:
             wrapper.list()
diff --git a/libs/community/tests/unit_tests/utilities/test_arxiv.py b/libs/community/tests/unit_tests/utilities/test_arxiv.py
index ace8740192..061a3a547c 100644
--- a/libs/community/tests/unit_tests/utilities/test_arxiv.py
+++ b/libs/community/tests/unit_tests/utilities/test_arxiv.py
@@ -6,7 +6,7 @@ from langchain_community.utilities import ArxivAPIWrapper
 @pytest.mark.requires("arxiv")
 def test_is_arxiv_identifier() -> None:
     """Test that is_arxiv_identifier returns True for valid arxiv identifiers"""
-    api_client = ArxivAPIWrapper()
+    api_client = ArxivAPIWrapper()  # type: ignore[call-arg]
     assert api_client.is_arxiv_identifier("1605.08386v1")
     assert api_client.is_arxiv_identifier("0705.0123")
     assert api_client.is_arxiv_identifier("2308.07912")
diff --git a/libs/community/tests/unit_tests/utilities/test_graphql.py b/libs/community/tests/unit_tests/utilities/test_graphql.py
index 82c75e21c0..51d43bb49e 100644
--- a/libs/community/tests/unit_tests/utilities/test_graphql.py
+++ b/libs/community/tests/unit_tests/utilities/test_graphql.py
@@ -82,7 +82,7 @@ def test_run() -> None:
     responses.add(responses.POST, TEST_ENDPOINT, json=MOCK_RESPONSE, status=200)
 
     query = "query { allUsers { name } }"
-    graphql_wrapper = GraphQLAPIWrapper(
+    graphql_wrapper = GraphQLAPIWrapper(  # type: ignore[call-arg]
         graphql_endpoint=TEST_ENDPOINT,
         custom_headers={"Authorization": "Bearer testtoken"},
         fetch_schema_from_transport=True,
diff --git a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py
index b9072dbfbb..e95e41dc4f 100644
--- a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py
+++ b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_asr.py
@@ -109,7 +109,7 @@ def riva_asr_stub_init_patch(
 @pytest.fixture
 def asr() -> RivaASR:
     """Initialize a copy of the runnable."""
-    return RivaASR(**CONFIG)
+    return RivaASR(**CONFIG)  # type: ignore[arg-type]
 
 
 @pytest.fixture
@@ -132,7 +132,7 @@ def test_init(asr: RivaASR) -> None:
 @pytest.mark.requires("riva.client")
 def test_init_defaults() -> None:
     """Ensure the runnable can be loaded with no arguments."""
-    _ = RivaASR()
+    _ = RivaASR()  # type: ignore[call-arg]
 
 
 @pytest.mark.requires("riva.client")
diff --git a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py
index 321f6d1ea8..3024ff5885 100644
--- a/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py
+++ b/libs/community/tests/unit_tests/utilities/test_nvidia_riva_tts.py
@@ -50,7 +50,7 @@ def riva_tts_stub_init_patch(
 @pytest.fixture
 def tts() -> RivaTTS:
     """Initialize a copy of the runnable."""
-    return RivaTTS(**CONFIG)
+    return RivaTTS(**CONFIG)  # type: ignore[arg-type]
 
 
 @pytest.mark.requires("riva.client")
@@ -63,7 +63,7 @@ def test_init(tts: RivaTTS) -> None:
 @pytest.mark.requires("riva.client")
 def test_init_defaults() -> None:
     """Ensure the runnable can be loaded with no arguments."""
-    _ = RivaTTS()
+    _ = RivaTTS()  # type: ignore[call-arg]
 
 
 @pytest.mark.requires("riva.client")
diff --git a/libs/community/tests/unit_tests/utilities/test_tavily.py b/libs/community/tests/unit_tests/utilities/test_tavily.py
index 751bca0cca..9d0ebb3660 100644
--- a/libs/community/tests/unit_tests/utilities/test_tavily.py
+++ b/libs/community/tests/unit_tests/utilities/test_tavily.py
@@ -3,5 +3,5 @@ from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
 
 def test_api_wrapper_api_key_not_visible() -> None:
     """Test that an exception is raised if the API key is not present."""
-    wrapper = TavilySearchAPIWrapper(tavily_api_key="abcd123")
+    wrapper = TavilySearchAPIWrapper(tavily_api_key="abcd123")  # type: ignore[arg-type]
     assert "abcd123" not in repr(wrapper)
diff --git a/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py b/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py
index c8cab81924..90196a7f3e 100644
--- a/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py
+++ b/libs/community/tests/unit_tests/vectorstores/redis/test_redis_schema.py
@@ -58,7 +58,7 @@ def test_flat_vector_field_defaults() -> None:
         "algorithm": "FLAT",
     }
 
-    flat_vector = FlatVectorField(**flat_vector_field_data)
+    flat_vector = FlatVectorField(**flat_vector_field_data)  # type: ignore[arg-type]
     assert flat_vector.datatype == "FLOAT32"
     assert flat_vector.distance_metric == "COSINE"
     assert flat_vector.initial_cap is None
@@ -75,7 +75,7 @@ def test_flat_vector_field_optional_values() -> None:
         "block_size": 10,
     }
 
-    flat_vector = FlatVectorField(**flat_vector_field_data)
+    flat_vector = FlatVectorField(**flat_vector_field_data)  # type: ignore[arg-type]
     assert flat_vector.initial_cap == 1000
     assert flat_vector.block_size == 10
 
@@ -88,7 +88,7 @@ def test_hnsw_vector_field_defaults() -> None:
         "algorithm": "HNSW",
     }
 
-    hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)
+    hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)  # type: ignore[arg-type]
     assert hnsw_vector.datatype == "FLOAT32"
     assert hnsw_vector.distance_metric == "COSINE"
     assert hnsw_vector.initial_cap is None
@@ -110,7 +110,7 @@ def test_hnsw_vector_field_optional_values() -> None:
         "ef_runtime": 15,
         "epsilon": 0.05,
     }
-    hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)
+    hnsw_vector = HNSWVectorField(**hnsw_vector_field_data)  # type: ignore[arg-type]
     assert hnsw_vector.initial_cap == 2000
     assert hnsw_vector.m == 10
     assert hnsw_vector.ef_construction == 250