diff --git a/.github/scripts/check_diff.py b/.github/scripts/check_diff.py index ee20daa547..4cbe1ec8b2 100644 --- a/.github/scripts/check_diff.py +++ b/.github/scripts/check_diff.py @@ -47,4 +47,4 @@ if __name__ == "__main__": else: pass json_output = json.dumps(list(dirs_to_run)) - print(f"dirs-to-run={json_output}") + print(f"dirs-to-run={json_output}") # noqa: T201 diff --git a/.github/scripts/get_min_versions.py b/.github/scripts/get_min_versions.py index fb68755ac2..16a47254b2 100644 --- a/.github/scripts/get_min_versions.py +++ b/.github/scripts/get_min_versions.py @@ -62,4 +62,6 @@ toml_file = sys.argv[1] # Call the function to get the minimum versions min_versions = get_min_version_from_toml(toml_file) -print(" ".join([f"{lib}=={version}" for lib, version in min_versions.items()])) +print(  # noqa: T201 + " ".join([f"{lib}=={version}" for lib, version in min_versions.items()]) +) diff --git a/.github/workflows/extract_ignored_words_list.py b/.github/workflows/extract_ignored_words_list.py index 7c800e0df0..bb949d14f7 100644 --- a/.github/workflows/extract_ignored_words_list.py +++ b/.github/workflows/extract_ignored_words_list.py @@ -7,4 +7,4 @@ ignore_words_list = ( pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list") ) -print(f"::set-output name=ignore_words_list::{ignore_words_list}") +print(f"::set-output name=ignore_words_list::{ignore_words_list}") # noqa: T201 diff --git a/docs/api_reference/create_api_rst.py b/docs/api_reference/create_api_rst.py index 7be0fcc43c..9413d90423 100644 --- a/docs/api_reference/create_api_rst.py +++ b/docs/api_reference/create_api_rst.py @@ -1,4 +1,5 @@ """Script for auto-generating api_reference.rst.""" + import importlib import inspect import os @@ -186,7 +187,7 @@ def _load_package_modules( modules_by_namespace[top_namespace] = _module_members except ImportError as e: - print(f"Error: Unable to import module '{namespace}' with error: {e}") + print(f"Error: Unable to import module '{namespace}' 
with error: {e}") # noqa: T201 return modules_by_namespace diff --git a/docs/docs/integrations/document_loaders/example_data/source_code/example.py b/docs/docs/integrations/document_loaders/example_data/source_code/example.py index 2a2760b6a6..5838764ee4 100644 --- a/docs/docs/integrations/document_loaders/example_data/source_code/example.py +++ b/docs/docs/integrations/document_loaders/example_data/source_code/example.py @@ -3,7 +3,7 @@ class MyClass: self.name = name def greet(self): - print(f"Hello, {self.name}!") + print(f"Hello, {self.name}!") # noqa: T201 def main(): diff --git a/docs/scripts/generate_api_reference_links.py b/docs/scripts/generate_api_reference_links.py index a9a38629b9..406c1c79bf 100644 --- a/docs/scripts/generate_api_reference_links.py +++ b/docs/scripts/generate_api_reference_links.py @@ -64,7 +64,7 @@ def main(): global_imports = {} for file in find_files(args.docs_dir): - print(f"Adding links for imports in {file}") + print(f"Adding links for imports in {file}") # noqa: T201 file_imports = replace_imports(file) if file_imports: diff --git a/libs/cli/langchain_cli/integration_template/pyproject.toml b/libs/cli/langchain_cli/integration_template/pyproject.toml index cccfba77d8..be76900001 100644 --- a/libs/cli/langchain_cli/integration_template/pyproject.toml +++ b/libs/cli/langchain_cli/integration_template/pyproject.toml @@ -58,6 +58,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/cli/langchain_cli/integration_template/scripts/check_imports.py b/libs/cli/langchain_cli/integration_template/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/cli/langchain_cli/integration_template/scripts/check_imports.py +++ b/libs/cli/langchain_cli/integration_template/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 
traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/cli/pyproject.toml b/libs/cli/pyproject.toml index 481000bf86..24d4108580 100644 --- a/libs/cli/pyproject.toml +++ b/libs/cli/pyproject.toml @@ -45,6 +45,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.poe.tasks] diff --git a/libs/community/langchain_community/callbacks/arize_callback.py b/libs/community/langchain_community/callbacks/arize_callback.py index 44212b6191..83cba04845 100644 --- a/libs/community/langchain_community/callbacks/arize_callback.py +++ b/libs/community/langchain_community/callbacks/arize_callback.py @@ -49,7 +49,7 @@ class ArizeCallbackHandler(BaseCallbackHandler): if SPACE_KEY == "SPACE_KEY" or API_KEY == "API_KEY": raise ValueError("❌ CHANGE SPACE AND API KEYS") else: - print("✅ Arize client setup done! Now you can start using Arize!") + print("✅ Arize client setup done! Now you can start using Arize!") # noqa: T201 def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any @@ -161,9 +161,9 @@ class ArizeCallbackHandler(BaseCallbackHandler): environment=Environments.PRODUCTION, ) if response_from_arize.status_code == 200: - print("✅ Successfully logged data to Arize!") + print("✅ Successfully logged data to Arize!") # noqa: T201 else: - print(f'❌ Logging failed "{response_from_arize.text}"') + print(f'❌ Logging failed "{response_from_arize.text}"') # noqa: T201 def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: """Do nothing.""" diff --git a/libs/community/langchain_community/callbacks/clearml_callback.py b/libs/community/langchain_community/callbacks/clearml_callback.py index 7c4b66e614..8b8aa8e98a 100644 --- a/libs/community/langchain_community/callbacks/clearml_callback.py +++ b/libs/community/langchain_community/callbacks/clearml_callback.py @@ -509,8 +509,8 @@ class ClearMLCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): 
target_filename=name, ) except NotImplementedError as e: - print("Could not save model.") - print(repr(e)) + print("Could not save model.") # noqa: T201 + print(repr(e)) # noqa: T201 pass # Cleanup after adding everything to ClearML diff --git a/libs/community/langchain_community/callbacks/confident_callback.py b/libs/community/langchain_community/callbacks/confident_callback.py index d9432e5d56..4162dc1231 100644 --- a/libs/community/langchain_community/callbacks/confident_callback.py +++ b/libs/community/langchain_community/callbacks/confident_callback.py @@ -116,13 +116,13 @@ class DeepEvalCallbackHandler(BaseCallbackHandler): output=output, query=query, ) - print(f"Answer Relevancy: {result}") + print(f"Answer Relevancy: {result}") # noqa: T201 elif isinstance(metric, UnBiasedMetric): score = metric.measure(output) - print(f"Bias Score: {score}") + print(f"Bias Score: {score}") # noqa: T201 elif isinstance(metric, NonToxicMetric): score = metric.measure(output) - print(f"Toxic Score: {score}") + print(f"Toxic Score: {score}") # noqa: T201 else: raise ValueError( f"""Metric {metric.__name__} is not supported by deepeval diff --git a/libs/community/langchain_community/callbacks/infino_callback.py b/libs/community/langchain_community/callbacks/infino_callback.py index 106a9a5a08..0b58b598df 100644 --- a/libs/community/langchain_community/callbacks/infino_callback.py +++ b/libs/community/langchain_community/callbacks/infino_callback.py @@ -86,7 +86,7 @@ class InfinoCallbackHandler(BaseCallbackHandler): }, } if self.verbose: - print(f"Tracking {key} with Infino: {payload}") + print(f"Tracking {key} with Infino: {payload}") # noqa: T201 # Append to Infino time series only if is_ts is True, otherwise # append to Infino log. 
@@ -245,7 +245,7 @@ class InfinoCallbackHandler(BaseCallbackHandler): self._send_to_infino("prompt_tokens", prompt_tokens) if self.verbose: - print( + print( # noqa: T201 f"on_chat_model_start: is_chat_openai_model= \ {self.is_chat_openai_model}, \ chat_openai_model_name={self.chat_openai_model_name}" diff --git a/libs/community/langchain_community/callbacks/mlflow_callback.py b/libs/community/langchain_community/callbacks/mlflow_callback.py index b524bc19c2..b81da0c166 100644 --- a/libs/community/langchain_community/callbacks/mlflow_callback.py +++ b/libs/community/langchain_community/callbacks/mlflow_callback.py @@ -646,9 +646,11 @@ class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): { "page_content": doc.page_content, "metadata": { - k: str(v) - if not isinstance(v, list) - else ",".join(str(x) for x in v) + k: ( + str(v) + if not isinstance(v, list) + else ",".join(str(x) for x in v) + ) for k, v in doc.metadata.items() }, } @@ -757,15 +759,15 @@ class MlflowCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): langchain_asset.save_agent(langchain_asset_path) self.mlflg.artifact(langchain_asset_path) except AttributeError: - print("Could not save model.") + print("Could not save model.") # noqa: T201 traceback.print_exc() pass except NotImplementedError: - print("Could not save model.") + print("Could not save model.") # noqa: T201 traceback.print_exc() pass except NotImplementedError: - print("Could not save model.") + print("Could not save model.") # noqa: T201 traceback.print_exc() pass if finish: diff --git a/libs/community/langchain_community/callbacks/wandb_callback.py b/libs/community/langchain_community/callbacks/wandb_callback.py index 9f968d3c67..035c44640d 100644 --- a/libs/community/langchain_community/callbacks/wandb_callback.py +++ b/libs/community/langchain_community/callbacks/wandb_callback.py @@ -558,8 +558,8 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): 
model_artifact.add_file(str(langchain_asset_path)) model_artifact.metadata = load_json_to_dict(langchain_asset_path) except NotImplementedError as e: - print("Could not save model.") - print(repr(e)) + print("Could not save model.") # noqa: T201 + print(repr(e)) # noqa: T201 pass self.run.log_artifact(model_artifact) @@ -577,7 +577,9 @@ class WandbCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler): name=name if name else self.name, notes=notes if notes else self.notes, visualize=visualize if visualize else self.visualize, - complexity_metrics=complexity_metrics - if complexity_metrics - else self.complexity_metrics, + complexity_metrics=( + complexity_metrics + if complexity_metrics + else self.complexity_metrics + ), ) diff --git a/libs/community/langchain_community/chat_message_histories/rocksetdb.py b/libs/community/langchain_community/chat_message_histories/rocksetdb.py index 0391726c4c..3e48de373e 100644 --- a/libs/community/langchain_community/chat_message_histories/rocksetdb.py +++ b/libs/community/langchain_community/chat_message_histories/rocksetdb.py @@ -34,7 +34,7 @@ class RocksetChatMessageHistory(BaseChatMessageHistory): history.add_user_message("hi!") history.add_ai_message("whats up?") - print(history.messages) + print(history.messages) # noqa: T201 """ # You should set these values based on your VI. 
diff --git a/libs/community/langchain_community/chat_models/deepinfra.py b/libs/community/langchain_community/chat_models/deepinfra.py index 349e6074fd..06610e5926 100644 --- a/libs/community/langchain_community/chat_models/deepinfra.py +++ b/libs/community/langchain_community/chat_models/deepinfra.py @@ -1,4 +1,5 @@ """deepinfra.com chat models wrapper""" + from __future__ import annotations import json @@ -207,7 +208,7 @@ class ChatDeepInfra(BaseChatModel): return response except Exception as e: # import pdb; pdb.set_trace() - print("EX", e) + print("EX", e) # noqa: T201 raise return _completion_with_retry(**kwargs) @@ -231,7 +232,7 @@ class ChatDeepInfra(BaseChatModel): self._handle_status(response.status, response.text) return await response.json() except Exception as e: - print("EX", e) + print("EX", e) # noqa: T201 raise return await _completion_with_retry(**kwargs) diff --git a/libs/community/langchain_community/chat_models/human.py b/libs/community/langchain_community/chat_models/human.py index e029474693..030516a822 100644 --- a/libs/community/langchain_community/chat_models/human.py +++ b/libs/community/langchain_community/chat_models/human.py @@ -1,4 +1,5 @@ """ChatModel wrapper which returns user input as the response..""" + from io import StringIO from typing import Any, Callable, Dict, List, Mapping, Optional @@ -30,9 +31,9 @@ def _display_messages(messages: List[BaseMessage]) -> None: width=10000, line_break=None, ) - print("\n", "======= start of message =======", "\n\n") - print(yaml_string) - print("======= end of message =======", "\n\n") + print("\n", "======= start of message =======", "\n\n") # noqa: T201 + print(yaml_string) # noqa: T201 + print("======= end of message =======", "\n\n") # noqa: T201 def _collect_yaml_input( diff --git a/libs/community/langchain_community/document_loaders/assemblyai.py b/libs/community/langchain_community/document_loaders/assemblyai.py index eb4671c0d2..ee040ab513 100644 --- 
a/libs/community/langchain_community/document_loaders/assemblyai.py +++ b/libs/community/langchain_community/document_loaders/assemblyai.py @@ -150,7 +150,7 @@ class AssemblyAIAudioLoaderById(BaseLoader): ) transcript_response.raise_for_status() except Exception as e: - print(f"An error occurred: {e}") + print(f"An error occurred: {e}") # noqa: T201 raise transcript = transcript_response.json()["text"] @@ -166,7 +166,7 @@ class AssemblyAIAudioLoaderById(BaseLoader): ) paragraphs_response.raise_for_status() except Exception as e: - print(f"An error occurred: {e}") + print(f"An error occurred: {e}") # noqa: T201 raise paragraphs = paragraphs_response.json()["paragraphs"] @@ -181,7 +181,7 @@ class AssemblyAIAudioLoaderById(BaseLoader): ) sentences_response.raise_for_status() except Exception as e: - print(f"An error occurred: {e}") + print(f"An error occurred: {e}") # noqa: T201 raise sentences = sentences_response.json()["sentences"] @@ -196,7 +196,7 @@ class AssemblyAIAudioLoaderById(BaseLoader): ) srt_response.raise_for_status() except Exception as e: - print(f"An error occurred: {e}") + print(f"An error occurred: {e}") # noqa: T201 raise srt = srt_response.text @@ -211,7 +211,7 @@ class AssemblyAIAudioLoaderById(BaseLoader): ) vtt_response.raise_for_status() except Exception as e: - print(f"An error occurred: {e}") + print(f"An error occurred: {e}") # noqa: T201 raise vtt = vtt_response.text diff --git a/libs/community/langchain_community/document_loaders/blackboard.py b/libs/community/langchain_community/document_loaders/blackboard.py index cc33db332d..873f0ab1db 100644 --- a/libs/community/langchain_community/document_loaders/blackboard.py +++ b/libs/community/langchain_community/document_loaders/blackboard.py @@ -109,13 +109,13 @@ class BlackboardLoader(WebBaseLoader): documents = [] for path in relative_paths: url = self.base_url + path - print(f"Fetching documents from {url}") + print(f"Fetching documents from {url}") # noqa: T201 soup_info = 
self._scrape(url) with contextlib.suppress(ValueError): documents.extend(self._get_documents(soup_info)) return documents else: - print(f"Fetching documents from {self.web_path}") + print(f"Fetching documents from {self.web_path}") # noqa: T201 soup_info = self.scrape() self.folder_path = self._get_folder_path(soup_info) return self._get_documents(soup_info) @@ -295,4 +295,4 @@ if __name__ == "__main__": load_all_recursively=True, ) documents = loader.load() - print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}") + print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}") # noqa: T201 diff --git a/libs/community/langchain_community/document_loaders/blob_loaders/file_system.py b/libs/community/langchain_community/document_loaders/blob_loaders/file_system.py index 0fcdd4438e..d9c8ebf883 100644 --- a/libs/community/langchain_community/document_loaders/blob_loaders/file_system.py +++ b/libs/community/langchain_community/document_loaders/blob_loaders/file_system.py @@ -1,4 +1,5 @@ """Use to load blobs from the local file system.""" + from pathlib import Path from typing import Callable, Iterable, Iterator, Optional, Sequence, TypeVar, Union @@ -46,7 +47,7 @@ class FileSystemBlobLoader(BlobLoader): from langchain_community.document_loaders.blob_loaders import FileSystemBlobLoader loader = FileSystemBlobLoader("/path/to/directory") for blob in loader.yield_blobs(): - print(blob) + print(blob) # noqa: T201 """ # noqa: E501 def __init__( diff --git a/libs/community/langchain_community/document_loaders/confluence.py b/libs/community/langchain_community/document_loaders/confluence.py index 6e3293e00a..0c57f48fb2 100644 --- a/libs/community/langchain_community/document_loaders/confluence.py +++ b/libs/community/langchain_community/document_loaders/confluence.py @@ -564,7 +564,7 @@ class ConfluenceLoader(BaseLoader): texts.append(text) except requests.HTTPError as e: if e.response.status_code == 404: - print(f"Attachment not found at 
{absolute_url}") + print(f"Attachment not found at {absolute_url}") # noqa: T201 continue else: raise diff --git a/libs/community/langchain_community/document_loaders/dropbox.py b/libs/community/langchain_community/document_loaders/dropbox.py index fcf9d6503f..fb76043c13 100644 --- a/libs/community/langchain_community/document_loaders/dropbox.py +++ b/libs/community/langchain_community/document_loaders/dropbox.py @@ -121,7 +121,7 @@ class DropboxLoader(BaseLoader, BaseModel): file_extension = os.path.splitext(file_path)[1].lower() if file_extension == ".pdf": - print(f"File {file_path} type detected as .pdf") + print(f"File {file_path} type detected as .pdf") # noqa: T201 from langchain_community.document_loaders import UnstructuredPDFLoader # Download it to a temporary file. @@ -136,10 +136,10 @@ class DropboxLoader(BaseLoader, BaseModel): if docs: return docs[0] except Exception as pdf_ex: - print(f"Error while trying to parse PDF {file_path}: {pdf_ex}") + print(f"Error while trying to parse PDF {file_path}: {pdf_ex}") # noqa: T201 return None else: - print( + print( # noqa: T201 f"File {file_path} could not be decoded as pdf or text. Skipping." 
) diff --git a/libs/community/langchain_community/document_loaders/etherscan.py b/libs/community/langchain_community/document_loaders/etherscan.py index 862b63843a..eb057b4b60 100644 --- a/libs/community/langchain_community/document_loaders/etherscan.py +++ b/libs/community/langchain_community/document_loaders/etherscan.py @@ -85,7 +85,7 @@ class EtherscanLoader(BaseLoader): response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: - print("Error occurred while making the request:", e) + print("Error occurred while making the request:", e) # noqa: T201 items = response.json()["result"] result = [] if len(items) == 0: @@ -94,7 +94,7 @@ class EtherscanLoader(BaseLoader): content = str(item) metadata = {"from": item["from"], "tx_hash": item["hash"], "to": item["to"]} result.append(Document(page_content=content, metadata=metadata)) - print(len(result)) + print(len(result)) # noqa: T201 return result def getEthBalance(self) -> List[Document]: @@ -107,7 +107,7 @@ class EtherscanLoader(BaseLoader): response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: - print("Error occurred while making the request:", e) + print("Error occurred while making the request:", e) # noqa: T201 return [Document(page_content=response.json()["result"])] def getInternalTx(self) -> List[Document]: @@ -122,7 +122,7 @@ class EtherscanLoader(BaseLoader): response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: - print("Error occurred while making the request:", e) + print("Error occurred while making the request:", e) # noqa: T201 items = response.json()["result"] result = [] if len(items) == 0: @@ -145,7 +145,7 @@ class EtherscanLoader(BaseLoader): response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: - print("Error occurred while making the request:", e) + print("Error occurred while making the 
request:", e) # noqa: T201 items = response.json()["result"] result = [] if len(items) == 0: @@ -168,7 +168,7 @@ class EtherscanLoader(BaseLoader): response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: - print("Error occurred while making the request:", e) + print("Error occurred while making the request:", e) # noqa: T201 items = response.json()["result"] result = [] if len(items) == 0: @@ -191,7 +191,7 @@ class EtherscanLoader(BaseLoader): response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: - print("Error occurred while making the request:", e) + print("Error occurred while making the request:", e) # noqa: T201 items = response.json()["result"] result = [] if len(items) == 0: diff --git a/libs/community/langchain_community/document_loaders/git.py b/libs/community/langchain_community/document_loaders/git.py index 5bd5341a3d..97c02b2111 100644 --- a/libs/community/langchain_community/document_loaders/git.py +++ b/libs/community/langchain_community/document_loaders/git.py @@ -105,6 +105,6 @@ class GitLoader(BaseLoader): doc = Document(page_content=text_content, metadata=metadata) docs.append(doc) except Exception as e: - print(f"Error reading file {file_path}: {e}") + print(f"Error reading file {file_path}: {e}") # noqa: T201 return docs diff --git a/libs/community/langchain_community/document_loaders/googledrive.py b/libs/community/langchain_community/document_loaders/googledrive.py index 7f5124a4d9..9e28b4dedf 100644 --- a/libs/community/langchain_community/document_loaders/googledrive.py +++ b/libs/community/langchain_community/document_loaders/googledrive.py @@ -216,9 +216,9 @@ class GoogleDriveLoader(BaseLoader, BaseModel): except HttpError as e: if e.resp.status == 404: - print("File not found: {}".format(id)) + print("File not found: {}".format(id)) # noqa: T201 else: - print("An error occurred: {}".format(e)) + print("An error occurred: {}".format(e)) 
# noqa: T201 text = fh.getvalue().decode("utf-8") metadata = { diff --git a/libs/community/langchain_community/document_loaders/nuclia.py b/libs/community/langchain_community/document_loaders/nuclia.py index 3312858899..0744b778b4 100644 --- a/libs/community/langchain_community/document_loaders/nuclia.py +++ b/libs/community/langchain_community/document_loaders/nuclia.py @@ -25,7 +25,7 @@ class NucliaLoader(BaseLoader): return [] obj = json.loads(data) text = obj["extracted_text"][0]["body"]["text"] - print(text) + print(text) # noqa: T201 metadata = { "file": obj["file_extracted_data"][0], "metadata": obj["field_metadata"][0], diff --git a/libs/community/langchain_community/document_loaders/onenote.py b/libs/community/langchain_community/document_loaders/onenote.py index 2c96eb10e3..6e3c6cd097 100644 --- a/libs/community/langchain_community/document_loaders/onenote.py +++ b/libs/community/langchain_community/document_loaders/onenote.py @@ -1,4 +1,5 @@ """Loads data from OneNote Notebooks""" + from pathlib import Path from typing import Dict, Iterator, List, Optional @@ -164,8 +165,8 @@ class OneNoteLoader(BaseLoader, BaseModel): authorization_request_url = client_instance.get_authorization_request_url( self._scopes ) - print("Visit the following url to give consent:") - print(authorization_request_url) + print("Visit the following url to give consent:") # noqa: T201 + print(authorization_request_url) # noqa: T201 authorization_url = input("Paste the authenticated url here:\n") authorization_code = authorization_url.split("code=")[1].split("&")[0] diff --git a/libs/community/langchain_community/document_loaders/parsers/audio.py b/libs/community/langchain_community/document_loaders/parsers/audio.py index 3b96f9860c..65674e3d1d 100644 --- a/libs/community/langchain_community/document_loaders/parsers/audio.py +++ b/libs/community/langchain_community/document_loaders/parsers/audio.py @@ -64,7 +64,7 @@ class OpenAIWhisperParser(BaseBlobParser): file_obj.name = 
f"part_{split_number}.mp3" # Transcribe - print(f"Transcribing part {split_number + 1}!") + print(f"Transcribing part {split_number + 1}!") # noqa: T201 attempts = 0 while attempts < 3: try: @@ -77,10 +77,10 @@ class OpenAIWhisperParser(BaseBlobParser): break except Exception as e: attempts += 1 - print(f"Attempt {attempts} failed. Exception: {str(e)}") + print(f"Attempt {attempts} failed. Exception: {str(e)}") # noqa: T201 time.sleep(5) else: - print("Failed to transcribe after 3 attempts.") + print("Failed to transcribe after 3 attempts.") # noqa: T201 continue yield Document( @@ -169,7 +169,7 @@ class OpenAIWhisperParserLocal(BaseBlobParser): rec_model = "openai/whisper-large" self.lang_model = lang_model if lang_model else rec_model - print("Using the following model: ", self.lang_model) + print("Using the following model: ", self.lang_model) # noqa: T201 self.batch_size = batch_size @@ -216,7 +216,7 @@ class OpenAIWhisperParserLocal(BaseBlobParser): file_obj = io.BytesIO(audio.export(format="mp3").read()) # Transcribe - print(f"Transcribing part {blob.path}!") + print(f"Transcribing part {blob.path}!") # noqa: T201 y, sr = librosa.load(file_obj, sr=16000) diff --git a/libs/community/langchain_community/document_loaders/parsers/vsdx.py b/libs/community/langchain_community/document_loaders/parsers/vsdx.py index b5077b1719..aeb414453a 100644 --- a/libs/community/langchain_community/document_loaders/parsers/vsdx.py +++ b/libs/community/langchain_community/document_loaders/parsers/vsdx.py @@ -61,13 +61,13 @@ class VsdxParser(BaseBlobParser, ABC): ) if "visio/pages/pages.xml" not in zfile.namelist(): - print("WARNING - No pages.xml file found in {}".format(source)) + print("WARNING - No pages.xml file found in {}".format(source)) # noqa: T201 return # type: ignore[return-value] if "visio/pages/_rels/pages.xml.rels" not in zfile.namelist(): - print("WARNING - No pages.xml.rels file found in {}".format(source)) + print("WARNING - No pages.xml.rels file found in 
{}".format(source)) # noqa: T201 return # type: ignore[return-value] if "docProps/app.xml" not in zfile.namelist(): - print("WARNING - No app.xml file found in {}".format(source)) + print("WARNING - No app.xml file found in {}".format(source)) # noqa: T201 return # type: ignore[return-value] pagesxml_content: dict = xmltodict.parse(zfile.read("visio/pages/pages.xml")) diff --git a/libs/community/langchain_community/document_loaders/pdf.py b/libs/community/langchain_community/document_loaders/pdf.py index 600eb26bd0..60c2e7fe48 100644 --- a/libs/community/langchain_community/document_loaders/pdf.py +++ b/libs/community/langchain_community/document_loaders/pdf.py @@ -480,7 +480,7 @@ class MathpixPDFLoader(BasePDFLoader): # This indicates an error with the PDF processing raise ValueError("Unable to retrieve PDF from Mathpix") else: - print(f"Status: {status}, waiting for processing to complete") + print(f"Status: {status}, waiting for processing to complete") # noqa: T201 time.sleep(5) raise TimeoutError diff --git a/libs/community/langchain_community/document_loaders/snowflake_loader.py b/libs/community/langchain_community/document_loaders/snowflake_loader.py index c0e479f610..b37926b5e0 100644 --- a/libs/community/langchain_community/document_loaders/snowflake_loader.py +++ b/libs/community/langchain_community/document_loaders/snowflake_loader.py @@ -88,7 +88,7 @@ class SnowflakeLoader(BaseLoader): column_names = [column[0] for column in cur.description] query_result = [dict(zip(column_names, row)) for row in query_result] except Exception as e: - print(f"An error occurred: {e}") + print(f"An error occurred: {e}") # noqa: T201 query_result = [] finally: cur.close() @@ -110,7 +110,7 @@ class SnowflakeLoader(BaseLoader): def lazy_load(self) -> Iterator[Document]: query_result = self._execute_query() if isinstance(query_result, Exception): - print(f"An error occurred during the query: {query_result}") + print(f"An error occurred during the query: {query_result}") # 
noqa: T201 return [] page_content_columns, metadata_columns = self._get_columns(query_result) if "*" in page_content_columns: diff --git a/libs/community/langchain_community/document_loaders/toml.py b/libs/community/langchain_community/document_loaders/toml.py index dcb59d41f4..34d7ccfbfe 100644 --- a/libs/community/langchain_community/document_loaders/toml.py +++ b/libs/community/langchain_community/document_loaders/toml.py @@ -44,4 +44,4 @@ class TomlLoader(BaseLoader): ) yield doc except tomli.TOMLDecodeError as e: - print(f"Error parsing TOML file {file_path}: {e}") + print(f"Error parsing TOML file {file_path}: {e}") # noqa: T201 diff --git a/libs/community/langchain_community/embeddings/baichuan.py b/libs/community/langchain_community/embeddings/baichuan.py index 9f0bf92ea8..430ef548cc 100644 --- a/libs/community/langchain_community/embeddings/baichuan.py +++ b/libs/community/langchain_community/embeddings/baichuan.py @@ -79,14 +79,14 @@ class BaichuanTextEmbeddings(BaseModel, Embeddings): return [result.get("embedding", []) for result in sorted_embeddings] else: # Log error or handle unsuccessful response appropriately - print( + print( # noqa: T201 f"""Error: Received status code {response.status_code} from embedding API""" ) return None except Exception as e: # Log the exception or handle it as needed - print(f"Exception occurred while trying to get embeddings: {str(e)}") + print(f"Exception occurred while trying to get embeddings: {str(e)}") # noqa: T201 return None def embed_documents(self, texts: List[str]) -> Optional[List[List[float]]]: # type: ignore[override] diff --git a/libs/community/langchain_community/embeddings/javelin_ai_gateway.py b/libs/community/langchain_community/embeddings/javelin_ai_gateway.py index c91a003291..6ee376097e 100644 --- a/libs/community/langchain_community/embeddings/javelin_ai_gateway.py +++ b/libs/community/langchain_community/embeddings/javelin_ai_gateway.py @@ -74,7 +74,7 @@ class JavelinAIGatewayEmbeddings(Embeddings, 
BaseModel): if "embedding" in item: embeddings.append(item["embedding"]) except ValueError as e: - print("Failed to query route: " + str(e)) + print("Failed to query route: " + str(e)) # noqa: T201 return embeddings @@ -92,7 +92,7 @@ class JavelinAIGatewayEmbeddings(Embeddings, BaseModel): if "embedding" in item: embeddings.append(item["embedding"]) except ValueError as e: - print("Failed to query route: " + str(e)) + print("Failed to query route: " + str(e)) # noqa: T201 return embeddings diff --git a/libs/community/langchain_community/llms/aleph_alpha.py b/libs/community/langchain_community/llms/aleph_alpha.py index 8ae891024b..be24b919a9 100644 --- a/libs/community/langchain_community/llms/aleph_alpha.py +++ b/libs/community/langchain_community/llms/aleph_alpha.py @@ -284,4 +284,4 @@ class AlephAlpha(LLM): if __name__ == "__main__": aa = AlephAlpha() - print(aa("How are you?")) + print(aa("How are you?")) # noqa: T201 diff --git a/libs/community/langchain_community/llms/beam.py b/libs/community/langchain_community/llms/beam.py index dfdb437535..9476cf07d0 100644 --- a/libs/community/langchain_community/llms/beam.py +++ b/libs/community/langchain_community/llms/beam.py @@ -187,7 +187,7 @@ class Beam(LLM): do_sample=True, pad_token_id=tokenizer.eos_token_id) output = tokenizer.decode(outputs[0], skip_special_tokens=True) - print(output) + print(output) # noqa: T201 return {{"text": output}} """ diff --git a/libs/community/langchain_community/llms/deepsparse.py b/libs/community/langchain_community/llms/deepsparse.py index ab7a7c58b4..c82f022e27 100644 --- a/libs/community/langchain_community/llms/deepsparse.py +++ b/libs/community/langchain_community/llms/deepsparse.py @@ -177,7 +177,7 @@ class DeepSparse(LLM): ) for chunk in llm.stream("Tell me a joke", stop=["'","\n"]): - print(chunk, end='', flush=True) + print(chunk, end='', flush=True) # noqa: T201 """ inference = self.pipeline( sequences=prompt, streaming=True, **self.generation_config @@ -215,7 +215,7 @@ 
class DeepSparse(LLM): ) for chunk in llm.stream("Tell me a joke", stop=["'","\n"]): - print(chunk, end='', flush=True) + print(chunk, end='', flush=True) # noqa: T201 """ inference = self.pipeline( sequences=prompt, streaming=True, **self.generation_config diff --git a/libs/community/langchain_community/llms/huggingface_text_gen_inference.py b/libs/community/langchain_community/llms/huggingface_text_gen_inference.py index b23aaa6e9b..e03b6f7adc 100644 --- a/libs/community/langchain_community/llms/huggingface_text_gen_inference.py +++ b/libs/community/langchain_community/llms/huggingface_text_gen_inference.py @@ -33,7 +33,7 @@ class HuggingFaceTextGenInference(LLM): temperature=0.01, repetition_penalty=1.03, ) - print(llm("What is Deep Learning?")) + print(llm("What is Deep Learning?")) # noqa: T201 # Streaming response example from langchain_community.callbacks import streaming_stdout @@ -50,7 +50,7 @@ class HuggingFaceTextGenInference(LLM): callbacks=callbacks, streaming=True ) - print(llm("What is Deep Learning?")) + print(llm("What is Deep Learning?")) # noqa: T201 """ diff --git a/libs/community/langchain_community/llms/human.py b/libs/community/langchain_community/llms/human.py index 8ee75db3c4..ae1e627f30 100644 --- a/libs/community/langchain_community/llms/human.py +++ b/libs/community/langchain_community/llms/human.py @@ -9,7 +9,7 @@ from langchain_community.llms.utils import enforce_stop_tokens def _display_prompt(prompt: str) -> None: """Displays the given prompt to the user.""" - print(f"\n{prompt}") + print(f"\n{prompt}") # noqa: T201 def _collect_user_input( diff --git a/libs/community/langchain_community/llms/llamacpp.py b/libs/community/langchain_community/llms/llamacpp.py index d46d741e7b..85acfb999e 100644 --- a/libs/community/langchain_community/llms/llamacpp.py +++ b/libs/community/langchain_community/llms/llamacpp.py @@ -333,7 +333,7 @@ class LlamaCpp(LLM): for chunk in llm.stream("Ask 'Hi, how are you?' 
like a pirate:'", stop=["'","\n"]): result = chunk["choices"][0] - print(result["text"], end='', flush=True) + print(result["text"], end='', flush=True) # noqa: T201 """ params = {**self._get_parameters(stop), **kwargs} diff --git a/libs/community/langchain_community/llms/textgen.py b/libs/community/langchain_community/llms/textgen.py index 67e69a4f82..9d1ce10833 100644 --- a/libs/community/langchain_community/llms/textgen.py +++ b/libs/community/langchain_community/llms/textgen.py @@ -219,7 +219,7 @@ class TextGen(LLM): if response.status_code == 200: result = response.json()["results"][0]["text"] else: - print(f"ERROR: response: {response}") + print(f"ERROR: response: {response}") # noqa: T201 result = "" return result @@ -265,7 +265,7 @@ class TextGen(LLM): if response.status_code == 200: result = response.json()["results"][0]["text"] else: - print(f"ERROR: response: {response}") + print(f"ERROR: response: {response}") # noqa: T201 result = "" return result @@ -303,7 +303,7 @@ class TextGen(LLM): ) for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'", stop=["'","\n"]): - print(chunk, end='', flush=True) + print(chunk, end='', flush=True) # noqa: T201 """ try: @@ -376,7 +376,7 @@ class TextGen(LLM): ) for chunk in llm.stream("Ask 'Hi, how are you?' 
like a pirate:'", stop=["'","\n"]): - print(chunk, end='', flush=True) + print(chunk, end='', flush=True) # noqa: T201 """ try: diff --git a/libs/community/langchain_community/llms/watsonxllm.py b/libs/community/langchain_community/llms/watsonxllm.py index d3225730c3..aaea7cd06f 100644 --- a/libs/community/langchain_community/llms/watsonxllm.py +++ b/libs/community/langchain_community/llms/watsonxllm.py @@ -166,24 +166,30 @@ class WatsonxLLM(BaseLLM): credentials = { "url": values["url"].get_secret_value() if values["url"] else None, - "apikey": values["apikey"].get_secret_value() - if values["apikey"] - else None, - "token": values["token"].get_secret_value() - if values["token"] - else None, - "password": values["password"].get_secret_value() - if values["password"] - else None, - "username": values["username"].get_secret_value() - if values["username"] - else None, - "instance_id": values["instance_id"].get_secret_value() - if values["instance_id"] - else None, - "version": values["version"].get_secret_value() - if values["version"] - else None, + "apikey": ( + values["apikey"].get_secret_value() if values["apikey"] else None + ), + "token": ( + values["token"].get_secret_value() if values["token"] else None + ), + "password": ( + values["password"].get_secret_value() + if values["password"] + else None + ), + "username": ( + values["username"].get_secret_value() + if values["username"] + else None + ), + "instance_id": ( + values["instance_id"].get_secret_value() + if values["instance_id"] + else None + ), + "version": ( + values["version"].get_secret_value() if values["version"] else None + ), } credentials_without_none_value = { key: value for key, value in credentials.items() if value is not None @@ -384,7 +390,7 @@ class WatsonxLLM(BaseLLM): response = watsonx_llm.stream("What is a molecule") for chunk in response: - print(chunk, end='') + print(chunk, end='') # noqa: T201 """ params = self._get_chat_params(stop=stop) for stream_resp in 
self.watsonx_model.generate_text_stream( diff --git a/libs/community/langchain_community/retrievers/google_vertex_ai_search.py b/libs/community/langchain_community/retrievers/google_vertex_ai_search.py index 622eea6871..f54d35c431 100644 --- a/libs/community/langchain_community/retrievers/google_vertex_ai_search.py +++ b/libs/community/langchain_community/retrievers/google_vertex_ai_search.py @@ -1,4 +1,5 @@ """Retriever wrapper for Google Vertex AI Search.""" + from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence @@ -90,9 +91,11 @@ class _BaseGoogleVertexAISearchRetriever(BaseModel): from google.api_core.client_options import ClientOptions return ClientOptions( - api_endpoint=f"{self.location_id}-discoveryengine.googleapis.com" - if self.location_id != "global" - else None + api_endpoint=( + f"{self.location_id}-discoveryengine.googleapis.com" + if self.location_id != "global" + else None + ) ) def _convert_structured_search_response( @@ -188,9 +191,9 @@ class _BaseGoogleVertexAISearchRetriever(BaseModel): ) if not documents: - print(f"No {chunk_type} could be found.") + print(f"No {chunk_type} could be found.") # noqa: T201 if chunk_type == "extractive_answers": - print( + print( # noqa: T201 "Make sure that your data store is using Advanced Website " "Indexing.\n" "https://cloud.google.com/generative-ai-app-builder/docs/about-advanced-features#advanced-website-indexing" # noqa: E501 diff --git a/libs/community/langchain_community/storage/redis.py b/libs/community/langchain_community/storage/redis.py index 0cccbf071b..5e1b4fa56c 100644 --- a/libs/community/langchain_community/storage/redis.py +++ b/libs/community/langchain_community/storage/redis.py @@ -32,7 +32,7 @@ class RedisStore(ByteStore): # Iterate over keys for key in redis_store.yield_keys(): - print(key) + print(key) # noqa: T201 """ def __init__( diff --git a/libs/community/langchain_community/tools/amadeus/flight_search.py 
b/libs/community/langchain_community/tools/amadeus/flight_search.py index 85c173c119..21d9e4bce7 100644 --- a/libs/community/langchain_community/tools/amadeus/flight_search.py +++ b/libs/community/langchain_community/tools/amadeus/flight_search.py @@ -105,7 +105,7 @@ class AmadeusFlightSearch(AmadeusBaseTool): adults=1, ) except ResponseError as error: - print(error) + print(error) # noqa: T201 # Generate output dictionary output = [] diff --git a/libs/community/langchain_community/tools/bearly/tool.py b/libs/community/langchain_community/tools/bearly/tool.py index 8f4e46c0fa..286e024f64 100644 --- a/libs/community/langchain_community/tools/bearly/tool.py +++ b/libs/community/langchain_community/tools/bearly/tool.py @@ -59,7 +59,7 @@ If you have any files outputted write them to "output/" relative to the executio path. Output can only be read from the directory, stdout, and stdin. \ Do not use things like plot.show() as it will \ not work instead write them out `output/` and a link to the file will be returned. 
\ -print() any output and results so you can capture the output.""" +print() any output and results so you can capture the output.""" # noqa: T201 class FileInfo(BaseModel): @@ -125,12 +125,16 @@ class BearlyInterpreterTool: headers={"Authorization": self.api_key}, ).json() return { - "stdout": base64.b64decode(resp["stdoutBasesixtyfour"]).decode() - if resp["stdoutBasesixtyfour"] - else "", - "stderr": base64.b64decode(resp["stderrBasesixtyfour"]).decode() - if resp["stderrBasesixtyfour"] - else "", + "stdout": ( + base64.b64decode(resp["stdoutBasesixtyfour"]).decode() + if resp["stdoutBasesixtyfour"] + else "" + ), + "stderr": ( + base64.b64decode(resp["stderrBasesixtyfour"]).decode() + if resp["stderrBasesixtyfour"] + else "" + ), "fileLinks": resp["fileLinks"], "exitCode": resp["exitCode"], } diff --git a/libs/community/langchain_community/tools/human/tool.py b/libs/community/langchain_community/tools/human/tool.py index 7b7911577c..e4987e44fa 100644 --- a/libs/community/langchain_community/tools/human/tool.py +++ b/libs/community/langchain_community/tools/human/tool.py @@ -8,8 +8,8 @@ from langchain_core.tools import BaseTool def _print_func(text: str) -> None: - print("\n") - print(text) + print("\n") # noqa: T201 + print(text) # noqa: T201 class HumanInputRun(BaseTool): diff --git a/libs/community/langchain_community/tools/multion/close_session.py b/libs/community/langchain_community/tools/multion/close_session.py index 8232d861e2..023c07539e 100644 --- a/libs/community/langchain_community/tools/multion/close_session.py +++ b/libs/community/langchain_community/tools/multion/close_session.py @@ -52,6 +52,6 @@ Note: SessionId must be received from previous Browser window creation.""" try: multion.close_session(sessionId) except Exception as e: - print(f"{e}, retrying...") + print(f"{e}, retrying...") # noqa: T201 except Exception as e: raise Exception(f"An error occurred: {e}") diff --git a/libs/community/langchain_community/tools/multion/update_session.py 
b/libs/community/langchain_community/tools/multion/update_session.py index fe92c36dd7..10fe4c9fa6 100644 --- a/libs/community/langchain_community/tools/multion/update_session.py +++ b/libs/community/langchain_community/tools/multion/update_session.py @@ -68,7 +68,7 @@ Note: sessionId must be received from previous Browser window creation.""" self.sessionId = sessionId return content except Exception as e: - print(f"{e}, retrying...") + print(f"{e}, retrying...") # noqa: T201 return {"error": f"{e}", "Response": "retrying..."} except Exception as e: raise Exception(f"An error occurred: {e}") diff --git a/libs/community/langchain_community/tools/office365/utils.py b/libs/community/langchain_community/tools/office365/utils.py index 127fb6dba1..f13c10dde1 100644 --- a/libs/community/langchain_community/tools/office365/utils.py +++ b/libs/community/langchain_community/tools/office365/utils.py @@ -1,4 +1,5 @@ """O365 tool utils.""" + from __future__ import annotations import logging @@ -66,7 +67,7 @@ def authenticate() -> Account: "https://graph.microsoft.com/MailboxSettings.ReadWrite", ] ): - print("Error: Could not authenticate") + print("Error: Could not authenticate") # noqa: T201 return None else: return account diff --git a/libs/community/langchain_community/tools/shell/tool.py b/libs/community/langchain_community/tools/shell/tool.py index 15445441a3..cb3e941ec6 100644 --- a/libs/community/langchain_community/tools/shell/tool.py +++ b/libs/community/langchain_community/tools/shell/tool.py @@ -84,7 +84,7 @@ class ShellTool(BaseTool): ) -> str: """Run commands and return final output.""" - print(f"Executing command:\n {commands}") + print(f"Executing command:\n {commands}") # noqa: T201 try: if self.ask_human_input: diff --git a/libs/community/langchain_community/utilities/clickup.py b/libs/community/langchain_community/utilities/clickup.py index ed81e7fc72..e13deef894 100644 --- a/libs/community/langchain_community/utilities/clickup.py +++ 
b/libs/community/langchain_community/utilities/clickup.py @@ -1,4 +1,5 @@ """Util that calls clickup.""" + import json import warnings from dataclasses import asdict, dataclass, fields @@ -308,10 +309,10 @@ class ClickupAPIWrapper(BaseModel): data = response.json() if "access_token" not in data: - print(f"Error: {data}") + print(f"Error: {data}") # noqa: T201 if "ECODE" in data and data["ECODE"] == "OAUTH_014": url = ClickupAPIWrapper.get_access_code_url(oauth_client_id) - print( + print( # noqa: T201 "You already used this code once. Generate a new one.", f"Our best guess for the url to get a new code is:\n{url}", ) diff --git a/libs/community/langchain_community/utilities/github.py b/libs/community/langchain_community/utilities/github.py index 25d931bbce..6c2950dcc9 100644 --- a/libs/community/langchain_community/utilities/github.py +++ b/libs/community/langchain_community/utilities/github.py @@ -1,4 +1,5 @@ """Util that calls GitHub.""" + from __future__ import annotations import json @@ -297,7 +298,7 @@ class GitHubAPIWrapper(BaseModel): new_branch_name = f"{proposed_branch_name}_v{i}" else: # Handle any other exceptions - print(f"Failed to create branch. Error: {e}") + print(f"Failed to create branch. Error: {e}") # noqa: T201 raise Exception( "Unable to create branch name from proposed_branch_name: " f"{proposed_branch_name}" @@ -427,7 +428,7 @@ class GitHubAPIWrapper(BaseModel): "download_url" ] else: - print(f"Failed to download file: {file.contents_url}, skipping") + print(f"Failed to download file: {file.contents_url}, skipping") # noqa: T201 continue file_content_response = requests.get(download_url) @@ -435,7 +436,7 @@ class GitHubAPIWrapper(BaseModel): # Save the content as a UTF-8 string file_content = file_content_response.text else: - print( + print( # noqa: T201 "Failed downloading file content " f"(Error {file_content_response.status_code}). 
Skipping" ) @@ -457,7 +458,7 @@ class GitHubAPIWrapper(BaseModel): ) total_tokens += file_tokens except Exception as e: - print(f"Error when reading files from a PR on github. {e}") + print(f"Error when reading files from a PR on github. {e}") # noqa: T201 page += 1 return pr_files diff --git a/libs/community/langchain_community/utilities/google_lens.py b/libs/community/langchain_community/utilities/google_lens.py index f8419bdb75..028c4092a5 100644 --- a/libs/community/langchain_community/utilities/google_lens.py +++ b/libs/community/langchain_community/utilities/google_lens.py @@ -1,4 +1,5 @@ """Util that calls Google Lens Search.""" + from typing import Any, Dict, Optional, cast import requests @@ -77,7 +78,7 @@ class GoogleLensAPIWrapper(BaseModel): "Reverse Image Search" + f"Link: {responseValue['reverse_image_search']['link']}\n" ) - print(xs) + print(xs) # noqa: T201 docs = [xs] diff --git a/libs/community/langchain_community/utilities/nvidia_riva.py b/libs/community/langchain_community/utilities/nvidia_riva.py index e56a676df1..cd43b5a14e 100644 --- a/libs/community/langchain_community/utilities/nvidia_riva.py +++ b/libs/community/langchain_community/utilities/nvidia_riva.py @@ -1,4 +1,5 @@ """A common module for NVIDIA Riva Runnables.""" + import asyncio import logging import pathlib @@ -73,7 +74,7 @@ class RivaAudioEncoding(str, Enum): with the following commands: ```python import riva.client - print(riva.client.AudioEncoding.keys()) + print(riva.client.AudioEncoding.keys()) # noqa: T201 ``` """ diff --git a/libs/community/langchain_community/utilities/pubmed.py b/libs/community/langchain_community/utilities/pubmed.py index 981799d9de..8b67f25973 100644 --- a/libs/community/langchain_community/utilities/pubmed.py +++ b/libs/community/langchain_community/utilities/pubmed.py @@ -144,7 +144,7 @@ class PubMedAPIWrapper(BaseModel): if e.code == 429 and retry < self.max_retry: # Too Many Requests errors # wait for an exponentially increasing amount of time - 
print( + print( # noqa: T201 f"Too Many Requests, " f"waiting for {self.sleep_time:.2f} seconds..." ) diff --git a/libs/community/langchain_community/utilities/searx_search.py b/libs/community/langchain_community/utilities/searx_search.py index 3ac9f2cf1c..0ddbf738c0 100644 --- a/libs/community/langchain_community/utilities/searx_search.py +++ b/libs/community/langchain_community/utilities/searx_search.py @@ -225,7 +225,7 @@ class SearxSearchWrapper(BaseModel): urllib3.disable_warnings() except ImportError as e: - print(e) + print(e) # noqa: T201 return v @@ -246,7 +246,7 @@ class SearxSearchWrapper(BaseModel): searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST") if not searx_host.startswith("http"): - print( + print( # noqa: T201 f"Warning: missing the url scheme on host \ ! assuming secure https://{searx_host} " ) diff --git a/libs/community/langchain_community/vectorstores/analyticdb.py b/libs/community/langchain_community/vectorstores/analyticdb.py index e72383eae4..767a836a06 100644 --- a/libs/community/langchain_community/vectorstores/analyticdb.py +++ b/libs/community/langchain_community/vectorstores/analyticdb.py @@ -347,7 +347,7 @@ class AnalyticDB(VectorStore): conn.execute(chunks_table.delete().where(delete_condition)) return True except Exception as e: - print("Delete operation failed:", str(e)) + print("Delete operation failed:", str(e)) # noqa: T201 return False @classmethod diff --git a/libs/community/langchain_community/vectorstores/jaguar.py b/libs/community/langchain_community/vectorstores/jaguar.py index a42cdf3641..755e7a345f 100644 --- a/libs/community/langchain_community/vectorstores/jaguar.py +++ b/libs/community/langchain_community/vectorstores/jaguar.py @@ -431,7 +431,7 @@ class Jaguar(VectorStore): def prt(self, msg: str) -> None: with open("/tmp/debugjaguar.log", "a") as file: - print(f"msg={msg}", file=file, flush=True) + print(f"msg={msg}", file=file, flush=True) # noqa: T201 def _parseMeta(self, nvmap: dict, filecol: 
str) -> Tuple[List[str], List[str], str]: filepath = "" diff --git a/libs/community/langchain_community/vectorstores/marqo.py b/libs/community/langchain_community/vectorstores/marqo.py index d9777533ad..a2331bc65d 100644 --- a/libs/community/langchain_community/vectorstores/marqo.py +++ b/libs/community/langchain_community/vectorstores/marqo.py @@ -437,10 +437,10 @@ class Marqo(VectorStore): try: client.create_index(index_name, settings_dict=index_settings or {}) if verbose: - print(f"Created {index_name} successfully.") + print(f"Created {index_name} successfully.") # noqa: T201 except Exception: if verbose: - print(f"Index {index_name} exists.") + print(f"Index {index_name} exists.") # noqa: T201 instance: Marqo = cls( client, diff --git a/libs/community/langchain_community/vectorstores/pgembedding.py b/libs/community/langchain_community/vectorstores/pgembedding.py index cbdf65c6c4..21d24b2f9f 100644 --- a/libs/community/langchain_community/vectorstores/pgembedding.py +++ b/libs/community/langchain_community/vectorstores/pgembedding.py @@ -207,9 +207,9 @@ class PGEmbedding(VectorStore): # Create the HNSW index session.execute(create_index_query) session.commit() - print("HNSW extension and index created successfully.") + print("HNSW extension and index created successfully.") # noqa: T201 except Exception as e: - print(f"Failed to create HNSW extension or index: {e}") + print(f"Failed to create HNSW extension or index: {e}") # noqa: T201 def delete_collection(self) -> None: self.logger.debug("Trying to delete collection") diff --git a/libs/community/langchain_community/vectorstores/semadb.py b/libs/community/langchain_community/vectorstores/semadb.py index 854f045d70..f80c046a22 100644 --- a/libs/community/langchain_community/vectorstores/semadb.py +++ b/libs/community/langchain_community/vectorstores/semadb.py @@ -146,7 +146,7 @@ class SemaDB(VectorStore): headers=self.headers, ) if response.status_code != 200: - print("HERE--", batch) + print("HERE--", batch) # 
noqa: T201 raise ValueError(f"Error adding points: {response.text}") failed_ranges = response.json()["failedRanges"] if len(failed_ranges) > 0: diff --git a/libs/community/langchain_community/vectorstores/starrocks.py b/libs/community/langchain_community/vectorstores/starrocks.py index 568daf0d4a..807a21f54d 100644 --- a/libs/community/langchain_community/vectorstores/starrocks.py +++ b/libs/community/langchain_community/vectorstores/starrocks.py @@ -38,7 +38,7 @@ def debug_output(s: Any) -> None: s: The message to print """ if DEBUG: - print(s) + print(s) # noqa: T201 def get_named_result(connection: Any, query: str) -> List[dict[str, Any]]: @@ -217,9 +217,11 @@ CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( for n in transac: n = ",".join( [ - f"'{self.escape_str(str(_n))}'" - if idx != embed_tuple_index - else f"array{str(_n)}" + ( + f"'{self.escape_str(str(_n))}'" + if idx != embed_tuple_index + else f"array{str(_n)}" + ) for (idx, _n) in enumerate(n) ] ) diff --git a/libs/community/langchain_community/vectorstores/vectara.py b/libs/community/langchain_community/vectorstores/vectara.py index 3e0ecd3bfe..edebc3f413 100644 --- a/libs/community/langchain_community/vectorstores/vectara.py +++ b/libs/community/langchain_community/vectorstores/vectara.py @@ -308,7 +308,7 @@ class Vectara(VectorStore): self._delete_doc(doc_id) self._index_doc(doc) elif success_str == "E_NO_PERMISSIONS": - print( + print( # noqa: T201 """No permissions to add document to Vectara. 
Check your corpus ID, customer ID and API key""" ) @@ -339,9 +339,11 @@ class Vectara(VectorStore): { "query": query, "start": 0, - "numResults": config.mmr_config.mmr_k - if config.mmr_config.is_enabled - else config.k, + "numResults": ( + config.mmr_config.mmr_k + if config.mmr_config.is_enabled + else config.k + ), "contextConfig": { "sentencesBefore": config.n_sentence_context, "sentencesAfter": config.n_sentence_context, diff --git a/libs/community/pyproject.toml b/libs/community/pyproject.toml index 72c3d7a785..ac6b9a3a99 100644 --- a/libs/community/pyproject.toml +++ b/libs/community/pyproject.toml @@ -270,6 +270,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/community/scripts/check_imports.py b/libs/community/scripts/check_imports.py index 462ab97ae2..825bea5b48 100644 --- a/libs/community/scripts/check_imports.py +++ b/libs/community/scripts/check_imports.py @@ -15,8 +15,8 @@ if __name__ == "__main__": SourceFileLoader(module_name, file).load_module() except Exception: has_failure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/community/tests/examples/hello_world.py b/libs/community/tests/examples/hello_world.py index 3f0294febb..c98e152dde 100644 --- a/libs/community/tests/examples/hello_world.py +++ b/libs/community/tests/examples/hello_world.py @@ -4,7 +4,7 @@ import sys def main() -> int: - print("Hello World!") + print("Hello World!") # noqa: T201 return 0 diff --git a/libs/community/tests/integration_tests/callbacks/test_openai_callback.py b/libs/community/tests/integration_tests/callbacks/test_openai_callback.py index 5112f4dd84..100021e746 100644 --- a/libs/community/tests/integration_tests/callbacks/test_openai_callback.py +++ b/libs/community/tests/integration_tests/callbacks/test_openai_callback.py @@ -1,4 +1,5 @@ """Integration tests for the langchain tracer module.""" + 
import asyncio from langchain_community.callbacks import get_openai_callback @@ -62,7 +63,7 @@ def test_openai_callback_agent() -> None: "Who is Olivia Wilde's boyfriend? " "What is his current age raised to the 0.23 power?" ) - print(f"Total Tokens: {cb.total_tokens}") - print(f"Prompt Tokens: {cb.prompt_tokens}") - print(f"Completion Tokens: {cb.completion_tokens}") - print(f"Total Cost (USD): ${cb.total_cost}") + print(f"Total Tokens: {cb.total_tokens}") # noqa: T201 + print(f"Prompt Tokens: {cb.prompt_tokens}") # noqa: T201 + print(f"Completion Tokens: {cb.completion_tokens}") # noqa: T201 + print(f"Total Cost (USD): ${cb.total_cost}") # noqa: T201 diff --git a/libs/community/tests/integration_tests/chat_models/test_baichuan.py b/libs/community/tests/integration_tests/chat_models/test_baichuan.py index 6caec6003e..b1322435f3 100644 --- a/libs/community/tests/integration_tests/chat_models/test_baichuan.py +++ b/libs/community/tests/integration_tests/chat_models/test_baichuan.py @@ -50,7 +50,7 @@ def test_chat_baichuan_with_kwargs() -> None: chat = ChatBaichuan() message = HumanMessage(content="百川192K API是什么时候上线的?") response = chat([message], temperature=0.88, top_p=0.7, with_search_enhance=True) - print(response) + print(response) # noqa: T201 assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_gpt_router.py b/libs/community/tests/integration_tests/chat_models/test_gpt_router.py index 785ad6ac0a..f6cc3b6cb7 100644 --- a/libs/community/tests/integration_tests/chat_models/test_gpt_router.py +++ b/libs/community/tests/integration_tests/chat_models/test_gpt_router.py @@ -1,4 +1,5 @@ """Test GPTRouter API wrapper.""" + from typing import List import pytest @@ -29,7 +30,7 @@ def test_api_key_masked_when_passed_via_constructor( gpt_router_api_base="https://example.com", gpt_router_api_key="secret-api-key", ) - print(gpt_router.gpt_router_api_key, end="") + 
print(gpt_router.gpt_router_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/chat_models/test_jinachat.py b/libs/community/tests/integration_tests/chat_models/test_jinachat.py index 0d704ce386..3a62cbc05f 100644 --- a/libs/community/tests/integration_tests/chat_models/test_jinachat.py +++ b/libs/community/tests/integration_tests/chat_models/test_jinachat.py @@ -24,7 +24,7 @@ def test_jinachat_api_key_masked_when_passed_from_env( """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("JINACHAT_API_KEY", "secret-api-key") llm = JinaChat() - print(llm.jinachat_api_key, end="") + print(llm.jinachat_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -35,7 +35,7 @@ def test_jinachat_api_key_masked_when_passed_via_constructor( ) -> None: """Test initialization with an API key provided via the initializer""" llm = JinaChat(jinachat_api_key="secret-api-key") - print(llm.jinachat_api_key, end="") + print(llm.jinachat_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/chat_models/test_konko.py b/libs/community/tests/integration_tests/chat_models/test_konko.py index 7287d5b11d..79feaba4a3 100644 --- a/libs/community/tests/integration_tests/chat_models/test_konko.py +++ b/libs/community/tests/integration_tests/chat_models/test_konko.py @@ -1,4 +1,5 @@ """Evaluate ChatKonko Interface.""" + from typing import Any, cast import pytest @@ -21,11 +22,11 @@ def test_konko_key_masked_when_passed_from_env( chat = ChatKonko() - print(chat.openai_api_key, end="") + print(chat.openai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" - print(chat.konko_api_key, end="") + print(chat.konko_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out 
== "**********" @@ -36,11 +37,11 @@ def test_konko_key_masked_when_passed_via_constructor( """Test initialization with an API key provided via the initializer""" chat = ChatKonko(openai_api_key="test-openai-key", konko_api_key="test-konko-key") - print(chat.konko_api_key, end="") + print(chat.konko_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" - print(chat.konko_secret_key, end="") # type: ignore[attr-defined] + print(chat.konko_secret_key, end="") # type: ignore[attr-defined] # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/chat_models/test_llama_edge.py b/libs/community/tests/integration_tests/chat_models/test_llama_edge.py index 92e674c0fb..08867d5374 100644 --- a/libs/community/tests/integration_tests/chat_models/test_llama_edge.py +++ b/libs/community/tests/integration_tests/chat_models/test_llama_edge.py @@ -46,7 +46,7 @@ def test_chat_wasm_service_streaming() -> None: output = "" for chunk in chat.stream(messages): - print(chunk.content, end="", flush=True) + print(chunk.content, end="", flush=True) # noqa: T201 output += chunk.content # type: ignore[operator] assert "Paris" in output diff --git a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py index caa5ef20eb..afe2811211 100644 --- a/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py +++ b/libs/community/tests/integration_tests/chat_models/test_qianfan_endpoint.py @@ -318,11 +318,11 @@ def test_qianfan_key_masked_when_passed_from_env( monkeypatch.setenv("QIANFAN_SK", "test-secret-key") chat = QianfanChatEndpoint() - print(chat.qianfan_ak, end="") + print(chat.qianfan_ak, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" - print(chat.qianfan_sk, end="") + print(chat.qianfan_sk, end="") # noqa: T201 captured = 
capsys.readouterr() assert captured.out == "**********" @@ -335,11 +335,11 @@ def test_qianfan_key_masked_when_passed_via_constructor( qianfan_ak="test-api-key", qianfan_sk="test-secret-key", ) - print(chat.qianfan_ak, end="") + print(chat.qianfan_ak, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" - print(chat.qianfan_sk, end="") + print(chat.qianfan_sk, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/chat_models/test_sparkllm.py b/libs/community/tests/integration_tests/chat_models/test_sparkllm.py index a219b88574..fcb3a7a7f9 100644 --- a/libs/community/tests/integration_tests/chat_models/test_sparkllm.py +++ b/libs/community/tests/integration_tests/chat_models/test_sparkllm.py @@ -22,7 +22,7 @@ def test_chat_spark_llm_with_domain() -> None: chat = ChatSparkLLM(spark_llm_domain="generalv3") message = HumanMessage(content="Hello") response = chat([message]) - print(response) + print(response) # noqa: T201 assert isinstance(response, AIMessage) assert isinstance(response.content, str) @@ -31,6 +31,6 @@ def test_chat_spark_llm_with_temperature() -> None: chat = ChatSparkLLM(temperature=0.9, top_k=2) message = HumanMessage(content="Hello") response = chat([message]) - print(response) + print(response) # noqa: T201 assert isinstance(response, AIMessage) assert isinstance(response.content, str) diff --git a/libs/community/tests/integration_tests/chat_models/test_tongyi.py b/libs/community/tests/integration_tests/chat_models/test_tongyi.py index b01f3d00fd..79cb484b79 100644 --- a/libs/community/tests/integration_tests/chat_models/test_tongyi.py +++ b/libs/community/tests/integration_tests/chat_models/test_tongyi.py @@ -19,7 +19,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = ChatTongyi(dashscope_api_key="secret-api-key") - print(llm.dashscope_api_key, end="") + print(llm.dashscope_api_key, 
end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/chat_models/test_vertexai.py b/libs/community/tests/integration_tests/chat_models/test_vertexai.py index b4b29a97d1..5311f46daf 100644 --- a/libs/community/tests/integration_tests/chat_models/test_vertexai.py +++ b/libs/community/tests/integration_tests/chat_models/test_vertexai.py @@ -7,6 +7,7 @@ pip install google-cloud-aiplatform>=1.35.0 Your end-user credentials would be used to make the calls (make sure you've run `gcloud auth login` first). """ + from typing import Optional from unittest.mock import MagicMock, Mock, patch @@ -288,7 +289,7 @@ def test_parse_examples_correct() -> None: def test_parse_examples_failes_wrong_sequence() -> None: with pytest.raises(ValueError) as exc_info: _ = _parse_examples([AIMessage(content="a")]) - print(str(exc_info.value)) + print(str(exc_info.value)) # noqa: T201 assert ( str(exc_info.value) == "Expect examples to have an even amount of messages, got 1." 
diff --git a/libs/community/tests/integration_tests/document_loaders/parsers/test_language.py b/libs/community/tests/integration_tests/document_loaders/parsers/test_language.py index c28789c7cd..c3deddb9fd 100644 --- a/libs/community/tests/integration_tests/document_loaders/parsers/test_language.py +++ b/libs/community/tests/integration_tests/document_loaders/parsers/test_language.py @@ -29,7 +29,7 @@ def test_language_loader_for_python() -> None: assert ( docs[0].page_content == """def main(): - print("Hello World!") + print("Hello World!") # noqa: T201 return 0""" ) @@ -67,7 +67,7 @@ def esprima_installed() -> bool: return True except Exception as e: - print(f"esprima not installed, skipping test {e}") + print(f"esprima not installed, skipping test {e}") # noqa: T201 return False diff --git a/libs/community/tests/integration_tests/document_loaders/test_arxiv.py b/libs/community/tests/integration_tests/document_loaders/test_arxiv.py index 55ad01b9c4..765290642a 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_arxiv.py +++ b/libs/community/tests/integration_tests/document_loaders/test_arxiv.py @@ -19,8 +19,8 @@ def test_load_success() -> None: docs = loader.load() assert len(docs) == 1 - print(docs[0].metadata) - print(docs[0].page_content) + print(docs[0].metadata) # noqa: T201 + print(docs[0].page_content) # noqa: T201 assert_docs(docs) @@ -53,7 +53,7 @@ def test_load_returns_full_set_of_metadata() -> None: assert set(doc.metadata).issuperset( {"Published", "Title", "Authors", "Summary"} ) - print(doc.metadata) + print(doc.metadata) # noqa: T201 assert len(set(doc.metadata)) > 4 diff --git a/libs/community/tests/integration_tests/document_loaders/test_blockchain.py b/libs/community/tests/integration_tests/document_loaders/test_blockchain.py index 8ea688df58..d101a255cf 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_blockchain.py +++ b/libs/community/tests/integration_tests/document_loaders/test_blockchain.py @@ 
-21,7 +21,7 @@ def test_get_nfts_valid_contract() -> None: ) result = BlockchainDocumentLoader(contract_address).load() - print("Tokens returned for valid contract: ", len(result)) + print("Tokens returned for valid contract: ", len(result)) # noqa: T201 assert len(result) == max_alchemy_tokens, ( f"Wrong number of NFTs returned. " @@ -43,7 +43,7 @@ def test_get_nfts_with_pagination() -> None: startToken=startToken, ).load() - print("Tokens returned for contract with offset: ", len(result)) + print("Tokens returned for contract with offset: ", len(result)) # noqa: T201 assert len(result) > 0, "No NFTs returned" @@ -57,7 +57,7 @@ def test_get_nfts_polygon() -> None: contract_address, BlockchainType.POLYGON_MAINNET ).load() - print("Tokens returned for contract on Polygon: ", len(result)) + print("Tokens returned for contract on Polygon: ", len(result)) # noqa: T201 assert len(result) > 0, "No NFTs returned" @@ -94,7 +94,7 @@ def test_get_all() -> None: end_time = time.time() - print( + print( # noqa: T201 f"Tokens returned for {contract_address} " f"contract: {len(result)} in {end_time - start_time} seconds" ) @@ -121,4 +121,4 @@ def test_get_all_10sec_timeout() -> None: end_time = time.time() - print("Execution took ", end_time - start_time, " seconds") + print("Execution took ", end_time - start_time, " seconds") # noqa: T201 diff --git a/libs/community/tests/integration_tests/document_loaders/test_couchbase.py b/libs/community/tests/integration_tests/document_loaders/test_couchbase.py index f400867962..de1808b34c 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_couchbase.py +++ b/libs/community/tests/integration_tests/document_loaders/test_couchbase.py @@ -31,11 +31,11 @@ class TestCouchbaseLoader(unittest.TestCase): metadata_fields=self.valid_metadata_fields, ) docs = loader.load() - print(docs) + print(docs) # noqa: T201 assert len(docs) > 0 # assuming the query returns at least one document for doc in docs: - print(doc) + print(doc) # 
noqa: T201 assert ( doc.page_content != "" ) # assuming that every document has page_content diff --git a/libs/community/tests/integration_tests/document_loaders/test_gitbook.py b/libs/community/tests/integration_tests/document_loaders/test_gitbook.py index fa2a6838bd..151f21be7e 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_gitbook.py +++ b/libs/community/tests/integration_tests/document_loaders/test_gitbook.py @@ -28,7 +28,7 @@ class TestGitbookLoader: loader = GitbookLoader( web_page, load_all_paths=load_all_paths, base_url=base_url ) - print(loader.__dict__) + print(loader.__dict__) # noqa: T201 assert ( loader.base_url == (base_url or web_page)[:-1] if (base_url or web_page).endswith("/") @@ -52,5 +52,5 @@ class TestGitbookLoader: def test_load_multiple_pages(self, web_page: str) -> None: loader = GitbookLoader(web_page, load_all_paths=True) result = loader.load() - print(len(result)) + print(len(result)) # noqa: T201 assert len(result) > 10 diff --git a/libs/community/tests/integration_tests/document_loaders/test_pdf.py b/libs/community/tests/integration_tests/document_loaders/test_pdf.py index 97c005b46d..f9d71ea200 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_pdf.py +++ b/libs/community/tests/integration_tests/document_loaders/test_pdf.py @@ -145,14 +145,14 @@ def test_mathpix_loader() -> None: docs = loader.load() assert len(docs) == 1 - print(docs[0].page_content) + print(docs[0].page_content) # noqa: T201 file_path = Path(__file__).parent.parent / "examples/layout-parser-paper.pdf" loader = MathpixPDFLoader(str(file_path)) docs = loader.load() assert len(docs) == 1 - print(docs[0].page_content) + print(docs[0].page_content) # noqa: T201 @pytest.mark.parametrize( @@ -230,7 +230,7 @@ def test_amazontextract_loader( else: loader = AmazonTextractPDFLoader(file_path, textract_features=features) docs = loader.load() - print(docs) + print(docs) # noqa: T201 assert len(docs) == docs_length diff --git 
a/libs/community/tests/integration_tests/document_loaders/test_pubmed.py b/libs/community/tests/integration_tests/document_loaders/test_pubmed.py index bea8afabba..48cda7774a 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_pubmed.py +++ b/libs/community/tests/integration_tests/document_loaders/test_pubmed.py @@ -1,4 +1,5 @@ """Integration test for PubMed API Wrapper.""" + from typing import List import pytest @@ -13,7 +14,7 @@ def test_load_success() -> None: """Test that returns the correct answer""" api_client = PubMedLoader(query="chatgpt") docs = api_client.load() - print(docs) + print(docs) # noqa: T201 assert len(docs) == api_client.load_max_docs == 3 assert_docs(docs) @@ -22,7 +23,7 @@ def test_load_success_load_max_docs() -> None: """Test that returns the correct answer""" api_client = PubMedLoader(query="chatgpt", load_max_docs=2) docs = api_client.load() - print(docs) + print(docs) # noqa: T201 assert len(docs) == api_client.load_max_docs == 2 assert_docs(docs) @@ -38,7 +39,7 @@ def test_load_no_content() -> None: """Returns a Document without content.""" api_client = PubMedLoader(query="37548971") docs = api_client.load() - print(docs) + print(docs) # noqa: T201 assert len(docs) > 0 assert docs[0].page_content == "" diff --git a/libs/community/tests/integration_tests/document_loaders/test_telegram.py b/libs/community/tests/integration_tests/document_loaders/test_telegram.py index df90af025f..ceb360e106 100644 --- a/libs/community/tests/integration_tests/document_loaders/test_telegram.py +++ b/libs/community/tests/integration_tests/document_loaders/test_telegram.py @@ -33,7 +33,7 @@ def test_telegram_channel_loader_parsing() -> None: docs = loader.load() assert len(docs) == 1 - print(docs[0].page_content) + print(docs[0].page_content) # noqa: T201 assert docs[0].page_content == ( "Hello, world!.\nLLMs are awesome! Langchain is great. Telegram is the best!." 
) diff --git a/libs/community/tests/integration_tests/embeddings/test_xinference.py b/libs/community/tests/integration_tests/embeddings/test_xinference.py index f09fe8fe4f..6c52fa9898 100644 --- a/libs/community/tests/integration_tests/embeddings/test_xinference.py +++ b/libs/community/tests/integration_tests/embeddings/test_xinference.py @@ -1,4 +1,5 @@ """Test Xinference embeddings.""" + import time from typing import AsyncGenerator, Tuple @@ -17,7 +18,7 @@ async def setup() -> AsyncGenerator[Tuple[str, str], None]: pool = await create_worker_actor_pool( f"test://127.0.0.1:{xo.utils.get_next_port()}" ) - print(f"Pool running on localhost:{pool.external_address}") + print(f"Pool running on localhost:{pool.external_address}") # noqa: T201 endpoint = await start_supervisor_components( pool.external_address, "127.0.0.1", xo.utils.get_next_port() diff --git a/libs/community/tests/integration_tests/examples/hello_world.py b/libs/community/tests/integration_tests/examples/hello_world.py index 3f0294febb..c98e152dde 100644 --- a/libs/community/tests/integration_tests/examples/hello_world.py +++ b/libs/community/tests/integration_tests/examples/hello_world.py @@ -4,7 +4,7 @@ import sys def main() -> int: - print("Hello World!") + print("Hello World!") # noqa: T201 return 0 diff --git a/libs/community/tests/integration_tests/llms/test_arcee.py b/libs/community/tests/integration_tests/llms/test_arcee.py index b3b7620cd0..e195a7818d 100644 --- a/libs/community/tests/integration_tests/llms/test_arcee.py +++ b/libs/community/tests/integration_tests/llms/test_arcee.py @@ -41,7 +41,7 @@ def test_api_key_masked_when_passed_via_constructor( arcee_api_url="https://localhost", arcee_api_version="version", ) - print(arcee_without_env_var.arcee_api_key, end="") + print(arcee_without_env_var.arcee_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert "**********" == captured.out @@ -64,7 +64,7 @@ def test_api_key_masked_when_passed_from_env( 
arcee_api_url="https://localhost", arcee_api_version="version", ) - print(arcee_with_env_var.arcee_api_key, end="") + print(arcee_with_env_var.arcee_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert "**********" == captured.out diff --git a/libs/community/tests/integration_tests/llms/test_aviary.py b/libs/community/tests/integration_tests/llms/test_aviary.py index dba183e6ed..2c8fd96f52 100644 --- a/libs/community/tests/integration_tests/llms/test_aviary.py +++ b/libs/community/tests/integration_tests/llms/test_aviary.py @@ -7,5 +7,5 @@ def test_aviary_call() -> None: """Test valid call to Anyscale.""" llm = Aviary() output = llm("Say bar:") - print(f"llm answer:\n{output}") + print(f"llm answer:\n{output}") # noqa: T201 assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py b/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py index 9bed8b76bd..ca89d54cde 100644 --- a/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_huggingface_endpoint.py @@ -19,7 +19,7 @@ def test_huggingface_endpoint_text_generation() -> None: endpoint_url="", task="text-generation", model_kwargs={"max_new_tokens": 10} ) output = llm("Say foo:") - print(output) + print(output) # noqa: T201 assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_nlpcloud.py b/libs/community/tests/integration_tests/llms/test_nlpcloud.py index 25aff55f75..5806f4d21a 100644 --- a/libs/community/tests/integration_tests/llms/test_nlpcloud.py +++ b/libs/community/tests/integration_tests/llms/test_nlpcloud.py @@ -37,7 +37,7 @@ def test_nlpcloud_api_key(monkeypatch: MonkeyPatch, capsys: CaptureFixture) -> N assert cast(SecretStr, llm.nlpcloud_api_key).get_secret_value() == "secret-api-key" - print(llm.nlpcloud_api_key, end="") + print(llm.nlpcloud_api_key, end="") # noqa: T201 captured = capsys.readouterr() 
assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py b/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py index 0ad291257a..fcdfc749ba 100644 --- a/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py +++ b/libs/community/tests/integration_tests/llms/test_octoai_endpoint.py @@ -25,7 +25,7 @@ def test_octoai_endpoint_text_generation() -> None: ) output = llm("Which state is Los Angeles in?") - print(output) + print(output) # noqa: T201 assert isinstance(output, str) diff --git a/libs/community/tests/integration_tests/llms/test_petals.py b/libs/community/tests/integration_tests/llms/test_petals.py index 774b56fdef..4fd76753a0 100644 --- a/libs/community/tests/integration_tests/llms/test_petals.py +++ b/libs/community/tests/integration_tests/llms/test_petals.py @@ -15,7 +15,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = Petals(huggingface_api_key="secret-api-key") - print(llm.huggingface_api_key, end="") + print(llm.huggingface_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/llms/test_predibase.py b/libs/community/tests/integration_tests/llms/test_predibase.py index 2f8b923386..5e1d19e084 100644 --- a/libs/community/tests/integration_tests/llms/test_predibase.py +++ b/libs/community/tests/integration_tests/llms/test_predibase.py @@ -13,7 +13,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = Predibase(predibase_api_key="secret-api-key") - print(llm.predibase_api_key, end="") + print(llm.predibase_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/llms/test_tongyi.py b/libs/community/tests/integration_tests/llms/test_tongyi.py index 78a2260455..7e918d87be 100644 --- 
a/libs/community/tests/integration_tests/llms/test_tongyi.py +++ b/libs/community/tests/integration_tests/llms/test_tongyi.py @@ -1,4 +1,5 @@ """Test Tongyi API wrapper.""" + from langchain_core.outputs import LLMResult from langchain_community.llms.tongyi import Tongyi @@ -23,6 +24,6 @@ def test_tongyi_generate_stream() -> None: """Test valid call to tongyi.""" llm = Tongyi(streaming=True) output = llm.generate(["who are you"]) - print(output) + print(output) # noqa: T201 assert isinstance(output, LLMResult) assert isinstance(output.generations, list) diff --git a/libs/community/tests/integration_tests/llms/test_volcengine_maas.py b/libs/community/tests/integration_tests/llms/test_volcengine_maas.py index 7d3094e627..f6bef52690 100644 --- a/libs/community/tests/integration_tests/llms/test_volcengine_maas.py +++ b/libs/community/tests/integration_tests/llms/test_volcengine_maas.py @@ -28,7 +28,7 @@ def test_api_key_masked_when_passed_via_constructor( volc_engine_maas_ak="secret-volc-ak", volc_engine_maas_sk="secret-volc-sk", ) - print(llm.volc_engine_maas_ak, end="") + print(llm.volc_engine_maas_ak, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/integration_tests/llms/test_xinference.py b/libs/community/tests/integration_tests/llms/test_xinference.py index 227db37c1d..3be912b4a4 100644 --- a/libs/community/tests/integration_tests/llms/test_xinference.py +++ b/libs/community/tests/integration_tests/llms/test_xinference.py @@ -1,4 +1,5 @@ """Test Xinference wrapper.""" + import time from typing import AsyncGenerator, Tuple @@ -17,7 +18,7 @@ async def setup() -> AsyncGenerator[Tuple[str, str], None]: pool = await create_worker_actor_pool( f"test://127.0.0.1:{xo.utils.get_next_port()}" ) - print(f"Pool running on localhost:{pool.external_address}") + print(f"Pool running on localhost:{pool.external_address}") # noqa: T201 endpoint = await start_supervisor_components( pool.external_address, 
"127.0.0.1", xo.utils.get_next_port() diff --git a/libs/community/tests/integration_tests/utilities/test_arxiv.py b/libs/community/tests/integration_tests/utilities/test_arxiv.py index 73e95b653c..a0799c9c3d 100644 --- a/libs/community/tests/integration_tests/utilities/test_arxiv.py +++ b/libs/community/tests/integration_tests/utilities/test_arxiv.py @@ -1,4 +1,5 @@ """Integration test for Arxiv API Wrapper.""" + from typing import Any, List import pytest @@ -136,7 +137,7 @@ def test_load_returns_full_set_of_metadata() -> None: assert set(doc.metadata).issuperset( {"Published", "Title", "Authors", "Summary"} ) - print(doc.metadata) + print(doc.metadata) # noqa: T201 assert len(set(doc.metadata)) > 4 diff --git a/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py b/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py index d8b1a6e165..d8e20b81a2 100644 --- a/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py +++ b/libs/community/tests/integration_tests/utilities/test_duckduckdgo_search_api.py @@ -12,7 +12,7 @@ def ddg_installed() -> bool: return True except Exception as e: - print(f"duckduckgo not installed, skipping test {e}") + print(f"duckduckgo not installed, skipping test {e}") # noqa: T201 return False @@ -21,7 +21,7 @@ def test_ddg_search_tool() -> None: keywords = "Bella Ciao" tool = DuckDuckGoSearchRun() result = tool(keywords) - print(result) + print(result) # noqa: T201 assert len(result.split()) > 20 @@ -30,5 +30,5 @@ def test_ddg_search_news_tool() -> None: keywords = "Tesla" tool = DuckDuckGoSearchResults(source="news") result = tool(keywords) - print(result) + print(result) # noqa: T201 assert len(result.split()) > 20 diff --git a/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py b/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py index c40e703441..b32d05e76b 100644 --- 
a/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py +++ b/libs/community/tests/integration_tests/utilities/test_googlesearch_api.py @@ -1,4 +1,5 @@ """Integration test for Google Search API Wrapper.""" + from langchain_community.utilities.google_search import GoogleSearchAPIWrapper @@ -15,7 +16,7 @@ def test_no_result_call() -> None: output = search.run( "NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL" ) - print(type(output)) + print(type(output)) # noqa: T201 assert "No good Google Search Result was found" == output diff --git a/libs/community/tests/integration_tests/utilities/test_powerbi_api.py b/libs/community/tests/integration_tests/utilities/test_powerbi_api.py index 842b3525eb..acc3adca64 100644 --- a/libs/community/tests/integration_tests/utilities/test_powerbi_api.py +++ b/libs/community/tests/integration_tests/utilities/test_powerbi_api.py @@ -1,4 +1,5 @@ """Integration test for POWERBI API Wrapper.""" + import pytest from langchain_core.utils import get_from_env @@ -12,7 +13,7 @@ def azure_installed() -> bool: return True except Exception as e: - print(f"azure not installed, skipping test {e}") + print(f"azure not installed, skipping test {e}") # noqa: T201 return False diff --git a/libs/community/tests/integration_tests/vectorstores/test_analyticdb.py b/libs/community/tests/integration_tests/vectorstores/test_analyticdb.py index 41b40c3b79..e58efcf07a 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_analyticdb.py +++ b/libs/community/tests/integration_tests/vectorstores/test_analyticdb.py @@ -1,4 +1,5 @@ """Test PGVector functionality.""" + import os from typing import List @@ -125,7 +126,7 @@ def test_analyticdb_with_filter_distant_match() -> None: pre_delete_collection=True, ) output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"}) - print(output) + print(output) # noqa: T201 assert output == [(Document(page_content="baz", metadata={"page": "2"}), 
4.0)] @@ -160,7 +161,7 @@ def test_analyticdb_delete() -> None: pre_delete_collection=True, ) output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"}) - print(output) + print(output) # noqa: T201 assert output == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)] docsearch.delete(ids=ids) output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"}) diff --git a/libs/community/tests/integration_tests/vectorstores/test_astradb.py b/libs/community/tests/integration_tests/vectorstores/test_astradb.py index f652342e5c..7bc4cbe03f 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_astradb.py +++ b/libs/community/tests/integration_tests/vectorstores/test_astradb.py @@ -74,7 +74,7 @@ class ParserEmbeddings(Embeddings): assert len(vals) == self.dimension return vals except Exception: - print(f'[ParserEmbeddings] Returning a moot vector for "{text}"') + print(f'[ParserEmbeddings] Returning a moot vector for "{text}"') # noqa: T201 return [0.0] * self.dimension async def aembed_query(self, text: str) -> List[float]: @@ -346,7 +346,7 @@ class TestAstraDB: res3 = store_someemb.similarity_search_with_score_id( query="cc", k=1, filter={"k": "c_new"} ) - print(str(res3)) + print(str(res3)) # noqa: T201 doc3, score3, id3 = res3[0] assert doc3.page_content == "cc" assert doc3.metadata == {"k": "c_new", "ord": 102} @@ -404,7 +404,7 @@ class TestAstraDB: res3 = await store_someemb.asimilarity_search_with_score_id( query="cc", k=1, filter={"k": "c_new"} ) - print(str(res3)) + print(str(res3)) # noqa: T201 doc3, score3, id3 = res3[0] assert doc3.page_content == "cc" assert doc3.metadata == {"k": "c_new", "ord": 102} diff --git a/libs/community/tests/integration_tests/vectorstores/test_bigquery_vector_search.py b/libs/community/tests/integration_tests/vectorstores/test_bigquery_vector_search.py index d5146258c5..b88df92498 100644 --- 
a/libs/community/tests/integration_tests/vectorstores/test_bigquery_vector_search.py +++ b/libs/community/tests/integration_tests/vectorstores/test_bigquery_vector_search.py @@ -79,7 +79,7 @@ class TestBigQueryVectorStore: def test_semantic_search(self, store: BigQueryVectorSearch) -> None: """Test on semantic similarity.""" docs = store.similarity_search("food", k=4) - print(docs) + print(docs) # noqa: T201 kinds = [d.metadata["kind"] for d in docs] assert "fruit" in kinds assert "treat" in kinds diff --git a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py index ff13a96dc8..8069952ed6 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py +++ b/libs/community/tests/integration_tests/vectorstores/test_elasticsearch.py @@ -1,4 +1,5 @@ """Test ElasticSearch functionality.""" + import logging import os import re @@ -88,9 +89,9 @@ class TestElasticsearch: for pipeline_id, _ in response.items(): try: es.ingest.delete_pipeline(id=pipeline_id) - print(f"Deleted pipeline: {pipeline_id}") + print(f"Deleted pipeline: {pipeline_id}") # noqa: T201 except Exception as e: - print(f"Pipeline error: {e}") + print(f"Pipeline error: {e}") # noqa: T201 except Exception: pass diff --git a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py index 6e6f5b44a6..fb9ab6ec19 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_hanavector.py +++ b/libs/community/tests/integration_tests/vectorstores/test_hanavector.py @@ -1,4 +1,5 @@ """Test HANA vectorstore functionality.""" + import os import random from typing import List @@ -230,7 +231,7 @@ def test_hanavector_table_with_wrong_typed_columns() -> None: ) exception_occured = False except AttributeError as err: - print(err) + print(err) # noqa: T201 exception_occured = True assert exception_occured diff 
--git a/libs/community/tests/integration_tests/vectorstores/test_hippo.py b/libs/community/tests/integration_tests/vectorstores/test_hippo.py index 2d8d94c20d..78d62b8e7b 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_hippo.py +++ b/libs/community/tests/integration_tests/vectorstores/test_hippo.py @@ -1,4 +1,5 @@ """Test Hippo functionality.""" + from typing import List, Optional from langchain_core.documents import Document @@ -32,7 +33,7 @@ def test_hippo_add_extra() -> None: docsearch.add_texts(texts, metadatas) output = docsearch.similarity_search("foo", k=1) - print(output) + print(output) # noqa: T201 assert len(output) == 1 diff --git a/libs/community/tests/integration_tests/vectorstores/test_llm_rails.py b/libs/community/tests/integration_tests/vectorstores/test_llm_rails.py index 71c1c73d64..3dbe8d118b 100644 --- a/libs/community/tests/integration_tests/vectorstores/test_llm_rails.py +++ b/libs/community/tests/integration_tests/vectorstores/test_llm_rails.py @@ -23,7 +23,7 @@ def test_llm_rails_add_documents() -> None: # test without filter output1 = docsearch.similarity_search("large language model", k=1) - print(output1) + print(output1) # noqa: T201 assert len(output1) == 1 assert output1[0].page_content == "large language model" diff --git a/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py b/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py index 0cbe2ab9b1..d5dbb716d2 100644 --- a/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py +++ b/libs/community/tests/unit_tests/chat_models/test_azureml_endpoint.py @@ -49,7 +49,7 @@ class TestAzureMLChatOnlineEndpoint: ) -> None: """Test that the API key is masked""" azure_chat = request.getfixturevalue(fixture_name) - print(azure_chat.endpoint_api_key, end="") + print(azure_chat.endpoint_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert ( (str(azure_chat.endpoint_api_key) == "**********") diff --git 
a/libs/community/tests/unit_tests/chat_models/test_baichuan.py b/libs/community/tests/unit_tests/chat_models/test_baichuan.py index f5664c88ff..f027cbb9c5 100644 --- a/libs/community/tests/unit_tests/chat_models/test_baichuan.py +++ b/libs/community/tests/unit_tests/chat_models/test_baichuan.py @@ -91,7 +91,7 @@ def test_baichuan_key_masked_when_passed_from_env( monkeypatch.setenv("BAICHUAN_API_KEY", "test-api-key") chat = ChatBaichuan() - print(chat.baichuan_api_key, end="") + print(chat.baichuan_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -101,7 +101,7 @@ def test_baichuan_key_masked_when_passed_via_constructor( ) -> None: """Test initialization with an API key provided via the initializer""" chat = ChatBaichuan(baichuan_api_key="test-api-key") - print(chat.baichuan_api_key, end="") + print(chat.baichuan_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/chat_models/test_fireworks.py b/libs/community/tests/unit_tests/chat_models/test_fireworks.py index fbad2356a4..211721bc08 100644 --- a/libs/community/tests/unit_tests/chat_models/test_fireworks.py +++ b/libs/community/tests/unit_tests/chat_models/test_fireworks.py @@ -1,4 +1,5 @@ """Test Fireworks chat model""" + import sys import pytest @@ -22,7 +23,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = ChatFireworks(fireworks_api_key="secret-api-key") - print(llm.fireworks_api_key, end="") + print(llm.fireworks_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/document_loaders/test_mediawikidump.py b/libs/community/tests/unit_tests/document_loaders/test_mediawikidump.py index 6697df2a91..6bb3ae91e5 100644 --- a/libs/community/tests/unit_tests/document_loaders/test_mediawikidump.py +++ 
b/libs/community/tests/unit_tests/document_loaders/test_mediawikidump.py @@ -44,5 +44,5 @@ def test_multiple_namespaces() -> None: stop_on_error=False, ) documents = loader.load() - [print(doc) for doc in documents] + [print(doc) for doc in documents] # noqa: T201 assert len(documents) == 2 diff --git a/libs/community/tests/unit_tests/embeddings/test_edenai.py b/libs/community/tests/unit_tests/embeddings/test_edenai.py index 6616f3ef13..ba2e05c72e 100644 --- a/libs/community/tests/unit_tests/embeddings/test_edenai.py +++ b/libs/community/tests/unit_tests/embeddings/test_edenai.py @@ -15,7 +15,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = EdenAiEmbeddings(edenai_api_key="secret-api-key") - print(llm.edenai_api_key, end="") + print(llm.edenai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/embeddings/test_embaas.py b/libs/community/tests/unit_tests/embeddings/test_embaas.py index 05420ff42f..b62092a2aa 100644 --- a/libs/community/tests/unit_tests/embeddings/test_embaas.py +++ b/libs/community/tests/unit_tests/embeddings/test_embaas.py @@ -15,7 +15,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = EmbaasEmbeddings(embaas_api_key="secret-api-key") - print(llm.embaas_api_key, end="") + print(llm.embaas_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/embeddings/test_llm_rails.py b/libs/community/tests/unit_tests/embeddings/test_llm_rails.py index 05a40c726e..0715efcb4c 100644 --- a/libs/community/tests/unit_tests/embeddings/test_llm_rails.py +++ b/libs/community/tests/unit_tests/embeddings/test_llm_rails.py @@ -15,7 +15,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = LLMRailsEmbeddings(api_key="secret-api-key") - print(llm.api_key, 
end="") + print(llm.api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_ai21.py b/libs/community/tests/unit_tests/llms/test_ai21.py index 60146a973d..4e111fe1ea 100644 --- a/libs/community/tests/unit_tests/llms/test_ai21.py +++ b/libs/community/tests/unit_tests/llms/test_ai21.py @@ -1,4 +1,5 @@ """Test AI21 llm""" + from typing import cast from langchain_core.pydantic_v1 import SecretStr @@ -18,7 +19,7 @@ def test_api_key_masked_when_passed_from_env( """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("AI21_API_KEY", "secret-api-key") llm = AI21() - print(llm.ai21_api_key, end="") + print(llm.ai21_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -29,7 +30,7 @@ def test_api_key_masked_when_passed_via_constructor( ) -> None: """Test initialization with an API key provided via the initializer""" llm = AI21(ai21_api_key="secret-api-key") - print(llm.ai21_api_key, end="") + print(llm.ai21_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_aleph_alpha.py b/libs/community/tests/unit_tests/llms/test_aleph_alpha.py index c08d0e7292..da3ac1f992 100644 --- a/libs/community/tests/unit_tests/llms/test_aleph_alpha.py +++ b/libs/community/tests/unit_tests/llms/test_aleph_alpha.py @@ -18,7 +18,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = AlephAlpha(aleph_alpha_api_key="secret-api-key") - print(llm.aleph_alpha_api_key, end="") + print(llm.aleph_alpha_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -30,7 +30,7 @@ def test_api_key_masked_when_passed_from_env( ) -> None: monkeypatch.setenv("ALEPH_ALPHA_API_KEY", "secret-api-key") llm = AlephAlpha() - print(llm.aleph_alpha_api_key, end="") + 
print(llm.aleph_alpha_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_anyscale.py b/libs/community/tests/unit_tests/llms/test_anyscale.py index f4f6845942..155c4e9c10 100644 --- a/libs/community/tests/unit_tests/llms/test_anyscale.py +++ b/libs/community/tests/unit_tests/llms/test_anyscale.py @@ -1,4 +1,5 @@ """Test Anyscale llm""" + import pytest from langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture, MonkeyPatch @@ -19,7 +20,7 @@ def test_api_key_masked_when_passed_from_env( """Test initialization with an API key provided via an env variable""" monkeypatch.setenv("ANYSCALE_API_KEY", "secret-api-key") llm = Anyscale(anyscale_api_base="test") - print(llm.anyscale_api_key, end="") + print(llm.anyscale_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -31,7 +32,7 @@ def test_api_key_masked_when_passed_via_constructor( ) -> None: """Test initialization with an API key provided via the initializer""" llm = Anyscale(anyscale_api_key="secret-api-key", anyscale_api_base="test") - print(llm.anyscale_api_key, end="") + print(llm.anyscale_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_cerebriumai.py b/libs/community/tests/unit_tests/llms/test_cerebriumai.py index ff9da5745f..7462b612e5 100644 --- a/libs/community/tests/unit_tests/llms/test_cerebriumai.py +++ b/libs/community/tests/unit_tests/llms/test_cerebriumai.py @@ -1,6 +1,5 @@ """Test CerebriumAI llm""" - from langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture, MonkeyPatch @@ -14,7 +13,7 @@ def test_api_key_is_secret_string() -> None: def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None: llm = CerebriumAI(cerebriumai_api_key="secret-api-key") - print(llm.cerebriumai_api_key, 
end="") + print(llm.cerebriumai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -26,7 +25,7 @@ def test_api_key_masked_when_passed_from_env( ) -> None: monkeypatch.setenv("CEREBRIUMAI_API_KEY", "secret-api-key") llm = CerebriumAI() - print(llm.cerebriumai_api_key, end="") + print(llm.cerebriumai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_fireworks.py b/libs/community/tests/unit_tests/llms/test_fireworks.py index 671016f570..1487fdc0fb 100644 --- a/libs/community/tests/unit_tests/llms/test_fireworks.py +++ b/libs/community/tests/unit_tests/llms/test_fireworks.py @@ -1,4 +1,5 @@ """Test Fireworks chat model""" + import sys import pytest @@ -22,7 +23,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = Fireworks(fireworks_api_key="secret-api-key") - print(llm.fireworks_api_key, end="") + print(llm.fireworks_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_forefrontai.py b/libs/community/tests/unit_tests/llms/test_forefrontai.py index 567b78180c..e9cc97c0c5 100644 --- a/libs/community/tests/unit_tests/llms/test_forefrontai.py +++ b/libs/community/tests/unit_tests/llms/test_forefrontai.py @@ -1,4 +1,5 @@ """Test ForeFrontAI LLM""" + from typing import cast from langchain_core.pydantic_v1 import SecretStr @@ -19,7 +20,7 @@ def test_forefrontai_api_key_masked_when_passed_from_env( """Test that the API key is masked when passed from an environment variable.""" monkeypatch.setenv("FOREFRONTAI_API_KEY", "secret-api-key") llm = ForefrontAI(temperature=0.2) - print(llm.forefrontai_api_key, end="") + print(llm.forefrontai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -33,7 +34,7 @@ def 
test_forefrontai_api_key_masked_when_passed_via_constructor( forefrontai_api_key="secret-api-key", temperature=0.2, ) - print(llm.forefrontai_api_key, end="") + print(llm.forefrontai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_minimax.py b/libs/community/tests/unit_tests/llms/test_minimax.py index 24e7fa972c..f92a140239 100644 --- a/libs/community/tests/unit_tests/llms/test_minimax.py +++ b/libs/community/tests/unit_tests/llms/test_minimax.py @@ -1,4 +1,5 @@ """Test Minimax llm""" + from typing import cast from langchain_core.pydantic_v1 import SecretStr @@ -19,7 +20,7 @@ def test_api_key_masked_when_passed_from_env( monkeypatch.setenv("MINIMAX_API_KEY", "secret-api-key") monkeypatch.setenv("MINIMAX_GROUP_ID", "group_id") llm = Minimax() - print(llm.minimax_api_key, end="") + print(llm.minimax_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -30,7 +31,7 @@ def test_api_key_masked_when_passed_via_constructor( ) -> None: """Test initialization with an API key provided via the initializer""" llm = Minimax(minimax_api_key="secret-api-key", minimax_group_id="group_id") - print(llm.minimax_api_key, end="") + print(llm.minimax_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_pipelineai.py b/libs/community/tests/unit_tests/llms/test_pipelineai.py index 63f750d0b2..afa91752ab 100644 --- a/libs/community/tests/unit_tests/llms/test_pipelineai.py +++ b/libs/community/tests/unit_tests/llms/test_pipelineai.py @@ -13,7 +13,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = PipelineAI(pipeline_api_key="secret-api-key") - print(llm.pipeline_api_key, end="") + print(llm.pipeline_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff 
--git a/libs/community/tests/unit_tests/llms/test_stochasticai.py b/libs/community/tests/unit_tests/llms/test_stochasticai.py index bf3e428c6b..1fa0cfd243 100644 --- a/libs/community/tests/unit_tests/llms/test_stochasticai.py +++ b/libs/community/tests/unit_tests/llms/test_stochasticai.py @@ -13,7 +13,7 @@ def test_api_key_masked_when_passed_via_constructor( capsys: CaptureFixture, ) -> None: llm = StochasticAI(stochasticai_api_key="secret-api-key") - print(llm.stochasticai_api_key, end="") + print(llm.stochasticai_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_symblai_nebula.py b/libs/community/tests/unit_tests/llms/test_symblai_nebula.py index fb75a6ad69..92ec7484ef 100644 --- a/libs/community/tests/unit_tests/llms/test_symblai_nebula.py +++ b/libs/community/tests/unit_tests/llms/test_symblai_nebula.py @@ -17,13 +17,13 @@ def test_api_key_masked_when_passed_from_env( ) -> None: monkeypatch.setenv("NEBULA_API_KEY", "secret-api-key") llm = Nebula() - print(llm.nebula_api_key, end="") + print(llm.nebula_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None: llm = Nebula(nebula_api_key="secret-api-key") - print(llm.nebula_api_key, end="") + print(llm.nebula_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/community/tests/unit_tests/llms/test_together.py b/libs/community/tests/unit_tests/llms/test_together.py index 772cde4050..00da416b04 100644 --- a/libs/community/tests/unit_tests/llms/test_together.py +++ b/libs/community/tests/unit_tests/llms/test_together.py @@ -1,4 +1,5 @@ """Test Together LLM""" + from typing import cast from langchain_core.pydantic_v1 import SecretStr @@ -28,7 +29,7 @@ def test_together_api_key_masked_when_passed_from_env( temperature=0.2, 
max_tokens=250, ) - print(llm.together_api_key, end="") + print(llm.together_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -44,7 +45,7 @@ def test_together_api_key_masked_when_passed_via_constructor( temperature=0.2, max_tokens=250, ) - print(llm.together_api_key, end="") + print(llm.together_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/core/langchain_core/callbacks/stdout.py b/libs/core/langchain_core/callbacks/stdout.py index f0755a2b1e..e129792a01 100644 --- a/libs/core/langchain_core/callbacks/stdout.py +++ b/libs/core/langchain_core/callbacks/stdout.py @@ -1,4 +1,5 @@ """Callback Handler that prints to std out.""" + from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Optional @@ -22,11 +23,11 @@ class StdOutCallbackHandler(BaseCallbackHandler): ) -> None: """Print out that we are entering a chain.""" class_name = serialized.get("name", serialized.get("id", [""])[-1]) - print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m") + print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m") # noqa: T201 def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Print out that we finished a chain.""" - print("\n\033[1m> Finished chain.\033[0m") + print("\n\033[1m> Finished chain.\033[0m") # noqa: T201 def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py index a4af8e558f..42bd5e0869 100644 --- a/libs/core/langchain_core/messages/base.py +++ b/libs/core/langchain_core/messages/base.py @@ -56,7 +56,7 @@ class BaseMessage(Serializable): return f"{title}\n\n{self.content}" def pretty_print(self) -> None: - print(self.pretty_repr(html=is_interactive_env())) + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 def merge_content( diff --git 
a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index 605470c023..86f261b51f 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -1,4 +1,5 @@ """Chat prompt template.""" + from __future__ import annotations from abc import ABC, abstractmethod @@ -79,7 +80,7 @@ class BaseMessagePromptTemplate(Serializable, ABC): raise NotImplementedError def pretty_print(self) -> None: - print(self.pretty_repr(html=is_interactive_env())) + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 def __add__(self, other: Any) -> ChatPromptTemplate: """Combine two prompt templates. @@ -543,7 +544,7 @@ class BaseChatPromptTemplate(BasePromptTemplate, ABC): raise NotImplementedError def pretty_print(self) -> None: - print(self.pretty_repr(html=is_interactive_env())) + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 MessageLike = Union[BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate] diff --git a/libs/core/langchain_core/prompts/few_shot.py b/libs/core/langchain_core/prompts/few_shot.py index 8be9d0efb6..e03513fb86 100644 --- a/libs/core/langchain_core/prompts/few_shot.py +++ b/libs/core/langchain_core/prompts/few_shot.py @@ -1,4 +1,5 @@ """Prompt template that contains few shot examples.""" + from __future__ import annotations from pathlib import Path @@ -277,7 +278,7 @@ class FewShotChatMessagePromptTemplate( + HumanMessagePromptTemplate.from_template("{input}") ) # Show the prompt - print(final_prompt.format_messages(input="What's 3+3?")) + print(final_prompt.format_messages(input="What's 3+3?")) # noqa: T201 # Use within an LLM from langchain_core.chat_models import ChatAnthropic diff --git a/libs/core/langchain_core/prompts/string.py b/libs/core/langchain_core/prompts/string.py index 0923da9fbe..d95a3ce9d2 100644 --- a/libs/core/langchain_core/prompts/string.py +++ b/libs/core/langchain_core/prompts/string.py @@ -1,4 +1,5 @@ """BasePrompt schema 
definition.""" + from __future__ import annotations import warnings @@ -174,4 +175,4 @@ class StringPromptTemplate(BasePromptTemplate, ABC): return self.format(**dummy_vars) def pretty_print(self) -> None: - print(self.pretty_repr(html=is_interactive_env())) + print(self.pretty_repr(html=is_interactive_env())) # noqa: T201 diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 783d8bbbf9..4e4a79a50c 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -186,7 +186,7 @@ class Runnable(Generic[Input, Output], ABC): def buggy_double(y: int) -> int: '''Buggy code that will fail 70% of the time''' if random.random() > 0.3: - print('This code failed, and will probably be retried!') + print('This code failed, and will probably be retried!') # noqa: T201 raise ValueError('Triggered buggy code') return y * 2 @@ -1826,8 +1826,8 @@ class RunnableSequence(RunnableSerializable[Input, Output]): chain = prompt | model | SimpleJsonOutputParser() async for chunk in chain.astream({'topic': 'colors'}): - print('-') - print(chunk, sep='', flush=True) + print('-') # noqa: T201 + print(chunk, sep='', flush=True) # noqa: T201 """ # The steps are broken into first, middle and last, solely for type checking @@ -2527,7 +2527,7 @@ class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]): for chunk in runnable.stream({"topic": "bear"}): for key in chunk: output[key] = output[key] + chunk[key].content - print(output) + print(output) # noqa: T201 """ steps: Mapping[str, Runnable[Input, Any]] @@ -3862,7 +3862,7 @@ class RunnableEach(RunnableEachBase[Input, Output]): output = runnable_each.invoke([{'topic':'Computer Science'}, {'topic':'Art'}, {'topic':'Biology'}]) - print(output) + print(output) # noqa: T201 """ @classmethod @@ -4323,12 +4323,12 @@ class RunnableBinding(RunnableBindingBase[Input, Output]): bound=self.bound, kwargs=self.kwargs, config=self.config, - 
custom_input_type=input_type - if input_type is not None - else self.custom_input_type, - custom_output_type=output_type - if output_type is not None - else self.custom_output_type, + custom_input_type=( + input_type if input_type is not None else self.custom_input_type + ), + custom_output_type=( + output_type if output_type is not None else self.custom_output_type + ), ) def with_retry(self, **kwargs: Any) -> Runnable[Input, Output]: diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index f6b6b8b3ba..26a4fbd0aa 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -143,4 +143,4 @@ class Graph: ) def print_ascii(self) -> None: - print(self.draw_ascii()) + print(self.draw_ascii()) # noqa: T201 diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index 2134f2f88f..f9b68cd68e 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -115,7 +115,7 @@ class RunnableWithMessageHistory(RunnableBindingBase): history = get_by_session_id("1") history.add_message(AIMessage(content="hello")) - print(store) + print(store) # noqa: T201 Example where the wrapped Runnable takes a dictionary input: @@ -146,20 +146,20 @@ class RunnableWithMessageHistory(RunnableBindingBase): history_messages_key="history", ) - print(chain_with_history.invoke( + print(chain_with_history.invoke( # noqa: T201 {"ability": "math", "question": "What does cosine mean?"}, config={"configurable": {"session_id": "foo"}} )) # Uses the store defined in the example above. 
- print(store) + print(store) # noqa: T201 - print(chain_with_history.invoke( + print(chain_with_history.invoke( # noqa: T201 {"ability": "math", "question": "What's its inverse"}, config={"configurable": {"session_id": "foo"}} )) - print(store) + print(store) # noqa: T201 Example where the session factory takes two keys, user_id and conversation id): diff --git a/libs/core/langchain_core/sys_info.py b/libs/core/langchain_core/sys_info.py index bdda43f3f4..2cbdcacf95 100644 --- a/libs/core/langchain_core/sys_info.py +++ b/libs/core/langchain_core/sys_info.py @@ -1,4 +1,5 @@ """Print information about the system and langchain packages for debugging purposes.""" + from typing import Sequence @@ -37,17 +38,17 @@ def print_sys_info(*, additional_pkgs: Sequence[str] = tuple()) -> None: "OS Version": platform.version(), "Python Version": sys.version, } - print() - print("System Information") - print("------------------") - print("> OS: ", system_info["OS"]) - print("> OS Version: ", system_info["OS Version"]) - print("> Python Version: ", system_info["Python Version"]) + print() # noqa: T201 + print("System Information") # noqa: T201 + print("------------------") # noqa: T201 + print("> OS: ", system_info["OS"]) # noqa: T201 + print("> OS Version: ", system_info["OS Version"]) # noqa: T201 + print("> Python Version: ", system_info["Python Version"]) # noqa: T201 # Print out only langchain packages - print() - print("Package Information") - print("-------------------") + print() # noqa: T201 + print("Package Information") # noqa: T201 + print("-------------------") # noqa: T201 not_installed = [] @@ -68,18 +69,18 @@ def print_sys_info(*, additional_pkgs: Sequence[str] = tuple()) -> None: # Print package with version if package_version is not None: - print(f"> {pkg}: {package_version}") + print(f"> {pkg}: {package_version}") # noqa: T201 else: - print(f"> {pkg}: Installed. No version info available.") + print(f"> {pkg}: Installed. 
No version info available.") # noqa: T201 if not_installed: - print() - print("Packages not installed (Not Necessarily a Problem)") - print("--------------------------------------------------") - print("The following packages were not found:") - print() + print() # noqa: T201 + print("Packages not installed (Not Necessarily a Problem)") # noqa: T201 + print("--------------------------------------------------") # noqa: T201 + print("The following packages were not found:") # noqa: T201 + print() # noqa: T201 for pkg in not_installed: - print(f"> {pkg}") + print(f"> {pkg}") # noqa: T201 if __name__ == "__main__": diff --git a/libs/core/langchain_core/utils/input.py b/libs/core/langchain_core/utils/input.py index 8d5ae6cc24..beb8b653d1 100644 --- a/libs/core/langchain_core/utils/input.py +++ b/libs/core/langchain_core/utils/input.py @@ -1,4 +1,5 @@ """Handle chained inputs.""" + from typing import Dict, List, Optional, TextIO _TEXT_COLOR_MAPPING = { @@ -37,6 +38,6 @@ def print_text( ) -> None: """Print text with highlighting and no end characters.""" text_to_print = get_colored_text(text, color) if color else text - print(text_to_print, end=end, file=file) + print(text_to_print, end=end, file=file) # noqa: T201 if file: file.flush() # ensure all printed content are written to file diff --git a/libs/core/pyproject.toml b/libs/core/pyproject.toml index b19c371f52..fc082a9806 100644 --- a/libs/core/pyproject.toml +++ b/libs/core/pyproject.toml @@ -71,6 +71,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/core/scripts/check_imports.py b/libs/core/scripts/check_imports.py index 462ab97ae2..825bea5b48 100644 --- a/libs/core/scripts/check_imports.py +++ b/libs/core/scripts/check_imports.py @@ -15,8 +15,8 @@ if __name__ == "__main__": SourceFileLoader(module_name, file).load_module() except Exception: has_failure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: 
T201 sys.exit(1 if has_failure else 0) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py index 1e5e8933e0..5fd94b0c91 100644 --- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py +++ b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py @@ -98,7 +98,7 @@ class AutoGPT: ) # Print Assistant thoughts - print(assistant_reply) + print(assistant_reply) # noqa: T201 self.chat_history_memory.add_message(HumanMessage(content=user_input)) self.chat_history_memory.add_message(AIMessage(content=assistant_reply)) @@ -135,7 +135,7 @@ class AutoGPT: if self.feedback_tool is not None: feedback = f"\n{self.feedback_tool.run('Input: ')}" if feedback in {"q", "stop"}: - print("EXITING") + print("EXITING") # noqa: T201 return "EXITING" memory_to_add += feedback diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py index 13caa93705..6aa662892f 100644 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py +++ b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py @@ -1,4 +1,5 @@ """BabyAGI agent.""" + from collections import deque from typing import Any, Dict, List, Optional @@ -59,17 +60,17 @@ class BabyAGI(Chain, BaseModel): # type: ignore[misc] self.task_list.append(task) def print_task_list(self) -> None: - print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") + print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") # noqa: T201 for t in self.task_list: - print(str(t["task_id"]) + ": " + t["task_name"]) + print(str(t["task_id"]) + ": " + t["task_name"]) # noqa: T201 def print_next_task(self, task: Dict) -> None: - print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") - 
print(str(task["task_id"]) + ": " + task["task_name"]) + print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") # noqa: T201 + print(str(task["task_id"]) + ": " + task["task_name"]) # noqa: T201 def print_task_result(self, result: str) -> None: - print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") - print(result) + print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") # noqa: T201 + print(result) # noqa: T201 @property def input_keys(self) -> List[str]: @@ -190,7 +191,7 @@ class BabyAGI(Chain, BaseModel): # type: ignore[misc] ) num_iters += 1 if self.max_iterations is not None and num_iters == self.max_iterations: - print( + print( # noqa: T201 "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m" ) break diff --git a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py index bf742d192b..c6a9be93a5 100644 --- a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py +++ b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py @@ -117,7 +117,7 @@ class TaskExecutor: def run(self) -> str: for task in self.tasks: - print(f"running {task}") + print(f"running {task}") # noqa: T201 if task.pending() and self.check_dependency(task): self.update_args(task) task.run() diff --git a/libs/experimental/pyproject.toml b/libs/experimental/pyproject.toml index d79488b2dc..b3386cc600 100644 --- a/libs/experimental/pyproject.toml +++ b/libs/experimental/pyproject.toml @@ -86,6 +86,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/experimental/scripts/check_imports.py b/libs/experimental/scripts/check_imports.py index 462ab97ae2..825bea5b48 100644 --- a/libs/experimental/scripts/check_imports.py +++ b/libs/experimental/scripts/check_imports.py @@ -15,8 +15,8 @@ if 
__name__ == "__main__": SourceFileLoader(module_name, file).load_module() except Exception: has_failure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/experimental/tests/integration_tests/chains/test_cpal.py b/libs/experimental/tests/integration_tests/chains/test_cpal.py index 398350927c..5df9cd80b6 100644 --- a/libs/experimental/tests/integration_tests/chains/test_cpal.py +++ b/libs/experimental/tests/integration_tests/chains/test_cpal.py @@ -516,7 +516,7 @@ class TestCPALChain_MathWordProblems(unittest.TestCase): llm = OpenAI(temperature=0, max_tokens=512) cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) with pytest.raises(Exception) as e_info: - print(e_info) + print(e_info) # noqa: T201 cpal_chain.run(narrative_input) def test_causal_mediator(self) -> None: diff --git a/libs/experimental/tests/unit_tests/python/test_python_1.py b/libs/experimental/tests/unit_tests/python/test_python_1.py index 46d92c29e6..4c961d8437 100644 --- a/libs/experimental/tests/unit_tests/python/test_python_1.py +++ b/libs/experimental/tests/unit_tests/python/test_python_1.py @@ -1,4 +1,5 @@ """Test functionality of Python REPL.""" + import sys import pytest @@ -9,7 +10,7 @@ from langchain_experimental.utilities.python import PythonREPL _SAMPLE_CODE = """ ``` def multiply(): - print(5*6) + print(5*6) # noqa: T201 multiply() ``` """ diff --git a/libs/experimental/tests/unit_tests/python/test_python_2.py b/libs/experimental/tests/unit_tests/python/test_python_2.py index 9a7f2c8d04..65fc8f62cb 100644 --- a/libs/experimental/tests/unit_tests/python/test_python_2.py +++ b/libs/experimental/tests/unit_tests/python/test_python_2.py @@ -1,4 +1,5 @@ """Test Python REPL Tools.""" + import sys import numpy as np @@ -24,7 +25,7 @@ import numpy as np v1 = np.array([1, 2, 3]) v2 = np.array([4, 5, 6]) dot_product = np.dot(v1, v2) -print("The dot product is 
{:d}.".format(dot_product)) +print("The dot product is {:d}.".format(dot_product)) # noqa: T201 """ tool = PythonREPLTool() assert tool.run(program) == "The dot product is 32.\n" @@ -75,9 +76,9 @@ def test_python_ast_repl_print() -> None: program = """python string = "racecar" if string == string[::-1]: - print(string, "is a palindrome") + print(string, "is a palindrome") # noqa: T201 else: - print(string, "is not a palindrome")""" + print(string, "is not a palindrome")""" # noqa: T201 tool = PythonAstREPLTool() assert tool.run(program) == "racecar is a palindrome\n" diff --git a/libs/experimental/tests/unit_tests/test_llm_bash.py b/libs/experimental/tests/unit_tests/test_llm_bash.py index 5f7dc3fb85..1adbd999fa 100644 --- a/libs/experimental/tests/unit_tests/test_llm_bash.py +++ b/libs/experimental/tests/unit_tests/test_llm_bash.py @@ -1,4 +1,5 @@ """Test LLM Bash functionality.""" + import sys import pytest @@ -85,7 +86,7 @@ ls && pwd && ls ``` ```python -print("hello") +print("hello") # noqa: T201 ``` ```bash diff --git a/libs/langchain/langchain/chains/llm_summarization_checker/base.py b/libs/langchain/langchain/chains/llm_summarization_checker/base.py index b5d1c1d504..f63e383bbc 100644 --- a/libs/langchain/langchain/chains/llm_summarization_checker/base.py +++ b/libs/langchain/langchain/chains/llm_summarization_checker/base.py @@ -157,7 +157,7 @@ class LLMSummarizationCheckerChain(Chain): break if self.verbose: - print(output["revised_summary"]) + print(output["revised_summary"]) # noqa: T201 chain_input = output["revised_summary"] diff --git a/libs/langchain/langchain/chains/natbot/crawler.py b/libs/langchain/langchain/chains/natbot/crawler.py index 8aa35d9525..b5fbc55335 100644 --- a/libs/langchain/langchain/chains/natbot/crawler.py +++ b/libs/langchain/langchain/chains/natbot/crawler.py @@ -114,7 +114,7 @@ class Crawler: self.page.mouse.click(x, y) else: - print("Could not find element") + print("Could not find element") # noqa: T201 def type(self, id: 
Union[str, int], text: str) -> None: self.click(id) @@ -442,5 +442,5 @@ class Crawler: ) id_counter += 1 - print("Parsing time: {:0.2f} seconds".format(time.time() - start)) + print("Parsing time: {:0.2f} seconds".format(time.time() - start)) # noqa: T201 return elements_of_interest diff --git a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py index 532d67b0a7..89b389340e 100644 --- a/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py +++ b/libs/langchain/langchain/evaluation/agents/trajectory_eval_chain.py @@ -139,7 +139,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain): prediction=response["output"], reference="Paris", ) - print(result["score"]) + print(result["score"]) # noqa: T201 # 0 """ # noqa: E501 diff --git a/libs/langchain/langchain/model_laboratory.py b/libs/langchain/langchain/model_laboratory.py index c012423e37..114994535d 100644 --- a/libs/langchain/langchain/model_laboratory.py +++ b/libs/langchain/langchain/model_laboratory.py @@ -1,4 +1,5 @@ """Experiment with different models.""" + from __future__ import annotations from typing import List, Optional, Sequence @@ -72,7 +73,7 @@ class ModelLaboratory: Args: text: input text to run all models on. 
""" - print(f"\033[1mInput:\033[0m\n{text}\n") + print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201 for i, chain in enumerate(self.chains): if self.names is not None: name = self.names[i] diff --git a/libs/langchain/langchain/output_parsers/structured.py b/libs/langchain/langchain/output_parsers/structured.py index 1d99363bc1..62847e1dd1 100644 --- a/libs/langchain/langchain/output_parsers/structured.py +++ b/libs/langchain/langchain/output_parsers/structured.py @@ -67,7 +67,7 @@ class StructuredOutputParser(BaseOutputParser): parser = StructuredOutputParser.from_response_schemas(response_schemas) - print(parser.get_format_instructions()) + print(parser.get_format_instructions()) # noqa: T201 output: # The output should be a Markdown code snippet formatted in the following diff --git a/libs/langchain/langchain/retrievers/self_query/myscale.py b/libs/langchain/langchain/retrievers/self_query/myscale.py index a951272471..8b1afaf8b9 100644 --- a/libs/langchain/langchain/retrievers/self_query/myscale.py +++ b/libs/langchain/langchain/retrievers/self_query/myscale.py @@ -117,7 +117,7 @@ class MyScaleTranslator(Visitor): def visit_structured_query( self, structured_query: StructuredQuery ) -> Tuple[str, dict]: - print(structured_query) + print(structured_query) # noqa: T201 if structured_query.filter is None: kwargs = {} else: diff --git a/libs/langchain/langchain/smith/evaluation/progress.py b/libs/langchain/langchain/smith/evaluation/progress.py index bf25bec3fb..15ff4cc599 100644 --- a/libs/langchain/langchain/smith/evaluation/progress.py +++ b/libs/langchain/langchain/smith/evaluation/progress.py @@ -1,4 +1,5 @@ """A simple progress bar for the console.""" + import threading from typing import Any, Dict, Optional, Sequence from uuid import UUID @@ -36,7 +37,7 @@ class ProgressBarCallback(base_callbacks.BaseCallbackHandler): progress = self.counter / self.total arrow = "-" * int(round(progress * self.ncols) - 1) + ">" spaces = " " * (self.ncols - len(arrow)) - 
print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") + print(f"\r[{arrow + spaces}] {self.counter}/{self.total}", end="") # noqa: T201 def on_chain_error( self, diff --git a/libs/langchain/langchain/smith/evaluation/runner_utils.py b/libs/langchain/langchain/smith/evaluation/runner_utils.py index feb199e8d0..d55aabae40 100644 --- a/libs/langchain/langchain/smith/evaluation/runner_utils.py +++ b/libs/langchain/langchain/smith/evaluation/runner_utils.py @@ -999,7 +999,7 @@ run_on_dataset( f"\n\n{example_msg}" ) comparison_url = dataset.url + f"/compare?selectedSessions={project.id}" - print( + print( # noqa: T201 f"View the evaluation results for project '{project_name}'" f" at:\n{comparison_url}\n\n" f"View all tests for Dataset {dataset_name} at:\n{dataset.url}", @@ -1190,8 +1190,8 @@ def _display_aggregate_results(aggregate_results: pd.DataFrame) -> None: formatted_string = aggregate_results.to_string( float_format=lambda x: f"{x:.2f}", justify="right" ) - print("\n Experiment Results:") - print(formatted_string) + print("\n Experiment Results:") # noqa: T201 + print(formatted_string) # noqa: T201 _INPUT_MAPPER_DEP_WARNING = ( diff --git a/libs/langchain/langchain/storage/file_system.py b/libs/langchain/langchain/storage/file_system.py index e737309a18..bd0aca4875 100644 --- a/libs/langchain/langchain/storage/file_system.py +++ b/libs/langchain/langchain/storage/file_system.py @@ -32,7 +32,7 @@ class LocalFileStore(ByteStore): # Iterate over keys for key in file_store.yield_keys(): - print(key) + print(key) # noqa: T201 """ diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 193ca3ece6..6e3722d5eb 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -310,6 +310,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/langchain/scripts/check_imports.py b/libs/langchain/scripts/check_imports.py index 462ab97ae2..825bea5b48 100644 --- 
a/libs/langchain/scripts/check_imports.py +++ b/libs/langchain/scripts/check_imports.py @@ -15,8 +15,8 @@ if __name__ == "__main__": SourceFileLoader(module_name, file).load_module() except Exception: has_failure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py b/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py index 7a661626fe..20c81de892 100644 --- a/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py +++ b/libs/langchain/tests/integration_tests/agent/test_powerbi_agent.py @@ -13,7 +13,7 @@ def azure_installed() -> bool: return True except Exception as e: - print(f"azure not installed, skipping test {e}") + print(f"azure not installed, skipping test {e}") # noqa: T201 return False diff --git a/libs/langchain/tests/integration_tests/cache/test_astradb.py b/libs/langchain/tests/integration_tests/cache/test_astradb.py index 79e5ee960e..37d538f800 100644 --- a/libs/langchain/tests/integration_tests/cache/test_astradb.py +++ b/libs/langchain/tests/integration_tests/cache/test_astradb.py @@ -10,6 +10,7 @@ Required to run this test: - optionally this as well (otherwise defaults are used): export ASTRA_DB_KEYSPACE="my_keyspace" """ + import os from typing import Iterator @@ -68,12 +69,12 @@ class TestAstraDBCaches: llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["foo"]) - print(output) + print(output) # noqa: T201 expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) - print(expected_output) + print(expected_output) # noqa: T201 assert output == expected_output astradb_cache.clear() diff --git a/libs/langchain/tests/integration_tests/cache/test_cassandra.py b/libs/langchain/tests/integration_tests/cache/test_cassandra.py index 
b12616963c..19a8efaf4a 100644 --- a/libs/langchain/tests/integration_tests/cache/test_cassandra.py +++ b/libs/langchain/tests/integration_tests/cache/test_cassandra.py @@ -1,4 +1,5 @@ """Test Cassandra caches. Requires a running vector-capable Cassandra cluster.""" + import os import time from typing import Any, Iterator, Tuple @@ -46,12 +47,12 @@ def test_cassandra_cache(cassandra_connection: Tuple[Any, str]) -> None: llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update("foo", llm_string, [Generation(text="fizz")]) output = llm.generate(["foo"]) - print(output) + print(output) # noqa: T201 expected_output = LLMResult( generations=[[Generation(text="fizz")]], llm_output={}, ) - print(expected_output) + print(expected_output) # noqa: T201 assert output == expected_output cache.clear() diff --git a/libs/langchain/tests/integration_tests/cache/test_redis_cache.py b/libs/langchain/tests/integration_tests/cache/test_redis_cache.py index 644d038675..20ed08e678 100644 --- a/libs/langchain/tests/integration_tests/cache/test_redis_cache.py +++ b/libs/langchain/tests/integration_tests/cache/test_redis_cache.py @@ -1,4 +1,5 @@ """Test Redis cache functionality.""" + import uuid from contextlib import asynccontextmanager, contextmanager from typing import AsyncGenerator, Generator, List, Optional, cast @@ -309,8 +310,8 @@ def test_redis_semantic_cache_hit( ] llm_cache = cast(RedisSemanticCache, get_llm_cache()) for prompt_i, llm_generations_i in zip(prompts, llm_generations): - print(prompt_i) - print(llm_generations_i) + print(prompt_i) # noqa: T201 + print(llm_generations_i) # noqa: T201 llm_cache.update(prompt_i, llm_string, llm_generations_i) llm.generate(prompts) assert llm.generate(prompts) == LLMResult( diff --git a/libs/langchain/tests/integration_tests/examples/hello_world.py b/libs/langchain/tests/integration_tests/examples/hello_world.py index 3f0294febb..c98e152dde 100644 --- 
a/libs/langchain/tests/integration_tests/examples/hello_world.py +++ b/libs/langchain/tests/integration_tests/examples/hello_world.py @@ -4,7 +4,7 @@ import sys def main() -> int: - print("Hello World!") + print("Hello World!") # noqa: T201 return 0 diff --git a/libs/langchain/tests/unit_tests/agents/output_parsers/test_convo_output_parser.py b/libs/langchain/tests/unit_tests/agents/output_parsers/test_convo_output_parser.py index 9aa430e398..e3fe35666b 100644 --- a/libs/langchain/tests/unit_tests/agents/output_parsers/test_convo_output_parser.py +++ b/libs/langchain/tests/unit_tests/agents/output_parsers/test_convo_output_parser.py @@ -21,14 +21,14 @@ Thought: Do I need to use a tool? Yes Action: evaluate_code Action Input: Evaluate Code with the following Python content: ```python -print("Hello fifty shades of gray mans!"[::-1]) +print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201 ``` """, "evaluate_code", """ Evaluate Code with the following Python content: ```python -print("Hello fifty shades of gray mans!"[::-1]) +print("Hello fifty shades of gray mans!"[::-1]) # noqa: T201 ```""".lstrip(), ) diff --git a/libs/langchain/tests/unit_tests/agents/test_agent.py b/libs/langchain/tests/unit_tests/agents/test_agent.py index 6ba908b6ad..5eb571c91b 100644 --- a/libs/langchain/tests/unit_tests/agents/test_agent.py +++ b/libs/langchain/tests/unit_tests/agents/test_agent.py @@ -1,4 +1,5 @@ """Unit tests for agents.""" + import json from itertools import cycle from typing import Any, Dict, List, Optional, Union, cast @@ -51,8 +52,8 @@ class FakeListLLM(LLM): ) -> str: """Increment counter, and then return response in that index.""" self.i += 1 - print(f"=== Mock Response #{self.i} ===") - print(self.responses[self.i]) + print(f"=== Mock Response #{self.i} ===") # noqa: T201 + print(self.responses[self.i]) # noqa: T201 return self.responses[self.i] def get_num_tokens(self, text: str) -> int: diff --git a/libs/langchain/tests/unit_tests/agents/test_agent_async.py 
b/libs/langchain/tests/unit_tests/agents/test_agent_async.py index aeee034df7..f1259cebc0 100644 --- a/libs/langchain/tests/unit_tests/agents/test_agent_async.py +++ b/libs/langchain/tests/unit_tests/agents/test_agent_async.py @@ -28,8 +28,8 @@ class FakeListLLM(LLM): ) -> str: """Increment counter, and then return response in that index.""" self.i += 1 - print(f"=== Mock Response #{self.i} ===") - print(self.responses[self.i]) + print(f"=== Mock Response #{self.i} ===") # noqa: T201 + print(self.responses[self.i]) # noqa: T201 return self.responses[self.i] def get_num_tokens(self, text: str) -> int: diff --git a/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py b/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py index 2b59cf8cd4..4fa886ba2d 100644 --- a/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py +++ b/libs/langchain/tests/unit_tests/agents/test_agent_iterator.py @@ -143,7 +143,7 @@ def test_agent_iterator_with_callbacks() -> None: assert handler1.starts == 7 # 1 extra agent end assert handler1.ends == 7 - print("h:", handler1) + print("h:", handler1) # noqa: T201 assert handler1.errors == 0 # during LLMChain assert handler1.text == 2 diff --git a/libs/langchain/tests/unit_tests/chains/test_sequential.py b/libs/langchain/tests/unit_tests/chains/test_sequential.py index 36aafc2266..ca2ff78e7a 100644 --- a/libs/langchain/tests/unit_tests/chains/test_sequential.py +++ b/libs/langchain/tests/unit_tests/chains/test_sequential.py @@ -1,4 +1,5 @@ """Test pipeline functionality.""" + from typing import Dict, List, Optional import pytest @@ -107,7 +108,7 @@ def test_sequential_internal_chain_use_memory() -> None: chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"]) output = chain({"foo": "123"}) - print("HEYYY OUTPUT", output) + print("HEYYY OUTPUT", output) # noqa: T201 expected_output = {"foo": "123", "baz": "123 Human: yo\nAI: 
yafoofoo"} assert output == expected_output diff --git a/libs/langchain/tests/unit_tests/output_parsers/test_pydantic_parser.py b/libs/langchain/tests/unit_tests/output_parsers/test_pydantic_parser.py index 07505e7bef..864478f73f 100644 --- a/libs/langchain/tests/unit_tests/output_parsers/test_pydantic_parser.py +++ b/libs/langchain/tests/unit_tests/output_parsers/test_pydantic_parser.py @@ -1,4 +1,5 @@ """Test PydanticOutputParser""" + from enum import Enum from typing import Optional @@ -58,7 +59,7 @@ def test_pydantic_output_parser() -> None: ) result = pydantic_parser.parse(DEF_RESULT) - print("parse_result:", result) + print("parse_result:", result) # noqa: T201 assert DEF_EXPECTED_RESULT == result @@ -72,7 +73,7 @@ def test_pydantic_output_parser_fail() -> None: try: pydantic_parser.parse(DEF_RESULT_FAIL) except OutputParserException as e: - print("parse_result:", e) + print("parse_result:", e) # noqa: T201 assert "Failed to parse TestModel from completion" in str(e) else: assert False, "Expected OutputParserException" diff --git a/libs/langchain/tests/unit_tests/output_parsers/test_regex_dict.py b/libs/langchain/tests/unit_tests/output_parsers/test_regex_dict.py index 09df585aed..c07d0c8567 100644 --- a/libs/langchain/tests/unit_tests/output_parsers/test_regex_dict.py +++ b/libs/langchain/tests/unit_tests/output_parsers/test_regex_dict.py @@ -1,4 +1,5 @@ """Test in memory docstore.""" + from langchain.output_parsers.regex_dict import RegexDictParser DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"} @@ -33,5 +34,5 @@ def test_regex_dict_result() -> None: output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A" ) result_dict = regex_dict_parser.parse(DEF_README) - print("parse_result:", result_dict) + print("parse_result:", result_dict) # noqa: T201 assert DEF_EXPECTED_RESULT == result_dict diff --git a/libs/langchain/tests/unit_tests/output_parsers/test_yaml_parser.py 
b/libs/langchain/tests/unit_tests/output_parsers/test_yaml_parser.py index e02ff9f42f..b48a353d8d 100644 --- a/libs/langchain/tests/unit_tests/output_parsers/test_yaml_parser.py +++ b/libs/langchain/tests/unit_tests/output_parsers/test_yaml_parser.py @@ -1,4 +1,5 @@ """Test yamlOutputParser""" + from enum import Enum from typing import Optional @@ -74,7 +75,7 @@ def test_yaml_output_parser(result: str) -> None: ) model = yaml_parser.parse(result) - print("parse_result:", result) + print("parse_result:", result) # noqa: T201 assert DEF_EXPECTED_RESULT == model @@ -88,7 +89,7 @@ def test_yaml_output_parser_fail() -> None: try: yaml_parser.parse(DEF_RESULT_FAIL) except OutputParserException as e: - print("parse_result:", e) + print("parse_result:", e) # noqa: T201 assert "Failed to parse TestModel from completion" in str(e) else: assert False, "Expected OutputParserException" diff --git a/libs/langchain/tests/unit_tests/retrievers/test_parent_document.py b/libs/langchain/tests/unit_tests/retrievers/test_parent_document.py index 278b05f93c..0b24795b78 100644 --- a/libs/langchain/tests/unit_tests/retrievers/test_parent_document.py +++ b/libs/langchain/tests/unit_tests/retrievers/test_parent_document.py @@ -18,7 +18,7 @@ class InMemoryVectorstoreWithSearch(InMemoryVectorStore): return [res] def add_documents(self, documents: Sequence[Document], **kwargs: Any) -> List[str]: - print(documents) + print(documents) # noqa: T201 return super().add_documents( documents, ids=[f"{i}" for i in range(len(documents))] ) diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml index e2af2615f1..749a3b6d27 100644 --- a/libs/partners/anthropic/pyproject.toml +++ b/libs/partners/anthropic/pyproject.toml @@ -60,6 +60,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/anthropic/scripts/check_imports.py b/libs/partners/anthropic/scripts/check_imports.py index fd21a4975b..365f5fa118 
100644 --- a/libs/partners/anthropic/scripts/check_imports.py +++ b/libs/partners/anthropic/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/exa/scripts/check_imports.py b/libs/partners/exa/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/exa/scripts/check_imports.py +++ b/libs/partners/exa/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/exa/tests/integration_tests/test_find_similar_tool.py b/libs/partners/exa/tests/integration_tests/test_find_similar_tool.py index fd21bd406b..c8d527c73a 100644 --- a/libs/partners/exa/tests/integration_tests/test_find_similar_tool.py +++ b/libs/partners/exa/tests/integration_tests/test_find_similar_tool.py @@ -9,5 +9,5 @@ def test_similarity_tool() -> None: "num_results": 5, } ) - print(res) + print(res) # noqa: T201 assert not isinstance(res, str) # str means error for this tool diff --git a/libs/partners/exa/tests/integration_tests/test_retriever.py b/libs/partners/exa/tests/integration_tests/test_retriever.py index a71e13cf5e..4e000dd36c 100644 --- a/libs/partners/exa/tests/integration_tests/test_retriever.py +++ b/libs/partners/exa/tests/integration_tests/test_retriever.py @@ -6,7 +6,7 @@ from langchain_exa import ExaSearchRetriever def test_exa_retriever() -> None: retriever = ExaSearchRetriever() res = retriever.invoke("best time to visit japan") - print(res) + print(res) # noqa: T201 assert len(res) == 10 # default k assert isinstance(res, list) assert isinstance(res[0], Document) @@ 
-15,7 +15,7 @@ def test_exa_retriever() -> None: def test_exa_retriever_highlights() -> None: retriever = ExaSearchRetriever(highlights=True) res = retriever.invoke("best time to visit japan") - print(res) + print(res) # noqa: T201 assert isinstance(res, list) assert isinstance(res[0], Document) highlights = res[0].metadata["highlights"] diff --git a/libs/partners/exa/tests/integration_tests/test_search_tool.py b/libs/partners/exa/tests/integration_tests/test_search_tool.py index 3cb380a29c..f3ebe87c38 100644 --- a/libs/partners/exa/tests/integration_tests/test_search_tool.py +++ b/libs/partners/exa/tests/integration_tests/test_search_tool.py @@ -4,5 +4,5 @@ from langchain_exa import ExaSearchResults def test_search_tool() -> None: tool = ExaSearchResults() res = tool.invoke({"query": "best time to visit japan", "num_results": 5}) - print(res) + print(res) # noqa: T201 assert not isinstance(res, str) # str means error for this tool\ diff --git a/libs/partners/google-genai/pyproject.toml b/libs/partners/google-genai/pyproject.toml index bdab0110e0..deee61e7f7 100644 --- a/libs/partners/google-genai/pyproject.toml +++ b/libs/partners/google-genai/pyproject.toml @@ -70,9 +70,10 @@ types-google-cloud-ndb = "^2.2.0.1" [tool.ruff.lint] select = [ - "E", # pycodestyle - "F", # pyflakes - "I", # isort + "E", # pycodestyle + "F", # pyflakes + "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/google-genai/scripts/check_imports.py b/libs/partners/google-genai/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/google-genai/scripts/check_imports.py +++ b/libs/partners/google-genai/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git 
a/libs/partners/google-genai/tests/integration_tests/test_chat_models.py b/libs/partners/google-genai/tests/integration_tests/test_chat_models.py index 4551a860d0..91e6aab20f 100644 --- a/libs/partners/google-genai/tests/integration_tests/test_chat_models.py +++ b/libs/partners/google-genai/tests/integration_tests/test_chat_models.py @@ -1,4 +1,5 @@ """Test ChatGoogleGenerativeAI chat model.""" + import pytest from langchain_core.messages import AIMessage, HumanMessage, SystemMessage @@ -102,7 +103,7 @@ def test_chat_google_genai_invoke_multimodal() -> None: # Try streaming for chunk in llm.stream(messages): - print(chunk) + print(chunk) # noqa: T201 assert isinstance(chunk.content, str) assert len(chunk.content.strip()) > 0 diff --git a/libs/partners/google-genai/tests/unit_tests/test_chat_models.py b/libs/partners/google-genai/tests/unit_tests/test_chat_models.py index e13dcccafc..61990909ed 100644 --- a/libs/partners/google-genai/tests/unit_tests/test_chat_models.py +++ b/libs/partners/google-genai/tests/unit_tests/test_chat_models.py @@ -45,7 +45,7 @@ def test_api_key_is_string() -> None: def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> None: chat = ChatGoogleGenerativeAI(model="gemini-nano", google_api_key="secret-api-key") - print(chat.google_api_key, end="") + print(chat.google_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/partners/google-genai/tests/unit_tests/test_embeddings.py b/libs/partners/google-genai/tests/unit_tests/test_embeddings.py index 45acffb33b..9bb57274e8 100644 --- a/libs/partners/google-genai/tests/unit_tests/test_embeddings.py +++ b/libs/partners/google-genai/tests/unit_tests/test_embeddings.py @@ -1,4 +1,5 @@ """Test embeddings model integration.""" + from langchain_core.pydantic_v1 import SecretStr from pytest import CaptureFixture @@ -31,7 +32,7 @@ def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture) -> N 
model="models/embedding-001", google_api_key="secret-api-key", ) - print(embeddings.google_api_key, end="") + print(embeddings.google_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/libs/partners/google-vertexai/pyproject.toml b/libs/partners/google-vertexai/pyproject.toml index 8ea2dbfd63..55e699f9e1 100644 --- a/libs/partners/google-vertexai/pyproject.toml +++ b/libs/partners/google-vertexai/pyproject.toml @@ -64,9 +64,10 @@ langchain-core = { path = "../../core", develop = true } [tool.ruff.lint] select = [ - "E", # pycodestyle - "F", # pyflakes - "I", # isort + "E", # pycodestyle + "F", # pyflakes + "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/google-vertexai/scripts/check_imports.py b/libs/partners/google-vertexai/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/google-vertexai/scripts/check_imports.py +++ b/libs/partners/google-vertexai/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/mistralai/pyproject.toml b/libs/partners/mistralai/pyproject.toml index 0d534cce01..8f8278e2aa 100644 --- a/libs/partners/mistralai/pyproject.toml +++ b/libs/partners/mistralai/pyproject.toml @@ -56,6 +56,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/mistralai/scripts/check_imports.py b/libs/partners/mistralai/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/mistralai/scripts/check_imports.py +++ b/libs/partners/mistralai/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # 
noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/nomic/scripts/check_imports.py b/libs/partners/nomic/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/nomic/scripts/check_imports.py +++ b/libs/partners/nomic/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/nvidia-ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py b/libs/partners/nvidia-ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py index 3836f309d2..d71c1a6290 100644 --- a/libs/partners/nvidia-ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py +++ b/libs/partners/nvidia-ai-endpoints/langchain_nvidia_ai_endpoints/chat_models.py @@ -1,4 +1,5 @@ """Chat Model Components Derived from ChatModel/NVIDIA""" + from __future__ import annotations import base64 @@ -60,7 +61,7 @@ def _is_b64(s: str) -> bool: def _resize_image(img_data: bytes, max_dim: int = 1024) -> str: if not has_pillow: - print( + print( # noqa: T201 "Pillow is required to resize images down to reasonable scale." " Please install it using `pip install pillow`." " For now, not resizing; may cause NVIDIA API to fail." 
diff --git a/libs/partners/nvidia-ai-endpoints/pyproject.toml b/libs/partners/nvidia-ai-endpoints/pyproject.toml index a9242dac47..41f3bf23c2 100644 --- a/libs/partners/nvidia-ai-endpoints/pyproject.toml +++ b/libs/partners/nvidia-ai-endpoints/pyproject.toml @@ -58,9 +58,10 @@ langchain-core = { path = "../../core", develop = true } [tool.ruff.lint] select = [ - "E", # pycodestyle - "F", # pyflakes - "I", # isort + "E", # pycodestyle + "F", # pyflakes + "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/nvidia-ai-endpoints/scripts/check_imports.py b/libs/partners/nvidia-ai-endpoints/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/nvidia-ai-endpoints/scripts/check_imports.py +++ b/libs/partners/nvidia-ai-endpoints/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/nvidia-trt/pyproject.toml b/libs/partners/nvidia-trt/pyproject.toml index f6936e0f75..83f9bebb4a 100644 --- a/libs/partners/nvidia-trt/pyproject.toml +++ b/libs/partners/nvidia-trt/pyproject.toml @@ -59,9 +59,10 @@ langchain-core = { path = "../../core", develop = true } [tool.ruff.lint] select = [ - "E", # pycodestyle - "F", # pyflakes - "I", # isort + "E", # pycodestyle + "F", # pyflakes + "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/nvidia-trt/scripts/check_imports.py b/libs/partners/nvidia-trt/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/nvidia-trt/scripts/check_imports.py +++ b/libs/partners/nvidia-trt/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # 
noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/openai/pyproject.toml b/libs/partners/openai/pyproject.toml index d3ef051b87..74d68cf487 100644 --- a/libs/partners/openai/pyproject.toml +++ b/libs/partners/openai/pyproject.toml @@ -62,6 +62,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/openai/scripts/check_imports.py b/libs/partners/openai/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/openai/scripts/check_imports.py +++ b/libs/partners/openai/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/pinecone/scripts/check_imports.py b/libs/partners/pinecone/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/pinecone/scripts/check_imports.py +++ b/libs/partners/pinecone/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py b/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py index eb1f959839..0cd4d5cf29 100644 --- a/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py +++ b/libs/partners/pinecone/tests/integration_tests/test_vectorstores.py @@ -56,7 +56,7 @@ class TestPinecone: @pytest.fixture(autouse=True) def setup(self) -> None: # delete all the vectors in the index - print("called") + print("called") # noqa: T201 self.index.delete(delete_all=True, namespace=NAMESPACE_NAME) # index_stats = self.index.describe_index_stats() 
# for _namespace_name in index_stats["namespaces"].keys(): @@ -119,7 +119,7 @@ class TestPinecone: """Test end to end construction and search with scores and IDs.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] - print("metadatas", metadatas) + print("metadatas", metadatas) # noqa: T201 docsearch = Pinecone.from_texts( texts, embedding_openai, @@ -127,7 +127,7 @@ class TestPinecone: metadatas=metadatas, namespace=NAMESPACE_NAME, ) - print(texts) + print(texts) # noqa: T201 time.sleep(DEFAULT_SLEEP) # prevent race condition output = docsearch.similarity_search_with_score( "foo", k=3, namespace=NAMESPACE_NAME @@ -135,7 +135,7 @@ class TestPinecone: docs = [o[0] for o in output] scores = [o[1] for o in output] sorted_documents = sorted(docs, key=lambda x: x.metadata["page"]) - print(sorted_documents) + print(sorted_documents) # noqa: T201 # TODO: why metadata={"page": 0.0}) instead of {"page": 0}, etc??? assert sorted_documents == [ @@ -230,7 +230,7 @@ class TestPinecone: # wait for the index to be ready time.sleep(DEFAULT_SLEEP) output = docsearch.similarity_search_with_relevance_scores("foo", k=3) - print(output) + print(output) # noqa: T201 assert all( (1 >= score or np.isclose(score, 1)) and score >= 0 for _, score in output ) diff --git a/libs/partners/robocorp/pyproject.toml b/libs/partners/robocorp/pyproject.toml index fa7599a0c3..581005cfa0 100644 --- a/libs/partners/robocorp/pyproject.toml +++ b/libs/partners/robocorp/pyproject.toml @@ -61,6 +61,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/robocorp/scripts/check_imports.py b/libs/partners/robocorp/scripts/check_imports.py index ba8de50118..58a460c149 100644 --- a/libs/partners/robocorp/scripts/check_imports.py +++ b/libs/partners/robocorp/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_failure = True - 
print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/together/pyproject.toml b/libs/partners/together/pyproject.toml index 96511d1a3e..851383a7bc 100644 --- a/libs/partners/together/pyproject.toml +++ b/libs/partners/together/pyproject.toml @@ -62,6 +62,7 @@ select = [ "E", # pycodestyle "F", # pyflakes "I", # isort + "T201", # print ] [tool.mypy] diff --git a/libs/partners/together/scripts/check_imports.py b/libs/partners/together/scripts/check_imports.py index fd21a4975b..365f5fa118 100644 --- a/libs/partners/together/scripts/check_imports.py +++ b/libs/partners/together/scripts/check_imports.py @@ -10,8 +10,8 @@ if __name__ == "__main__": SourceFileLoader("x", file).load_module() except Exception: has_faillure = True - print(file) + print(file) # noqa: T201 traceback.print_exc() - print() + print() # noqa: T201 sys.exit(1 if has_failure else 0) diff --git a/libs/partners/together/tests/unit_tests/test_llms.py b/libs/partners/together/tests/unit_tests/test_llms.py index 22cb93309a..ce9ba1be26 100644 --- a/libs/partners/together/tests/unit_tests/test_llms.py +++ b/libs/partners/together/tests/unit_tests/test_llms.py @@ -1,4 +1,5 @@ """Test Together LLM""" + from typing import cast from langchain_core.pydantic_v1 import SecretStr @@ -28,7 +29,7 @@ def test_together_api_key_masked_when_passed_from_env( temperature=0.2, max_tokens=250, ) - print(llm.together_api_key, end="") + print(llm.together_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" @@ -44,7 +45,7 @@ def test_together_api_key_masked_when_passed_via_constructor( temperature=0.2, max_tokens=250, ) - print(llm.together_api_key, end="") + print(llm.together_api_key, end="") # noqa: T201 captured = capsys.readouterr() assert captured.out == "**********" diff --git a/templates/anthropic-iterative-search/main.py b/templates/anthropic-iterative-search/main.py index 
27b7aa1aa6..c04830d110 100644 --- a/templates/anthropic-iterative-search/main.py +++ b/templates/anthropic-iterative-search/main.py @@ -5,7 +5,7 @@ if __name__ == "__main__": "Which movie came out first: Oppenheimer, or " "Are You There God It's Me Margaret?" ) - print( + print( # noqa: T201 final_chain.with_config(configurable={"chain": "retrieve"}).invoke( {"query": query} ) diff --git a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py index 1816e8c7fd..f54b58aa1a 100644 --- a/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py +++ b/templates/cassandra-entomology-rag/cassandra_entomology_rag/__init__.py @@ -42,7 +42,7 @@ retriever = vector_store.as_retriever(search_kwargs={"k": 3}) # Please remove this and/or adapt to your use case! inserted_lines = populate(vector_store) if inserted_lines: - print(f"Done ({inserted_lines} lines inserted).") + print(f"Done ({inserted_lines} lines inserted).") # noqa: T201 entomology_template = """ You are an expert entomologist, tasked with answering enthusiast biologists' questions. diff --git a/templates/csv-agent/main.py b/templates/csv-agent/main.py index 8814b92478..b0fba50cef 100644 --- a/templates/csv-agent/main.py +++ b/templates/csv-agent/main.py @@ -2,4 +2,4 @@ from csv_agent.agent import agent_executor if __name__ == "__main__": question = "who was in cabin c28?" 
- print(agent_executor.invoke({"input": question})) + print(agent_executor.invoke({"input": question})) # noqa: T201 diff --git a/templates/elastic-query-generator/main.py b/templates/elastic-query-generator/main.py index 4f848b6e88..6f1ff0b8c3 100644 --- a/templates/elastic-query-generator/main.py +++ b/templates/elastic-query-generator/main.py @@ -1,4 +1,4 @@ from elastic_query_generator.chain import chain if __name__ == "__main__": - print(chain.invoke({"input": "how many customers named Carol"})) + print(chain.invoke({"input": "how many customers named Carol"})) # noqa: T201 diff --git a/templates/gemini-functions-agent/main.py b/templates/gemini-functions-agent/main.py index f0e1f5963f..0e612d69f4 100644 --- a/templates/gemini-functions-agent/main.py +++ b/templates/gemini-functions-agent/main.py @@ -2,4 +2,4 @@ from openai_functions_agent.agent import agent_executor if __name__ == "__main__": question = "who won the womens world cup in 2023?" - print(agent_executor.invoke({"input": question, "chat_history": []})) + print(agent_executor.invoke({"input": question, "chat_history": []})) # noqa: T201 diff --git a/templates/neo4j-advanced-rag/main.py b/templates/neo4j-advanced-rag/main.py index a72b712637..3f3b3c2b5e 100644 --- a/templates/neo4j-advanced-rag/main.py +++ b/templates/neo4j-advanced-rag/main.py @@ -2,7 +2,7 @@ from neo4j_advanced_rag.chain import chain if __name__ == "__main__": original_query = "What is the plot of the Dune?" - print( + print( # noqa: T201 chain.invoke( {"question": original_query}, {"configurable": {"strategy": "parent_document"}}, diff --git a/templates/neo4j-cypher-ft/main.py b/templates/neo4j-cypher-ft/main.py index 490d454253..36b7ac41e3 100644 --- a/templates/neo4j-cypher-ft/main.py +++ b/templates/neo4j-cypher-ft/main.py @@ -2,4 +2,4 @@ from neo4j_cypher_ft.chain import chain if __name__ == "__main__": original_query = "Did tom cruis act in top gun?" 
- print(chain.invoke({"question": original_query})) + print(chain.invoke({"question": original_query})) # noqa: T201 diff --git a/templates/neo4j-cypher-memory/main.py b/templates/neo4j-cypher-memory/main.py index 735fac9965..0250de251b 100644 --- a/templates/neo4j-cypher-memory/main.py +++ b/templates/neo4j-cypher-memory/main.py @@ -2,7 +2,7 @@ from neo4j_cypher_memory.chain import chain if __name__ == "__main__": original_query = "Who played in Top Gun?" - print( + print( # noqa: T201 chain.invoke( { "question": original_query, @@ -12,7 +12,7 @@ if __name__ == "__main__": ) ) follow_up_query = "Did they play in any other movies?" - print( + print( # noqa: T201 chain.invoke( { "question": follow_up_query, diff --git a/templates/neo4j-cypher/main.py b/templates/neo4j-cypher/main.py index 611cbe2aff..7962b8f0ff 100644 --- a/templates/neo4j-cypher/main.py +++ b/templates/neo4j-cypher/main.py @@ -2,4 +2,4 @@ from neo4j_cypher.chain import chain if __name__ == "__main__": original_query = "Who played in Top Gun?" - print(chain.invoke({"question": original_query})) + print(chain.invoke({"question": original_query})) # noqa: T201 diff --git a/templates/neo4j-generation/main.py b/templates/neo4j-generation/main.py index 578a18013f..1068b9fe10 100644 --- a/templates/neo4j-generation/main.py +++ b/templates/neo4j-generation/main.py @@ -4,7 +4,7 @@ if __name__ == "__main__": text = "Harrison works at LangChain, which is located in San Francisco" allowed_nodes = ["Person", "Organization", "Location"] allowed_relationships = ["WORKS_AT", "LOCATED_IN"] - print( + print( # noqa: T201 chain( text, allowed_nodes=allowed_nodes, diff --git a/templates/neo4j-parent/main.py b/templates/neo4j-parent/main.py index ac52947e3f..e109d57e5f 100644 --- a/templates/neo4j-parent/main.py +++ b/templates/neo4j-parent/main.py @@ -2,4 +2,4 @@ from neo4j_parent.chain import chain if __name__ == "__main__": original_query = "What is the plot of the Dune?" 
- print(chain.invoke(original_query)) + print(chain.invoke(original_query)) # noqa: T201 diff --git a/templates/neo4j-semantic-layer/main.py b/templates/neo4j-semantic-layer/main.py index 681c6a20d6..cbe3517d5a 100644 --- a/templates/neo4j-semantic-layer/main.py +++ b/templates/neo4j-semantic-layer/main.py @@ -11,7 +11,7 @@ if __name__ == "__main__": "\n\n1. John Travolta\n2. John McDonough", ) ] - print(agent_executor.invoke({"input": original_query})) - print( + print(agent_executor.invoke({"input": original_query})) # noqa: T201 + print( # noqa: T201 agent_executor.invoke({"input": followup_query, "chat_history": chat_history}) ) diff --git a/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py b/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py index 784ba8f432..6e3e0a8664 100644 --- a/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py +++ b/templates/neo4j-semantic-layer/neo4j_semantic_layer/memory_tool.py @@ -34,7 +34,7 @@ def store_movie_rating(movie: str, rating: int): try: return response[0]["response"] except Exception as e: - print(e) + print(e) # noqa: T201 return "Something went wrong" diff --git a/templates/neo4j-semantic-ollama/main.py b/templates/neo4j-semantic-ollama/main.py index d6ee3a9196..3f8fe2b283 100644 --- a/templates/neo4j-semantic-ollama/main.py +++ b/templates/neo4j-semantic-ollama/main.py @@ -11,7 +11,7 @@ if __name__ == "__main__": "\n\n1. John Travolta\n2. 
John McDonough", ) ] - print(agent_executor.invoke({"input": original_query})) - print( + print(agent_executor.invoke({"input": original_query})) # noqa: T201 + print( # noqa: T201 agent_executor.invoke({"input": followup_query, "chat_history": chat_history}) ) diff --git a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py index f455f70c4b..e0399af1d1 100644 --- a/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py +++ b/templates/neo4j-semantic-ollama/neo4j_semantic_ollama/memory_tool.py @@ -35,7 +35,7 @@ def store_movie_rating(movie: str, rating: int): try: return response[0]["response"] except Exception as e: - print(e) + print(e) # noqa: T201 return "Something went wrong" diff --git a/templates/neo4j-vector-memory/main.py b/templates/neo4j-vector-memory/main.py index 732f0954d5..ca7f16ca3c 100644 --- a/templates/neo4j-vector-memory/main.py +++ b/templates/neo4j-vector-memory/main.py @@ -4,13 +4,13 @@ if __name__ == "__main__": user_id = "user_id_1" session_id = "session_id_1" original_query = "What is the plot of the Dune?" - print( + print( # noqa: T201 chain.invoke( {"question": original_query, "user_id": user_id, "session_id": session_id} ) ) follow_up_query = "Tell me more about Leto" - print( + print( # noqa: T201 chain.invoke( {"question": follow_up_query, "user_id": user_id, "session_id": session_id} ) diff --git a/templates/nvidia-rag-canonical/ingest.py b/templates/nvidia-rag-canonical/ingest.py index 8df12d6492..1abdc9ffa6 100644 --- a/templates/nvidia-rag-canonical/ingest.py +++ b/templates/nvidia-rag-canonical/ingest.py @@ -7,7 +7,7 @@ from langchain.vectorstores.milvus import Milvus from langchain_nvidia_aiplay import NVIDIAEmbeddings if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): - print("Valid NVIDIA_API_KEY already in environment. Delete to reset") + print("Valid NVIDIA_API_KEY already in environment. 
Delete to reset") # noqa: T201 else: nvapi_key = getpass.getpass("NVAPI Key (starts with nvapi-): ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" diff --git a/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py b/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py index 47a2adb68a..5e7eb24c54 100644 --- a/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py +++ b/templates/nvidia-rag-canonical/nvidia_rag_canonical/chain.py @@ -23,7 +23,7 @@ INGESTION_CHUNK_SIZE = 500 INGESTION_CHUNK_OVERLAP = 0 if os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): - print("Valid NVIDIA_API_KEY already in environment. Delete to reset") + print("Valid NVIDIA_API_KEY already in environment. Delete to reset") # noqa: T201 else: nvapi_key = getpass.getpass("NVAPI Key (starts with nvapi-): ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" diff --git a/templates/openai-functions-agent-gmail/main.py b/templates/openai-functions-agent-gmail/main.py index 90fcfcc30d..93e23ef6d4 100644 --- a/templates/openai-functions-agent-gmail/main.py +++ b/templates/openai-functions-agent-gmail/main.py @@ -6,4 +6,4 @@ if __name__ == "__main__": "First do background research on the sender and topics to make sure you" " understand the context, then write the draft." ) - print(agent_executor.invoke({"input": question, "chat_history": []})) + print(agent_executor.invoke({"input": question, "chat_history": []})) # noqa: T201 diff --git a/templates/openai-functions-agent/main.py b/templates/openai-functions-agent/main.py index f0e1f5963f..0e612d69f4 100644 --- a/templates/openai-functions-agent/main.py +++ b/templates/openai-functions-agent/main.py @@ -2,4 +2,4 @@ from openai_functions_agent.agent import agent_executor if __name__ == "__main__": question = "who won the womens world cup in 2023?" 
- print(agent_executor.invoke({"input": question, "chat_history": []})) + print(agent_executor.invoke({"input": question, "chat_history": []})) # noqa: T201 diff --git a/templates/rag-astradb/astradb_entomology_rag/__init__.py b/templates/rag-astradb/astradb_entomology_rag/__init__.py index 7997d5909a..3f8e807cab 100644 --- a/templates/rag-astradb/astradb_entomology_rag/__init__.py +++ b/templates/rag-astradb/astradb_entomology_rag/__init__.py @@ -26,7 +26,7 @@ retriever = vector_store.as_retriever(search_kwargs={"k": 3}) inserted_lines = populate(vector_store) if inserted_lines: - print(f"Done ({inserted_lines} lines inserted).") + print(f"Done ({inserted_lines} lines inserted).") # noqa: T201 entomology_template = """ You are an expert entomologist, tasked with answering enthusiast biologists' questions. diff --git a/templates/rag-astradb/main.py b/templates/rag-astradb/main.py index f80b1b6626..e049d7f743 100644 --- a/templates/rag-astradb/main.py +++ b/templates/rag-astradb/main.py @@ -2,4 +2,4 @@ from astradb_entomology_rag import chain if __name__ == "__main__": response = chain.invoke("Are there more coleoptera or bugs?") - print(response) + print(response) # noqa: T201 diff --git a/templates/rag-aws-bedrock/main.py b/templates/rag-aws-bedrock/main.py index d0a3c2f48c..7be600d6bc 100644 --- a/templates/rag-aws-bedrock/main.py +++ b/templates/rag-aws-bedrock/main.py @@ -3,4 +3,4 @@ from rag_aws_bedrock.chain import chain if __name__ == "__main__": query = "What is this data about?" - print(chain.invoke(query)) + print(chain.invoke(query)) # noqa: T201 diff --git a/templates/rag-aws-kendra/main.py b/templates/rag-aws-kendra/main.py index ceb1daa7af..a44556cfe3 100644 --- a/templates/rag-aws-kendra/main.py +++ b/templates/rag-aws-kendra/main.py @@ -3,4 +3,4 @@ from rag_aws_kendra.chain import chain if __name__ == "__main__": query = "Does Kendra support table extraction?" 
- print(chain.invoke(query)) + print(chain.invoke(query)) # noqa: T201 diff --git a/templates/rag-chroma-multi-modal-multi-vector/ingest.py b/templates/rag-chroma-multi-modal-multi-vector/ingest.py index 9447ca478e..941a8b6670 100644 --- a/templates/rag-chroma-multi-modal-multi-vector/ingest.py +++ b/templates/rag-chroma-multi-modal-multi-vector/ingest.py @@ -66,7 +66,7 @@ def generate_img_summaries(img_base64_list): image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: - print(f"Error with image {i+1}: {e}") + print(f"Error with image {i+1}: {e}") # noqa: T201 return image_summaries, processed_images @@ -178,14 +178,14 @@ def create_multi_vector_retriever( # Load PDF doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" rel_doc_path = doc_path.relative_to(Path.cwd()) -print("Extract slides as images") +print("Extract slides as images") # noqa: T201 pil_images = get_images_from_pdf(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Image summaries -print("Generate image summaries") +print("Generate image summaries") # noqa: T201 image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # The vectorstore to use to index the images summaries diff --git a/templates/rag-chroma-multi-modal/ingest.py b/templates/rag-chroma-multi-modal/ingest.py index 67c5f070c5..98d4f701a4 100644 --- a/templates/rag-chroma-multi-modal/ingest.py +++ b/templates/rag-chroma-multi-modal/ingest.py @@ -27,14 +27,14 @@ doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" img_dump_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -print("pdf index") +print("pdf index") # noqa: T201 pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path) -print("done") +print("done") # noqa: T201 vectorstore = Path(__file__).parent / 
"chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function -print("Loading embedding function") +print("Loading embedding function") # noqa: T201 embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") # Create chroma @@ -54,5 +54,5 @@ image_uris = sorted( ) # Add images -print("Embedding images") +print("Embedding images") # noqa: T201 vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-elasticsearch/main.py b/templates/rag-elasticsearch/main.py index 4034ab08f2..8da3a6ab06 100644 --- a/templates/rag-elasticsearch/main.py +++ b/templates/rag-elasticsearch/main.py @@ -14,7 +14,7 @@ if __name__ == "__main__": "chat_history": [], } ) - print(response) + print(response) # noqa: T201 follow_up_question = "What are their objectives?" @@ -30,4 +30,4 @@ if __name__ == "__main__": } ) - print(response) + print(response) # noqa: T201 diff --git a/templates/rag-fusion/main.py b/templates/rag-fusion/main.py index ed32889561..8d6fe45edf 100644 --- a/templates/rag-fusion/main.py +++ b/templates/rag-fusion/main.py @@ -2,4 +2,4 @@ from rag_fusion.chain import chain if __name__ == "__main__": original_query = "impact of climate change" - print(chain.invoke(original_query)) + print(chain.invoke(original_query)) # noqa: T201 diff --git a/templates/rag-gemini-multi-modal/ingest.py b/templates/rag-gemini-multi-modal/ingest.py index 67c5f070c5..98d4f701a4 100644 --- a/templates/rag-gemini-multi-modal/ingest.py +++ b/templates/rag-gemini-multi-modal/ingest.py @@ -27,14 +27,14 @@ doc_path = Path(__file__).parent / "docs/DDOG_Q3_earnings_deck.pdf" img_dump_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) rel_img_dump_path = img_dump_path.relative_to(Path.cwd()) -print("pdf index") +print("pdf index") # noqa: T201 pil_images = get_images_from_pdf(rel_doc_path, rel_img_dump_path) -print("done") +print("done") # noqa: T201 vectorstore = 
Path(__file__).parent / "chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function -print("Loading embedding function") +print("Loading embedding function") # noqa: T201 embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") # Create chroma @@ -54,5 +54,5 @@ image_uris = sorted( ) # Add images -print("Embedding images") +print("Embedding images") # noqa: T201 vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-google-cloud-sensitive-data-protection/main.py b/templates/rag-google-cloud-sensitive-data-protection/main.py index 30c6fa53c3..a69f075673 100644 --- a/templates/rag-google-cloud-sensitive-data-protection/main.py +++ b/templates/rag-google-cloud-sensitive-data-protection/main.py @@ -6,4 +6,4 @@ if __name__ == "__main__": "is 555-555-5555. And my email is lovely.pirate@gmail.com. Have a nice day.", "chat_history": [], } - print(chain.invoke(query)) + print(chain.invoke(query)) # noqa: T201 diff --git a/templates/rag-google-cloud-vertexai-search/main.py b/templates/rag-google-cloud-vertexai-search/main.py index a96c83c8bb..6912be7e2a 100644 --- a/templates/rag-google-cloud-vertexai-search/main.py +++ b/templates/rag-google-cloud-vertexai-search/main.py @@ -2,4 +2,4 @@ from rag_google_cloud_vertexai_search.chain import chain if __name__ == "__main__": query = "Who is the CEO of Google Cloud?" 
- print(chain.invoke(query)) + print(chain.invoke(query)) # noqa: T201 diff --git a/templates/rag-multi-modal-local/ingest.py b/templates/rag-multi-modal-local/ingest.py index 9aad0cf656..e1ad090af0 100644 --- a/templates/rag-multi-modal-local/ingest.py +++ b/templates/rag-multi-modal-local/ingest.py @@ -20,7 +20,7 @@ vectorstore = Path(__file__).parent / "chroma_db_multi_modal" re_vectorstore_path = vectorstore.relative_to(Path.cwd()) # Load embedding function -print("Loading embedding function") +print("Loading embedding function") # noqa: T201 embedding = OpenCLIPEmbeddings(model_name="ViT-H-14", checkpoint="laion2b_s32b_b79k") # Create chroma @@ -31,5 +31,5 @@ vectorstore_mmembd = Chroma( ) # Add images -print("Embedding images") +print("Embedding images") # noqa: T201 vectorstore_mmembd.add_images(uris=image_uris) diff --git a/templates/rag-multi-modal-mv-local/ingest.py b/templates/rag-multi-modal-mv-local/ingest.py index 4e3f711bd2..72b9dbf769 100644 --- a/templates/rag-multi-modal-mv-local/ingest.py +++ b/templates/rag-multi-modal-mv-local/ingest.py @@ -63,7 +63,7 @@ def generate_img_summaries(img_base64_list): image_summaries.append(image_summarize(base64_image, prompt)) processed_images.append(base64_image) except Exception as e: - print(f"Error with image {i+1}: {e}") + print(f"Error with image {i+1}: {e}") # noqa: T201 return image_summaries, processed_images @@ -162,14 +162,14 @@ def create_multi_vector_retriever(vectorstore, image_summaries, images): # Load images doc_path = Path(__file__).parent / "docs/" rel_doc_path = doc_path.relative_to(Path.cwd()) -print("Read images") +print("Read images") # noqa: T201 pil_images = get_images(rel_doc_path) # Convert to b64 images_base_64 = [convert_to_base64(i) for i in pil_images] # Image summaries -print("Generate image summaries") +print("Generate image summaries") # noqa: T201 image_summaries, images_base_64_processed = generate_img_summaries(images_base_64) # The vectorstore to use to index the images 
summaries diff --git a/templates/rag-opensearch/dummy_index_setup.py b/templates/rag-opensearch/dummy_index_setup.py index bb658adf51..e3bc286444 100644 --- a/templates/rag-opensearch/dummy_index_setup.py +++ b/templates/rag-opensearch/dummy_index_setup.py @@ -41,7 +41,7 @@ index_settings = { response = client.indices.create(index=OPENSEARCH_INDEX_NAME, body=index_settings) -print(response) +print(response) # noqa: T201 # Insert docs @@ -57,4 +57,4 @@ for each in docs: response = client.index(index=OPENSEARCH_INDEX_NAME, body=document, refresh=True) - print(response) + print(response) # noqa: T201 diff --git a/templates/rag-redis/ingest.py b/templates/rag-redis/ingest.py index 89ffa40019..fe7992a3cc 100644 --- a/templates/rag-redis/ingest.py +++ b/templates/rag-redis/ingest.py @@ -17,7 +17,7 @@ def ingest_documents(): data_path = "data/" doc = [os.path.join(data_path, file) for file in os.listdir(data_path)][0] - print("Parsing 10k filing doc for NIKE", doc) + print("Parsing 10k filing doc for NIKE", doc) # noqa: T201 text_splitter = RecursiveCharacterTextSplitter( chunk_size=1500, chunk_overlap=100, add_start_index=True @@ -25,7 +25,7 @@ def ingest_documents(): loader = UnstructuredFileLoader(doc, mode="single", strategy="fast") chunks = loader.load_and_split(text_splitter) - print("Done preprocessing. Created", len(chunks), "chunks of the original pdf") + print("Done preprocessing. Created", len(chunks), "chunks of the original pdf") # noqa: T201 # Create vectorstore embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL) diff --git a/templates/rag-self-query/main.py b/templates/rag-self-query/main.py index 83a1dc4c6c..8385dfaa4f 100644 --- a/templates/rag-self-query/main.py +++ b/templates/rag-self-query/main.py @@ -14,7 +14,7 @@ if __name__ == "__main__": "chat_history": [], } ) - print(response) + print(response) # noqa: T201 follow_up_question = "What are their objectives?" 
@@ -30,4 +30,4 @@ if __name__ == "__main__": } ) - print(response) + print(response) # noqa: T201 diff --git a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py index b540a72cdc..65946fef90 100644 --- a/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py +++ b/templates/rag-timescale-conversation/rag_timescale_conversation/load_sample_dataset.py @@ -47,7 +47,7 @@ def load_ts_git_dataset( with open(json_file_path, "w") as json_file: json_file.write(response.text) else: - print(f"Failed to download JSON file. Status code: {response.status_code}") + print(f"Failed to download JSON file. Status code: {response.status_code}") # noqa: T201 loader = JSONLoader( file_path=json_file_path, diff --git a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py index b540a72cdc..65946fef90 100644 --- a/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py +++ b/templates/rag-timescale-hybrid-search-time/rag_timescale_hybrid_search_time/load_sample_dataset.py @@ -47,7 +47,7 @@ def load_ts_git_dataset( with open(json_file_path, "w") as json_file: json_file.write(response.text) else: - print(f"Failed to download JSON file. Status code: {response.status_code}") + print(f"Failed to download JSON file. 
Status code: {response.status_code}") # noqa: T201 loader = JSONLoader( file_path=json_file_path, diff --git a/templates/research-assistant/research_assistant/search/web.py b/templates/research-assistant/research_assistant/search/web.py index 52ea08542d..5f647904ab 100644 --- a/templates/research-assistant/research_assistant/search/web.py +++ b/templates/research-assistant/research_assistant/search/web.py @@ -40,7 +40,7 @@ def scrape_text(url: str): else: return f"Failed to retrieve the webpage: Status code {response.status_code}" except Exception as e: - print(e) + print(e) # noqa: T201 return f"Failed to retrieve the webpage: {e}" diff --git a/templates/sql-llamacpp/sql_llamacpp/chain.py b/templates/sql-llamacpp/sql_llamacpp/chain.py index a6de376379..59ead38b14 100644 --- a/templates/sql-llamacpp/sql_llamacpp/chain.py +++ b/templates/sql-llamacpp/sql_llamacpp/chain.py @@ -19,15 +19,15 @@ url = ( ) # Check if file is present in the current directory if not os.path.exists(file_name): - print(f"'{file_name}' not found. Downloading...") + print(f"'{file_name}' not found. 
Downloading...") # noqa: T201 # Download the file response = requests.get(url) response.raise_for_status() # Raise an exception for HTTP errors with open(file_name, "wb") as f: f.write(response.content) - print(f"'{file_name}' has been downloaded.") + print(f"'{file_name}' has been downloaded.") # noqa: T201 else: - print(f"'{file_name}' already exists in the current directory.") + print(f"'{file_name}' already exists in the current directory.") # noqa: T201 # Add the LLM downloaded from HF model_path = file_name diff --git a/templates/sql-research-assistant/sql_research_assistant/chain.py b/templates/sql-research-assistant/sql_research_assistant/chain.py index d04e14efd8..56f025df92 100644 --- a/templates/sql-research-assistant/sql_research_assistant/chain.py +++ b/templates/sql-research-assistant/sql_research_assistant/chain.py @@ -17,6 +17,6 @@ chain = chain_notypes.with_types(input_type=InputType) if __name__ == "__main__": - print( + print( # noqa: T201 chain.invoke({"question": "who is typically older: point guards or centers?"}) ) diff --git a/templates/sql-research-assistant/sql_research_assistant/search/web.py b/templates/sql-research-assistant/sql_research_assistant/search/web.py index 3c1e31b85e..407077a0b0 100644 --- a/templates/sql-research-assistant/sql_research_assistant/search/web.py +++ b/templates/sql-research-assistant/sql_research_assistant/search/web.py @@ -40,7 +40,7 @@ def scrape_text(url: str): else: return f"Failed to retrieve the webpage: Status code {response.status_code}" except Exception as e: - print(e) + print(e) # noqa: T201 return f"Failed to retrieve the webpage: {e}" diff --git a/templates/xml-agent/main.py b/templates/xml-agent/main.py index 02647eb845..55786914da 100644 --- a/templates/xml-agent/main.py +++ b/templates/xml-agent/main.py @@ -2,4 +2,4 @@ from xml_agent.agent import agent_executor if __name__ == "__main__": question = "who won the womens world cup in 2023?" 
- print(agent_executor.invoke({"question": question, "chat_history": []})) + print(agent_executor.invoke({"question": question, "chat_history": []})) # noqa: T201