From c306364b06a796c46ecbbcd73d304d3982cf80b8 Mon Sep 17 00:00:00 2001 From: Eugene Yurtsev Date: Thu, 2 May 2024 09:05:48 -0400 Subject: [PATCH] langchain[patch]: Update more code to use langchain community as an optional dependency (#21170) More code to use langchain community as an optional dependency --- libs/langchain/langchain/agents/react/base.py | 8 +- .../agents/self_ask_with_search/base.py | 12 +- libs/langchain/langchain/chains/api/base.py | 361 +++++++++--------- libs/langchain/langchain/chains/loading.py | 22 +- .../chains/openai_functions/openapi.py | 9 +- .../langchain/chains/sql_database/query.py | 8 +- .../parsers/language/python.py | 28 +- .../document_loaders/pyspark_dataframe.py | 27 +- .../langchain/document_loaders/python.py | 21 +- .../embeddings/sentence_transformer.py | 22 +- .../evaluation/scoring/eval_chain.py | 7 +- libs/langchain/langchain/indexes/__init__.py | 23 +- libs/langchain/langchain/indexes/graph.py | 28 +- .../prompts/knowledge_triplet_extraction.py | 3 +- .../langchain/indexes/vectorstore.py | 23 +- .../langchain/llms/titan_takeoff_pro.py | 24 +- libs/langchain/langchain/python.py | 19 +- .../langchain/tools/ainetwork/app.py | 7 - .../langchain/tools/ainetwork/base.py | 3 - .../langchain/tools/ainetwork/owner.py | 3 - .../langchain/tools/ainetwork/rule.py | 3 - .../langchain/tools/ainetwork/transfer.py | 3 - .../langchain/tools/ainetwork/value.py | 3 - .../langchain/tools/sql_database/prompt.py | 22 +- libs/langchain/langchain/utilities/python.py | 20 +- 25 files changed, 477 insertions(+), 232 deletions(-) delete mode 100644 libs/langchain/langchain/tools/ainetwork/app.py delete mode 100644 libs/langchain/langchain/tools/ainetwork/base.py delete mode 100644 libs/langchain/langchain/tools/ainetwork/owner.py delete mode 100644 libs/langchain/langchain/tools/ainetwork/rule.py delete mode 100644 libs/langchain/langchain/tools/ainetwork/transfer.py delete mode 100644 libs/langchain/langchain/tools/ainetwork/value.py diff --git a/libs/langchain/langchain/agents/react/base.py b/libs/langchain/langchain/agents/react/base.py index 0ed388d217..cdb39256ea 100644 --- a/libs/langchain/langchain/agents/react/base.py +++ b/libs/langchain/langchain/agents/react/base.py @@ -1,7 +1,8 @@ """Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf.""" -from typing import Any, List, Optional, Sequence +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, List, Optional, Sequence -from langchain_community.docstore.base import Docstore from langchain_core._api import deprecated from langchain_core.documents import Document from langchain_core.language_models import BaseLanguageModel @@ -16,6 +17,9 @@ from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT from langchain.agents.react.wiki_prompt import WIKI_PROMPT from langchain.agents.utils import validate_tools_single_input +if TYPE_CHECKING: + from langchain_community.docstore.base import Docstore + @deprecated("0.1.0", removal="0.2.0") class ReActDocstoreAgent(Agent): diff --git a/libs/langchain/langchain/agents/self_ask_with_search/base.py b/libs/langchain/langchain/agents/self_ask_with_search/base.py index bf7cf5ab77..27108aa97b 100644 --- a/libs/langchain/langchain/agents/self_ask_with_search/base.py +++ b/libs/langchain/langchain/agents/self_ask_with_search/base.py @@ -1,9 +1,8 @@ """Chain that does self-ask with search.""" -from typing import Any, Sequence, Union +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Sequence, Union 
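The react/base.py hunk above shows the first pattern this patch applies repeatedly: the langchain_community import moves under `if TYPE_CHECKING:`, and `from __future__ import annotations` keeps annotations lazy, so the community package is only required when the symbol is actually used at runtime. A minimal, self-contained sketch of that pattern follows; the ExplorerTool class and its docstore parameter are illustrative, not part of the patch.

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only evaluated by type checkers; no runtime dependency on the package.
    from langchain_community.docstore.base import Docstore


class ExplorerTool:
    """Illustrative consumer of an optional, type-only dependency."""

    def __init__(self, docstore: Docstore) -> None:
        # With postponed evaluation of annotations (PEP 563), the annotation
        # above is never resolved at runtime, so importing this module does
        # not require langchain_community to be installed.
        self.docstore = docstore
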
-from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper -from langchain_community.utilities.searchapi import SearchApiAPIWrapper -from langchain_community.utilities.serpapi import SerpAPIWrapper from langchain_core._api import deprecated from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate @@ -18,6 +17,11 @@ from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputPar from langchain.agents.self_ask_with_search.prompt import PROMPT from langchain.agents.utils import validate_tools_single_input +if TYPE_CHECKING: + from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper + from langchain_community.utilities.searchapi import SearchApiAPIWrapper + from langchain_community.utilities.serpapi import SerpAPIWrapper + @deprecated("0.1.0", alternative="create_self_ask_with_search", removal="0.2.0") class SelfAskWithSearchAgent(Agent): diff --git a/libs/langchain/langchain/chains/api/base.py b/libs/langchain/langchain/chains/api/base.py index 212c04845c..b3cba05a37 100644 --- a/libs/langchain/langchain/chains/api/base.py +++ b/libs/langchain/langchain/chains/api/base.py @@ -4,7 +4,6 @@ from __future__ import annotations from typing import Any, Dict, List, Optional, Sequence, Tuple from urllib.parse import urlparse -from langchain_community.utilities.requests import TextRequestsWrapper from langchain_core.callbacks import ( AsyncCallbackManagerForChainRun, CallbackManagerForChainRun, @@ -50,189 +49,205 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool: return False -class APIChain(Chain): - """Chain that makes API calls and summarizes the responses to answer a question. +try: + from langchain_community.utilities.requests import TextRequestsWrapper - *Security Note*: This API chain uses the requests toolkit - to make GET, POST, PATCH, PUT, and DELETE requests to an API. + class APIChain(Chain): + """Chain that makes API calls and summarizes the responses to answer a question. - Exercise care in who is allowed to use this chain. If exposing - to end users, consider that users will be able to make arbitrary - requests on behalf of the server hosting the code. For example, - users could ask the server to make a request to a private API - that is only accessible from the server. + *Security Note*: This API chain uses the requests toolkit + to make GET, POST, PATCH, PUT, and DELETE requests to an API. - Control access to who can submit issue requests using this toolkit and - what network access it has. + Exercise care in who is allowed to use this chain. If exposing + to end users, consider that users will be able to make arbitrary + requests on behalf of the server hosting the code. For example, + users could ask the server to make a request to a private API + that is only accessible from the server. - See https://python.langchain.com/docs/security for more information. - """ - - api_request_chain: LLMChain - api_answer_chain: LLMChain - requests_wrapper: TextRequestsWrapper = Field(exclude=True) - api_docs: str - question_key: str = "question" #: :meta private: - output_key: str = "output" #: :meta private: - limit_to_domains: Optional[Sequence[str]] - """Use to limit the domains that can be accessed by the API chain. - - * For example, to limit to just the domain `https://www.example.com`, set - `limit_to_domains=["https://www.example.com"]`. - - * The default value is an empty tuple, which means that no domains are - allowed by default. 
By design this will raise an error on instantiation. - * Use a None if you want to allow all domains by default -- this is not - recommended for security reasons, as it would allow malicious users to - make requests to arbitrary URLS including internal APIs accessible from - the server. - """ + Control access to who can submit issue requests using this toolkit and + what network access it has. - @property - def input_keys(self) -> List[str]: - """Expect input key. - - :meta private: + See https://python.langchain.com/docs/security for more information. """ - return [self.question_key] - - @property - def output_keys(self) -> List[str]: - """Expect output key. - :meta private: + api_request_chain: LLMChain + api_answer_chain: LLMChain + requests_wrapper: TextRequestsWrapper = Field(exclude=True) + api_docs: str + question_key: str = "question" #: :meta private: + output_key: str = "output" #: :meta private: + limit_to_domains: Optional[Sequence[str]] + """Use to limit the domains that can be accessed by the API chain. + + * For example, to limit to just the domain `https://www.example.com`, set + `limit_to_domains=["https://www.example.com"]`. + + * The default value is an empty tuple, which means that no domains are + allowed by default. By design this will raise an error on instantiation. + * Use a None if you want to allow all domains by default -- this is not + recommended for security reasons, as it would allow malicious users to + make requests to arbitrary URLS including internal APIs accessible from + the server. """ - return [self.output_key] - - @root_validator(pre=True) - def validate_api_request_prompt(cls, values: Dict) -> Dict: - """Check that api request prompt expects the right variables.""" - input_vars = values["api_request_chain"].prompt.input_variables - expected_vars = {"question", "api_docs"} - if set(input_vars) != expected_vars: - raise ValueError( - f"Input variables should be {expected_vars}, got {input_vars}" + + @property + def input_keys(self) -> List[str]: + """Expect input key. + + :meta private: + """ + return [self.question_key] + + @property + def output_keys(self) -> List[str]: + """Expect output key. + + :meta private: + """ + return [self.output_key] + + @root_validator(pre=True) + def validate_api_request_prompt(cls, values: Dict) -> Dict: + """Check that api request prompt expects the right variables.""" + input_vars = values["api_request_chain"].prompt.input_variables + expected_vars = {"question", "api_docs"} + if set(input_vars) != expected_vars: + raise ValueError( + f"Input variables should be {expected_vars}, got {input_vars}" + ) + return values + + @root_validator(pre=True) + def validate_limit_to_domains(cls, values: Dict) -> Dict: + """Check that allowed domains are valid.""" + if "limit_to_domains" not in values: + raise ValueError( + "You must specify a list of domains to limit access using " + "`limit_to_domains`" + ) + if ( + not values["limit_to_domains"] + and values["limit_to_domains"] is not None + ): + raise ValueError( + "Please provide a list of domains to limit access using " + "`limit_to_domains`." 
+ ) + return values + + @root_validator(pre=True) + def validate_api_answer_prompt(cls, values: Dict) -> Dict: + """Check that api answer prompt expects the right variables.""" + input_vars = values["api_answer_chain"].prompt.input_variables + expected_vars = {"question", "api_docs", "api_url", "api_response"} + if set(input_vars) != expected_vars: + raise ValueError( + f"Input variables should be {expected_vars}, got {input_vars}" + ) + return values + + def _call( + self, + inputs: Dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> Dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + question = inputs[self.question_key] + api_url = self.api_request_chain.predict( + question=question, + api_docs=self.api_docs, + callbacks=_run_manager.get_child(), + ) + _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) + api_url = api_url.strip() + if self.limit_to_domains and not _check_in_allowed_domain( + api_url, self.limit_to_domains + ): + raise ValueError( + f"{api_url} is not in the allowed domains: {self.limit_to_domains}" + ) + api_response = self.requests_wrapper.get(api_url) + _run_manager.on_text( + str(api_response), color="yellow", end="\n", verbose=self.verbose + ) + answer = self.api_answer_chain.predict( + question=question, + api_docs=self.api_docs, + api_url=api_url, + api_response=api_response, + callbacks=_run_manager.get_child(), + ) + return {self.output_key: answer} + + async def _acall( + self, + inputs: Dict[str, Any], + run_manager: Optional[AsyncCallbackManagerForChainRun] = None, + ) -> Dict[str, str]: + _run_manager = ( + run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() ) - return values - - @root_validator(pre=True) - def validate_limit_to_domains(cls, values: Dict) -> Dict: - """Check that allowed domains are valid.""" - if "limit_to_domains" not in values: - raise ValueError( - "You must specify a list of domains to limit access using " - "`limit_to_domains`" + question = inputs[self.question_key] + api_url = await self.api_request_chain.apredict( + question=question, + api_docs=self.api_docs, + callbacks=_run_manager.get_child(), ) - if not values["limit_to_domains"] and values["limit_to_domains"] is not None: - raise ValueError( - "Please provide a list of domains to limit access using " - "`limit_to_domains`." 
+ await _run_manager.on_text( + api_url, color="green", end="\n", verbose=self.verbose ) - return values - - @root_validator(pre=True) - def validate_api_answer_prompt(cls, values: Dict) -> Dict: - """Check that api answer prompt expects the right variables.""" - input_vars = values["api_answer_chain"].prompt.input_variables - expected_vars = {"question", "api_docs", "api_url", "api_response"} - if set(input_vars) != expected_vars: - raise ValueError( - f"Input variables should be {expected_vars}, got {input_vars}" + api_url = api_url.strip() + if self.limit_to_domains and not _check_in_allowed_domain( + api_url, self.limit_to_domains + ): + raise ValueError( + f"{api_url} is not in the allowed domains: {self.limit_to_domains}" + ) + api_response = await self.requests_wrapper.aget(api_url) + await _run_manager.on_text( + str(api_response), color="yellow", end="\n", verbose=self.verbose ) - return values - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - question = inputs[self.question_key] - api_url = self.api_request_chain.predict( - question=question, - api_docs=self.api_docs, - callbacks=_run_manager.get_child(), - ) - _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) - api_url = api_url.strip() - if self.limit_to_domains and not _check_in_allowed_domain( - api_url, self.limit_to_domains - ): - raise ValueError( - f"{api_url} is not in the allowed domains: {self.limit_to_domains}" + answer = await self.api_answer_chain.apredict( + question=question, + api_docs=self.api_docs, + api_url=api_url, + api_response=api_response, + callbacks=_run_manager.get_child(), ) - api_response = self.requests_wrapper.get(api_url) - _run_manager.on_text( - str(api_response), color="yellow", end="\n", verbose=self.verbose - ) - answer = self.api_answer_chain.predict( - question=question, - api_docs=self.api_docs, - api_url=api_url, - api_response=api_response, - callbacks=_run_manager.get_child(), - ) - return {self.output_key: answer} - - async def _acall( - self, - inputs: Dict[str, Any], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() - question = inputs[self.question_key] - api_url = await self.api_request_chain.apredict( - question=question, - api_docs=self.api_docs, - callbacks=_run_manager.get_child(), - ) - await _run_manager.on_text( - api_url, color="green", end="\n", verbose=self.verbose - ) - api_url = api_url.strip() - if self.limit_to_domains and not _check_in_allowed_domain( - api_url, self.limit_to_domains - ): - raise ValueError( - f"{api_url} is not in the allowed domains: {self.limit_to_domains}" + return {self.output_key: answer} + + @classmethod + def from_llm_and_api_docs( + cls, + llm: BaseLanguageModel, + api_docs: str, + headers: Optional[dict] = None, + api_url_prompt: BasePromptTemplate = API_URL_PROMPT, + api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, + limit_to_domains: Optional[Sequence[str]] = tuple(), + **kwargs: Any, + ) -> APIChain: + """Load chain from just an LLM and the api docs.""" + get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) + requests_wrapper = TextRequestsWrapper(headers=headers) + get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) + return cls( + api_request_chain=get_request_chain, + 
api_answer_chain=get_answer_chain, + requests_wrapper=requests_wrapper, + api_docs=api_docs, + limit_to_domains=limit_to_domains, + **kwargs, + ) + + @property + def _chain_type(self) -> str: + return "api_chain" +except ImportError: + + class APIChain: # type: ignore[no-redef] + def __init__(self, *args: Any, **kwargs: Any) -> None: + raise ImportError( + "To use the APIChain, you must install the langchain_community package." + "pip install langchain_community" ) - api_response = await self.requests_wrapper.aget(api_url) - await _run_manager.on_text( - str(api_response), color="yellow", end="\n", verbose=self.verbose - ) - answer = await self.api_answer_chain.apredict( - question=question, - api_docs=self.api_docs, - api_url=api_url, - api_response=api_response, - callbacks=_run_manager.get_child(), - ) - return {self.output_key: answer} - - @classmethod - def from_llm_and_api_docs( - cls, - llm: BaseLanguageModel, - api_docs: str, - headers: Optional[dict] = None, - api_url_prompt: BasePromptTemplate = API_URL_PROMPT, - api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT, - limit_to_domains: Optional[Sequence[str]] = tuple(), - **kwargs: Any, - ) -> APIChain: - """Load chain from just an LLM and the api docs.""" - get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) - requests_wrapper = TextRequestsWrapper(headers=headers) - get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) - return cls( - api_request_chain=get_request_chain, - api_answer_chain=get_answer_chain, - requests_wrapper=requests_wrapper, - api_docs=api_docs, - limit_to_domains=limit_to_domains, - **kwargs, - ) - - @property - def _chain_type(self) -> str: - return "api_chain" diff --git a/libs/langchain/langchain/chains/loading.py b/libs/langchain/langchain/chains/loading.py index 5f9ef01f98..97798e7f15 100644 --- a/libs/langchain/langchain/chains/loading.py +++ b/libs/langchain/langchain/chains/loading.py @@ -5,7 +5,6 @@ from pathlib import Path from typing import Any, Union import yaml -from langchain_community.llms.loading import load_llm, load_llm_from_config from langchain_core.prompts.loading import ( _load_output_parser, load_prompt, @@ -30,6 +29,27 @@ from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesCha from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA +try: + from langchain_community.llms.loading import load_llm, load_llm_from_config +except ImportError: + + def load_llm(*args: Any, **kwargs: Any) -> None: # type: ignore + raise ImportError( + "To use this load_llm functionality you must install the " + "langchain_community package. " + "You can install it with `pip install langchain_community`" + ) + + def load_llm_from_config( # type: ignore + *args: Any, **kwargs: Any + ) -> None: + raise ImportError( + "To use this load_llm_from_config functionality you must install the " + "langchain_community package. 
" + "You can install it with `pip install langchain_community`" + ) + + URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/" diff --git a/libs/langchain/langchain/chains/openai_functions/openapi.py b/libs/langchain/langchain/chains/openai_functions/openapi.py index b7c98348ea..2c3eeebad1 100644 --- a/libs/langchain/langchain/chains/openai_functions/openapi.py +++ b/libs/langchain/langchain/chains/openai_functions/openapi.py @@ -6,7 +6,6 @@ from collections import defaultdict from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union import requests -from langchain_community.utilities.openapi import OpenAPISpec from langchain_core.callbacks import CallbackManagerForChainRun from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser @@ -20,6 +19,7 @@ from langchain.chains.sequential import SequentialChain from langchain.tools import APIOperation if TYPE_CHECKING: + from langchain_community.utilities.openapi import OpenAPISpec from openapi_pydantic import Parameter @@ -255,6 +255,13 @@ def get_openapi_chain( prompt: Main prompt template to use. request_chain: Chain for taking the functions output and executing the request. """ + try: + from langchain_community.utilities.openapi import OpenAPISpec + except ImportError as e: + raise ImportError( + "Could not import langchain_community.utilities.openapi. " + "Please install it with `pip install langchain-community`." + ) from e if isinstance(spec, str): for conversion in ( OpenAPISpec.from_url, diff --git a/libs/langchain/langchain/chains/sql_database/query.py b/libs/langchain/langchain/chains/sql_database/query.py index 6bd074c716..bf5feef483 100644 --- a/libs/langchain/langchain/chains/sql_database/query.py +++ b/libs/langchain/langchain/chains/sql_database/query.py @@ -1,6 +1,7 @@ -from typing import Any, Dict, List, Optional, TypedDict, Union +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypedDict, Union -from langchain_community.utilities.sql_database import SQLDatabase from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import BasePromptTemplate @@ -8,6 +9,9 @@ from langchain_core.runnables import Runnable, RunnablePassthrough from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS +if TYPE_CHECKING: + from langchain_community.utilities.sql_database import SQLDatabase + def _strip(text: str) -> str: return text.strip() diff --git a/libs/langchain/langchain/document_loaders/parsers/language/python.py b/libs/langchain/langchain/document_loaders/parsers/language/python.py index b96ae374fb..79a702792d 100644 --- a/libs/langchain/langchain/document_loaders/parsers/language/python.py +++ b/libs/langchain/langchain/document_loaders/parsers/language/python.py @@ -1,3 +1,27 @@ -from langchain_community.document_loaders.parsers.language.python import PythonSegmenter +from typing import TYPE_CHECKING, Any -__all__ = ["PythonSegmenter"] +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.parsers.language.python import ( + PythonSegmenter, + ) + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "PythonSegmenter": "langchain_community.document_loaders.parsers.language.python" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "PythonSegmenter", +] diff --git a/libs/langchain/langchain/document_loaders/pyspark_dataframe.py b/libs/langchain/langchain/document_loaders/pyspark_dataframe.py index dacec21078..1c870359a5 100644 --- a/libs/langchain/langchain/document_loaders/pyspark_dataframe.py +++ b/libs/langchain/langchain/document_loaders/pyspark_dataframe.py @@ -1,5 +1,26 @@ -from langchain_community.document_loaders.pyspark_dataframe import ( - PySparkDataFrameLoader, -) +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.pyspark_dataframe import ( + PySparkDataFrameLoader, + ) + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "PySparkDataFrameLoader": "langchain_community.document_loaders.pyspark_dataframe" +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + __all__ = ["PySparkDataFrameLoader"] diff --git a/libs/langchain/langchain/document_loaders/python.py b/libs/langchain/langchain/document_loaders/python.py index 1f37f2f112..04907a0b08 100644 --- a/libs/langchain/langchain/document_loaders/python.py +++ b/libs/langchain/langchain/document_loaders/python.py @@ -1,3 +1,22 @@ -from langchain_community.document_loaders.python import PythonLoader +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.document_loaders.python import PythonLoader + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"PythonLoader": "langchain_community.document_loaders.python"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + __all__ = ["PythonLoader"] diff --git a/libs/langchain/langchain/embeddings/sentence_transformer.py b/libs/langchain/langchain/embeddings/sentence_transformer.py index 8fe3f928cd..ea0e60b805 100644 --- a/libs/langchain/langchain/embeddings/sentence_transformer.py +++ b/libs/langchain/langchain/embeddings/sentence_transformer.py @@ -1,5 +1,21 @@ -from langchain_community.embeddings.sentence_transformer import ( - SentenceTransformerEmbeddings, -) +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.embeddings import SentenceTransformerEmbeddings + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = {"SentenceTransformerEmbeddings": "langchain_community.embeddings"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + __all__ = ["SentenceTransformerEmbeddings"] diff --git a/libs/langchain/langchain/evaluation/scoring/eval_chain.py b/libs/langchain/langchain/evaluation/scoring/eval_chain.py index 755ebc61eb..c73de5f21a 100644 --- a/libs/langchain/langchain/evaluation/scoring/eval_chain.py +++ b/libs/langchain/langchain/evaluation/scoring/eval_chain.py @@ -5,8 +5,6 @@ import logging import re from typing import Any, Dict, List, Optional, Union -from langchain_community.chat_models.azure_openai import AzureChatOpenAI -from langchain_community.chat_models.openai import ChatOpenAI from langchain_core.callbacks.manager import Callbacks from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers import BaseOutputParser @@ -258,10 +256,7 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain): ValueError: If the input variables are not as expected. """ - if not ( - isinstance(llm, (ChatOpenAI, AzureChatOpenAI)) - and llm.model_name.startswith("gpt-4") - ): + if not (hasattr(llm, "model_name") and not llm.model_name.startswith("gpt-4")): logger.warning( "This chain was only tested with GPT-4. \ Performance may be significantly worse with other models." diff --git a/libs/langchain/langchain/indexes/__init__.py b/libs/langchain/langchain/indexes/__init__.py index 7e928112d0..05c9f429f2 100644 --- a/libs/langchain/langchain/indexes/__init__.py +++ b/libs/langchain/langchain/indexes/__init__.py @@ -11,12 +11,33 @@ Importantly, Index keeps on working even if the content being written is derived via a set of transformations from some source content (e.g., indexing children documents that were derived from parent documents by chunking.) """ -from langchain_community.graphs.index_creator import GraphIndexCreator +from typing import TYPE_CHECKING, Any + from langchain_core.indexing.api import IndexingResult, aindex, index +from langchain._api import create_importer from langchain.indexes._sql_record_manager import SQLRecordManager from langchain.indexes.vectorstore import VectorstoreIndexCreator +if TYPE_CHECKING: + from langchain_community.graphs.index_creator import GraphIndexCreator + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. 
+DEPRECATED_LOOKUP = { + "GraphIndexCreator": "langchain_community.graphs.index_creator", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + __all__ = [ # Keep sorted "aindex", diff --git a/libs/langchain/langchain/indexes/graph.py b/libs/langchain/langchain/indexes/graph.py index aeaa1c21e2..9aaff5a5ed 100644 --- a/libs/langchain/langchain/indexes/graph.py +++ b/libs/langchain/langchain/indexes/graph.py @@ -1,5 +1,27 @@ -"""Graph Index Creator.""" -from langchain_community.graphs.index_creator import GraphIndexCreator -from langchain_community.graphs.networkx_graph import NetworkxEntityGraph +"""**Graphs** provide a natural language interface to graph databases.""" +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.graphs.index_creator import GraphIndexCreator + from langchain_community.graphs.networkx_graph import NetworkxEntityGraph + + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = { + "GraphIndexCreator": "langchain_community.graphs.index_creator", + "NetworkxEntityGraph": "langchain_community.graphs.networkx_graph", +} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + __all__ = ["GraphIndexCreator", "NetworkxEntityGraph"] diff --git a/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py b/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py index 0176dd428d..47f59d9d11 100644 --- a/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py +++ b/libs/langchain/langchain/indexes/prompts/knowledge_triplet_extraction.py @@ -1,8 +1,9 @@ # flake8: noqa -from langchain_community.graphs.networkx_graph import KG_TRIPLE_DELIMITER from langchain_core.prompts.prompt import PromptTemplate +KG_TRIPLE_DELIMITER = "<|>" + _DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = ( "You are a networked intelligence helping a human track knowledge triples" " about all relevant people, things, concepts, etc. and integrating" diff --git a/libs/langchain/langchain/indexes/vectorstore.py b/libs/langchain/langchain/indexes/vectorstore.py index 88439b85cf..55e773ebdc 100644 --- a/libs/langchain/langchain/indexes/vectorstore.py +++ b/libs/langchain/langchain/indexes/vectorstore.py @@ -1,6 +1,5 @@ from typing import Any, Dict, List, Optional, Type -from langchain_community.vectorstores.inmemory import InMemoryVectorStore from langchain_core.document_loaders import BaseLoader from langchain_core.documents import Document from langchain_core.embeddings import Embeddings @@ -117,10 +116,30 @@ class VectorStoreIndexWrapper(BaseModel): return await chain.ainvoke({chain.question_key: question}) +def _get_in_memory_vectorstore() -> Type[VectorStore]: + """Get the InMemoryVectorStore.""" + import warnings + + try: + from langchain_community.vectorstores.inmemory import InMemoryVectorStore + except ImportError: + raise ImportError( + "Please install langchain-community to use the InMemoryVectorStore." + ) + warnings.warn( + "Using InMemoryVectorStore as the default vectorstore." + "This memory store won't persist data. 
You should explicitly" + "specify a vectorstore when using VectorstoreIndexCreator" + ) + return InMemoryVectorStore + + class VectorstoreIndexCreator(BaseModel): """Logic for creating indexes.""" - vectorstore_cls: Type[VectorStore] = InMemoryVectorStore + vectorstore_cls: Type[VectorStore] = Field( + default_factory=_get_in_memory_vectorstore + ) embedding: Embeddings text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter) vectorstore_kwargs: dict = Field(default_factory=dict) diff --git a/libs/langchain/langchain/llms/titan_takeoff_pro.py b/libs/langchain/langchain/llms/titan_takeoff_pro.py index 0d323c197e..fde6a12ce0 100644 --- a/libs/langchain/langchain/llms/titan_takeoff_pro.py +++ b/libs/langchain/langchain/llms/titan_takeoff_pro.py @@ -1,3 +1,23 @@ -from langchain_community.llms.titan_takeoff import TitanTakeoff as TitanTakeoffPro +from typing import TYPE_CHECKING, Any -__all__ = ["TitanTakeoffPro"] +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.llms import TitanTakeoffPro + +# Create a way to dynamically look up deprecated imports. +# Used to consolidate logic for raising deprecation warnings and +# handling optional imports. +DEPRECATED_LOOKUP = {"TitanTakeoffPro": "langchain_community.llms"} + +_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _import_attribute(name) + + +__all__ = [ + "TitanTakeoffPro", +] diff --git a/libs/langchain/langchain/python.py b/libs/langchain/langchain/python.py index 6668535880..92397f0d86 100644 --- a/libs/langchain/langchain/python.py +++ b/libs/langchain/langchain/python.py @@ -1,4 +1,21 @@ """For backwards compatibility.""" -from langchain_community.utilities.python import PythonREPL +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.python import PythonREPL + + +_importer = create_importer( + __package__, + deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"}, +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) + __all__ = ["PythonREPL"] diff --git a/libs/langchain/langchain/tools/ainetwork/app.py b/libs/langchain/langchain/tools/ainetwork/app.py deleted file mode 100644 index 1b0976670a..0000000000 --- a/libs/langchain/langchain/tools/ainetwork/app.py +++ /dev/null @@ -1,7 +0,0 @@ -from langchain_community.tools.ainetwork.app import ( - AINAppOps, - AppOperationType, - AppSchema, -) - -__all__ = ["AppOperationType", "AppSchema", "AINAppOps"] diff --git a/libs/langchain/langchain/tools/ainetwork/base.py b/libs/langchain/langchain/tools/ainetwork/base.py deleted file mode 100644 index 357a3d142a..0000000000 --- a/libs/langchain/langchain/tools/ainetwork/base.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType - -__all__ = ["OperationType", "AINBaseTool"] diff --git a/libs/langchain/langchain/tools/ainetwork/owner.py b/libs/langchain/langchain/tools/ainetwork/owner.py deleted file mode 100644 index bc21b2f48e..0000000000 --- a/libs/langchain/langchain/tools/ainetwork/owner.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_community.tools.ainetwork.owner import AINOwnerOps, RuleSchema - -__all__ = ["RuleSchema", "AINOwnerOps"] diff --git a/libs/langchain/langchain/tools/ainetwork/rule.py 
b/libs/langchain/langchain/tools/ainetwork/rule.py deleted file mode 100644 index 36c1bfef3f..0000000000 --- a/libs/langchain/langchain/tools/ainetwork/rule.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_community.tools.ainetwork.rule import AINRuleOps, RuleSchema - -__all__ = ["RuleSchema", "AINRuleOps"] diff --git a/libs/langchain/langchain/tools/ainetwork/transfer.py b/libs/langchain/langchain/tools/ainetwork/transfer.py deleted file mode 100644 index 6cf3eeb6bf..0000000000 --- a/libs/langchain/langchain/tools/ainetwork/transfer.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_community.tools.ainetwork.transfer import AINTransfer, TransferSchema - -__all__ = ["TransferSchema", "AINTransfer"] diff --git a/libs/langchain/langchain/tools/ainetwork/value.py b/libs/langchain/langchain/tools/ainetwork/value.py deleted file mode 100644 index c730c20fa1..0000000000 --- a/libs/langchain/langchain/tools/ainetwork/value.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_community.tools.ainetwork.value import AINValueOps, ValueSchema - -__all__ = ["ValueSchema", "AINValueOps"] diff --git a/libs/langchain/langchain/tools/sql_database/prompt.py b/libs/langchain/langchain/tools/sql_database/prompt.py index 8eea844dd6..27b3ee6c6b 100644 --- a/libs/langchain/langchain/tools/sql_database/prompt.py +++ b/libs/langchain/langchain/tools/sql_database/prompt.py @@ -1,3 +1,23 @@ -from langchain_community.tools.sql_database.prompt import QUERY_CHECKER +"""For backwards compatibility.""" +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.tools.sql_database.prompt import QUERY_CHECKER + + +_importer = create_importer( + __package__, + deprecated_lookups={ + "QUERY_CHECKER": "langchain_community.tools.sql_database.prompt", + }, +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) + __all__ = ["QUERY_CHECKER"] diff --git a/libs/langchain/langchain/utilities/python.py b/libs/langchain/langchain/utilities/python.py index ce787ea466..92397f0d86 100644 --- a/libs/langchain/langchain/utilities/python.py +++ b/libs/langchain/langchain/utilities/python.py @@ -1,3 +1,21 @@ -from langchain_community.utilities.python import PythonREPL +"""For backwards compatibility.""" +from typing import TYPE_CHECKING, Any + +from langchain._api import create_importer + +if TYPE_CHECKING: + from langchain_community.utilities.python import PythonREPL + + +_importer = create_importer( + __package__, + deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"}, +) + + +def __getattr__(name: str) -> Any: + """Look up attributes dynamically.""" + return _importer(name) + __all__ = ["PythonREPL"]
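Most of the remaining files are reduced to thin shims over `langchain._api.create_importer`: a `DEPRECATED_LOOKUP` table records where each symbol now lives in langchain_community, and a module-level `__getattr__` resolves it lazily, emitting a deprecation warning and a clear error if the community package is absent. The following is a simplified, standalone approximation of that behaviour -- it is not langchain's actual `create_importer` implementation, just a sketch of the idea using the PythonREPL entry from the last hunk:

import importlib
import warnings
from typing import Any

DEPRECATED_LOOKUP = {"PythonREPL": "langchain_community.utilities.python"}


def __getattr__(name: str) -> Any:
    """Resolve re-exported names lazily, on first attribute access (PEP 562)."""
    if name not in DEPRECATED_LOOKUP:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    target = DEPRECATED_LOOKUP[name]
    warnings.warn(
        f"Importing {name} from this shim is deprecated; "
        f"import it from {target} instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    try:
        return getattr(importlib.import_module(target), name)
    except ImportError as e:
        raise ImportError(
            f"{name} has moved to {target}. "
            "Install it with `pip install langchain-community`."
        ) from e


__all__ = ["PythonREPL"]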