langchain[patch]: Update more code to use langchain community as an optional dependency (#21170)

Updates more of the langchain package to treat langchain-community as an optional dependency.
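A quick sketch of the recurring pattern in this change: langchain_community is imported inside a try block, and a stub that fails only when used is defined as the fallback, so importing langchain itself never requires the community package. The stub's docstring and error text below are illustrative, not copied from the commit.

from typing import Any

try:
    from langchain_community.utilities.requests import TextRequestsWrapper
except ImportError:

    class TextRequestsWrapper:  # type: ignore[no-redef]
        """Stub: module import stays cheap; the failure is deferred to first use."""

        def __init__(self, *args: Any, **kwargs: Any) -> None:
            raise ImportError(
                "This feature requires langchain_community. "
                "Install it with `pip install langchain-community`."
            )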
pull/21194/head
Eugene Yurtsev 3 weeks ago committed by GitHub
parent cd4c54282a
commit c306364b06

@@ -1,7 +1,8 @@
"""Chain that implements the ReAct paper from https://arxiv.org/pdf/2210.03629.pdf."""
from typing import Any, List, Optional, Sequence
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, Sequence
from langchain_community.docstore.base import Docstore
from langchain_core._api import deprecated
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
@@ -16,6 +17,9 @@ from langchain.agents.react.textworld_prompt import TEXTWORLD_PROMPT
from langchain.agents.react.wiki_prompt import WIKI_PROMPT
from langchain.agents.utils import validate_tools_single_input
if TYPE_CHECKING:
from langchain_community.docstore.base import Docstore
@deprecated("0.1.0", removal="0.2.0")
class ReActDocstoreAgent(Agent):

@@ -1,9 +1,8 @@
"""Chain that does self-ask with search."""
from typing import Any, Sequence, Union
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence, Union
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langchain_community.utilities.serpapi import SerpAPIWrapper
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@@ -18,6 +17,11 @@ from langchain.agents.self_ask_with_search.output_parser import SelfAskOutputPar
from langchain.agents.self_ask_with_search.prompt import PROMPT
from langchain.agents.utils import validate_tools_single_input
if TYPE_CHECKING:
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
from langchain_community.utilities.serpapi import SerpAPIWrapper
@deprecated("0.1.0", alternative="create_self_ask_with_search", removal="0.2.0")
class SelfAskWithSearchAgent(Agent):
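
For modules that only need the community classes in type annotations, the diff above moves the imports under if TYPE_CHECKING and adds from __future__ import annotations, so the names are visible to type checkers but never imported at runtime. A minimal sketch, with a hypothetical helper function standing in for the agent code:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers; never executed at runtime.
    from langchain_community.utilities.serpapi import SerpAPIWrapper


def describe_wrapper(search: SerpAPIWrapper) -> str:
    """Hypothetical helper: the annotation is a string, so no import is triggered."""
    return type(search).__name__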

@@ -4,7 +4,6 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@@ -50,189 +49,205 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
    return False


try:
    from langchain_community.utilities.requests import TextRequestsWrapper

    class APIChain(Chain):
        """Chain that makes API calls and summarizes the responses to answer a question.

        *Security Note*: This API chain uses the requests toolkit
            to make GET, POST, PATCH, PUT, and DELETE requests to an API.

            Exercise care in who is allowed to use this chain. If exposing
            to end users, consider that users will be able to make arbitrary
            requests on behalf of the server hosting the code. For example,
            users could ask the server to make a request to a private API
            that is only accessible from the server.

            Control access to who can submit issue requests using this toolkit and
            what network access it has.

            See https://python.langchain.com/docs/security for more information.
        """

        api_request_chain: LLMChain
        api_answer_chain: LLMChain
        requests_wrapper: TextRequestsWrapper = Field(exclude=True)
        api_docs: str
        question_key: str = "question"  #: :meta private:
        output_key: str = "output"  #: :meta private:
        limit_to_domains: Optional[Sequence[str]]
        """Use to limit the domains that can be accessed by the API chain.

        * For example, to limit to just the domain `https://www.example.com`, set
            `limit_to_domains=["https://www.example.com"]`.
        * The default value is an empty tuple, which means that no domains are
            allowed by default. By design this will raise an error on instantiation.
        * Use a None if you want to allow all domains by default -- this is not
            recommended for security reasons, as it would allow malicious users to
            make requests to arbitrary URLS including internal APIs accessible from
            the server.
        """

        @property
        def input_keys(self) -> List[str]:
            """Expect input key.

            :meta private:
            """
            return [self.question_key]

        @property
        def output_keys(self) -> List[str]:
            """Expect output key.

            :meta private:
            """
            return [self.output_key]

        @root_validator(pre=True)
        def validate_api_request_prompt(cls, values: Dict) -> Dict:
            """Check that api request prompt expects the right variables."""
            input_vars = values["api_request_chain"].prompt.input_variables
            expected_vars = {"question", "api_docs"}
            if set(input_vars) != expected_vars:
                raise ValueError(
                    f"Input variables should be {expected_vars}, got {input_vars}"
                )
            return values

        @root_validator(pre=True)
        def validate_limit_to_domains(cls, values: Dict) -> Dict:
            """Check that allowed domains are valid."""
            if "limit_to_domains" not in values:
                raise ValueError(
                    "You must specify a list of domains to limit access using "
                    "`limit_to_domains`"
                )
            if (
                not values["limit_to_domains"]
                and values["limit_to_domains"] is not None
            ):
                raise ValueError(
                    "Please provide a list of domains to limit access using "
                    "`limit_to_domains`."
                )
            return values

        @root_validator(pre=True)
        def validate_api_answer_prompt(cls, values: Dict) -> Dict:
            """Check that api answer prompt expects the right variables."""
            input_vars = values["api_answer_chain"].prompt.input_variables
            expected_vars = {"question", "api_docs", "api_url", "api_response"}
            if set(input_vars) != expected_vars:
                raise ValueError(
                    f"Input variables should be {expected_vars}, got {input_vars}"
                )
            return values

        def _call(
            self,
            inputs: Dict[str, Any],
            run_manager: Optional[CallbackManagerForChainRun] = None,
        ) -> Dict[str, str]:
            _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
            question = inputs[self.question_key]
            api_url = self.api_request_chain.predict(
                question=question,
                api_docs=self.api_docs,
                callbacks=_run_manager.get_child(),
            )
            _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose)
            api_url = api_url.strip()
            if self.limit_to_domains and not _check_in_allowed_domain(
                api_url, self.limit_to_domains
            ):
                raise ValueError(
                    f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
                )
            api_response = self.requests_wrapper.get(api_url)
            _run_manager.on_text(
                str(api_response), color="yellow", end="\n", verbose=self.verbose
            )
            answer = self.api_answer_chain.predict(
                question=question,
                api_docs=self.api_docs,
                api_url=api_url,
                api_response=api_response,
                callbacks=_run_manager.get_child(),
            )
            return {self.output_key: answer}

        async def _acall(
            self,
            inputs: Dict[str, Any],
            run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
        ) -> Dict[str, str]:
            _run_manager = (
                run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
            )
            question = inputs[self.question_key]
            api_url = await self.api_request_chain.apredict(
                question=question,
                api_docs=self.api_docs,
                callbacks=_run_manager.get_child(),
            )
            await _run_manager.on_text(
                api_url, color="green", end="\n", verbose=self.verbose
            )
            api_url = api_url.strip()
            if self.limit_to_domains and not _check_in_allowed_domain(
                api_url, self.limit_to_domains
            ):
                raise ValueError(
                    f"{api_url} is not in the allowed domains: {self.limit_to_domains}"
                )
            api_response = await self.requests_wrapper.aget(api_url)
            await _run_manager.on_text(
                str(api_response), color="yellow", end="\n", verbose=self.verbose
            )
            answer = await self.api_answer_chain.apredict(
                question=question,
                api_docs=self.api_docs,
                api_url=api_url,
                api_response=api_response,
                callbacks=_run_manager.get_child(),
            )
            return {self.output_key: answer}

        @classmethod
        def from_llm_and_api_docs(
            cls,
            llm: BaseLanguageModel,
            api_docs: str,
            headers: Optional[dict] = None,
            api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
            api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
            limit_to_domains: Optional[Sequence[str]] = tuple(),
            **kwargs: Any,
        ) -> APIChain:
            """Load chain from just an LLM and the api docs."""
            get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
            requests_wrapper = TextRequestsWrapper(headers=headers)
            get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
            return cls(
                api_request_chain=get_request_chain,
                api_answer_chain=get_answer_chain,
                requests_wrapper=requests_wrapper,
                api_docs=api_docs,
                limit_to_domains=limit_to_domains,
                **kwargs,
            )

        @property
        def _chain_type(self) -> str:
            return "api_chain"

except ImportError:

    class APIChain:  # type: ignore[no-redef]
        def __init__(self, *args: Any, **kwargs: Any) -> None:
            raise ImportError(
                "To use the APIChain, you must install the langchain_community package."
                "pip install langchain_community"
            )

@@ -5,7 +5,6 @@ from pathlib import Path
from typing import Any, Union
import yaml
from langchain_community.llms.loading import load_llm, load_llm_from_config
from langchain_core.prompts.loading import (
_load_output_parser,
load_prompt,
@@ -30,6 +29,27 @@ from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesCha
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import RetrievalQA, VectorDBQA
try:
from langchain_community.llms.loading import load_llm, load_llm_from_config
except ImportError:
def load_llm(*args: Any, **kwargs: Any) -> None: # type: ignore
raise ImportError(
"To use this load_llm functionality you must install the "
"langchain_community package. "
"You can install it with `pip install langchain_community`"
)
def load_llm_from_config( # type: ignore
*args: Any, **kwargs: Any
) -> None:
raise ImportError(
"To use this load_llm_from_config functionality you must install the "
"langchain_community package. "
"You can install it with `pip install langchain_community`"
)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"

@@ -6,7 +6,6 @@ from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import requests
from langchain_community.utilities.openapi import OpenAPISpec
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
@@ -20,6 +19,7 @@ from langchain.chains.sequential import SequentialChain
from langchain.tools import APIOperation
if TYPE_CHECKING:
from langchain_community.utilities.openapi import OpenAPISpec
from openapi_pydantic import Parameter
@@ -255,6 +255,13 @@ def get_openapi_chain(
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
"""
try:
from langchain_community.utilities.openapi import OpenAPISpec
except ImportError as e:
raise ImportError(
"Could not import langchain_community.utilities.openapi. "
"Please install it with `pip install langchain-community`."
) from e
if isinstance(spec, str):
for conversion in (
OpenAPISpec.from_url,

@@ -1,6 +1,7 @@
from typing import Any, Dict, List, Optional, TypedDict, Union
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, TypedDict, Union
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
@@ -8,6 +9,9 @@ from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS
if TYPE_CHECKING:
from langchain_community.utilities.sql_database import SQLDatabase
def _strip(text: str) -> str:
return text.strip()

@@ -1,3 +1,27 @@
from langchain_community.document_loaders.parsers.language.python import PythonSegmenter
from typing import TYPE_CHECKING, Any
__all__ = ["PythonSegmenter"]
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.language.python import (
PythonSegmenter,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PythonSegmenter": "langchain_community.document_loaders.parsers.language.python"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PythonSegmenter",
]

@@ -1,5 +1,26 @@
from langchain_community.document_loaders.pyspark_dataframe import (
PySparkDataFrameLoader,
)
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.pyspark_dataframe import (
PySparkDataFrameLoader,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PySparkDataFrameLoader": "langchain_community.document_loaders.pyspark_dataframe"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["PySparkDataFrameLoader"]

@@ -1,3 +1,22 @@
from langchain_community.document_loaders.python import PythonLoader
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.python import PythonLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"PythonLoader": "langchain_community.document_loaders.python"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["PythonLoader"]

@@ -1,5 +1,21 @@
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.embeddings import SentenceTransformerEmbeddings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"SentenceTransformerEmbeddings": "langchain_community.embeddings"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["SentenceTransformerEmbeddings"]

@@ -5,8 +5,6 @@ import logging
import re
from typing import Any, Dict, List, Optional, Union
from langchain_community.chat_models.azure_openai import AzureChatOpenAI
from langchain_community.chat_models.openai import ChatOpenAI
from langchain_core.callbacks.manager import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
@@ -258,10 +256,7 @@ class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
ValueError: If the input variables are not as expected.
"""
if not (
isinstance(llm, (ChatOpenAI, AzureChatOpenAI))
and llm.model_name.startswith("gpt-4")
):
if not (hasattr(llm, "model_name") and not llm.model_name.startswith("gpt-4")):
logger.warning(
"This chain was only tested with GPT-4. \
Performance may be significantly worse with other models."

@@ -11,12 +11,33 @@ Importantly, Index keeps on working even if the content being written is derived
via a set of transformations from some source content (e.g., indexing children
documents that were derived from parent documents by chunking.)
"""
from langchain_community.graphs.index_creator import GraphIndexCreator
from typing import TYPE_CHECKING, Any
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain._api import create_importer
from langchain.indexes._sql_record_manager import SQLRecordManager
from langchain.indexes.vectorstore import VectorstoreIndexCreator
if TYPE_CHECKING:
from langchain_community.graphs.index_creator import GraphIndexCreator
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GraphIndexCreator": "langchain_community.graphs.index_creator",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
# Keep sorted
"aindex",

@@ -1,5 +1,27 @@
"""Graph Index Creator."""
from langchain_community.graphs.index_creator import GraphIndexCreator
from langchain_community.graphs.networkx_graph import NetworkxEntityGraph
"""**Graphs** provide a natural language interface to graph databases."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs.index_creator import GraphIndexCreator
from langchain_community.graphs.networkx_graph import NetworkxEntityGraph
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GraphIndexCreator": "langchain_community.graphs.index_creator",
"NetworkxEntityGraph": "langchain_community.graphs.networkx_graph",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["GraphIndexCreator", "NetworkxEntityGraph"]

@@ -1,8 +1,9 @@
# flake8: noqa
from langchain_community.graphs.networkx_graph import KG_TRIPLE_DELIMITER
from langchain_core.prompts.prompt import PromptTemplate
KG_TRIPLE_DELIMITER = "<|>"
_DEFAULT_KNOWLEDGE_TRIPLE_EXTRACTION_TEMPLATE = (
"You are a networked intelligence helping a human track knowledge triples"
" about all relevant people, things, concepts, etc. and integrating"

@@ -1,6 +1,5 @@
from typing import Any, Dict, List, Optional, Type
from langchain_community.vectorstores.inmemory import InMemoryVectorStore
from langchain_core.document_loaders import BaseLoader
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
@@ -117,10 +116,30 @@ class VectorStoreIndexWrapper(BaseModel):
return await chain.ainvoke({chain.question_key: question})
def _get_in_memory_vectorstore() -> Type[VectorStore]:
"""Get the InMemoryVectorStore."""
import warnings
try:
from langchain_community.vectorstores.inmemory import InMemoryVectorStore
except ImportError:
raise ImportError(
"Please install langchain-community to use the InMemoryVectorStore."
)
warnings.warn(
"Using InMemoryVectorStore as the default vectorstore."
"This memory store won't persist data. You should explicitly"
"specify a vectorstore when using VectorstoreIndexCreator"
)
return InMemoryVectorStore
class VectorstoreIndexCreator(BaseModel):
"""Logic for creating indexes."""
vectorstore_cls: Type[VectorStore] = InMemoryVectorStore
vectorstore_cls: Type[VectorStore] = Field(
default_factory=_get_in_memory_vectorstore
)
embedding: Embeddings
text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
vectorstore_kwargs: dict = Field(default_factory=dict)

@@ -1,3 +1,23 @@
from langchain_community.llms.titan_takeoff import TitanTakeoff as TitanTakeoffPro
from typing import TYPE_CHECKING, Any
__all__ = ["TitanTakeoffPro"]
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import TitanTakeoffPro
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"TitanTakeoffPro": "langchain_community.llms"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TitanTakeoffPro",
]

@@ -1,4 +1,21 @@
"""For backwards compatibility."""
from langchain_community.utilities.python import PythonREPL
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.python import PythonREPL
_importer = create_importer(
__package__,
deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"},
)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _importer(name)
__all__ = ["PythonREPL"]

@@ -1,7 +0,0 @@
from langchain_community.tools.ainetwork.app import (
AINAppOps,
AppOperationType,
AppSchema,
)
__all__ = ["AppOperationType", "AppSchema", "AINAppOps"]

@@ -1,3 +0,0 @@
from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType
__all__ = ["OperationType", "AINBaseTool"]

@@ -1,3 +0,0 @@
from langchain_community.tools.ainetwork.owner import AINOwnerOps, RuleSchema
__all__ = ["RuleSchema", "AINOwnerOps"]

@@ -1,3 +0,0 @@
from langchain_community.tools.ainetwork.rule import AINRuleOps, RuleSchema
__all__ = ["RuleSchema", "AINRuleOps"]

@@ -1,3 +0,0 @@
from langchain_community.tools.ainetwork.transfer import AINTransfer, TransferSchema
__all__ = ["TransferSchema", "AINTransfer"]

@@ -1,3 +0,0 @@
from langchain_community.tools.ainetwork.value import AINValueOps, ValueSchema
__all__ = ["ValueSchema", "AINValueOps"]

@@ -1,3 +1,23 @@
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
"""For backwards compatibility."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
_importer = create_importer(
__package__,
deprecated_lookups={
"QUERY_CHECKER": "langchain_community.tools.sql_database.prompt",
},
)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _importer(name)
__all__ = ["QUERY_CHECKER"]

@@ -1,3 +1,21 @@
from langchain_community.utilities.python import PythonREPL
"""For backwards compatibility."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.python import PythonREPL
_importer = create_importer(
__package__,
deprecated_lookups={"PythonREPL": "langchain_community.utilities.python"},
)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _importer(name)
__all__ = ["PythonREPL"]
