mirror of https://github.com/hwchase17/langchain
Merge branch 'master' into eugene/0.2rc
commit
35e857131e
@ -1,166 +1,6 @@
|
||||
"""Load summarizing chains."""
|
||||
from typing import Any, Mapping, Optional, Protocol
|
||||
from langchain.chains.summarize.chain import (
|
||||
LoadingCallable,
|
||||
load_summarize_chain,
|
||||
)
|
||||
|
||||
from langchain_core.callbacks import Callbacks
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
|
||||
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
|
||||
from langchain.chains.combine_documents.refine import RefineDocumentsChain
|
||||
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.summarize import map_reduce_prompt, refine_prompts, stuff_prompt
|
||||
|
||||
|
||||
class LoadingCallable(Protocol):
    """Structural interface for factories that build a combine-documents chain."""

    def __call__(self, llm: BaseLanguageModel, **kwargs: Any) -> BaseCombineDocumentsChain:
        """Build and return the combine-documents chain for the given model."""
|
||||
|
||||
|
||||
def _load_stuff_chain(
    llm: BaseLanguageModel,
    prompt: BasePromptTemplate = stuff_prompt.PROMPT,
    document_variable_name: str = "text",
    verbose: Optional[bool] = None,
    **kwargs: Any,
) -> StuffDocumentsChain:
    """Build a "stuff" summarization chain: one prompt over all docs at once."""
    # TODO: document prompt
    return StuffDocumentsChain(
        llm_chain=LLMChain(llm=llm, prompt=prompt, verbose=verbose),  # type: ignore[arg-type]
        document_variable_name=document_variable_name,
        verbose=verbose,  # type: ignore[arg-type]
        **kwargs,
    )
|
||||
|
||||
|
||||
def _load_map_reduce_chain(
    llm: BaseLanguageModel,
    map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
    combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
    combine_document_variable_name: str = "text",
    map_reduce_document_variable_name: str = "text",
    collapse_prompt: Optional[BasePromptTemplate] = None,
    reduce_llm: Optional[BaseLanguageModel] = None,
    collapse_llm: Optional[BaseLanguageModel] = None,
    verbose: Optional[bool] = None,
    token_max: int = 3000,
    callbacks: Callbacks = None,
    *,
    collapse_max_retries: Optional[int] = None,
    **kwargs: Any,
) -> MapReduceDocumentsChain:
    """Build a map-reduce summarization chain.

    Each document is summarized individually (map step) and the partial
    summaries are combined (reduce step), collapsing intermediate results
    when they exceed ``token_max`` tokens.
    """
    mapper = LLMChain(
        llm=llm,
        prompt=map_prompt,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,  # type: ignore[arg-type]
    )
    reducer = LLMChain(
        llm=reduce_llm or llm,
        prompt=combine_prompt,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,  # type: ignore[arg-type]
    )
    # TODO: document prompt
    combine_documents_chain = StuffDocumentsChain(
        llm_chain=reducer,
        document_variable_name=combine_document_variable_name,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,
    )
    if collapse_prompt is not None:
        collapse_chain = StuffDocumentsChain(
            llm_chain=LLMChain(
                llm=collapse_llm or llm,
                prompt=collapse_prompt,
                verbose=verbose,  # type: ignore[arg-type]
                callbacks=callbacks,
            ),
            document_variable_name=combine_document_variable_name,
        )
    else:
        if collapse_llm is not None:
            raise ValueError(
                "collapse_llm provided, but collapse_prompt was not: please "
                "provide one or stop providing collapse_llm."
            )
        collapse_chain = None
    reduce_documents_chain = ReduceDocumentsChain(
        combine_documents_chain=combine_documents_chain,
        collapse_documents_chain=collapse_chain,
        token_max=token_max,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,
        collapse_max_retries=collapse_max_retries,
    )
    return MapReduceDocumentsChain(
        llm_chain=mapper,
        reduce_documents_chain=reduce_documents_chain,
        document_variable_name=map_reduce_document_variable_name,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,
        **kwargs,
    )
|
||||
|
||||
|
||||
def _load_refine_chain(
    llm: BaseLanguageModel,
    question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
    refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
    document_variable_name: str = "text",
    initial_response_name: str = "existing_answer",
    refine_llm: Optional[BaseLanguageModel] = None,
    verbose: Optional[bool] = None,
    **kwargs: Any,
) -> RefineDocumentsChain:
    """Build a refine summarization chain that iteratively updates an answer."""
    return RefineDocumentsChain(
        initial_llm_chain=LLMChain(
            llm=llm, prompt=question_prompt, verbose=verbose  # type: ignore[arg-type]
        ),
        refine_llm_chain=LLMChain(
            llm=refine_llm or llm, prompt=refine_prompt, verbose=verbose  # type: ignore[arg-type]
        ),
        document_variable_name=document_variable_name,
        initial_response_name=initial_response_name,
        verbose=verbose,  # type: ignore[arg-type]
        **kwargs,
    )
|
||||
|
||||
|
||||
def load_summarize_chain(
    llm: BaseLanguageModel,
    chain_type: str = "stuff",
    verbose: Optional[bool] = None,
    **kwargs: Any,
) -> BaseCombineDocumentsChain:
    """Load summarizing chain.

    Args:
        llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Should be one of "stuff",
            "map_reduce", and "refine".
        verbose: Whether chains should be run in verbose mode or not. Note that this
            applies to all chains that make up the final chain.
        **kwargs: Extra arguments forwarded to the selected chain loader.

    Returns:
        A chain to use for summarizing.

    Raises:
        ValueError: If ``chain_type`` is not one of the supported types.
    """
    loader_mapping: Mapping[str, LoadingCallable] = {
        "stuff": _load_stuff_chain,
        "map_reduce": _load_map_reduce_chain,
        "refine": _load_refine_chain,
    }
    if chain_type not in loader_mapping:
        raise ValueError(
            f"Got unsupported chain type: {chain_type}. "
            # Render options as a plain list instead of the dict_keys([...]) repr.
            f"Should be one of {list(loader_mapping)}"
        )
    return loader_mapping[chain_type](llm, verbose=verbose, **kwargs)
|
||||
__all__ = ["LoadingCallable", "load_summarize_chain"]
|
||||
|
@ -0,0 +1,166 @@
|
||||
"""Load summarizing chains."""
|
||||
from typing import Any, Mapping, Optional, Protocol
|
||||
|
||||
from langchain_core.callbacks import Callbacks
|
||||
from langchain_core.language_models import BaseLanguageModel
|
||||
from langchain_core.prompts import BasePromptTemplate
|
||||
|
||||
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
|
||||
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
|
||||
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
|
||||
from langchain.chains.combine_documents.refine import RefineDocumentsChain
|
||||
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.summarize import map_reduce_prompt, refine_prompts, stuff_prompt
|
||||
|
||||
|
||||
class LoadingCallable(Protocol):
    """Structural interface for factories that build a combine-documents chain."""

    def __call__(self, llm: BaseLanguageModel, **kwargs: Any) -> BaseCombineDocumentsChain:
        """Build and return the combine-documents chain for the given model."""
|
||||
|
||||
|
||||
def _load_stuff_chain(
    llm: BaseLanguageModel,
    prompt: BasePromptTemplate = stuff_prompt.PROMPT,
    document_variable_name: str = "text",
    verbose: Optional[bool] = None,
    **kwargs: Any,
) -> StuffDocumentsChain:
    """Build a "stuff" summarization chain: one prompt over all docs at once."""
    # TODO: document prompt
    return StuffDocumentsChain(
        llm_chain=LLMChain(llm=llm, prompt=prompt, verbose=verbose),  # type: ignore[arg-type]
        document_variable_name=document_variable_name,
        verbose=verbose,  # type: ignore[arg-type]
        **kwargs,
    )
|
||||
|
||||
|
||||
def _load_map_reduce_chain(
    llm: BaseLanguageModel,
    map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
    combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
    combine_document_variable_name: str = "text",
    map_reduce_document_variable_name: str = "text",
    collapse_prompt: Optional[BasePromptTemplate] = None,
    reduce_llm: Optional[BaseLanguageModel] = None,
    collapse_llm: Optional[BaseLanguageModel] = None,
    verbose: Optional[bool] = None,
    token_max: int = 3000,
    callbacks: Callbacks = None,
    *,
    collapse_max_retries: Optional[int] = None,
    **kwargs: Any,
) -> MapReduceDocumentsChain:
    """Build a map-reduce summarization chain.

    Each document is summarized individually (map step) and the partial
    summaries are combined (reduce step), collapsing intermediate results
    when they exceed ``token_max`` tokens.
    """
    mapper = LLMChain(
        llm=llm,
        prompt=map_prompt,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,  # type: ignore[arg-type]
    )
    reducer = LLMChain(
        llm=reduce_llm or llm,
        prompt=combine_prompt,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,  # type: ignore[arg-type]
    )
    # TODO: document prompt
    combine_documents_chain = StuffDocumentsChain(
        llm_chain=reducer,
        document_variable_name=combine_document_variable_name,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,
    )
    if collapse_prompt is not None:
        collapse_chain = StuffDocumentsChain(
            llm_chain=LLMChain(
                llm=collapse_llm or llm,
                prompt=collapse_prompt,
                verbose=verbose,  # type: ignore[arg-type]
                callbacks=callbacks,
            ),
            document_variable_name=combine_document_variable_name,
        )
    else:
        if collapse_llm is not None:
            raise ValueError(
                "collapse_llm provided, but collapse_prompt was not: please "
                "provide one or stop providing collapse_llm."
            )
        collapse_chain = None
    reduce_documents_chain = ReduceDocumentsChain(
        combine_documents_chain=combine_documents_chain,
        collapse_documents_chain=collapse_chain,
        token_max=token_max,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,
        collapse_max_retries=collapse_max_retries,
    )
    return MapReduceDocumentsChain(
        llm_chain=mapper,
        reduce_documents_chain=reduce_documents_chain,
        document_variable_name=map_reduce_document_variable_name,
        verbose=verbose,  # type: ignore[arg-type]
        callbacks=callbacks,
        **kwargs,
    )
|
||||
|
||||
|
||||
def _load_refine_chain(
    llm: BaseLanguageModel,
    question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
    refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
    document_variable_name: str = "text",
    initial_response_name: str = "existing_answer",
    refine_llm: Optional[BaseLanguageModel] = None,
    verbose: Optional[bool] = None,
    **kwargs: Any,
) -> RefineDocumentsChain:
    """Build a refine summarization chain that iteratively updates an answer."""
    return RefineDocumentsChain(
        initial_llm_chain=LLMChain(
            llm=llm, prompt=question_prompt, verbose=verbose  # type: ignore[arg-type]
        ),
        refine_llm_chain=LLMChain(
            llm=refine_llm or llm, prompt=refine_prompt, verbose=verbose  # type: ignore[arg-type]
        ),
        document_variable_name=document_variable_name,
        initial_response_name=initial_response_name,
        verbose=verbose,  # type: ignore[arg-type]
        **kwargs,
    )
|
||||
|
||||
|
||||
def load_summarize_chain(
    llm: BaseLanguageModel,
    chain_type: str = "stuff",
    verbose: Optional[bool] = None,
    **kwargs: Any,
) -> BaseCombineDocumentsChain:
    """Load summarizing chain.

    Args:
        llm: Language Model to use in the chain.
        chain_type: Type of document combining chain to use. Should be one of "stuff",
            "map_reduce", and "refine".
        verbose: Whether chains should be run in verbose mode or not. Note that this
            applies to all chains that make up the final chain.
        **kwargs: Extra arguments forwarded to the selected chain loader.

    Returns:
        A chain to use for summarizing.

    Raises:
        ValueError: If ``chain_type`` is not one of the supported types.
    """
    loader_mapping: Mapping[str, LoadingCallable] = {
        "stuff": _load_stuff_chain,
        "map_reduce": _load_map_reduce_chain,
        "refine": _load_refine_chain,
    }
    if chain_type not in loader_mapping:
        raise ValueError(
            f"Got unsupported chain type: {chain_type}. "
            # Render options as a plain list instead of the dict_keys([...]) repr.
            f"Should be one of {list(loader_mapping)}"
        )
    return loader_mapping[chain_type](llm, verbose=verbose, **kwargs)
|
@ -0,0 +1,30 @@
|
||||
"""Deprecated import shim for the AINetwork app tool (moved to langchain_community)."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import AINAppOps
    from langchain_community.tools.ainetwork.app import AppOperationType, AppSchema

# These names now live in ``langchain_community``; resolve them lazily so old
# imports keep working while a deprecation warning is emitted.
DEPRECATED_LOOKUP = {
    "AppOperationType": "langchain_community.tools.ainetwork.app",
    "AppSchema": "langchain_community.tools.ainetwork.app",
    "AINAppOps": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve a deprecated attribute through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute


__all__ = [
    "AppOperationType",
    "AppSchema",
    "AINAppOps",
]
|
@ -0,0 +1,27 @@
|
||||
"""Deprecated import shim for the AINetwork base tool (moved to langchain_community)."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools.ainetwork.base import AINBaseTool, OperationType

# These names now live in ``langchain_community``; resolve them lazily so old
# imports keep working while a deprecation warning is emitted.
DEPRECATED_LOOKUP = {
    "OperationType": "langchain_community.tools.ainetwork.base",
    "AINBaseTool": "langchain_community.tools.ainetwork.base",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve a deprecated attribute through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute


__all__ = [
    "OperationType",
    "AINBaseTool",
]
|
@ -0,0 +1,28 @@
|
||||
"""Deprecated import shim for the AINetwork owner tool (moved to langchain_community)."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import AINOwnerOps
    from langchain_community.tools.ainetwork.owner import RuleSchema

# These names now live in ``langchain_community``; resolve them lazily so old
# imports keep working while a deprecation warning is emitted.
DEPRECATED_LOOKUP = {
    "RuleSchema": "langchain_community.tools.ainetwork.owner",
    "AINOwnerOps": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve a deprecated attribute through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute


__all__ = [
    "RuleSchema",
    "AINOwnerOps",
]
|
@ -0,0 +1,28 @@
|
||||
"""Deprecated import shim for the AINetwork rule tool (moved to langchain_community)."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import AINRuleOps
    from langchain_community.tools.ainetwork.rule import RuleSchema

# These names now live in ``langchain_community``; resolve them lazily so old
# imports keep working while a deprecation warning is emitted.
DEPRECATED_LOOKUP = {
    "RuleSchema": "langchain_community.tools.ainetwork.rule",
    "AINRuleOps": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve a deprecated attribute through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute


__all__ = [
    "RuleSchema",
    "AINRuleOps",
]
|
@ -0,0 +1,28 @@
|
||||
"""Deprecated import shim for the AINetwork transfer tool (moved to langchain_community)."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import AINTransfer
    from langchain_community.tools.ainetwork.transfer import TransferSchema

# These names now live in ``langchain_community``; resolve them lazily so old
# imports keep working while a deprecation warning is emitted.
DEPRECATED_LOOKUP = {
    "TransferSchema": "langchain_community.tools.ainetwork.transfer",
    "AINTransfer": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve a deprecated attribute through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute


__all__ = [
    "TransferSchema",
    "AINTransfer",
]
|
@ -0,0 +1,28 @@
|
||||
"""Deprecated import shim for the AINetwork value tool (moved to langchain_community)."""

from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.tools import AINValueOps
    from langchain_community.tools.ainetwork.value import ValueSchema

# These names now live in ``langchain_community``; resolve them lazily so old
# imports keep working while a deprecation warning is emitted.
DEPRECATED_LOOKUP = {
    "ValueSchema": "langchain_community.tools.ainetwork.value",
    "AINValueOps": "langchain_community.tools",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Resolve a deprecated attribute through the dynamic importer."""
    attribute = _import_attribute(name)
    return attribute


__all__ = [
    "ValueSchema",
    "AINValueOps",
]
|
Loading…
Reference in New Issue