docstrings cleanup (#8311)

- added missing docstrings
- changed docstrings into a consistent format
  
@baskaryan
Leonid Ganeline authored 1 year ago; committed by GitHub
parent ceab0a7c1f
commit ee6ff96e28

@@ -13,6 +13,8 @@ from langchain_experimental.autonomous_agents.autogpt.prompt_generator import ge
 class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
+    """Prompt for AutoGPT."""
+
     ai_name: str
     ai_role: str
     tools: List[BaseTool]

@@ -38,6 +38,8 @@ logger = logging.getLogger(__name__)
 class BaseAgentExecutorIterator(ABC):
+    """Base class for AgentExecutorIterator."""
+
     @abstractmethod
     def build_callback_manager(self) -> None:
         pass

@@ -57,6 +59,8 @@ def rebuild_callback_manager_on_set(
 class AgentExecutorIterator(BaseAgentExecutorIterator):
+    """Iterator for AgentExecutor."""
+
     def __init__(
         self,
         agent_executor: AgentExecutor,

@@ -468,7 +468,7 @@ class AsyncCallbackHandler(BaseCallbackHandler):
 class BaseCallbackManager(CallbackManagerMixin):
-    """Base callback manager that can be used to handle callbacks from LangChain."""
+    """Base callback manager that handles callbacks from LangChain."""

     def __init__(
         self,

@@ -93,7 +93,7 @@ def analyze_text(
 class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
-    """This callback handler is designed specifically for usage within a Flyte task."""
+    """This callback handler that is used within a Flyte task."""

     def __init__(self) -> None:
         """Initialize callback handler."""

@@ -1004,7 +1004,7 @@ class AsyncCallbackManagerForRetrieverRun(
 class CallbackManager(BaseCallbackManager):
-    """Callback manager that can be used to handle callbacks from langchain."""
+    """Callback manager that handles callbacks from langchain."""

     def on_llm_start(
         self,

@@ -1273,7 +1273,7 @@ class CallbackManager(BaseCallbackManager):
 class AsyncCallbackManager(BaseCallbackManager):
-    """Async callback manager that can be used to handle callbacks from LangChain."""
+    """Async callback manager that handles callbacks from LangChain."""

     @property
     def is_async(self) -> bool:

@@ -22,7 +22,9 @@ def StreamlitCallbackHandler(
     collapse_completed_thoughts: bool = True,
     thought_labeler: Optional[LLMThoughtLabeler] = None,
 ) -> BaseCallbackHandler:
-    """Construct a new StreamlitCallbackHandler. This CallbackHandler is geared towards
+    """Callback Handler that writes to a Streamlit app.
+
+    This CallbackHandler is geared towards
     use with a LangChain Agent; it displays the Agent's LLM and tool-usage "thoughts"
     inside a series of Streamlit expanders.
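
For context, a minimal usage sketch of the handler described in the hunk above, meant to be run with ``streamlit run``; the LLM, tools, and agent setup are illustrative assumptions, not part of this commit:

.. code-block:: python

    import streamlit as st

    from langchain.agents import AgentType, initialize_agent, load_tools
    from langchain.callbacks import StreamlitCallbackHandler
    from langchain.llms import OpenAI

    # Assumes OPENAI_API_KEY is set; the math tool is just a stand-in workload.
    llm = OpenAI(temperature=0)
    agent = initialize_agent(
        load_tools(["llm-math"], llm=llm), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION
    )

    if prompt := st.chat_input():
        # The handler renders the agent's LLM and tool-usage "thoughts" as expanders.
        st_callback = StreamlitCallbackHandler(st.container())
        st.write(agent.run(prompt, callbacks=[st_callback]))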

@@ -31,7 +31,7 @@ class TracerSessionV1(TracerSessionV1Base):
 class TracerSessionBase(TracerSessionV1Base):
-    """A creation class for TracerSession."""
+    """Base class for TracerSession."""

     tenant_id: UUID

@@ -20,6 +20,7 @@ INTERMEDIATE_STEPS_KEY = "intermediate_steps"
 def extract_cypher(text: str) -> str:
+    """Extract Cypher code from text using Regex."""
     # The pattern to find Cypher code enclosed in triple backticks
     pattern = r"```(.*?)```"
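
A standalone sketch of the extraction the new docstring describes: pull the contents of a fenced block out of an LLM response with the same regex, falling back to the raw text when no fence is present (the fallback is an assumption here, not a claim about the library's exact behavior):

.. code-block:: python

    import re


    def extract_cypher_sketch(text: str) -> str:
        """Return the first triple-backtick block in ``text``, or ``text`` itself."""
        matches = re.findall(r"```(.*?)```", text, re.DOTALL)
        return matches[0] if matches else text


    print(extract_cypher_sketch("```MATCH (n) RETURN n```"))  # MATCH (n) RETURN n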

@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
 class TransformChain(Chain):
-    """Chain transform chain output.
+    """Chain that transforms the chain output.

     Example:
         .. code-block:: python

@@ -22,7 +22,7 @@ from langchain.schema.output import ChatGenerationChunk
 class ChatAnthropic(BaseChatModel, _AnthropicCommon):
-    r"""Wrapper around Anthropic's large language model.
+    """Anthropic's large language chat model.

     To use, you should have the ``anthropic`` python package installed, and the
     environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
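
A short sketch of the setup this docstring describes, assuming the ``anthropic`` package is installed and ``ANTHROPIC_API_KEY`` is exported; treat it as illustrative rather than canonical:

.. code-block:: python

    from langchain.chat_models import ChatAnthropic
    from langchain.schema import HumanMessage

    # Picks up ANTHROPIC_API_KEY from the environment and uses the default model.
    chat = ChatAnthropic(temperature=0)
    reply = chat([HumanMessage(content="Say hello in French.")])
    print(reply.content)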

@@ -14,7 +14,9 @@ logger = logging.getLogger(__name__)
 class AzureChatOpenAI(ChatOpenAI):
-    """Wrapper around Azure OpenAI Chat Completion API. To use this class you
+    """Wrapper around Azure OpenAI Chat Completion API.
+
+    To use this class you
     must have a deployed model on Azure OpenAI. Use `deployment_name` in the
     constructor to refer to the "Model deployment name" in the Azure portal.
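
A hedged construction sketch for the Azure wrapper above; the endpoint, deployment name, API version, and key are placeholders you would replace with your own Azure OpenAI resource values:

.. code-block:: python

    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    chat = AzureChatOpenAI(
        deployment_name="my-gpt-35-deployment",  # "Model deployment name" in the Azure portal
        openai_api_base="https://my-resource.openai.azure.com/",
        openai_api_version="2023-05-15",
        openai_api_key="...",
    )
    print(chat([HumanMessage(content="Hello!")]).content)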

@@ -67,7 +67,7 @@ def _collect_yaml_input(
 class HumanInputChatModel(BaseChatModel):
-    """ChatModel wrapper which returns user input as the response.."""
+    """ChatModel which returns user input as the response."""

     input_func: Callable = Field(default_factory=lambda: _collect_yaml_input)
     message_func: Callable = Field(default_factory=lambda: _display_messages)

@@ -133,8 +133,8 @@ def _convert_message_to_dict(message: BaseMessage) -> dict:
 class JinaChat(BaseChatModel):
-    """JinaChat is a wrapper for Jina AI's LLM service, providing cost-effective
-    image chat capabilities in comparison to other LLM APIs.
+    """Wrapper for Jina AI's LLM service, providing cost-effective
+    image chat capabilities.

     To use, you should have the ``openai`` python package installed, and the
     environment variable ``JINACHAT_API_KEY`` set to your API key, which you

@@ -8,7 +8,8 @@ from langchain.document_loaders.unstructured import (
 class UnstructuredEPubLoader(UnstructuredFileLoader):
-    """UnstructuredEPubLoader uses unstructured to load EPUB files.
+    """Loader that uses Unstructured to load EPUB files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured

@@ -5,7 +5,8 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 class UnstructuredHTMLLoader(UnstructuredFileLoader):
-    """UnstructuredHTMLLoader uses unstructured to load HTML files.
+    """Loader that uses Unstructured to load HTML files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured

@@ -5,7 +5,8 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 class UnstructuredImageLoader(UnstructuredFileLoader):
-    """UnstructuredImageLoader uses unstructured to load PNG and JPG files.
+    """Loader that uses Unstructured to load PNG and JPG files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured

@@ -5,7 +5,8 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 class UnstructuredMarkdownLoader(UnstructuredFileLoader):
-    """UnstructuredMarkdownLoader uses unstructured to load markdown files.
+    """Loader that uses Unstructured to load markdown files.
+
     You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single
     langchain Document object. If you use "elements" mode, the unstructured
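
The "single" vs. "elements" behavior called out in these loader docstrings, sketched with the Markdown loader and a placeholder path (requires the ``unstructured`` package):

.. code-block:: python

    from langchain.document_loaders import UnstructuredMarkdownLoader

    # "single" mode: the whole file comes back as one langchain Document.
    docs = UnstructuredMarkdownLoader("example.md", mode="single").load()

    # "elements" mode: one Document per detected element (title, narrative text, ...).
    element_docs = UnstructuredMarkdownLoader("example.md", mode="elements").load()

    print(len(docs), len(element_docs))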

@@ -7,7 +7,7 @@ from langchain.document_loaders.tencent_cos_file import TencentCOSFileLoader
 class TencentCOSDirectoryLoader(BaseLoader):
-    """Loading logic for loading documents from Tencent Cloud COS."""
+    """Loader for Tencent Cloud COS directory."""

     def __init__(self, conf: Any, bucket: str, prefix: str = ""):
         """Initialize with COS config, bucket and prefix.

@@ -9,7 +9,7 @@ from langchain.document_loaders.unstructured import UnstructuredFileLoader
 class TencentCOSFileLoader(BaseLoader):
-    """Loading logic for loading documents from Tencent Cloud COS."""
+    """Loader for Tencent Cloud COS file."""

     def __init__(self, conf: Any, bucket: str, key: str):
         """Initialize with COS config, bucket and key name.

@@ -34,7 +34,7 @@ def validate_unstructured_version(min_unstructured_version: str) -> None:
 class UnstructuredBaseLoader(BaseLoader, ABC):
-    """Loader that uses unstructured to load files."""
+    """Loader that uses Unstructured to load files."""

     def __init__(
         self,

@@ -130,7 +130,9 @@ class UnstructuredBaseLoader(BaseLoader, ABC):
 class UnstructuredFileLoader(UnstructuredBaseLoader):
-    """UnstructuredFileLoader uses unstructured to load files. The file loader uses the
+    """Loader that uses Unstructured to load files.
+
+    The file loader uses the
     unstructured partition function and will automatically detect the file
     type. You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single

@@ -209,7 +211,8 @@ def get_elements_from_api(
 class UnstructuredAPIFileLoader(UnstructuredFileLoader):
-    """UnstructuredAPIFileLoader uses the Unstructured API to load files.
+    """Loader that uses the Unstructured API to load files.
+
     By default, the loader makes a call to the hosted Unstructured API.
     If you are running the unstructured API locally, you can change the
     API rule by passing in the url parameter when you initialize the loader.

@@ -272,7 +275,9 @@ class UnstructuredAPIFileLoader(UnstructuredFileLoader):
 class UnstructuredFileIOLoader(UnstructuredBaseLoader):
-    """UnstructuredFileIOLoader uses unstructured to load files. The file loader
+    """Loader that uses Unstructured to load files.
+
+    The file loader
     uses the unstructured partition function and will automatically detect the file
     type. You can run the loader in one of two modes: "single" and "elements".
     If you use "single" mode, the document will be returned as a single

@@ -317,7 +322,8 @@ class UnstructuredFileIOLoader(UnstructuredBaseLoader):
 class UnstructuredAPIFileIOLoader(UnstructuredFileIOLoader):
-    """UnstructuredAPIFileIOLoader uses the Unstructured API to load files.
+    """Loader that uses the Unstructured API to load files.
+
     By default, the loader makes a call to the hosted Unstructured API.
     If you are running the unstructured API locally, you can change the
     API rule by passing in the url parameter when you initialize the loader.

@@ -5,7 +5,7 @@ from langchain.utils import get_from_env
 class DoctranPropertyExtractor(BaseDocumentTransformer):
-    """Extracts properties from text documents using doctran.
+    """Extract properties from text documents using doctran.

     Arguments:
         properties: A list of the properties to extract.

@@ -5,7 +5,7 @@ from langchain.utils import get_from_env
 class DoctranQATransformer(BaseDocumentTransformer):
-    """Extracts QA from text documents using doctran.
+    """Extract QA from text documents using doctran.

     Arguments:
         openai_api_key: OpenAI API key. Can also be specified via environment variable

@@ -5,7 +5,7 @@ from langchain.utils import get_from_env
 class DoctranTextTranslator(BaseDocumentTransformer):
-    """Translates text documents using doctran.
+    """Translate text documents using doctran.

     Arguments:
         openai_api_key: OpenAI API key. Can also be specified via environment variable

@@ -1,4 +1,4 @@
-"""LLM Chain specifically for evaluating question answering."""
+"""LLM Chains for evaluating question answering."""
 from __future__ import annotations

 import re

@@ -50,7 +50,7 @@ def _parse_string_eval_output(text: str) -> dict:
 class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
-    """LLM Chain specifically for evaluating question answering."""
+    """LLM Chain for evaluating question answering."""

     output_key: str = "results"  #: :meta private:

@@ -184,7 +184,7 @@ class QAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
-    """LLM Chain specifically for evaluating QA w/o GT based on context"""
+    """LLM Chain for evaluating QA w/o GT based on context"""

     @property
     def requires_reference(self) -> bool:

@@ -308,7 +308,7 @@ class ContextQAEvalChain(LLMChain, StringEvaluator, LLMEvalChain):
 class CotQAEvalChain(ContextQAEvalChain):
-    """LLM Chain specifically for evaluating QA using chain of thought reasoning."""
+    """LLM Chain for evaluating QA using chain of thought reasoning."""

     @property
     def evaluation_name(self) -> str:
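
A brief sketch of how the evaluation chain above is typically used; the example/prediction dicts and the grading output are illustrative, and the chain's default keys ("query", "answer", "result") are assumed:

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.evaluation.qa import QAEvalChain

    examples = [{"query": "What is 2 + 2?", "answer": "4"}]
    predictions = [{"result": "2 + 2 equals 4."}]

    eval_chain = QAEvalChain.from_llm(ChatOpenAI(temperature=0))
    graded = eval_chain.evaluate(examples, predictions)
    print(graded[0]["results"])  # e.g. "CORRECT"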

@@ -1,4 +1,4 @@
-"""LLM Chain specifically for generating examples for question answering."""
+"""LLM Chain for generating examples for question answering."""
 from __future__ import annotations

 from typing import Any

@@ -9,7 +9,7 @@ from langchain.schema.language_model import BaseLanguageModel
 class QAGenerateChain(LLMChain):
-    """LLM Chain specifically for generating examples for question answering."""
+    """LLM Chain for generating examples for question answering."""

     @classmethod
     def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:

@@ -7,11 +7,15 @@ from langchain.schema import BaseOutputParser
 class AutoGPTAction(NamedTuple):
+    """Action returned by AutoGPTOutputParser."""
+
     name: str
     args: Dict


 class BaseAutoGPTOutputParser(BaseOutputParser):
+    """Base Output parser for AutoGPT."""
+
     @abstractmethod
     def parse(self, text: str) -> AutoGPTAction:
         """Return AutoGPTAction"""

@@ -36,6 +40,8 @@ def preprocess_json_input(input_str: str) -> str:
 class AutoGPTOutputParser(BaseAutoGPTOutputParser):
+    """Output parser for AutoGPT."""
+
     def parse(self, text: str) -> AutoGPTAction:
         try:
             parsed = json.loads(text, strict=False)

@@ -13,6 +13,8 @@ from langchain.vectorstores.base import VectorStoreRetriever
 class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
+    """Prompt for AutoGPT."""
+
     ai_name: str
     ai_role: str
     tools: List[BaseTool]

@@ -123,7 +123,7 @@ class PromptGenerator:
 def get_prompt(tools: List[BaseTool]) -> str:
-    """This function generates a prompt string.
+    """Generates a prompt string.

     It includes various constraints, commands, resources, and performance evaluations.

@@ -2,6 +2,8 @@ from enum import Enum
 class Constant(Enum):
+    """Enum for constants used in the CPAL."""
+
     narrative_input = "narrative_input"
     chain_answer = "chain_answer"  # natural language answer
     chain_data = "chain_data"  # pydantic instance

@@ -136,7 +136,7 @@ def get_arangodb_client(
     username: Optional[str] = None,
     password: Optional[str] = None,
 ) -> Any:
-    """Convenience method that gets Arango DB from credentials.
+    """Get the Arango DB client from credentials.

     Args:
         url: Arango DB url. Can be passed in as named arg or set as environment

@@ -110,7 +110,7 @@ class StringPromptValue(PromptValue):
 class StringPromptTemplate(BasePromptTemplate, ABC):
-    """String prompt should expose the format method, returning a prompt."""
+    """String prompt that exposes the format method, returning a prompt."""

     def format_prompt(self, **kwargs: Any) -> PromptValue:
         """Create Chat Messages."""

@@ -18,7 +18,8 @@ if TYPE_CHECKING:
 class GoogleCloudEnterpriseSearchRetriever(BaseRetriever):
-    """Wrapper around Google Cloud Enterprise Search Service API.
+    """Retriever for the Google Cloud Enterprise Search Service API.
+
     For the detailed explanation of the Enterprise Search concepts
     and configuration parameters refer to the product documentation.
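
A minimal construction sketch for the retriever above, under the assumption that an Enterprise Search engine already exists; the project and engine IDs are placeholders, and the parameter names should be checked against your installed version:

.. code-block:: python

    from langchain.retrievers import GoogleCloudEnterpriseSearchRetriever

    retriever = GoogleCloudEnterpriseSearchRetriever(
        project_id="my-gcp-project",      # placeholder
        search_engine_id="my-engine-id",  # placeholder
    )
    docs = retriever.get_relevant_documents("What is the capital of France?")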

@@ -26,6 +26,7 @@ OPERATOR_TO_TQL = {
 def can_cast_to_float(string: str) -> bool:
+    """Check if a string can be cast to a float."""
     try:
         float(string)
         return True

@@ -67,6 +67,8 @@ class QuestionListOutputParser(PydanticOutputParser):
 class WebResearchRetriever(BaseRetriever):
+    """Retriever for web research based on the Google Search API."""
+
     # Inputs
     vectorstore: VectorStore = Field(
         ..., description="Vector store for storing web pages"
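
A hedged wiring sketch for the retriever above, following the documented pattern of building it from an LLM, a vector store, and a Google Search wrapper; assumes OPENAI_API_KEY, GOOGLE_API_KEY, and GOOGLE_CSE_ID are set:

.. code-block:: python

    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.retrievers.web_research import WebResearchRetriever
    from langchain.utilities import GoogleSearchAPIWrapper
    from langchain.vectorstores import Chroma

    retriever = WebResearchRetriever.from_llm(
        vectorstore=Chroma(
            embedding_function=OpenAIEmbeddings(), persist_directory="./chroma_db"
        ),
        llm=ChatOpenAI(temperature=0),
        search=GoogleSearchAPIWrapper(),
    )
    docs = retriever.get_relevant_documents("How do plants generate energy?")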

@@ -13,4 +13,6 @@ if TYPE_CHECKING:
 class AmadeusBaseTool(BaseTool):
+    """Base Tool for Amadeus."""
+
     client: Client = Field(default_factory=authenticate)

@@ -12,6 +12,8 @@ from langchain.tools.amadeus.base import AmadeusBaseTool
 class ClosestAirportSchema(BaseModel):
+    """Schema for the AmadeusClosestAirport tool."""
+
     location: str = Field(
         description=(
             " The location for which you would like to find the nearest airport "

@@ -29,6 +31,8 @@ class ClosestAirportSchema(BaseModel):
 class AmadeusClosestAirport(AmadeusBaseTool):
+    """Tool for finding the closest airport to a particular location."""
+
     name: str = "closest_airport"
     description: str = (
         "Use this tool to find the closest airport to a particular location."

@@ -14,6 +14,8 @@ logger = logging.getLogger(__name__)
 class FlightSearchSchema(BaseModel):
+    """Schema for the AmadeusFlightSearch tool."""
+
     originLocationCode: str = Field(
         description=(
             " The three letter International Air Transport "

@@ -53,6 +55,8 @@ class FlightSearchSchema(BaseModel):
 class AmadeusFlightSearch(AmadeusBaseTool):
+    """Tool for searching for a single flight between two airports."""
+
     name: str = "single_flight_search"
     description: str = (
         " Use this tool to search for a single flight between the origin and "

@@ -20,6 +20,8 @@ from langchain.utilities.github import GitHubAPIWrapper
 class GitHubAction(BaseTool):
+    """Tool for interacting with the GitHub API."""
+
     api_wrapper: GitHubAPIWrapper = Field(default_factory=GitHubAPIWrapper)
     mode: str
     name = ""

@@ -36,4 +36,5 @@ def stringify_dict(data: dict) -> str:
 def comma_list(items: List[Any]) -> str:
+    """Convert a list to a comma-separated string."""
     return ", ".join(str(item) for item in items)
