Mirror of https://github.com/hwchase17/langchain (synced 2024-11-10 01:10:59 +00:00)
core[patch]: docstrings agents (#23502)

Added missing docstrings and formatted existing docstrings into a consistent form.
This commit is contained in:
parent 79d8556c22
commit 2c9b84c3a8
@@ -5,10 +5,10 @@
 New agents should be built using the langgraph library
 (https://github.com/langchain-ai/langgraph), which provides a simpler
 and more flexible way to define agents.

 Please see the migration guide for information on how to migrate existing
 agents to modern langgraph agents:
 https://python.langchain.com/v0.2/docs/how_to/migrate_agent/

 Agents use language models to choose a sequence of actions to take.

@@ -21,6 +21,7 @@ A basic agent works in the following manner:

 The schemas for the agents themselves are defined in langchain.agents.agent.
 """  # noqa: E501
+
 from __future__ import annotations

 import json
@@ -193,9 +194,11 @@ def _create_function_message(
     agent_action: AgentAction, observation: Any
 ) -> FunctionMessage:
     """Convert agent action and observation into a function message.
+
     Args:
         agent_action: the tool invocation request from the agent
         observation: the result of the tool invocation
+
     Returns:
         FunctionMessage that corresponds to the original tool invocation
     """
@@ -19,6 +19,7 @@ Cache directly competes with Memory. See documentation for Pros and Cons.

 BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
 """
+
 from __future__ import annotations

 from abc import ABC, abstractmethod
@@ -31,7 +32,7 @@ RETURN_VAL_TYPE = Sequence[Generation]


 class BaseCache(ABC):
-    """This interfaces provides a caching layer for LLMs and Chat models.
+    """Interface for a caching layer for LLMs and Chat models.

     The cache interface consists of the following methods:

@@ -73,7 +74,7 @@ class BaseCache(ABC):
         """Update cache based on prompt and llm_string.

         The prompt and llm_string are used to generate a key for the cache.
-        The key should match that of the look up method.
+        The key should match that of the lookup method.

         Args:
             prompt: a string representation of the prompt.
@@ -93,7 +94,7 @@ class BaseCache(ABC):
         """Clear cache that can take additional keyword arguments."""

     async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+        """Async look up based on prompt and llm_string.

         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).
@@ -117,7 +118,7 @@ class BaseCache(ABC):
     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Update cache based on prompt and llm_string.
+        """Async update cache based on prompt and llm_string.

         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the look up method.
@@ -137,7 +138,7 @@ class BaseCache(ABC):
         return await run_in_executor(None, self.update, prompt, llm_string, return_val)

     async def aclear(self, **kwargs: Any) -> None:
-        """Clear cache that can take additional keyword arguments."""
+        """Async clear cache that can take additional keyword arguments."""
         return await run_in_executor(None, self.clear, **kwargs)


@@ -149,11 +150,30 @@ class InMemoryCache(BaseCache):
         self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}

     def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string."""
+        """Look up based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+
+        Returns:
+            On a cache miss, return None. On a cache hit, return the cached value.
+        """
         return self._cache.get((prompt, llm_string), None)

     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string."""
+        """Update cache based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+            return_val: The value to be cached. The value is a list of Generations
+                (or subclasses).
+        """
         self._cache[(prompt, llm_string)] = return_val

     def clear(self, **kwargs: Any) -> None:
@@ -161,15 +181,34 @@ class InMemoryCache(BaseCache):
         self._cache = {}

     async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string."""
+        """Async look up based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+
+        Returns:
+            On a cache miss, return None. On a cache hit, return the cached value.
+        """
         return self.lookup(prompt, llm_string)

     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Update cache based on prompt and llm_string."""
+        """Async update cache based on prompt and llm_string.
+
+        Args:
+            prompt: a string representation of the prompt.
+                In the case of a Chat model, the prompt is a non-trivial
+                serialization of the prompt into the language model.
+            llm_string: A string representation of the LLM configuration.
+            return_val: The value to be cached. The value is a list of Generations
+                (or subclasses).
+        """
         self.update(prompt, llm_string, return_val)

     async def aclear(self, **kwargs: Any) -> None:
-        """Clear cache."""
+        """Async clear cache."""
         self.clear()
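For context, a minimal sketch of implementing the BaseCache interface covered by this diff; the DictCache name is illustrative, and the async methods are inherited from the run_in_executor defaults shown above:

from typing import Any, Dict, Optional, Tuple

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache


class DictCache(BaseCache):
    """Toy cache keyed on the (prompt, llm_string) pair, like InMemoryCache."""

    def __init__(self) -> None:
        self._store: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        # Cache hit returns the stored generations; miss returns None.
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store = {}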
@@ -1,4 +1,5 @@
-"""Custom **exceptions** for LangChain. """
+"""Custom **exceptions** for LangChain."""
+
 from typing import Any, Optional


@@ -18,7 +19,7 @@ class OutputParserException(ValueError, LangChainException):
     available to catch and handle in ways to fix the parsing error, while other
     errors will be raised.

-    Args:
+    Parameters:
         error: The error that's being re-raised or an error message.
         observation: String explanation of error which can be passed to a
             model to try and remediate the issue.
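A short sketch of how these parameters are used in practice; parse_integer is a hypothetical parser, while the send_to_llm flag (which requires observation and llm_output to be set) is part of OutputParserException:

from langchain_core.exceptions import OutputParserException


def parse_integer(text: str) -> int:
    """Hypothetical parser that surfaces a repairable error to the model."""
    try:
        return int(text.strip())
    except ValueError as e:
        raise OutputParserException(
            error=e,
            observation="Expected a bare integer, e.g. '42'.",
            llm_output=text,
            send_to_llm=True,  # pass observation + llm_output back to the model
        )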
@@ -1,5 +1,6 @@
 # flake8: noqa
 """Global values and configuration that apply to all of LangChain."""
+
 import warnings
 from typing import TYPE_CHECKING, Optional

@@ -135,7 +136,11 @@ def get_debug() -> bool:


 def set_llm_cache(value: Optional["BaseCache"]) -> None:
-    """Set a new LLM cache, overwriting the previous value, if any."""
+    """Set a new LLM cache, overwriting the previous value, if any.
+
+    Args:
+        value: The new LLM cache to use. If `None`, the LLM cache is disabled.
+    """
     try:
         import langchain  # type: ignore[import]

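A brief usage sketch of the documented behavior:

from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

set_llm_cache(InMemoryCache())  # repeated identical prompts now hit the cache
set_llm_cache(None)             # None disables the LLM cache again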
@@ -7,6 +7,7 @@
 BaseMemory --> <name>Memory --> <name>Memory  # Examples: BaseChatMemory -> MotorheadMemory

 """  # noqa: E501
+
 from __future__ import annotations

 from abc import ABC, abstractmethod
@@ -58,20 +59,37 @@ class BaseMemory(Serializable, ABC):

     @abstractmethod
     def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
-        """Return key-value pairs given the text input to the chain."""
+        """Return key-value pairs given the text input to the chain.
+
+        Args:
+            inputs: The inputs to the chain."""

     async def aload_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
-        """Return key-value pairs given the text input to the chain."""
+        """Async return key-value pairs given the text input to the chain.
+
+        Args:
+            inputs: The inputs to the chain.
+        """
         return await run_in_executor(None, self.load_memory_variables, inputs)

     @abstractmethod
     def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
-        """Save the context of this chain run to memory."""
+        """Save the context of this chain run to memory.
+
+        Args:
+            inputs: The inputs to the chain.
+            outputs: The outputs of the chain.
+        """

     async def asave_context(
         self, inputs: Dict[str, Any], outputs: Dict[str, str]
     ) -> None:
-        """Save the context of this chain run to memory."""
+        """Async save the context of this chain run to memory.
+
+        Args:
+            inputs: The inputs to the chain.
+            outputs: The outputs of the chain.
+        """
         await run_in_executor(None, self.save_context, inputs, outputs)

     @abstractmethod
@@ -79,5 +97,5 @@ class BaseMemory(Serializable, ABC):
         """Clear memory contents."""

     async def aclear(self) -> None:
-        """Clear memory contents."""
+        """Async clear memory contents."""
         await run_in_executor(None, self.clear)
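To illustrate the BaseMemory contract documented in this diff, a minimal sketch follows; ScratchpadMemory and its single variable are illustrative, and the async variants fall back to the sync methods via run_in_executor as shown above:

from typing import Any, Dict, List

from langchain_core.memory import BaseMemory


class ScratchpadMemory(BaseMemory):
    """Toy memory exposing a single 'scratchpad' variable."""

    store: Dict[str, str] = {}

    @property
    def memory_variables(self) -> List[str]:
        return ["scratchpad"]

    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        return {"scratchpad": self.store.get("scratchpad", "")}

    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
        self.store["scratchpad"] = str(outputs)

    def clear(self) -> None:
        self.store = {}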
@@ -3,6 +3,7 @@
 Prompt values are used to represent different pieces of prompts.
 They can be used to represent text, images, or chat message pieces.
 """
+
 from __future__ import annotations

 from abc import ABC, abstractmethod
@@ -103,15 +104,15 @@ class ImagePromptValue(PromptValue):
     """Image prompt value."""

     image_url: ImageURL
-    """Prompt image."""
+    """Image URL."""
     type: Literal["ImagePromptValue"] = "ImagePromptValue"

     def to_string(self) -> str:
-        """Return prompt as string."""
+        """Return prompt (image URL) as string."""
         return self.image_url["url"]

     def to_messages(self) -> List[BaseMessage]:
-        """Return prompt as messages."""
+        """Return prompt (image URL) as messages."""
         return [HumanMessage(content=[cast(dict, self.image_url)])]


@@ -120,6 +121,7 @@ class ChatPromptValueConcrete(ChatPromptValue):
     For use in external schemas."""

     messages: Sequence[AnyMessage]
+    """Sequence of messages."""

     type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"

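A short sketch of the ImagePromptValue behavior documented above (the URL is made up; the expected outputs follow directly from the method bodies in the diff):

from langchain_core.prompt_values import ImagePromptValue

value = ImagePromptValue(image_url={"url": "https://example.com/cat.png"})
print(value.to_string())    # the raw URL string
print(value.to_messages())  # a single HumanMessage wrapping the image dict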
@@ -18,6 +18,7 @@ the backbone of a retriever, but there are other types of retrievers as well.
 Document, Serializable, Callbacks,
 CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
 """
+
 from __future__ import annotations

 import warnings
@@ -119,14 +120,14 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
     _new_arg_supported: bool = False
     _expects_other_args: bool = False
     tags: Optional[List[str]] = None
-    """Optional list of tags associated with the retriever. Defaults to None
+    """Optional list of tags associated with the retriever. Defaults to None.
     These tags will be associated with each call to this retriever,
     and passed as arguments to the handlers defined in `callbacks`.
     You can use these to eg identify a specific instance of a retriever with its
     use case.
     """
     metadata: Optional[Dict[str, Any]] = None
-    """Optional metadata associated with the retriever. Defaults to None
+    """Optional metadata associated with the retriever. Defaults to None.
     This metadata will be associated with each call to this retriever,
     and passed as arguments to the handlers defined in `callbacks`.
     You can use these to eg identify a specific instance of a retriever with its
@@ -289,9 +290,10 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
         self, query: str, *, run_manager: CallbackManagerForRetrieverRun
     ) -> List[Document]:
         """Get documents relevant to a query.

         Args:
             query: String to find relevant documents for
-            run_manager: The callbacks handler to use
+            run_manager: The callback handler to use
+
         Returns:
             List of relevant documents
         """
@@ -300,9 +302,10 @@ class BaseRetriever(RunnableSerializable[RetrieverInput, RetrieverOutput], ABC):
         self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
     ) -> List[Document]:
         """Asynchronously get documents relevant to a query.

         Args:
             query: String to find relevant documents for
-            run_manager: The callbacks handler to use
+            run_manager: The callback handler to use
+
         Returns:
             List of relevant documents
         """
@@ -5,6 +5,7 @@ to a simple key-value interface.

 The primary goal of these storages is to support implementation of caching.
 """
+
 from abc import ABC, abstractmethod
 from typing import (
     Any,
@@ -95,7 +96,7 @@ class BaseStore(Generic[K, V], ABC):
         """

     async def amget(self, keys: Sequence[K]) -> List[Optional[V]]:
-        """Get the values associated with the given keys.
+        """Async get the values associated with the given keys.

         Args:
             keys (Sequence[K]): A sequence of keys.
@@ -115,7 +116,7 @@ class BaseStore(Generic[K, V], ABC):
         """

     async def amset(self, key_value_pairs: Sequence[Tuple[K, V]]) -> None:
-        """Set the values for the given keys.
+        """Async set the values for the given keys.

         Args:
             key_value_pairs (Sequence[Tuple[K, V]]): A sequence of key-value pairs.
@@ -131,7 +132,7 @@ class BaseStore(Generic[K, V], ABC):
         """

     async def amdelete(self, keys: Sequence[K]) -> None:
-        """Delete the given keys and their associated values.
+        """Async delete the given keys and their associated values.

         Args:
             keys (Sequence[K]): A sequence of keys to delete.
@@ -147,7 +148,7 @@ class BaseStore(Generic[K, V], ABC):
         Args:
             prefix (str): The prefix to match.

-        Returns:
+        Yields:
             Iterator[K | str]: An iterator over keys that match the given prefix.

         This method is allowed to return an iterator over either K or str
@@ -157,12 +158,12 @@ class BaseStore(Generic[K, V], ABC):
     async def ayield_keys(
         self, *, prefix: Optional[str] = None
     ) -> Union[AsyncIterator[K], AsyncIterator[str]]:
-        """Get an iterator over keys that match the given prefix.
+        """Async get an iterator over keys that match the given prefix.

         Args:
             prefix (str): The prefix to match.

-        Returns:
+        Yields:
             Iterator[K | str]: An iterator over keys that match the given prefix.

         This method is allowed to return an iterator over either K or str
@@ -200,7 +201,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
         return [self.store.get(key) for key in keys]

     async def amget(self, keys: Sequence[str]) -> List[Optional[V]]:
-        """Get the values associated with the given keys.
+        """Async get the values associated with the given keys.

         Args:
             keys (Sequence[str]): A sequence of keys.
@@ -224,7 +225,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
         self.store[key] = value

     async def amset(self, key_value_pairs: Sequence[Tuple[str, V]]) -> None:
-        """Set the values for the given keys.
+        """Async set the values for the given keys.

         Args:
             key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs.
@@ -245,7 +246,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
         del self.store[key]

     async def amdelete(self, keys: Sequence[str]) -> None:
-        """Delete the given keys and their associated values.
+        """Async delete the given keys and their associated values.

         Args:
             keys (Sequence[str]): A sequence of keys to delete.
@@ -258,7 +259,7 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
         Args:
             prefix (str, optional): The prefix to match. Defaults to None.

-        Returns:
+        Yields:
             Iterator[str]: An iterator over keys that match the given prefix.
         """
         if prefix is None:
@@ -269,12 +270,12 @@ class InMemoryBaseStore(BaseStore[str, V], Generic[V]):
                     yield key

     async def ayield_keys(self, prefix: Optional[str] = None) -> AsyncIterator[str]:
-        """Get an async iterator over keys that match the given prefix.
+        """Async get an async iterator over keys that match the given prefix.

         Args:
             prefix (str, optional): The prefix to match. Defaults to None.

-        Returns:
+        Yields:
             AsyncIterator[str]: An async iterator over keys that match the given prefix.
         """
         if prefix is None:
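The diff is truncated at this point. As a usage sketch of the key-value interface it documents (InMemoryStore is the concrete string-keyed store built on InMemoryBaseStore; the keys and values are made up):

from langchain_core.stores import InMemoryStore

store = InMemoryStore()
store.mset([("user:1", "alice"), ("user:2", "bob")])
print(store.mget(["user:1", "user:3"]))        # ['alice', None]
print(list(store.yield_keys(prefix="user:")))  # ['user:1', 'user:2']
store.mdelete(["user:1"])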