core[patch]: Document messages namespace (#23154)

- Moved doc-strings below attributes in TypedDicts -- seems to render
  better on API Reference pages.
- Provided more description and some simple code examples.
pull/23192/head
Eugene Yurtsev committed 3 months ago (via GitHub)
parent 3c917204dc
commit c2d43544cc

@@ -25,19 +25,36 @@ from langchain_core.utils.json import (

 class UsageMetadata(TypedDict):
-    """Usage metadata for a message, such as token counts.
-
-    Attributes:
-        input_tokens: (int) count of input (or prompt) tokens
-        output_tokens: (int) count of output (or completion) tokens
-        total_tokens: (int) total token count
-    """
+    """Usage metadata for a message, such as token counts.
+
+    This is a standard representation of token usage that is consistent across models.
+
+    Example:
+
+        .. code-block:: python
+
+            {
+                "input_tokens": 10,
+                "output_tokens": 20,
+                "total_tokens": 30
+            }
+    """

     input_tokens: int
+    """Count of input (or prompt) tokens."""
     output_tokens: int
+    """Count of output (or completion) tokens."""
     total_tokens: int
+    """Total token count."""


 class AIMessage(BaseMessage):
-    """Message from an AI."""
+    """Message from an AI.
+
+    AIMessage is returned from a chat model as a response to a prompt.
+
+    This message represents the output of the model and consists of both
+    the raw output as returned by the model together with standardized fields
+    (e.g., tool calls, usage metadata) added by the LangChain framework.
+    """

     example: bool = False
     """Use to denote that a message is part of an example conversation.
@@ -56,6 +73,7 @@ class AIMessage(BaseMessage):
     """

     type: Literal["ai"] = "ai"
+    """The type of the message (used for deserialization)."""

     def __init__(
         self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any
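A minimal sketch of how the fields documented above fit together, assuming the optional `usage_metadata` attribute that `AIMessage` exposes for this TypedDict:

    .. code-block:: python

        from langchain_core.messages import AIMessage

        # Construct an AIMessage by hand; a chat model normally fills these in.
        msg = AIMessage(
            content="The answer is 42.",
            usage_metadata={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
        )

        print(msg.usage_metadata["total_tokens"])  # 30
        print(msg.type)  # "ai" -- the discriminator used for (de)serialization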

@@ -13,7 +13,7 @@ if TYPE_CHECKING:

 class BaseMessage(Serializable):
-    """Base abstract Message class.
+    """Base abstract message class.

     Messages are the inputs and outputs of ChatModels.
     """
@@ -24,14 +24,28 @@ class BaseMessage(Serializable):
     additional_kwargs: dict = Field(default_factory=dict)
     """Reserved for additional payload data associated with the message.

-    For example, for a message from an AI, this could include tool calls."""
+    For example, for a message from an AI, this could include tool calls as
+    encoded by the model provider.
+    """

     response_metadata: dict = Field(default_factory=dict)
     """Response metadata. For example: response headers, logprobs, token counts."""

     type: str
+    """The type of the message. Must be a string that is unique to the message type.
+
+    The purpose of this field is to allow for easy identification of the message type
+    when deserializing messages.
+    """

     name: Optional[str] = None
+    """An optional name for the message.
+
+    This can be used to provide a human-readable name for the message.
+
+    Usage of this field is optional, and whether it's used or not is up to the
+    model implementation.
+    """

     id: Optional[str] = None
     """An optional unique identifier for the message. This should ideally be
@@ -57,6 +71,7 @@ class BaseMessage(Serializable):
         return ["langchain", "schema", "messages"]

     def __add__(self, other: Any) -> ChatPromptTemplate:
+        """Concatenate this message with another message."""
         from langchain_core.prompts.chat import ChatPromptTemplate

         prompt = ChatPromptTemplate(messages=[self])  # type: ignore[call-arg]
@@ -122,6 +137,17 @@ class BaseMessageChunk(BaseMessage):
         return ["langchain", "schema", "messages"]

     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
+        """Message chunks support concatenation with other message chunks.
+
+        This functionality is useful to combine message chunks yielded from
+        a streaming model into a complete message.
+
+        For example,
+
+        `AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`
+
+        will give `AIMessageChunk(content="Hello World")`
+        """
         if isinstance(other, BaseMessageChunk):
             # If both are (subclasses of) BaseMessageChunk,
             # concat into a single BaseMessageChunk
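The chunk concatenation documented in `__add__` can be exercised directly; a short sketch:

    .. code-block:: python

        from langchain_core.messages import AIMessageChunk

        # Merge chunks yielded by a streaming model into one complete message.
        merged = AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")
        print(merged.content)  # "Hello World"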

@@ -15,6 +15,7 @@ class ChatMessage(BaseMessage):
     """The speaker / role of the Message."""

     type: Literal["chat"] = "chat"
+    """The type of the message (used during serialization)."""

     @classmethod
     def get_lc_namespace(cls) -> List[str]:
@@ -32,6 +33,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore
+    """The type of the message (used during serialization)."""

     @classmethod
     def get_lc_namespace(cls) -> List[str]:

@@ -9,12 +9,21 @@ from langchain_core.utils._merge import merge_dicts

 class FunctionMessage(BaseMessage):
-    """Message for passing the result of executing a function back to a model."""
+    """Message for passing the result of executing a tool back to a model.
+
+    FunctionMessages are an older version of the ToolMessage schema, and
+    do not contain the tool_call_id field.
+
+    The tool_call_id field is used to associate the tool call request with the
+    tool call response. This is useful in situations where a chat model is able
+    to request multiple tool calls in parallel.
+    """

     name: str
     """The name of the function that was executed."""

     type: Literal["function"] = "function"
+    """The type of the message (used for serialization)."""

     @classmethod
     def get_lc_namespace(cls) -> List[str]:
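To make the schema difference concrete, a short sketch contrasting the two message types (the tool name and id below are made up):

    .. code-block:: python

        from langchain_core.messages import FunctionMessage, ToolMessage

        # Legacy schema: no tool_call_id, so parallel tool calls are ambiguous.
        legacy = FunctionMessage(name="multiply", content="42")

        # Current schema: tool_call_id ties the result to a specific request.
        current = ToolMessage(content="42", tool_call_id="call_abc123")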

@@ -4,7 +4,29 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk

 class HumanMessage(BaseMessage):
-    """Message from a human."""
+    """Message from a human.
+
+    HumanMessages are messages that are passed in from a human to the model.
+
+    Example:
+
+        .. code-block:: python
+
+            from langchain_core.messages import HumanMessage, SystemMessage
+
+            messages = [
+                SystemMessage(
+                    content="You are a helpful assistant! Your name is Bob."
+                ),
+                HumanMessage(
+                    content="What is your name?"
+                )
+            ]
+
+            # Instantiate a chat model and invoke it with the messages
+            model = ...
+            print(model.invoke(messages))
+    """

     example: bool = False
     """Use to denote that a message is part of an example conversation.

@@ -4,8 +4,29 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk

 class SystemMessage(BaseMessage):
-    """Message for priming AI behavior, usually passed in as the first of a sequence
+    """Message for priming AI behavior.
+
+    The system message is usually passed in as the first of a sequence
     of input messages.
+
+    Example:
+
+        .. code-block:: python
+
+            from langchain_core.messages import HumanMessage, SystemMessage
+
+            messages = [
+                SystemMessage(
+                    content="You are a helpful assistant! Your name is Bob."
+                ),
+                HumanMessage(
+                    content="What is your name?"
+                )
+            ]
+
+            # Define a chat model and invoke it with the messages
+            print(model.invoke(messages))
     """

     type: Literal["system"] = "system"

@@ -12,7 +12,23 @@ from langchain_core.utils._merge import merge_dicts

 class ToolMessage(BaseMessage):
-    """Message for passing the result of executing a tool back to a model."""
+    """Message for passing the result of executing a tool back to a model.
+
+    ToolMessages contain the result of a tool invocation. Typically, the result
+    is encoded inside the `content` field.
+
+    Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+        .. code-block:: python
+
+            from langchain_core.messages import ToolMessage
+
+            ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+    The tool_call_id field is used to associate the tool call request with the
+    tool call response. This is useful in situations where a chat model is able
+    to request multiple tool calls in parallel.
+    """

     tool_call_id: str
     """Tool call that this message is responding to."""
@@ -75,15 +91,30 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):

 class ToolCall(TypedDict):
     """Represents a request to call a tool.
-
-    Attributes:
-        name: (str) the name of the tool to be called
-        args: (dict) the arguments to the tool call
-        id: (str) if provided, an identifier associated with the tool call
+
+    Example:
+
+        .. code-block:: python
+
+            {
+                "name": "foo",
+                "args": {"a": 1},
+                "id": "123"
+            }
+
+        This represents a request to call the tool named "foo" with arguments {"a": 1}
+        and an identifier of "123".
     """

     name: str
+    """The name of the tool to be called."""
     args: Dict[str, Any]
+    """The arguments to the tool call."""
     id: Optional[str]
+    """An identifier associated with the tool call.
+
+    An identifier is needed to associate a tool call request with a tool
+    call result in events when multiple concurrent tool calls are made.
+    """


 class ToolCallChunk(TypedDict):
@@ -99,22 +130,21 @@ class ToolCallChunk(TypedDict):

         left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
         right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]

         (
             AIMessageChunk(content="", tool_call_chunks=left_chunks)
             + AIMessageChunk(content="", tool_call_chunks=right_chunks)
         ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
-
-    Attributes:
-        name: (str) if provided, a substring of the name of the tool to be called
-        args: (str) if provided, a JSON substring of the arguments to the tool call
-        id: (str) if provided, a substring of an identifier for the tool call
-        index: (int) if provided, the index of the tool call in a sequence
     """

     name: Optional[str]
+    """The name of the tool to be called."""
     args: Optional[str]
+    """The arguments to the tool call."""
     id: Optional[str]
+    """An identifier associated with the tool call."""
     index: Optional[int]
+    """The index of the tool call in a sequence."""


 class InvalidToolCall(TypedDict):
@@ -125,9 +155,13 @@ class InvalidToolCall(TypedDict):
     """

     name: Optional[str]
+    """The name of the tool to be called."""
     args: Optional[str]
+    """The arguments to the tool call."""
     id: Optional[str]
+    """An identifier associated with the tool call."""
     error: Optional[str]
+    """An error message associated with the tool call."""


 def default_tool_parser(
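For context on where these TypedDicts show up in practice: a chat model that decides to call a tool returns an `AIMessage` whose `tool_calls` attribute holds `ToolCall` dicts (the values below are illustrative):

    .. code-block:: python

        from langchain_core.messages import AIMessage

        msg = AIMessage(
            content="",
            tool_calls=[{"name": "foo", "args": {"a": 1}, "id": "123"}],
        )
        print(msg.tool_calls[0]["name"])  # "foo"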

@@ -1,3 +1,11 @@
+"""Module contains utility functions for working with messages.
+
+Some examples of what you can do with these functions include:
+
+* Convert messages to strings (serialization)
+* Convert messages from dicts to Message objects (deserialization)
+* Filter messages from a list of messages based on name, type or id etc.
+"""
 from __future__ import annotations

 import inspect
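A quick sketch of two of the utilities the new module docstring alludes to, both exported from `langchain_core.messages`:

    .. code-block:: python

        from langchain_core.messages import convert_to_messages, get_buffer_string

        # Deserialization: (role, content) tuples or dicts -> Message objects.
        messages = convert_to_messages([("human", "hi"), ("ai", "hello!")])

        # Serialization: Message objects -> a single prompt-style string.
        print(get_buffer_string(messages))
        # Human: hi
        # AI: hello!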

@@ -5216,7 +5216,15 @@
   ]),
   'definitions': dict({
     'AIMessage': dict({
-      'description': 'Message from an AI.',
+      'description': '''
+        Message from an AI.
+
+        AIMessage is returned from a chat model as a response to a prompt.
+
+        This message represents the output of the model and consists of both
+        the raw output as returned by the model together with standardized fields
+        (e.g., tool calls, usage metadata) added by the LangChain framework.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -5404,7 +5412,16 @@
       'type': 'object',
     }),
     'FunctionMessage': dict({
-      'description': 'Message for passing the result of executing a function back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        FunctionMessages are an older version of the ToolMessage schema, and
+        do not contain the tool_call_id field.
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -5460,7 +5477,30 @@
       'type': 'object',
     }),
    'HumanMessage': dict({
-      'description': 'Message from a human.',
+      'description': '''
+        Message from a human.
+
+        HumanMessages are messages that are passed in from a human to the model.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Instantiate a chat model and invoke it with the messages
+                model = ...
+                print(model.invoke(messages))
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -5571,8 +5611,28 @@
     }),
     'SystemMessage': dict({
       'description': '''
-        Message for priming AI behavior, usually passed in as the first of a sequence
+        Message for priming AI behavior.
+
+        The system message is usually passed in as the first of a sequence
         of input messages.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Define a chat model and invoke it with the messages
+                print(model.invoke(messages))
       ''',
       'properties': dict({
         'additional_kwargs': dict({
@@ -5651,7 +5711,24 @@
       'type': 'object',
     }),
     'ToolMessage': dict({
-      'description': 'Message for passing the result of executing a tool back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        ToolMessages contain the result of a tool invocation. Typically, the result
+        is encoded inside the `content` field.
+
+        Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+            .. code-block:: python
+
+                from langchain_core.messages import ToolMessage
+
+                ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -5777,7 +5854,15 @@
   ]),
   'definitions': dict({
     'AIMessage': dict({
-      'description': 'Message from an AI.',
+      'description': '''
+        Message from an AI.
+
+        AIMessage is returned from a chat model as a response to a prompt.
+
+        This message represents the output of the model and consists of both
+        the raw output as returned by the model together with standardized fields
+        (e.g., tool calls, usage metadata) added by the LangChain framework.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -5965,7 +6050,16 @@
       'type': 'object',
     }),
     'FunctionMessage': dict({
-      'description': 'Message for passing the result of executing a function back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        FunctionMessages are an older version of the ToolMessage schema, and
+        do not contain the tool_call_id field.
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6021,7 +6115,30 @@
       'type': 'object',
     }),
     'HumanMessage': dict({
-      'description': 'Message from a human.',
+      'description': '''
+        Message from a human.
+
+        HumanMessages are messages that are passed in from a human to the model.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Instantiate a chat model and invoke it with the messages
+                model = ...
+                print(model.invoke(messages))
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6132,8 +6249,28 @@
     }),
     'SystemMessage': dict({
       'description': '''
-        Message for priming AI behavior, usually passed in as the first of a sequence
+        Message for priming AI behavior.
+
+        The system message is usually passed in as the first of a sequence
         of input messages.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Define a chat model and invoke it with the messages
+                print(model.invoke(messages))
       ''',
       'properties': dict({
         'additional_kwargs': dict({
@@ -6212,7 +6349,24 @@
       'type': 'object',
     }),
     'ToolMessage': dict({
-      'description': 'Message for passing the result of executing a tool back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        ToolMessages contain the result of a tool invocation. Typically, the result
+        is encoded inside the `content` field.
+
+        Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+            .. code-block:: python
+
+                from langchain_core.messages import ToolMessage
+
+                ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6322,7 +6476,15 @@
   ]),
   'definitions': dict({
     'AIMessage': dict({
-      'description': 'Message from an AI.',
+      'description': '''
+        Message from an AI.
+
+        AIMessage is returned from a chat model as a response to a prompt.
+
+        This message represents the output of the model and consists of both
+        the raw output as returned by the model together with standardized fields
+        (e.g., tool calls, usage metadata) added by the LangChain framework.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6463,7 +6625,16 @@
       'type': 'object',
     }),
     'FunctionMessage': dict({
-      'description': 'Message for passing the result of executing a function back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        FunctionMessages are an older version of the ToolMessage schema, and
+        do not contain the tool_call_id field.
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6519,7 +6690,30 @@
       'type': 'object',
     }),
     'HumanMessage': dict({
-      'description': 'Message from a human.',
+      'description': '''
+        Message from a human.
+
+        HumanMessages are messages that are passed in from a human to the model.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Instantiate a chat model and invoke it with the messages
+                model = ...
+                print(model.invoke(messages))
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6608,8 +6802,28 @@
     }),
     'SystemMessage': dict({
      'description': '''
-        Message for priming AI behavior, usually passed in as the first of a sequence
+        Message for priming AI behavior.
+
+        The system message is usually passed in as the first of a sequence
         of input messages.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Define a chat model and invoke it with the messages
+                print(model.invoke(messages))
       ''',
       'properties': dict({
         'additional_kwargs': dict({
@@ -6688,7 +6902,24 @@
       'type': 'object',
     }),
     'ToolMessage': dict({
-      'description': 'Message for passing the result of executing a tool back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        ToolMessages contain the result of a tool invocation. Typically, the result
+        is encoded inside the `content` field.
+
+        Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+            .. code-block:: python
+
+                from langchain_core.messages import ToolMessage
+
+                ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6786,7 +7017,15 @@
   ]),
   'definitions': dict({
     'AIMessage': dict({
-      'description': 'Message from an AI.',
+      'description': '''
+        Message from an AI.
+
+        AIMessage is returned from a chat model as a response to a prompt.
+
+        This message represents the output of the model and consists of both
+        the raw output as returned by the model together with standardized fields
+        (e.g., tool calls, usage metadata) added by the LangChain framework.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -6974,7 +7213,16 @@
       'type': 'object',
     }),
     'FunctionMessage': dict({
-      'description': 'Message for passing the result of executing a function back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        FunctionMessages are an older version of the ToolMessage schema, and
+        do not contain the tool_call_id field.
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7030,7 +7278,30 @@
       'type': 'object',
     }),
     'HumanMessage': dict({
-      'description': 'Message from a human.',
+      'description': '''
+        Message from a human.
+
+        HumanMessages are messages that are passed in from a human to the model.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Instantiate a chat model and invoke it with the messages
+                model = ...
+                print(model.invoke(messages))
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7141,8 +7412,28 @@
     }),
     'SystemMessage': dict({
       'description': '''
-        Message for priming AI behavior, usually passed in as the first of a sequence
+        Message for priming AI behavior.
+
+        The system message is usually passed in as the first of a sequence
         of input messages.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Define a chat model and invoke it with the messages
+                print(model.invoke(messages))
       ''',
       'properties': dict({
         'additional_kwargs': dict({
@@ -7221,7 +7512,24 @@
       'type': 'object',
     }),
     'ToolMessage': dict({
-      'description': 'Message for passing the result of executing a tool back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        ToolMessages contain the result of a tool invocation. Typically, the result
+        is encoded inside the `content` field.
+
+        Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+            .. code-block:: python
+
+                from langchain_core.messages import ToolMessage
+
+                ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7319,7 +7627,15 @@
   ]),
   'definitions': dict({
     'AIMessage': dict({
-      'description': 'Message from an AI.',
+      'description': '''
+        Message from an AI.
+
+        AIMessage is returned from a chat model as a response to a prompt.
+
+        This message represents the output of the model and consists of both
+        the raw output as returned by the model together with standardized fields
+        (e.g., tool calls, usage metadata) added by the LangChain framework.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7507,7 +7823,16 @@
       'type': 'object',
     }),
     'FunctionMessage': dict({
-      'description': 'Message for passing the result of executing a function back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        FunctionMessages are an older version of the ToolMessage schema, and
+        do not contain the tool_call_id field.
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7563,7 +7888,30 @@
       'type': 'object',
     }),
     'HumanMessage': dict({
-      'description': 'Message from a human.',
+      'description': '''
+        Message from a human.
+
+        HumanMessages are messages that are passed in from a human to the model.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Instantiate a chat model and invoke it with the messages
+                model = ...
+                print(model.invoke(messages))
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7674,8 +8022,28 @@
     }),
     'SystemMessage': dict({
       'description': '''
-        Message for priming AI behavior, usually passed in as the first of a sequence
+        Message for priming AI behavior.
+
+        The system message is usually passed in as the first of a sequence
         of input messages.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Define a chat model and invoke it with the messages
+                print(model.invoke(messages))
       ''',
       'properties': dict({
         'additional_kwargs': dict({
@@ -7754,7 +8122,24 @@
       'type': 'object',
     }),
     'ToolMessage': dict({
-      'description': 'Message for passing the result of executing a tool back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        ToolMessages contain the result of a tool invocation. Typically, the result
+        is encoded inside the `content` field.
+
+        Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+            .. code-block:: python
+
+                from langchain_core.messages import ToolMessage
+
+                ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -7844,7 +8229,15 @@
   dict({
     'definitions': dict({
      'AIMessage': dict({
-        'description': 'Message from an AI.',
+        'description': '''
+          Message from an AI.
+
+          AIMessage is returned from a chat model as a response to a prompt.
+
+          This message represents the output of the model and consists of both
+          the raw output as returned by the model together with standardized fields
+          (e.g., tool calls, usage metadata) added by the LangChain framework.
+        ''',
         'properties': dict({
           'additional_kwargs': dict({
             'title': 'Additional Kwargs',
@@ -8032,7 +8425,16 @@
         'type': 'object',
       }),
       'FunctionMessage': dict({
-        'description': 'Message for passing the result of executing a function back to a model.',
+        'description': '''
+          Message for passing the result of executing a tool back to a model.
+
+          FunctionMessages are an older version of the ToolMessage schema, and
+          do not contain the tool_call_id field.
+
+          The tool_call_id field is used to associate the tool call request with the
+          tool call response. This is useful in situations where a chat model is able
+          to request multiple tool calls in parallel.
+        ''',
         'properties': dict({
           'additional_kwargs': dict({
             'title': 'Additional Kwargs',
@@ -8088,7 +8490,30 @@
         'type': 'object',
       }),
       'HumanMessage': dict({
-        'description': 'Message from a human.',
+        'description': '''
+          Message from a human.
+
+          HumanMessages are messages that are passed in from a human to the model.
+
+          Example:
+
+              .. code-block:: python
+
+                  from langchain_core.messages import HumanMessage, SystemMessage
+
+                  messages = [
+                      SystemMessage(
+                          content="You are a helpful assistant! Your name is Bob."
+                      ),
+                      HumanMessage(
+                          content="What is your name?"
+                      )
+                  ]
+
+                  # Instantiate a chat model and invoke it with the messages
+                  model = ...
+                  print(model.invoke(messages))
+        ''',
         'properties': dict({
           'additional_kwargs': dict({
             'title': 'Additional Kwargs',
@@ -8210,8 +8635,28 @@
       }),
       'SystemMessage': dict({
         'description': '''
-          Message for priming AI behavior, usually passed in as the first of a sequence
+          Message for priming AI behavior.
+
+          The system message is usually passed in as the first of a sequence
           of input messages.
+
+          Example:
+
+              .. code-block:: python
+
+                  from langchain_core.messages import HumanMessage, SystemMessage
+
+                  messages = [
+                      SystemMessage(
+                          content="You are a helpful assistant! Your name is Bob."
+                      ),
+                      HumanMessage(
+                          content="What is your name?"
+                      )
+                  ]
+
+                  # Define a chat model and invoke it with the messages
+                  print(model.invoke(messages))
         ''',
         'properties': dict({
           'additional_kwargs': dict({
@@ -8290,7 +8735,24 @@
         'type': 'object',
       }),
       'ToolMessage': dict({
-        'description': 'Message for passing the result of executing a tool back to a model.',
+        'description': '''
+          Message for passing the result of executing a tool back to a model.
+
+          ToolMessages contain the result of a tool invocation. Typically, the result
+          is encoded inside the `content` field.
+
+          Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+              .. code-block:: python
+
+                  from langchain_core.messages import ToolMessage
+
+                  ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+          The tool_call_id field is used to associate the tool call request with the
+          tool call response. This is useful in situations where a chat model is able
+          to request multiple tool calls in parallel.
+        ''',
         'properties': dict({
           'additional_kwargs': dict({
             'title': 'Additional Kwargs',
@@ -8407,7 +8869,15 @@
   ]),
   'definitions': dict({
     'AIMessage': dict({
-      'description': 'Message from an AI.',
+      'description': '''
+        Message from an AI.
+
+        AIMessage is returned from a chat model as a response to a prompt.
+
+        This message represents the output of the model and consists of both
+        the raw output as returned by the model together with standardized fields
+        (e.g., tool calls, usage metadata) added by the LangChain framework.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -8548,7 +9018,16 @@
       'type': 'object',
     }),
     'FunctionMessage': dict({
-      'description': 'Message for passing the result of executing a function back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        FunctionMessages are an older version of the ToolMessage schema, and
+        do not contain the tool_call_id field.
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -8604,7 +9083,30 @@
       'type': 'object',
     }),
     'HumanMessage': dict({
-      'description': 'Message from a human.',
+      'description': '''
+        Message from a human.
+
+        HumanMessages are messages that are passed in from a human to the model.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Instantiate a chat model and invoke it with the messages
+                model = ...
+                print(model.invoke(messages))
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',
@@ -8693,8 +9195,28 @@
     }),
     'SystemMessage': dict({
       'description': '''
-        Message for priming AI behavior, usually passed in as the first of a sequence
+        Message for priming AI behavior.
+
+        The system message is usually passed in as the first of a sequence
         of input messages.
+
+        Example:
+
+            .. code-block:: python
+
+                from langchain_core.messages import HumanMessage, SystemMessage
+
+                messages = [
+                    SystemMessage(
+                        content="You are a helpful assistant! Your name is Bob."
+                    ),
+                    HumanMessage(
+                        content="What is your name?"
+                    )
+                ]
+
+                # Define a chat model and invoke it with the messages
+                print(model.invoke(messages))
       ''',
       'properties': dict({
         'additional_kwargs': dict({
@@ -8773,7 +9295,24 @@
       'type': 'object',
     }),
     'ToolMessage': dict({
-      'description': 'Message for passing the result of executing a tool back to a model.',
+      'description': '''
+        Message for passing the result of executing a tool back to a model.
+
+        ToolMessages contain the result of a tool invocation. Typically, the result
+        is encoded inside the `content` field.
+
+        Example: A ToolMessage representing a result of 42 from a tool call with the given id
+
+            .. code-block:: python
+
+                from langchain_core.messages import ToolMessage
+
+                ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+
+        The tool_call_id field is used to associate the tool call request with the
+        tool call response. This is useful in situations where a chat model is able
+        to request multiple tool calls in parallel.
+      ''',
       'properties': dict({
         'additional_kwargs': dict({
           'title': 'Additional Kwargs',

@@ -8,6 +8,7 @@ from langchain_core.output_parsers.string import StrOutputParser
 from langchain_core.output_parsers.xml import XMLOutputParser
 from langchain_core.prompts.prompt import PromptTemplate
 from langchain_core.runnables.base import Runnable, RunnableConfig
+from tests.unit_tests.stubs import AnyStr


 def test_graph_single_runnable(snapshot: SnapshotAssertion) -> None:
@@ -254,7 +255,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
             },
             "AIMessage": {
                 "title": "AIMessage",
-                "description": "Message from an AI.",
+                "description": AnyStr(),
                 "type": "object",
                 "properties": {
                     "content": {
@@ -313,7 +314,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
             },
             "HumanMessage": {
                 "title": "HumanMessage",
-                "description": "Message from a human.",
+                "description": AnyStr(),
                 "type": "object",
                 "properties": {
                     "content": {
@@ -357,7 +358,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
             },
             "ChatMessage": {
                 "title": "ChatMessage",
-                "description": "Message that can be assigned an arbitrary speaker (i.e. role).",  # noqa: E501
+                "description": AnyStr(),
                 "type": "object",
                 "properties": {
                     "content": {
@@ -397,7 +398,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
             },
             "SystemMessage": {
                 "title": "SystemMessage",
-                "description": "Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.",  # noqa: E501
+                "description": AnyStr(),
                 "type": "object",
                 "properties": {
                     "content": {
@@ -436,7 +437,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
             },
             "FunctionMessage": {
                 "title": "FunctionMessage",
-                "description": "Message for passing the result of executing a function back to a model.",  # noqa: E501
+                "description": AnyStr(),
                 "type": "object",
                 "properties": {
                     "content": {
@@ -475,7 +476,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
             },
             "ToolMessage": {
                 "title": "ToolMessage",
-                "description": "Message for passing the result of executing a tool back to a model.",  # noqa: E501
+                "description": AnyStr(),
                 "type": "object",
                 "properties": {
                     "content": {
