core[patch]: Document messages namespace (#23154)

- Moved doc-strings below attributes in TypedDicts -- seems to render
better on API Reference pages.
- Provided more description and some simple code examples
pull/23192/head
Eugene Yurtsev 2 months ago committed by GitHub
parent 3c917204dc
commit c2d43544cc

@ -25,19 +25,36 @@ from langchain_core.utils.json import (
class UsageMetadata(TypedDict):
"""Usage metadata for a message, such as token counts.
Attributes:
input_tokens: (int) count of input (or prompt) tokens
output_tokens: (int) count of output (or completion) tokens
total_tokens: (int) total token count
This is a standard representation of token usage that is consistent across models.
Example:
.. code-block:: python
{
"input_tokens": 10,
"output_tokens": 20,
"total_tokens": 30
}
"""
input_tokens: int
"""Count of input (or prompt) tokens."""
output_tokens: int
"""Count of output (or completion) tokens."""
total_tokens: int
"""Total token count."""
class AIMessage(BaseMessage):
"""Message from an AI."""
"""Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
"""
example: bool = False
"""Use to denote that a message is part of an example conversation.
@ -56,6 +73,7 @@ class AIMessage(BaseMessage):
"""
type: Literal["ai"] = "ai"
"""The type of the message (used for deserialization)."""
def __init__(
self, content: Union[str, List[Union[str, Dict]]], **kwargs: Any

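A minimal sketch tying the two classes above together: an AIMessage can carry a UsageMetadata dict on its usage_metadata field. The numbers here are illustrative, not from a real model call.

.. code-block:: python

from langchain_core.messages import AIMessage

# Hypothetical response, shaped like what a chat model integration returns.
msg = AIMessage(
    content="Hello!",
    usage_metadata={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
)

assert msg.usage_metadata["total_tokens"] == (
    msg.usage_metadata["input_tokens"] + msg.usage_metadata["output_tokens"]
)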
@ -13,7 +13,7 @@ if TYPE_CHECKING:
class BaseMessage(Serializable):
"""Base abstract Message class.
"""Base abstract message class.
Messages are the inputs and outputs of ChatModels.
"""
@ -24,14 +24,28 @@ class BaseMessage(Serializable):
additional_kwargs: dict = Field(default_factory=dict)
"""Reserved for additional payload data associated with the message.
For example, for a message from an AI, this could include tool calls."""
For example, for a message from an AI, this could include tool calls as
encoded by the model provider.
"""
response_metadata: dict = Field(default_factory=dict)
"""Response metadata. For example: response headers, logprobs, token counts."""
type: str
"""The type of the message. Must be a string that is unique to the message type.
The purpose of this field is to allow for easy identification of the message type
when deserializing messages.
"""
name: Optional[str] = None
"""An optional name for the message.
This can be used to provide a human-readable name for the message.
Usage of this field is optional, and whether it's used or not is up to the
model implementation.
"""
id: Optional[str] = None
"""An optional unique identifier for the message. This should ideally be
@ -57,6 +71,7 @@ class BaseMessage(Serializable):
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> ChatPromptTemplate:
"""Concatenate this message with another message."""
from langchain_core.prompts.chat import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self]) # type: ignore[call-arg]
@ -122,6 +137,17 @@ class BaseMessageChunk(BaseMessage):
return ["langchain", "schema", "messages"]
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore
"""Message chunks support concatenation with other message chunks.
This functionality is useful for combining message chunks yielded from
a streaming model into a complete message.
For example,
`AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`
will give `AIMessageChunk(content="Hello World")`.
"""
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk

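To make the chunk concatenation described in __add__ concrete, here is a small sketch; the chunks are hard-coded stand-ins for what a streaming chat model would yield.

.. code-block:: python

from langchain_core.messages import AIMessageChunk

# Stand-ins for chunks yielded by a streaming chat model.
chunks = [AIMessageChunk(content="Hello"), AIMessageChunk(content=" World")]

# __add__ merges content (and metadata) chunk by chunk.
full = chunks[0]
for chunk in chunks[1:]:
    full = full + chunk

assert full.content == "Hello World"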
@ -15,6 +15,7 @@ class ChatMessage(BaseMessage):
"""The speaker / role of the Message."""
type: Literal["chat"] = "chat"
"""The type of the message (used during serialization)."""
@classmethod
def get_lc_namespace(cls) -> List[str]:
@ -32,6 +33,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["ChatMessageChunk"] = "ChatMessageChunk" # type: ignore
"""The type of the message (used during serialization)."""
@classmethod
def get_lc_namespace(cls) -> List[str]:

@ -9,12 +9,21 @@ from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing the result of executing a function back to a model."""
"""Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
"""The type of the message (used for serialization)."""
@classmethod
def get_lc_namespace(cls) -> List[str]:

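For contrast with the ToolMessage schema documented below, a minimal FunctionMessage might look like this; the function name and result are invented.

.. code-block:: python

from langchain_core.messages import FunctionMessage

# Legacy-style tool result. Note the absence of a tool_call_id, so the
# result cannot be matched to a specific call when several run in parallel.
FunctionMessage(name="multiply", content="42")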
@ -4,7 +4,29 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class HumanMessage(BaseMessage):
"""Message from a human."""
"""Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
"""
example: bool = False
"""Use to denote that a message is part of an example conversation.

@ -4,8 +4,29 @@ from langchain_core.messages.base import BaseMessage, BaseMessageChunk
class SystemMessage(BaseMessage):
"""Message for priming AI behavior, usually passed in as the first of a sequence
"""Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
"""
type: Literal["system"] = "system"

@ -12,7 +12,23 @@ from langchain_core.utils._merge import merge_dicts
class ToolMessage(BaseMessage):
"""Message for passing the result of executing a tool back to a model."""
"""Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
tool_call_id: str
"""Tool call that this message is responding to."""
@ -75,15 +91,30 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
class ToolCall(TypedDict):
"""Represents a request to call a tool.
Attributes:
name: (str) the name of the tool to be called
args: (dict) the arguments to the tool call
id: (str) if provided, an identifier associated with the tool call
Example:
.. code-block:: python
{
"name": "foo",
"args": {"a": 1},
"id": "123"
}
This represents a request to call the tool named "foo" with arguments {"a": 1}
and an identifier of "123".
"""
name: str
"""The name of the tool to be called."""
args: Dict[str, Any]
"""The arguments to the tool call."""
id: Optional[str]
"""An identifier associated with the tool call.
An identifier is needed to associate a tool call request with a tool
call result when multiple concurrent tool calls are made.
"""
class ToolCallChunk(TypedDict):
@ -99,22 +130,21 @@ class ToolCallChunk(TypedDict):
left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
(
AIMessageChunk(content="", tool_call_chunks=left_chunks)
+ AIMessageChunk(content="", tool_call_chunks=right_chunks)
).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
Attributes:
name: (str) if provided, a substring of the name of the tool to be called
args: (str) if provided, a JSON substring of the arguments to the tool call
id: (str) if provided, a substring of an identifier for the tool call
index: (int) if provided, the index of the tool call in a sequence
"""
name: Optional[str]
"""The name of the tool to be called."""
args: Optional[str]
"""The arguments to the tool call."""
id: Optional[str]
"""An identifier associated with the tool call."""
index: Optional[int]
"""The index of the tool call in a sequence."""
class InvalidToolCall(TypedDict):
@ -125,9 +155,13 @@ class InvalidToolCall(TypedDict):
"""
name: Optional[str]
"""The name of the tool to be called."""
args: Optional[str]
"""The arguments to the tool call."""
id: Optional[str]
"""An identifier associated with the tool call."""
error: Optional[str]
"""An error message associated with the tool call."""
def default_tool_parser(

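Putting ToolCall and ToolMessage together, a parallel tool-call exchange might look like the following sketch; the tool name, arguments, and ids are invented.

.. code-block:: python

from langchain_core.messages import AIMessage, ToolMessage

# Hypothetical model response requesting two tool calls in parallel.
ai_msg = AIMessage(
    content="",
    tool_calls=[
        {"name": "add", "args": {"a": 1, "b": 2}, "id": "call_1"},
        {"name": "add", "args": {"a": 3, "b": 4}, "id": "call_2"},
    ],
)

# Each result is matched back to its request via tool_call_id.
results = [
    ToolMessage(content="3", tool_call_id="call_1"),
    ToolMessage(content="7", tool_call_id="call_2"),
]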
@ -1,3 +1,11 @@
"""Module contains utility functions for working with messages.
Some examples of what you can do with these functions include:
* Convert messages to strings (serialization)
* Convert messages from dicts to Message objects (deserialization)
* Filter messages from a list of messages based on name, type, id, etc.
"""
from __future__ import annotations
import inspect

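As a rough illustration of the serialization and deserialization the module docstring mentions, here is a round trip using messages_to_dict and messages_from_dict, both exported from langchain_core.messages.

.. code-block:: python

from langchain_core.messages import (
    HumanMessage,
    messages_from_dict,
    messages_to_dict,
)

msgs = [HumanMessage(content="Hi")]

# Round trip: Message objects -> plain dicts -> Message objects.
as_dicts = messages_to_dict(msgs)
restored = messages_from_dict(as_dicts)
assert restored == msgs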
@ -5216,7 +5216,15 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -5404,7 +5412,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -5460,7 +5477,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -5571,8 +5611,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -5651,7 +5711,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -5777,7 +5854,15 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -5965,7 +6050,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6021,7 +6115,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6132,8 +6249,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -6212,7 +6349,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6322,7 +6476,15 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6463,7 +6625,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6519,7 +6690,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6608,8 +6802,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -6688,7 +6902,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6786,7 +7017,15 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -6974,7 +7213,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7030,7 +7278,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7141,8 +7412,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -7221,7 +7512,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7319,7 +7627,15 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7507,7 +7823,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7563,7 +7888,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7674,8 +8022,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -7754,7 +8122,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -7844,7 +8229,15 @@
dict({
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8032,7 +8425,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8088,7 +8490,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8210,8 +8635,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -8290,7 +8735,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8407,7 +8869,15 @@
]),
'definitions': dict({
'AIMessage': dict({
'description': 'Message from an AI.',
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model and standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8548,7 +9018,16 @@
'type': 'object',
}),
'FunctionMessage': dict({
'description': 'Message for passing the result of executing a function back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessages are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8604,7 +9083,30 @@
'type': 'object',
}),
'HumanMessage': dict({
'description': 'Message from a human.',
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
@ -8693,8 +9195,28 @@
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior, usually passed in as the first of a sequence
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
@ -8773,7 +9295,24 @@
'type': 'object',
}),
'ToolMessage': dict({
'description': 'Message for passing the result of executing a tool back to a model.',
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with the given id:
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',

@ -8,6 +8,7 @@ from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.xml import XMLOutputParser
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables.base import Runnable, RunnableConfig
from tests.unit_tests.stubs import AnyStr
def test_graph_single_runnable(snapshot: SnapshotAssertion) -> None:
@ -254,7 +255,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
},
"AIMessage": {
"title": "AIMessage",
"description": "Message from an AI.",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
@ -313,7 +314,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
},
"HumanMessage": {
"title": "HumanMessage",
"description": "Message from a human.",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
@ -357,7 +358,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
},
"ChatMessage": {
"title": "ChatMessage",
"description": "Message that can be assigned an arbitrary speaker (i.e. role).", # noqa: E501
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
@ -397,7 +398,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
},
"SystemMessage": {
"title": "SystemMessage",
"description": "Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa: E501
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
@ -436,7 +437,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "Message for passing the result of executing a function back to a model.", # noqa: E501
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
@ -475,7 +476,7 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
},
"ToolMessage": {
"title": "ToolMessage",
"description": "Message for passing the result of executing a tool back to a model.", # noqa: E501
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
