core[minor]: Add ToolMessage.raw_output (#23994)

Decisions to discuss:
1. Is a new attr needed, or could additional_kwargs be used for this? (See the sketch after this list.)
2. Is raw_output a good name for this attr?
3. Should raw_output default to {} or None?
4. Should raw_output be included in serialization?
5. Do we need to update repr/str to exclude raw_output?
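A minimal sketch of the two options in decision 1, assuming langchain_core at this commit (the payload is illustrative):

from langchain_core.messages import ToolMessage

tool_output = {"stdout": "42", "stderr": None}

# Option A: the dedicated attribute added in this PR
msg = ToolMessage(
    content=tool_output["stdout"],
    raw_output=tool_output,
    tool_call_id="call_1",
)

# Option B: reusing additional_kwargs instead of a new attr
alt = ToolMessage(
    content=tool_output["stdout"],
    additional_kwargs={"raw_output": tool_output},
    tool_call_id="call_1",
)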
Bagatur 2024-07-10 13:11:10 -07:00 committed by GitHub
parent 14dd89a1ee
commit 6928f4c438
12 changed files with 3598 additions and 2038 deletions


@@ -13,7 +13,7 @@ tests:
poetry run pytest $(TEST_FILE)
test_watch:
poetry run ptw --snapshot-update --now . -- -vv -x tests/unit_tests
poetry run ptw --snapshot-update --now . -- -vv tests/unit_tests
test_profile:
poetry run pytest -vv tests/unit_tests/ --profile-svg


@@ -3,12 +3,8 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
from typing_extensions import TypedDict
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content
from langchain_core.utils._merge import merge_dicts, merge_obj
class ToolMessage(BaseMessage):
@@ -17,7 +13,7 @@ class ToolMessage(BaseMessage):
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A TooMessage representing a result of 42 from a tool call with id
Example: A ToolMessage representing a result of 42 from a tool call with id
.. code-block:: python
@@ -25,10 +21,29 @@ class ToolMessage(BaseMessage):
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
Example: A ToolMessage where only part of the tool output is sent to the model
and the full output is passed in to raw_output.
.. code-block:: python
from langchain_core.messages import ToolMessage
tool_output = {
"stdout": "From the graph we can see that the correlation between x and y is ...",
"stderr": None,
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
}
ToolMessage(
content=tool_output["stdout"],
raw_output=tool_output,
tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
)
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
""" # noqa: E501
tool_call_id: str
"""Tool call that this message is responding to."""
@@ -39,6 +54,14 @@ class ToolMessage(BaseMessage):
type: Literal["tool"] = "tool"
"""The type of the message (used for serialization). Defaults to "tool"."""
raw_output: Any = None
"""The raw output of the tool.
**Not part of the payload sent to the model.** Should only be specified if it is
different from the message content, i.e. if only a subset of the full tool output
is being passed as message content.
"""
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object.
@@ -83,6 +106,7 @@ class ToolMessageChunk(ToolMessage, BaseMessageChunk):
return self.__class__(
tool_call_id=self.tool_call_id,
content=merge_content(self.content, other.content),
raw_output=merge_obj(self.raw_output, other.raw_output),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
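A short sketch, not part of the diff, of what this merge means for streamed tool chunks (it relies on the merge_obj helper added below in langchain_core.utils._merge):

from langchain_core.messages import ToolMessageChunk

left = ToolMessageChunk(content="4", raw_output={"stdout": "4"}, tool_call_id="call_1")
right = ToolMessageChunk(content="2", raw_output={"stdout": "2"}, tool_call_id="call_1")
merged = left + right
# merge_obj delegates matching dicts to merge_dicts, which concatenates string values
assert merged.content == "42"
assert merged.raw_output == {"stdout": "42"}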


@@ -221,7 +221,8 @@ def _create_message_from_message_type(
elif message_type == "function":
message = FunctionMessage(content=content, **kwargs)
elif message_type == "tool":
message = ToolMessage(content=content, **kwargs)
raw_output = kwargs.get("additional_kwargs", {}).pop("raw_output", None)
message = ToolMessage(content=content, raw_output=raw_output, **kwargs)
elif message_type == "remove":
message = RemoveMessage(**kwargs)
else:
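In effect, a top-level raw_output key on a "tool" message dict lands in additional_kwargs and is then hoisted onto the ToolMessage. A sketch of the round trip, assuming this commit:

from langchain_core.messages import ToolMessage, convert_to_messages

[msg] = convert_to_messages(
    [{"role": "tool", "tool_call_id": "t1", "content": "Hi!", "raw_output": {"foo": 123}}]
)
assert msg == ToolMessage(tool_call_id="t1", content="Hi!", raw_output={"foo": 123})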


@@ -72,3 +72,26 @@ def merge_lists(left: Optional[List], *others: Optional[List]) -> Optional[List]
else:
merged.append(e)
return merged
def merge_obj(left: Any, right: Any) -> Any:
if left is None or right is None:
return left if left is not None else right
elif type(left) is not type(right):
raise TypeError(
f"left and right are of different types. Left type: {type(left)}. Right "
f"type: {type(right)}."
)
elif isinstance(left, str):
return left + right
elif isinstance(left, dict):
return merge_dicts(left, right)
elif isinstance(left, list):
return merge_lists(left, right)
elif left == right:
return left
else:
raise ValueError(
f"Unable to merge {left=} and {right=}. Both must be of type str, dict, or "
f"list, or else be two equal objects."
)
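A few illustrative cases for merge_obj (a sketch, not part of the diff):

from langchain_core.utils._merge import merge_obj

assert merge_obj(None, {"a": 1}) == {"a": 1}  # a None side yields the other side
assert merge_obj("foo", "bar") == "foobar"  # strings concatenate
assert merge_obj({"a": "x"}, {"a": "y"}) == {"a": "xy"}  # dicts merge via merge_dicts
assert merge_obj([1], [2]) == [1, 2]  # lists concatenate via merge_lists
assert merge_obj(42, 42) == 42  # equal objects pass through
# Mismatched types raise TypeError; unequal non-str/dict/list values raise ValueError.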


@@ -38,6 +38,11 @@ python = ">=3.12.4"
[tool.ruff.lint]
select = [ "E", "F", "I", "T201",]
[tool.ruff.lint.per-file-ignores]
"tests/unit_tests/prompts/test_chat.py" = ["E501"]
"tests/unit_tests/runnables/test_runnable.py" = ["E501"]
"tests/unit_tests/runnables/test_graph.py" = ["E501"]
[tool.coverage.run]
omit = [ "tests/*",]

File diff suppressed because it is too large.


@@ -4,6 +4,7 @@ from pathlib import Path
from typing import Any, List, Union
import pytest
from syrupy import SnapshotAssertion
from langchain_core._api.deprecation import (
LangChainPendingDeprecationWarning,
@@ -789,7 +790,7 @@ async def test_messages_prompt_accepts_list() -> None:
await prompt.ainvoke([("user", "Hi there")]) # type: ignore
def test_chat_input_schema() -> None:
def test_chat_input_schema(snapshot: SnapshotAssertion) -> None:
prompt_all_required = ChatPromptTemplate.from_messages(
messages=[MessagesPlaceholder("history", optional=False), ("user", "${input}")]
)
@@ -797,601 +798,10 @@ def test_chat_input_schema() -> None:
prompt_all_required.optional_variables == {"history"}
with pytest.raises(ValidationError):
prompt_all_required.input_schema(input="")
assert prompt_all_required.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"history": {
"title": "History",
"type": "array",
"items": {
"anyOf": [
{"$ref": "#/definitions/AIMessage"},
{"$ref": "#/definitions/HumanMessage"},
{"$ref": "#/definitions/ChatMessage"},
{"$ref": "#/definitions/SystemMessage"},
{"$ref": "#/definitions/FunctionMessage"},
{"$ref": "#/definitions/ToolMessage"},
]
},
},
"input": {"title": "Input", "type": "string"},
},
"required": ["history", "input"],
"definitions": {
"ToolCall": {
"title": "ToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "object"},
"id": {"title": "Id", "type": "string"},
},
"required": ["name", "args", "id"],
},
"InvalidToolCall": {
"title": "InvalidToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "string"},
"id": {"title": "Id", "type": "string"},
"error": {"title": "Error", "type": "string"},
},
"required": ["name", "args", "id", "error"],
},
"UsageMetadata": {
"title": "UsageMetadata",
"type": "object",
"properties": {
"input_tokens": {"title": "Input Tokens", "type": "integer"},
"output_tokens": {"title": "Output Tokens", "type": "integer"},
"total_tokens": {"title": "Total Tokens", "type": "integer"},
},
"required": ["input_tokens", "output_tokens", "total_tokens"],
},
"AIMessage": {
"title": "AIMessage",
"description": "Message from an AI.\n\nAIMessage is returned from a chat model as a response to a prompt.\n\nThis message represents the output of the model and consists of both\nthe raw output as returned by the model together standardized fields\n(e.g., tool calls, usage metadata) added by the LangChain framework.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "ai",
"enum": ["ai"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
"tool_calls": {
"title": "Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/ToolCall"},
},
"invalid_tool_calls": {
"title": "Invalid Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/InvalidToolCall"},
},
"usage_metadata": {"$ref": "#/definitions/UsageMetadata"},
},
"required": ["content"],
},
"HumanMessage": {
"title": "HumanMessage",
"description": 'Message from a human.\n\nHumanMessages are messages that are passed in from a human to the model.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content="You are a helpful assistant! Your name is Bob."\n ),\n HumanMessage(\n content="What is your name?"\n )\n ]\n\n # Instantiate a chat model and invoke it with the messages\n model = ...\n print(model.invoke(messages))', # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "human",
"enum": ["human"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"ChatMessage": {
"title": "ChatMessage",
"description": "Message that can be assigned an arbitrary speaker (i.e. role).", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "chat",
"enum": ["chat"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"role": {"title": "Role", "type": "string"},
},
"required": ["content", "role"],
},
"SystemMessage": {
"title": "SystemMessage",
"description": 'Message for priming AI behavior.\n\nThe system message is usually passed in as the first of a sequence\nof input messages.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content="You are a helpful assistant! Your name is Bob."\n ),\n HumanMessage(\n content="What is your name?"\n )\n ]\n\n # Define a chat model and invoke it with the messages\n print(model.invoke(messages))', # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "system",
"enum": ["system"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content"],
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "Message for passing the result of executing a tool back to a model.\n\nFunctionMessage are an older version of the ToolMessage schema, and\ndo not contain the tool_call_id field.\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "function",
"enum": ["function"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content", "name"],
},
"ToolMessage": {
"title": "ToolMessage",
"description": "Message for passing the result of executing a tool back to a model.\n\nToolMessages contain the result of a tool invocation. Typically, the result\nis encoded inside the `content` field.\n\nExample: A TooMessage representing a result of 42 from a tool call with id\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "tool",
"enum": ["tool"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"tool_call_id": {"title": "Tool Call Id", "type": "string"},
},
"required": ["content", "tool_call_id"],
},
},
}
assert prompt_all_required.input_schema.schema() == snapshot(name="required")
prompt_optional = ChatPromptTemplate.from_messages(
messages=[MessagesPlaceholder("history", optional=True), ("user", "${input}")]
)
prompt_optional.input_variables == {"history", "input"}
prompt_optional.input_schema(input="") # won't raise error
prompt_optional.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"input": {"title": "Input", "type": "string"},
"history": {
"title": "History",
"type": "array",
"items": {
"anyOf": [
{"$ref": "#/definitions/AIMessage"},
{"$ref": "#/definitions/HumanMessage"},
{"$ref": "#/definitions/ChatMessage"},
{"$ref": "#/definitions/SystemMessage"},
{"$ref": "#/definitions/FunctionMessage"},
{"$ref": "#/definitions/ToolMessage"},
]
},
},
},
"required": ["input"],
"definitions": {
"ToolCall": {
"title": "ToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "object"},
"id": {"title": "Id", "type": "string"},
},
"required": ["name", "args", "id"],
},
"InvalidToolCall": {
"title": "InvalidToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "string"},
"id": {"title": "Id", "type": "string"},
"error": {"title": "Error", "type": "string"},
},
"required": ["name", "args", "id", "error"],
},
"UsageMetadata": {
"title": "UsageMetadata",
"type": "object",
"properties": {
"input_tokens": {"title": "Input Tokens", "type": "integer"},
"output_tokens": {"title": "Output Tokens", "type": "integer"},
"total_tokens": {"title": "Total Tokens", "type": "integer"},
},
"required": ["input_tokens", "output_tokens", "total_tokens"],
},
"AIMessage": {
"title": "AIMessage",
"description": "Message from an AI.\n\nAIMessage is returned from a chat model as a response to a prompt.\n\nThis message represents the output of the model and consists of both\nthe raw output as returned by the model together standardized fields\n(e.g., tool calls, usage metadata) added by the LangChain framework.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "ai",
"enum": ["ai"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
"tool_calls": {
"title": "Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/ToolCall"},
},
"invalid_tool_calls": {
"title": "Invalid Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/InvalidToolCall"},
},
"usage_metadata": {"$ref": "#/definitions/UsageMetadata"},
},
"required": ["content"],
},
"HumanMessage": {
"title": "HumanMessage",
"description": 'Message from a human.\n\nHumanMessages are messages that are passed in from a human to the model.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content="You are a helpful assistant! Your name is Bob."\n ),\n HumanMessage(\n content="What is your name?"\n )\n ]\n\n # Instantiate a chat model and invoke it with the messages\n model = ...\n print(model.invoke(messages))', # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "human",
"enum": ["human"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"ChatMessage": {
"title": "ChatMessage",
"description": "Message that can be assigned an arbitrary speaker (i.e. role).", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "chat",
"enum": ["chat"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"role": {"title": "Role", "type": "string"},
},
"required": ["content", "role"],
},
"SystemMessage": {
"title": "SystemMessage",
"description": 'Message for priming AI behavior.\n\nThe system message is usually passed in as the first of a sequence\nof input messages.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content="You are a helpful assistant! Your name is Bob."\n ),\n HumanMessage(\n content="What is your name?"\n )\n ]\n\n # Define a chat model and invoke it with the messages\n print(model.invoke(messages))', # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "system",
"enum": ["system"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content"],
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "Message for passing the result of executing a tool back to a model.\n\nFunctionMessage are an older version of the ToolMessage schema, and\ndo not contain the tool_call_id field.\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}] # noqa: E501
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "function",
"enum": ["function"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content", "name"],
},
"ToolMessage": {
"title": "ToolMessage",
"description": "Message for passing the result of executing a tool back to a model.\n\nToolMessages contain the result of a tool invocation. Typically, the result\nis encoded inside the `content` field.\n\nExample: A TooMessage representing a result of 42 from a tool call with id\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "tool",
"enum": ["tool"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"tool_call_id": {"title": "Tool Call Id", "type": "string"},
},
"required": ["content", "tool_call_id"],
},
},
}
prompt_optional.input_schema.schema() == snapshot(name="partial")


@@ -98,6 +98,897 @@
+--------------------------------+
'''
# ---
# name: test_graph_sequence_map[graph_no_schemas]
dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
dict({
'source': 3,
'target': 5,
}),
dict({
'source': 5,
'target': 4,
}),
dict({
'source': 6,
'target': 8,
}),
dict({
'source': 8,
'target': 7,
}),
dict({
'source': 6,
'target': 9,
}),
dict({
'source': 9,
'target': 7,
}),
dict({
'source': 3,
'target': 6,
}),
dict({
'source': 7,
'target': 4,
}),
dict({
'source': 2,
'target': 3,
}),
]),
'nodes': list([
dict({
'data': 'PromptInput',
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langchain_core',
'language_models',
'fake',
'FakeListLLM',
]),
'name': 'FakeListLLM',
}),
'id': 2,
'type': 'runnable',
}),
dict({
'data': 'Parallel<as_list,as_str>Input',
'id': 3,
'type': 'schema',
}),
dict({
'data': 'Parallel<as_list,as_str>Output',
'id': 4,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'output_parsers',
'list',
'CommaSeparatedListOutputParser',
]),
'name': 'CommaSeparatedListOutputParser',
}),
'id': 5,
'type': 'runnable',
}),
dict({
'data': 'conditional_str_parser_input',
'id': 6,
'type': 'schema',
}),
dict({
'data': 'conditional_str_parser_output',
'id': 7,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'output_parser',
'StrOutputParser',
]),
'name': 'StrOutputParser',
}),
'id': 8,
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langchain_core',
'output_parsers',
'xml',
'XMLOutputParser',
]),
'name': 'XMLOutputParser',
}),
'id': 9,
'type': 'runnable',
}),
]),
})
# ---
# name: test_graph_sequence_map[graph_with_schema]
dict({
'edges': list([
dict({
'source': 0,
'target': 1,
}),
dict({
'source': 1,
'target': 2,
}),
dict({
'source': 3,
'target': 5,
}),
dict({
'source': 5,
'target': 4,
}),
dict({
'source': 6,
'target': 8,
}),
dict({
'source': 8,
'target': 7,
}),
dict({
'source': 6,
'target': 9,
}),
dict({
'source': 9,
'target': 7,
}),
dict({
'source': 3,
'target': 6,
}),
dict({
'source': 7,
'target': 4,
}),
dict({
'source': 2,
'target': 3,
}),
]),
'nodes': list([
dict({
'data': dict({
'properties': dict({
'name': dict({
'title': 'Name',
'type': 'string',
}),
}),
'required': list([
'name',
]),
'title': 'PromptInput',
'type': 'object',
}),
'id': 0,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'prompts',
'prompt',
'PromptTemplate',
]),
'name': 'PromptTemplate',
}),
'id': 1,
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langchain_core',
'language_models',
'fake',
'FakeListLLM',
]),
'name': 'FakeListLLM',
}),
'id': 2,
'type': 'runnable',
}),
dict({
'data': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'$ref': '#/definitions/AIMessage',
}),
dict({
'$ref': '#/definitions/HumanMessage',
}),
dict({
'$ref': '#/definitions/ChatMessage',
}),
dict({
'$ref': '#/definitions/SystemMessage',
}),
dict({
'$ref': '#/definitions/FunctionMessage',
}),
dict({
'$ref': '#/definitions/ToolMessage',
}),
]),
'definitions': dict({
'AIMessage': dict({
'description': '''
Message from an AI.
AIMessage is returned from a chat model as a response to a prompt.
This message represents the output of the model and consists of both
the raw output as returned by the model together standardized fields
(e.g., tool calls, usage metadata) added by the LangChain framework.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
'type': 'object',
}),
'content': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'items': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'object',
}),
]),
}),
'type': 'array',
}),
]),
'title': 'Content',
}),
'example': dict({
'default': False,
'title': 'Example',
'type': 'boolean',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'invalid_tool_calls': dict({
'default': list([
]),
'items': dict({
'$ref': '#/definitions/InvalidToolCall',
}),
'title': 'Invalid Tool Calls',
'type': 'array',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
'response_metadata': dict({
'title': 'Response Metadata',
'type': 'object',
}),
'tool_calls': dict({
'default': list([
]),
'items': dict({
'$ref': '#/definitions/ToolCall',
}),
'title': 'Tool Calls',
'type': 'array',
}),
'type': dict({
'default': 'ai',
'enum': list([
'ai',
]),
'title': 'Type',
'type': 'string',
}),
'usage_metadata': dict({
'$ref': '#/definitions/UsageMetadata',
}),
}),
'required': list([
'content',
]),
'title': 'AIMessage',
'type': 'object',
}),
'ChatMessage': dict({
'description': 'Message that can be assigned an arbitrary speaker (i.e. role).',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
'type': 'object',
}),
'content': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'items': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'object',
}),
]),
}),
'type': 'array',
}),
]),
'title': 'Content',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
'response_metadata': dict({
'title': 'Response Metadata',
'type': 'object',
}),
'role': dict({
'title': 'Role',
'type': 'string',
}),
'type': dict({
'default': 'chat',
'enum': list([
'chat',
]),
'title': 'Type',
'type': 'string',
}),
}),
'required': list([
'content',
'role',
]),
'title': 'ChatMessage',
'type': 'object',
}),
'FunctionMessage': dict({
'description': '''
Message for passing the result of executing a tool back to a model.
FunctionMessage are an older version of the ToolMessage schema, and
do not contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
'type': 'object',
}),
'content': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'items': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'object',
}),
]),
}),
'type': 'array',
}),
]),
'title': 'Content',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
'response_metadata': dict({
'title': 'Response Metadata',
'type': 'object',
}),
'type': dict({
'default': 'function',
'enum': list([
'function',
]),
'title': 'Type',
'type': 'string',
}),
}),
'required': list([
'content',
'name',
]),
'title': 'FunctionMessage',
'type': 'object',
}),
'HumanMessage': dict({
'description': '''
Message from a human.
HumanMessages are messages that are passed in from a human to the model.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Instantiate a chat model and invoke it with the messages
model = ...
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
'type': 'object',
}),
'content': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'items': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'object',
}),
]),
}),
'type': 'array',
}),
]),
'title': 'Content',
}),
'example': dict({
'default': False,
'title': 'Example',
'type': 'boolean',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
'response_metadata': dict({
'title': 'Response Metadata',
'type': 'object',
}),
'type': dict({
'default': 'human',
'enum': list([
'human',
]),
'title': 'Type',
'type': 'string',
}),
}),
'required': list([
'content',
]),
'title': 'HumanMessage',
'type': 'object',
}),
'InvalidToolCall': dict({
'properties': dict({
'args': dict({
'title': 'Args',
'type': 'string',
}),
'error': dict({
'title': 'Error',
'type': 'string',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
}),
'required': list([
'name',
'args',
'id',
'error',
]),
'title': 'InvalidToolCall',
'type': 'object',
}),
'SystemMessage': dict({
'description': '''
Message for priming AI behavior.
The system message is usually passed in as the first of a sequence
of input messages.
Example:
.. code-block:: python
from langchain_core.messages import HumanMessage, SystemMessage
messages = [
SystemMessage(
content="You are a helpful assistant! Your name is Bob."
),
HumanMessage(
content="What is your name?"
)
]
# Define a chat model and invoke it with the messages
print(model.invoke(messages))
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
'type': 'object',
}),
'content': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'items': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'object',
}),
]),
}),
'type': 'array',
}),
]),
'title': 'Content',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
'response_metadata': dict({
'title': 'Response Metadata',
'type': 'object',
}),
'type': dict({
'default': 'system',
'enum': list([
'system',
]),
'title': 'Type',
'type': 'string',
}),
}),
'required': list([
'content',
]),
'title': 'SystemMessage',
'type': 'object',
}),
'ToolCall': dict({
'properties': dict({
'args': dict({
'title': 'Args',
'type': 'object',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
}),
'required': list([
'name',
'args',
'id',
]),
'title': 'ToolCall',
'type': 'object',
}),
'ToolMessage': dict({
'description': '''
Message for passing the result of executing a tool back to a model.
ToolMessages contain the result of a tool invocation. Typically, the result
is encoded inside the `content` field.
Example: A ToolMessage representing a result of 42 from a tool call with id
.. code-block:: python
from langchain_core.messages import ToolMessage
ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
Example: A ToolMessage where only part of the tool output is sent to the model
and the full output is passed in to raw_output.
.. code-block:: python
from langchain_core.messages import ToolMessage
tool_output = {
"stdout": "From the graph we can see that the correlation between x and y is ...",
"stderr": None,
"artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
}
ToolMessage(
content=tool_output["stdout"],
raw_output=tool_output,
tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
)
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
''',
'properties': dict({
'additional_kwargs': dict({
'title': 'Additional Kwargs',
'type': 'object',
}),
'content': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'items': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'object',
}),
]),
}),
'type': 'array',
}),
]),
'title': 'Content',
}),
'id': dict({
'title': 'Id',
'type': 'string',
}),
'name': dict({
'title': 'Name',
'type': 'string',
}),
'raw_output': dict({
'title': 'Raw Output',
}),
'response_metadata': dict({
'title': 'Response Metadata',
'type': 'object',
}),
'tool_call_id': dict({
'title': 'Tool Call Id',
'type': 'string',
}),
'type': dict({
'default': 'tool',
'enum': list([
'tool',
]),
'title': 'Type',
'type': 'string',
}),
}),
'required': list([
'content',
'tool_call_id',
]),
'title': 'ToolMessage',
'type': 'object',
}),
'UsageMetadata': dict({
'properties': dict({
'input_tokens': dict({
'title': 'Input Tokens',
'type': 'integer',
}),
'output_tokens': dict({
'title': 'Output Tokens',
'type': 'integer',
}),
'total_tokens': dict({
'title': 'Total Tokens',
'type': 'integer',
}),
}),
'required': list([
'input_tokens',
'output_tokens',
'total_tokens',
]),
'title': 'UsageMetadata',
'type': 'object',
}),
}),
'title': 'RunnableParallel<as_list,as_str>Input',
}),
'id': 3,
'type': 'schema',
}),
dict({
'data': dict({
'properties': dict({
'as_list': dict({
'items': dict({
'type': 'string',
}),
'title': 'As List',
'type': 'array',
}),
'as_str': dict({
'title': 'As Str',
}),
}),
'title': 'RunnableParallel<as_list,as_str>Output',
'type': 'object',
}),
'id': 4,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'output_parsers',
'list',
'CommaSeparatedListOutputParser',
]),
'name': 'CommaSeparatedListOutputParser',
}),
'id': 5,
'type': 'runnable',
}),
dict({
'data': dict({
'title': 'conditional_str_parser_input',
'type': 'string',
}),
'id': 6,
'type': 'schema',
}),
dict({
'data': dict({
'title': 'conditional_str_parser_output',
}),
'id': 7,
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'output_parser',
'StrOutputParser',
]),
'name': 'StrOutputParser',
}),
'id': 8,
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langchain_core',
'output_parsers',
'xml',
'XMLOutputParser',
]),
'name': 'XMLOutputParser',
}),
'id': 9,
'type': 'runnable',
}),
]),
})
# ---
# name: test_graph_sequence_map[mermaid-simple]
'''
graph TD;


@@ -9,7 +9,6 @@ from langchain_core.output_parsers.xml import XMLOutputParser
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables.base import Runnable, RunnableConfig
from langchain_core.runnables.graph_mermaid import _escape_node_label
from tests.unit_tests.stubs import AnyStr
def test_graph_single_runnable(snapshot: SnapshotAssertion) -> None:
@@ -169,527 +168,8 @@ def test_graph_sequence_map(snapshot: SnapshotAssertion) -> None:
}
)
graph = sequence.get_graph()
assert graph.to_json(with_schemas=True) == {
"nodes": [
{
"id": 0,
"type": "schema",
"data": {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
"required": ["name"],
},
},
{
"id": 1,
"type": "runnable",
"data": {
"id": ["langchain", "prompts", "prompt", "PromptTemplate"],
"name": "PromptTemplate",
},
},
{
"id": 2,
"type": "runnable",
"data": {
"id": ["langchain_core", "language_models", "fake", "FakeListLLM"],
"name": "FakeListLLM",
},
},
{
"id": 3,
"type": "schema",
"data": {
"title": "RunnableParallel<as_list,as_str>Input",
"anyOf": [
{"type": "string"},
{"$ref": "#/definitions/AIMessage"},
{"$ref": "#/definitions/HumanMessage"},
{"$ref": "#/definitions/ChatMessage"},
{"$ref": "#/definitions/SystemMessage"},
{"$ref": "#/definitions/FunctionMessage"},
{"$ref": "#/definitions/ToolMessage"},
],
"definitions": {
"ToolCall": {
"title": "ToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "object"},
"id": {"title": "Id", "type": "string"},
},
"required": ["name", "args", "id"],
},
"InvalidToolCall": {
"title": "InvalidToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "string"},
"id": {"title": "Id", "type": "string"},
"error": {"title": "Error", "type": "string"},
},
"required": ["name", "args", "id", "error"],
},
"UsageMetadata": {
"title": "UsageMetadata",
"type": "object",
"properties": {
"input_tokens": {
"title": "Input Tokens",
"type": "integer",
},
"output_tokens": {
"title": "Output Tokens",
"type": "integer",
},
"total_tokens": {
"title": "Total Tokens",
"type": "integer",
},
},
"required": [
"input_tokens",
"output_tokens",
"total_tokens",
],
},
"AIMessage": {
"title": "AIMessage",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "object"},
]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "ai",
"enum": ["ai"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
"tool_calls": {
"title": "Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/ToolCall"},
},
"invalid_tool_calls": {
"title": "Invalid Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/InvalidToolCall"},
},
"usage_metadata": {
"$ref": "#/definitions/UsageMetadata"
},
},
"required": ["content"],
},
"HumanMessage": {
"title": "HumanMessage",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "object"},
]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "human",
"enum": ["human"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"ChatMessage": {
"title": "ChatMessage",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "object"},
]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "chat",
"enum": ["chat"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"role": {"title": "Role", "type": "string"},
},
"required": ["content", "role"],
},
"SystemMessage": {
"title": "SystemMessage",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "object"},
]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "system",
"enum": ["system"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content"],
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "object"},
]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "function",
"enum": ["function"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content", "name"],
},
"ToolMessage": {
"title": "ToolMessage",
"description": AnyStr(),
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [
{"type": "string"},
{"type": "object"},
]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "tool",
"enum": ["tool"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"tool_call_id": {
"title": "Tool Call Id",
"type": "string",
},
},
"required": ["content", "tool_call_id"],
},
},
},
},
{
"id": 4,
"type": "schema",
"data": {
"title": "RunnableParallel<as_list,as_str>Output",
"type": "object",
"properties": {
"as_list": {
"title": "As List",
"type": "array",
"items": {"type": "string"},
},
"as_str": {"title": "As Str"},
},
},
},
{
"id": 5,
"type": "runnable",
"data": {
"id": [
"langchain",
"output_parsers",
"list",
"CommaSeparatedListOutputParser",
],
"name": "CommaSeparatedListOutputParser",
},
},
{
"id": 6,
"type": "schema",
"data": {"title": "conditional_str_parser_input", "type": "string"},
},
{
"id": 7,
"type": "schema",
"data": {"title": "conditional_str_parser_output"},
},
{
"id": 8,
"type": "runnable",
"data": {
"id": ["langchain", "schema", "output_parser", "StrOutputParser"],
"name": "StrOutputParser",
},
},
{
"id": 9,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"output_parsers",
"xml",
"XMLOutputParser",
],
"name": "XMLOutputParser",
},
},
],
"edges": [
{"source": 0, "target": 1},
{"source": 1, "target": 2},
{"source": 3, "target": 5},
{"source": 5, "target": 4},
{"source": 6, "target": 8},
{"source": 8, "target": 7},
{"source": 6, "target": 9},
{"source": 9, "target": 7},
{"source": 3, "target": 6},
{"source": 7, "target": 4},
{"source": 2, "target": 3},
],
}
assert graph.to_json() == {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput",
},
{
"id": 1,
"type": "runnable",
"data": {
"id": ["langchain", "prompts", "prompt", "PromptTemplate"],
"name": "PromptTemplate",
},
},
{
"id": 2,
"type": "runnable",
"data": {
"id": ["langchain_core", "language_models", "fake", "FakeListLLM"],
"name": "FakeListLLM",
},
},
{
"id": 3,
"type": "schema",
"data": "Parallel<as_list,as_str>Input",
},
{
"id": 4,
"type": "schema",
"data": "Parallel<as_list,as_str>Output",
},
{
"id": 5,
"type": "runnable",
"data": {
"id": [
"langchain",
"output_parsers",
"list",
"CommaSeparatedListOutputParser",
],
"name": "CommaSeparatedListOutputParser",
},
},
{
"id": 6,
"type": "schema",
"data": "conditional_str_parser_input",
},
{
"id": 7,
"type": "schema",
"data": "conditional_str_parser_output",
},
{
"id": 8,
"type": "runnable",
"data": {
"id": ["langchain", "schema", "output_parser", "StrOutputParser"],
"name": "StrOutputParser",
},
},
{
"id": 9,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"output_parsers",
"xml",
"XMLOutputParser",
],
"name": "XMLOutputParser",
},
},
],
"edges": [
{"source": 0, "target": 1},
{"source": 1, "target": 2},
{"source": 3, "target": 5},
{"source": 5, "target": 4},
{"source": 6, "target": 8},
{"source": 8, "target": 7},
{"source": 6, "target": 9},
{"source": 9, "target": 7},
{"source": 3, "target": 6},
{"source": 7, "target": 4},
{"source": 2, "target": 3},
],
}
assert graph.to_json(with_schemas=True) == snapshot(name="graph_with_schema")
assert graph.to_json() == snapshot(name="graph_no_schemas")
assert graph.draw_ascii() == snapshot(name="ascii")
assert graph.draw_mermaid() == snapshot(name="mermaid")
assert graph.draw_mermaid(with_styles=False) == snapshot(name="mermaid-simple")


@@ -347,302 +347,12 @@ def test_schemas(snapshot: SnapshotAssertion) -> None:
]
)
assert chat_prompt.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"history": {
"title": "History",
"type": "array",
"items": {
"anyOf": [
{"$ref": "#/definitions/AIMessage"},
{"$ref": "#/definitions/HumanMessage"},
{"$ref": "#/definitions/ChatMessage"},
{"$ref": "#/definitions/SystemMessage"},
{"$ref": "#/definitions/FunctionMessage"},
{"$ref": "#/definitions/ToolMessage"},
]
},
}
},
"required": ["history"],
"definitions": {
"ToolCall": {
"title": "ToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "object"},
"id": {"title": "Id", "type": "string"},
},
"required": ["name", "args", "id"],
},
"InvalidToolCall": {
"title": "InvalidToolCall",
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"args": {"title": "Args", "type": "string"},
"id": {"title": "Id", "type": "string"},
"error": {"title": "Error", "type": "string"},
},
"required": ["name", "args", "id", "error"],
},
"UsageMetadata": {
"title": "UsageMetadata",
"type": "object",
"properties": {
"input_tokens": {"title": "Input Tokens", "type": "integer"},
"output_tokens": {"title": "Output Tokens", "type": "integer"},
"total_tokens": {"title": "Total Tokens", "type": "integer"},
},
"required": ["input_tokens", "output_tokens", "total_tokens"],
},
"AIMessage": {
"title": "AIMessage",
"description": "Message from an AI.\n\nAIMessage is returned from a chat model as a response to a prompt.\n\nThis message represents the output of the model and consists of both\nthe raw output as returned by the model together standardized fields\n(e.g., tool calls, usage metadata) added by the LangChain framework.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "ai",
"enum": ["ai"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
"tool_calls": {
"title": "Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/ToolCall"},
},
"invalid_tool_calls": {
"title": "Invalid Tool Calls",
"default": [],
"type": "array",
"items": {"$ref": "#/definitions/InvalidToolCall"},
},
"usage_metadata": {"$ref": "#/definitions/UsageMetadata"},
},
"required": ["content"],
},
"HumanMessage": {
"title": "HumanMessage",
"description": 'Message from a human.\n\nHumanMessages are messages that are passed in from a human to the model.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content="You are a helpful assistant! Your name is Bob."\n ),\n HumanMessage(\n content="What is your name?"\n )\n ]\n\n # Instantiate a chat model and invoke it with the messages\n model = ...\n print(model.invoke(messages))', # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "human",
"enum": ["human"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"ChatMessage": {
"title": "ChatMessage",
"description": "Message that can be assigned an arbitrary speaker (i.e. role).", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "chat",
"enum": ["chat"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"role": {"title": "Role", "type": "string"},
},
"required": ["content", "role"],
},
"SystemMessage": {
"title": "SystemMessage",
"description": 'Message for priming AI behavior.\n\nThe system message is usually passed in as the first of a sequence\nof input messages.\n\nExample:\n\n .. code-block:: python\n\n from langchain_core.messages import HumanMessage, SystemMessage\n\n messages = [\n SystemMessage(\n content="You are a helpful assistant! Your name is Bob."\n ),\n HumanMessage(\n content="What is your name?"\n )\n ]\n\n # Define a chat model and invoke it with the messages\n print(model.invoke(messages))', # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "system",
"enum": ["system"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content"],
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "Message for passing the result of executing a tool back to a model.\n\nFunctionMessage are an older version of the ToolMessage schema, and\ndo not contain the tool_call_id field.\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "function",
"enum": ["function"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
},
"required": ["content", "name"],
},
"ToolMessage": {
"title": "ToolMessage",
"description": "Message for passing the result of executing a tool back to a model.\n\nToolMessages contain the result of a tool invocation. Typically, the result\nis encoded inside the `content` field.\n\nExample: A TooMessage representing a result of 42 from a tool call with id\n\n .. code-block:: python\n\n from langchain_core.messages import ToolMessage\n\n ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')\n\nThe tool_call_id field is used to associate the tool call request with the\ntool call response. This is useful in situations where a chat model is able\nto request multiple tool calls in parallel.", # noqa: E501
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"response_metadata": {
"title": "Response Metadata",
"type": "object",
},
"type": {
"title": "Type",
"default": "tool",
"enum": ["tool"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
"id": {"title": "Id", "type": "string"},
"tool_call_id": {"title": "Tool Call Id", "type": "string"},
},
"required": ["content", "tool_call_id"],
},
},
}
assert chat_prompt.output_schema.schema() == snapshot
assert chat_prompt.input_schema.schema() == snapshot(
name="chat_prompt_input_schema"
)
assert chat_prompt.output_schema.schema() == snapshot(
name="chat_prompt_output_schema"
)
prompt = PromptTemplate.from_template("Hello, {name}!")


@@ -3,6 +3,7 @@ from typing import List, Type
import pytest
from langchain_core.load import dumpd, load
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
@@ -20,6 +21,7 @@ from langchain_core.messages import (
convert_to_messages,
get_buffer_string,
message_chunk_to_message,
message_to_dict,
messages_from_dict,
messages_to_dict,
)
@@ -253,7 +255,7 @@ def test_function_message_chunks() -> None:
)
def test_ani_message_chunks() -> None:
def test_ai_message_chunks() -> None:
assert AIMessageChunk(example=True, content="I am") + AIMessageChunk(
example=True, content=" indeed."
) == AIMessageChunk(
@@ -635,7 +637,7 @@ def test_tool_calls_merge() -> None:
def test_convert_to_messages() -> None:
# dicts
assert convert_to_messages(
actual = convert_to_messages(
[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
@@ -656,9 +658,16 @@ def test_convert_to_messages() -> None:
],
},
{"role": "tool", "tool_call_id": "tool_id", "content": "Hi!"},
{
"role": "tool",
"tool_call_id": "tool_id2",
"content": "Bye!",
"raw_output": {"foo": 123},
},
{"role": "remove", "id": "message_to_remove", "content": ""},
]
) == [
)
expected = [
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="Hello!"),
AIMessage(content="Hi!", id="ai1"),
@@ -676,8 +685,10 @@ def test_convert_to_messages() -> None:
tool_calls=[ToolCall(name="greet", args={"name": "Jane"}, id="tool_id")],
),
ToolMessage(tool_call_id="tool_id", content="Hi!"),
ToolMessage(tool_call_id="tool_id2", content="Bye!", raw_output={"foo": 123}),
RemoveMessage(id="message_to_remove"),
]
assert expected == actual
# tuples
assert convert_to_messages(
@@ -773,3 +784,83 @@ def test_merge_tool_calls() -> None:
merged = merge_lists([left], [right])
assert merged is not None
assert len(merged) == 2
def test_tool_message_serdes() -> None:
message = ToolMessage("foo", raw_output={"bar": {"baz": 123}}, tool_call_id="1")
ser_message = {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "messages", "ToolMessage"],
"kwargs": {
"content": "foo",
"type": "tool",
"tool_call_id": "1",
"raw_output": {"bar": {"baz": 123}},
},
}
assert dumpd(message) == ser_message
assert load(dumpd(message)) == message
class BadObject:
""""""
def test_tool_message_ser_non_serializable() -> None:
bad_obj = BadObject()
message = ToolMessage("foo", raw_output=bad_obj, tool_call_id="1")
ser_message = {
"lc": 1,
"type": "constructor",
"id": ["langchain", "schema", "messages", "ToolMessage"],
"kwargs": {
"content": "foo",
"type": "tool",
"tool_call_id": "1",
"raw_output": {
"lc": 1,
"type": "not_implemented",
"id": ["tests", "unit_tests", "test_messages", "BadObject"],
"repr": repr(bad_obj),
},
},
}
assert dumpd(message) == ser_message
with pytest.raises(NotImplementedError):
load(dumpd(ser_message))
def test_tool_message_to_dict() -> None:
message = ToolMessage("foo", raw_output={"bar": {"baz": 123}}, tool_call_id="1")
expected = {
"type": "tool",
"data": {
"content": "foo",
"additional_kwargs": {},
"response_metadata": {},
"raw_output": {"bar": {"baz": 123}},
"type": "tool",
"name": None,
"id": None,
"tool_call_id": "1",
},
}
actual = message_to_dict(message)
assert actual == expected
def test_tool_message_repr() -> None:
message = ToolMessage("foo", raw_output={"bar": {"baz": 123}}, tool_call_id="1")
expected = (
"ToolMessage(content='foo', tool_call_id='1', raw_output={'bar': {'baz': 123}})"
)
actual = repr(message)
assert expected == actual
def test_tool_message_str() -> None:
message = ToolMessage("foo", raw_output={"bar": {"baz": 123}}, tool_call_id="1")
expected = "content='foo' tool_call_id='1' raw_output={'bar': {'baz': 123}}"
actual = str(message)
assert expected == actual
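For decision 5, a purely hypothetical sketch (not in this commit) of what excluding the field would look like: pydantic v1's __repr_args__ hook drives both repr() and str(), so filtering it would hide raw_output from both. The tests above show the field is currently included in each.

from langchain_core.messages.base import BaseMessage

# Hypothetical, not part of this PR: exclude raw_output from repr/str.
class ToolMessage(BaseMessage):
    ...

    def __repr_args__(self):
        # pydantic v1 Representation hook used by both __repr__ and __str__
        return [(k, v) for k, v in super().__repr_args__() if k != "raw_output"]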