diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 4a216d2c18..7d0dce4956 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -10,6 +10,7 @@ from typing import (
     Dict,
     Iterator,
     List,
+    Literal,
     Mapping,
     Optional,
     Sequence,
@@ -38,6 +39,7 @@ from langchain_core.messages import (
     BaseMessage,
     HumanMessage,
     SystemMessage,
+    ToolCall,
     ToolMessage,
 )
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
@@ -156,7 +158,7 @@ def _format_messages(messages: List[BaseMessage]) -> Tuple[Optional[str], List[D
             continue
 
         role = _message_type_lookups[message.type]
-        content: Union[str, List[Dict]]
+        content: Union[str, List]
 
         if not isinstance(message.content, str):
             # parse as dict
@@ -195,6 +197,20 @@ def _format_messages(messages: List[BaseMessage]) -> Tuple[Optional[str], List[D
                     raise ValueError(
                         f"Content items must be str or dict, instead was: {type(item)}"
                     )
+        elif (
+            isinstance(message, AIMessage)
+            and not isinstance(message.content, list)
+            and message.tool_calls
+        ):
+            content = (
+                []
+                if not message.content
+                else [{"type": "text", "text": message.content}]
+            )
+            # Note: Anthropic can't have invalid tool calls as presently defined,
+            # since the model already returns dicts args not JSON strings, and invalid
+            # tool calls are those with invalid JSON for args.
+            content += _lc_tool_calls_to_anthropic_tool_use_blocks(message.tool_calls)
         else:
             content = message.content
 
@@ -677,6 +693,29 @@ def _tools_in_params(params: dict) -> bool:
     )
 
 
+class _AnthropicToolUse(TypedDict):
+    type: Literal["tool_use"]
+    name: str
+    input: dict
+    id: str
+
+
+def _lc_tool_calls_to_anthropic_tool_use_blocks(
+    tool_calls: List[ToolCall],
+) -> List[_AnthropicToolUse]:
+    blocks = []
+    for tool_call in tool_calls:
+        blocks.append(
+            _AnthropicToolUse(
+                type="tool_use",
+                name=tool_call["name"],
+                input=tool_call["args"],
+                id=cast(str, tool_call["id"]),
+            )
+        )
+    return blocks
+
+
 @deprecated(since="0.1.0", removal="0.2.0", alternative="ChatAnthropic")
 class ChatAnthropicMessages(ChatAnthropic):
     pass
diff --git a/libs/partners/anthropic/pyproject.toml b/libs/partners/anthropic/pyproject.toml
index e7320a9075..5d83e0fb67 100644
--- a/libs/partners/anthropic/pyproject.toml
+++ b/libs/partners/anthropic/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-anthropic"
-version = "0.1.8"
+version = "0.1.9"
 description = "An integration package connecting AnthropicMessages and LangChain"
 authors = []
 readme = "README.md"
diff --git a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
index 85019553fe..9373b0a1ea 100644
--- a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
@@ -11,7 +11,11 @@ from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr
 from langchain_core.tools import BaseTool
 
 from langchain_anthropic import ChatAnthropic
-from langchain_anthropic.chat_models import _merge_messages, convert_to_anthropic_tool
+from langchain_anthropic.chat_models import (
+    _format_messages,
+    _merge_messages,
+    convert_to_anthropic_tool,
+)
 
 os.environ["ANTHROPIC_API_KEY"] = "foo"
 
@@ -268,3 +272,131 @@ def test_convert_to_anthropic_tool(
     for fn in (pydantic, function, dummy_tool, json_schema, expected, openai_function):
         actual = convert_to_anthropic_tool(fn)  # type: ignore
         assert actual == expected
+
+
+def test__format_messages_with_tool_calls() -> None:
+    system = SystemMessage("fuzz")
+    human = HumanMessage("foo")
+    ai = AIMessage(
+        "",
+        tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}],
+    )
+    tool = ToolMessage(
+        "blurb",
+        tool_call_id="1",
+    )
+    messages = [system, human, ai, tool]
+    expected = (
+        "fuzz",
+        [
+            {"role": "user", "content": "foo"},
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "tool_use",
+                        "name": "bar",
+                        "id": "1",
+                        "input": {"baz": "buzz"},
+                    }
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "tool_result", "content": "blurb", "tool_use_id": "1"}
+                ],
+            },
+        ],
+    )
+    actual = _format_messages(messages)
+    assert expected == actual
+
+
+def test__format_messages_with_str_content_and_tool_calls() -> None:
+    system = SystemMessage("fuzz")
+    human = HumanMessage("foo")
+    # If content and tool_calls are specified and content is a string, then both are
+    # included with content first.
+    ai = AIMessage(
+        "thought",
+        tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}],
+    )
+    tool = ToolMessage(
+        "blurb",
+        tool_call_id="1",
+    )
+    messages = [system, human, ai, tool]
+    expected = (
+        "fuzz",
+        [
+            {"role": "user", "content": "foo"},
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": "thought",
+                    },
+                    {
+                        "type": "tool_use",
+                        "name": "bar",
+                        "id": "1",
+                        "input": {"baz": "buzz"},
+                    },
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "tool_result", "content": "blurb", "tool_use_id": "1"}
+                ],
+            },
+        ],
+    )
+    actual = _format_messages(messages)
+    assert expected == actual
+
+
+def test__format_messages_with_list_content_and_tool_calls() -> None:
+    system = SystemMessage("fuzz")
+    human = HumanMessage("foo")
+    # If content and tool_calls are specified and content is a list, then content is
+    # preferred.
+    ai = AIMessage(
+        [
+            {
+                "type": "text",
+                "text": "thought",
+            }
+        ],
+        tool_calls=[{"name": "bar", "id": "1", "args": {"baz": "buzz"}}],
+    )
+    tool = ToolMessage(
+        "blurb",
+        tool_call_id="1",
+    )
+    messages = [system, human, ai, tool]
+    expected = (
+        "fuzz",
+        [
+            {"role": "user", "content": "foo"},
+            {
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": "thought",
+                    }
+                ],
+            },
+            {
+                "role": "user",
+                "content": [
+                    {"type": "tool_result", "content": "blurb", "tool_use_id": "1"}
+                ],
+            },
+        ],
+    )
+    actual = _format_messages(messages)
+    assert expected == actual
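Context for reviewers: the new `_format_messages` branch is what lets an agent-style tool-calling round trip be replayed against the Anthropic Messages API when the AIMessage only carries `.tool_calls` (string or empty content). The sketch below is illustrative only and not part of the patch; the `get_weather` tool, the model name, and the prompt are invented for the example, and it assumes a langchain-core version with `AIMessage.tool_calls` plus a real `ANTHROPIC_API_KEY` in the environment.

# Illustrative sketch only -- not part of this diff.
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool

from langchain_anthropic import ChatAnthropic


@tool
def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""  # hypothetical example tool
    return f"It is sunny in {city}."


llm = ChatAnthropic(model="claude-3-sonnet-20240229").bind_tools([get_weather])

messages = [HumanMessage("What's the weather in Paris?")]
ai_msg = llm.invoke(messages)  # AIMessage whose .tool_calls is a list of dicts
messages.append(ai_msg)

# Run each requested tool and answer with a ToolMessage keyed by tool_call_id.
for tc in ai_msg.tool_calls:
    result = get_weather.invoke(tc["args"])
    messages.append(ToolMessage(result, tool_call_id=tc["id"]))

# On this second call, _format_messages renders ai_msg.tool_calls as Anthropic
# "tool_use" content blocks (the behavior added in this diff), so the following
# "tool_result" turn is accepted by the API.
final = llm.invoke(messages)
print(final.content)

Before this change, such an AIMessage would be formatted without any tool_use blocks unless the caller had preserved the raw list-of-dicts content, so the follow-up tool_result turn could not reference a matching tool_use id.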