From b15c7fdde6f010f68b2ab10c293516a98c0c669a Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Wed, 27 Mar 2024 23:16:26 -0700
Subject: [PATCH] anthropic[patch]: fix response metadata type (#19683)

---
 .../langchain_anthropic/chat_models.py        | 17 +++++-----
 .../tests/unit_tests/test_chat_models.py      | 31 +++++++++++++++++++
 2 files changed, 39 insertions(+), 9 deletions(-)

diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py
index 084c6c6ec8..0e3e668a3c 100644
--- a/libs/partners/anthropic/langchain_anthropic/chat_models.py
+++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py
@@ -268,16 +268,15 @@ class ChatAnthropic(BaseChatModel):
                     await run_manager.on_llm_new_token(text, chunk=chunk)
                 yield chunk
 
-    def _format_output(
-        self,
-        data: Any,
-        **kwargs: Any,
-    ) -> ChatResult:
+    def _format_output(self, data: Any, **kwargs: Any) -> ChatResult:
+        data_dict = data.model_dump()
+        content = data_dict["content"]
+        llm_output = {
+            k: v for k, v in data_dict.items() if k not in ("content", "role", "type")
+        }
         return ChatResult(
-            generations=[
-                ChatGeneration(message=AIMessage(content=data.content[0].text))
-            ],
-            llm_output=data,
+            generations=[ChatGeneration(message=AIMessage(content=content[0]["text"]))],
+            llm_output=llm_output,
         )
 
     def _generate(
diff --git a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
index 35232dc90c..e36dbccced 100644
--- a/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
+++ b/libs/partners/anthropic/tests/unit_tests/test_chat_models.py
@@ -3,6 +3,9 @@
 import os
 
 import pytest
+from anthropic.types import ContentBlock, Message, Usage
+from langchain_core.messages import AIMessage
+from langchain_core.outputs import ChatGeneration, ChatResult
 
 from langchain_anthropic import ChatAnthropic, ChatAnthropicMessages
 
@@ -52,3 +55,31 @@ def test_anthropic_initialization() -> None:
     # Verify that chat anthropic can be initialized using a secret key provided
     # as a parameter rather than an environment variable.
     ChatAnthropic(model="test", anthropic_api_key="test")
+
+
+def test__format_output() -> None:
+    anthropic_msg = Message(
+        id="foo",
+        content=[ContentBlock(type="text", text="bar")],
+        model="baz",
+        role="assistant",
+        stop_reason=None,
+        stop_sequence=None,
+        usage=Usage(input_tokens=2, output_tokens=1),
+        type="message",
+    )
+    expected = ChatResult(
+        generations=[
+            ChatGeneration(message=AIMessage("bar")),
+        ],
+        llm_output={
+            "id": "foo",
+            "model": "baz",
+            "stop_reason": None,
+            "stop_sequence": None,
+            "usage": {"input_tokens": 2, "output_tokens": 1},
+        },
+    )
+    llm = ChatAnthropic(model="test", anthropic_api_key="test")
+    actual = llm._format_output(anthropic_msg)
+    assert expected == actual