Anthropic system message fix (#11301)

Removes the human prompt prefix before system messages for Anthropic models

The Bedrock Anthropic API enforces that Human and Assistant messages must be
interleaved (the same type cannot appear twice in a row). We currently treat
system messages as human messages when converting messages -> string
prompt. Our validation when using Bedrock/BedrockChat raises an error
when this happens. For ChatAnthropic we don't validate this, so no error
is raised, but the behavior is still likely suboptimal.
pull/11381/head
Bagatur 1 year ago committed by GitHub
parent 34a64101cc
commit b499de2926
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -34,7 +34,7 @@ def _convert_one_message_to_text(
elif isinstance(message, AIMessage):
message_text = f"{ai_prompt} {message.content}"
elif isinstance(message, SystemMessage):
message_text = f"{human_prompt} <admin>{message.content}</admin>"
message_text = message.content
else:
raise ValueError(f"Got unknown type {message}")
return message_text
@ -56,7 +56,6 @@ def convert_messages_to_prompt_anthropic(
"""
messages = messages.copy() # don't mutate the original list
if not isinstance(messages[-1], AIMessage):
messages.append(AIMessage(content=""))

@ -42,12 +42,12 @@ def _human_assistant_format(input_text: str) -> str:
if count % 2 == 0:
count += 1
else:
raise ValueError(ALTERNATION_ERROR)
raise ValueError(ALTERNATION_ERROR + f" Received {input_text}")
if input_text[i : i + len(ASSISTANT_PROMPT)] == ASSISTANT_PROMPT:
if count % 2 == 1:
count += 1
else:
raise ValueError(ALTERNATION_ERROR)
raise ValueError(ALTERNATION_ERROR + f" Received {input_text}")
if count % 2 == 1: # Only saw Human, no Assistant
input_text = input_text + ASSISTANT_PROMPT # SILENT CORRECTION

@ -6,7 +6,7 @@ import pytest
from langchain.chat_models import ChatAnthropic
from langchain.chat_models.anthropic import convert_messages_to_prompt_anthropic
from langchain.schema import AIMessage, BaseMessage, HumanMessage
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
os.environ["ANTHROPIC_API_KEY"] = "foo"
@ -50,11 +50,24 @@ def test_anthropic_initialization() -> None:
ChatAnthropic(model="test", anthropic_api_key="test")
def test_formatting() -> None:
messages: List[BaseMessage] = [HumanMessage(content="Hello")]
@pytest.mark.parametrize(
("messages", "expected"),
[
([HumanMessage(content="Hello")], "\n\nHuman: Hello\n\nAssistant:"),
(
[HumanMessage(content="Hello"), AIMessage(content="Answer:")],
"\n\nHuman: Hello\n\nAssistant: Answer:",
),
(
[
SystemMessage(content="You're an assistant"),
HumanMessage(content="Hello"),
AIMessage(content="Answer:"),
],
"You're an assistant\n\nHuman: Hello\n\nAssistant: Answer:",
),
],
)
def test_formatting(messages: List[BaseMessage], expected: str) -> None:
result = convert_messages_to_prompt_anthropic(messages)
assert result == "\n\nHuman: Hello\n\nAssistant:"
messages = [HumanMessage(content="Hello"), AIMessage(content="Answer:")]
result = convert_messages_to_prompt_anthropic(messages)
assert result == "\n\nHuman: Hello\n\nAssistant: Answer:"
assert result == expected

Loading…
Cancel
Save