"""Test ChatAnthropic chat model."""
import json
from typing import List
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    ToolMessage,
)
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_anthropic import ChatAnthropic, ChatAnthropicMessages
from tests.unit_tests._utils import FakeCallbackHandler
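# NOTE: these are live integration tests; they assume a valid ANTHROPIC_API_KEY
# is set in the environment and make real API calls.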
MODEL_NAME = "claude-3-sonnet-20240229"


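# The tests below exercise the standard Runnable surface (stream/astream,
# batch/abatch, invoke/ainvoke) and only sanity-check output types, since
# live model output is nondeterministic.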
def test_stream() -> None:
    """Test streaming tokens from Anthropic."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    for token in llm.stream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


async def test_astream() -> None:
    """Test streaming tokens from Anthropic."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    async for token in llm.astream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


async def test_abatch() -> None:
    """Test batch tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


async def test_abatch_tags() -> None:
    """Test batch tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    result = await llm.abatch(
        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
    )
    for token in result:
        assert isinstance(token.content, str)


def test_batch() -> None:
    """Test batch tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


async def test_ainvoke() -> None:
    """Test invoke tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)


def test_invoke() -> None:
    """Test invoke tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
    assert isinstance(result.content, str)


def test_system_invoke() -> None:
    """Test invoke tokens with a system message."""
    llm = ChatAnthropicMessages(model_name=MODEL_NAME)

    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are an expert cartographer. If asked, you are a cartographer. "
                "STAY IN CHARACTER",
            ),
            ("human", "Are you a mathematician?"),
        ]
    )

    chain = prompt | llm

    result = chain.invoke({})
    assert isinstance(result.content, str)


def test_anthropic_call() -> None:
    """Test valid call to anthropic."""
    chat = ChatAnthropic(model="test")
    message = HumanMessage(content="Hello")
    response = chat.invoke([message])
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)


def test_anthropic_generate() -> None:
    """Test generate method of anthropic."""
    chat = ChatAnthropic(model="test")
    chat_messages: List[List[BaseMessage]] = [
        [HumanMessage(content="How many toes do dogs have?")]
    ]
    messages_copy = [messages.copy() for messages in chat_messages]
    result: LLMResult = chat.generate(chat_messages)
    assert isinstance(result, LLMResult)
    for response in result.generations[0]:
        assert isinstance(response, ChatGeneration)
        assert isinstance(response.text, str)
        assert response.text == response.message.content
    assert chat_messages == messages_copy


def test_anthropic_streaming() -> None:
    """Test streaming tokens from anthropic."""
    chat = ChatAnthropic(model="test")
    message = HumanMessage(content="Hello")
    response = chat.stream([message])
    for token in response:
        assert isinstance(token, AIMessageChunk)
        assert isinstance(token.content, str)


def test_anthropic_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatAnthropic(
        model="test",
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Write me a sentence with 10 words.")
    for token in chat.stream([message]):
        assert isinstance(token, AIMessageChunk)
        assert isinstance(token.content, str)
    assert callback_handler.llm_streams > 1


async def test_anthropic_async_streaming_callback() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = ChatAnthropic(
        model="test",
        callback_manager=callback_manager,
        verbose=True,
    )
    chat_messages: List[BaseMessage] = [
        HumanMessage(content="How many toes do dogs have?")
    ]
    async for token in chat.astream(chat_messages):
        assert isinstance(token, AIMessageChunk)
        assert isinstance(token.content, str)
    assert callback_handler.llm_streams > 1


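# Anthropic multimodal input uses a list of content blocks on a single
# HumanMessage; the image below is inlined as a base64 data URL.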
def test_anthropic_multimodal() -> None:
    """Test that multimodal inputs are handled correctly."""
    chat = ChatAnthropic(model=MODEL_NAME)
    messages = [
        HumanMessage(
            content=[
                {
                    "type": "image_url",
                    "image_url": {
                        # langchain logo
                        "url": "data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAMCAggHCQgGCQgICAcICAgICAgICAYICAgHDAgHCAgICAgIBggICAgICAgICBYICAgICwkKCAgNDQoIDggICQgBAwQEBgUGCgYGCBALCg0QCg0NEA0KCg8LDQoKCgoLDgoQDQoLDQoKCg4NDQ0NDgsQDw0OCg4NDQ4NDQoJDg8OCP/AABEIALAAsAMBEQACEQEDEQH/xAAdAAEAAgEFAQAAAAAAAAAAAAAABwgJAQIEBQYD/8QANBAAAgIBAwIDBwQCAgIDAAAAAQIAAwQFERIIEwYhMQcUFyJVldQjQVGBcZEJMzJiFRYk/8QAGwEBAAMAAwEAAAAAAAAAAAAAAAQFBgEDBwL/xAA5EQACAQIDBQQJBAIBBQAAAAAAAQIDEQQhMQVBUWGREhRxgRMVIjJSU8HR8CNyobFCguEGJGKi4v/aAAwDAQACEQMRAD8ApfJplBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBANl16qOTEKB6kkAD+z5Tkcj0On+z7Ub1FlOmanejeavj6dqV6kfsQ1OK4IP8AIM6pVYR1kuqJdLCV6qvCnJ/6v66nL+Ems/RNc+y63+BOvvFL411O/wBW4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6HE1D2e6lQpsu0zU6EXzZ8jTtSoUD9yWuxUAA/kmdkasJaSXVHRVwlekrzpyX+r+mh56m9WHJSGU+hUgg/wBjynaRORvnAEAQBAEAQBAEAQCbennpVzfER95LHE0tX4tlsnJr2B2srw6yQLCpBQ3Me1W+4/VZLKlh4jFRo5ay4cPH7f0XWA2XUxft37MONs34ffRcy/Xsu6bdG0UK2Nh1tkAbHMyAt+Wx2HIi11/SDcQe3jrTXv6IJRVcRUqe88uC0Nxhdn0MMv0458XnJ+e7wVlyJPJkYsTSAIAgCAIAgCAIBqDAIx9qHTbo2tBmycOtcgjYZmOBRlqdjxJtQDuhdye3ette/qhkmliKlP3XlwehXYrZ9DEr9SOfFZS6rXwd1yKCdQ3Srm+HT7yGOXpbPxXLVOLUMTtXXmVgkVliQgvU9qx9h+kz11Ne4fFRrZaS4cfD7f2YfH7LqYT279qHHevH76PlvhKTClEAQBAEAQBAJp6WOn0+I80i7mumYnF8x1LIbSSe3iV2DYq13ElnQ8q6gdijWUuIeKxHoY5e89PuXWy8D3qp7S9iOvN/D9+XiZRNN06uiuvHqrSqmpFrqqrVUrrrUBUREUBVVVAAUAAATNNtu7PR4xUUoxVkskloktxyCZwfRj26jetHPtzrMXSM4Uabj7Vrfj10O2ZdsDbb3bqrCKEYmpeyED8Hs53LZVwvsPg4qN6kbt+OS8t5hdobYqOo44edorK6SzfmtFpz14H16f8Arkz6cmrD1e9crBvsFZy3ropvxC2yo7NTXXXbjhtuXcTmisz91hX2yr4KLjemrNbuPXeMDtuoqihiGnF/5ZJx55ZNceF76GQSUJuhAEAQBAEAhb239WWl+H391s7mXnbAnExu2WqUjdWyLHda6Qw2IXdrCCGFZX5pMo4WdXNZLiyoxm1KOFfZl7UuCtdeN2kvzcRB4d/5JMV7OOVpWRRSWAFmPk1ZTKN9uT1PRi+QHnsj2H12DHYGXLZzS9mV3zVvuVFL/qGDlapSaXFST6qyfS/3tb4M8a4up49WoYlyZGLcCUsTf1B2ZGVgHrsRgVNbqrIwIYAjaVc4Sg+zJWZqaVWFWCnB3T0/PodnqOnV312Y9taW02o1dtViq9dlbAq6OjAqyspIKkEEGfKbTuj7lFSTjJXTyaejXAxd9U/T6fDmYBTzbTMvm+G7FnNRBHcxLLDuWankCrueVlRG5dq7nOlwuI9NHP3lr9zzjamA7rU9n3Jacn8P25eBC0mFKIAgCAIBtdwASfQDc/4nIbsZXulr2ZDR9HwsYpxybqxmZe4Xl71cquyMR69hO3jg+fy0r5n1OWxNX0lRvdovBflz1DZuG7vh4xtZtXl+55vpp5EsyKWZ5X2seH783TdRwsZgmVk4OVRQzMUUXPRYle7gEoCxA5gEqDvsdp2U5KM03omv7I+Ig6lKUIuzaaXmigPtb6HNQ0bEytTGXjZeLiKlhWuu6rINPMLbY1bFqkXHQ908b7CyK+wUqFe+pY2FSSjZpvnl+MwmJ2JVw9OVTtqUYq+Sadt+WaVtd9+W+uLLv5HzB8j/AIlgZ8yRdGfUXXq2JXpGTZtquFUE+cnfMxU2Wu9CzEvaicEsG+/MdzYLbsmexmHdOXaS9l/w+H2PQ9kY9V6apyftxVtdUtJc3x58iykrjQCAIAgFdurzqbPh+lMHFKHVspC6FuLLh427Icp0O4d2ZWREb5WZLGbktJrssMJhvSu8vdX8vh9zP7X2i8LBRp27b46Rj8Vt73JebyVnCfSz0jNqh/8AsGsrZZRcxuoxrms7ua7HmcvLYkOaXJ5Ctjvkb8n/AE+K3TcVi+x+nS6rdyX33eJTbL2S636+JTaeaTveTf8AlLlwjv35ZFmfHnSnoWo47Yo0/FxLOBWnJw8ejHuobb5GVqkUOqnY9qwOjDyI9CKyGKqwd+03ybdjS19mYarHs+jSe5pJNdP6KudBPiTIwNYz/D1jA1WJk91AWKLqGJctDWVg+QFlfdQtsGcVY+//AFgSzx0VKmqi5dJK/wCeZm9iVJ0sRPDye6WWdu1BpXWeV78M8uGd/wCURuCJuqX2YjWNHzMYJyyaKzmYm3Hl71SrOqKW8h307mOT5fLc3mPUSsNV9HUT3aPwf5crNpYbvGHlG2azj+5Zrrp5mKFHBAI9CNx/iak8vTubpwBAEAQDtPCekLk5WHiON0yczFx3H8pbkVVMP7VyJ8zfZi3wTfRHdRh26kI8ZRXk5IzREf6mPPXTSAIB1/iPQa8yjIwrVD05NFuPYrAFWrsrat1YHyIKsRsf2nMXZpo+ZR7UXF77rqYW2xHrJqsHG2smu1T6rapKWKf8OCP6mxvfNHj1nH2XqsnfW6yOVpGr241teVRY9ORS4sqtrPF67B6Mp/2NiCGBIIYMQeGlJWaujsp1JU5KcHZrQyZdK/U3X4ipONdwq1fGQNkVL5JkVbhfe8cE/wDgWKq1e5NFjKD8ttLPm8ThnSd17r0+35qej7N2hHFQs8prVfVcv6J4kIuBAKtdWnV8uj89I090fVeP/wCi8hXq05CvIcg26PmMpDCpgVqUrZaCGqrussLhPSe3P3f7/wCOf4s9tTaXd16On77/APXn48EU58OYl+RremrrRyHbJzdPbI9+LvZZjW21vUlgs5FMe4OqmshVrrscca9jtcSaVKXotydrcVr58zH04znioLFXd3G/a17L08E3u5vJEveGeobX/Cuq2YmttbbjX3NflUu7ZC1VW2OTlaZZuzDHrIbbGXZOFbV9qmwfLElh6Venelqsl4rc+fP6FtT2hicHiHDEu8W7u+ii8lKObtHL3fH/AC1tn1AdReJ4exVvJW/MyEJwcVWG9x2G1zkb8MVNwTbt83kqhmYCVVDDyqytot7/ADeanG46GFh2nm37q4/8c/qVr/4/fZ9k5Obm+J7+Xa430V2soVcrNuuW3LtT+RQUNZKjj3L2QHlRYqWOPqJRVJcvJJWRnth4epKp",  # noqa: E501
                    },
                },
                {"type": "text", "text": "What is this a logo for?"},
            ]
        )
    ]
    response = chat.invoke(messages)
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)


def test_streaming() -> None:
    """Test streaming tokens from Anthropic."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])

    llm = ChatAnthropicMessages(
        model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager
    )

    response = llm.generate([[HumanMessage(content="I'm Pickle Rick")]])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, LLMResult)


async def test_astreaming() -> None:
    """Test streaming tokens from Anthropic."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])

    llm = ChatAnthropicMessages(
        model_name=MODEL_NAME, streaming=True, callback_manager=callback_manager
    )

    response = await llm.agenerate([[HumanMessage(content="I'm Pickle Rick")]])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, LLMResult)


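# bind_tools accepts Anthropic-format tool schemas; parsed calls surface on
# AIMessage.tool_calls as ToolCall dicts ({"name", "args", "id"}), per the
# tool-calls message work in langchain-core (#18947).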
def test_tool_use() -> None:
    llm = ChatAnthropic(
        model="claude-3-opus-20240229",
    )

    llm_with_tools = llm.bind_tools(
        [
            {
                "name": "get_weather",
                "description": "Get weather report for a city",
                "input_schema": {
                    "type": "object",
                    "properties": {"location": {"type": "string"}},
                },
            }
        ]
    )
    response = llm_with_tools.invoke("what's the weather in san francisco, ca")
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, list)
    assert isinstance(response.tool_calls, list)
    assert len(response.tool_calls) == 1
    tool_call = response.tool_calls[0]
    assert tool_call["name"] == "get_weather"
    assert isinstance(tool_call["args"], dict)
    assert "location" in tool_call["args"]
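    # When chunks are summed, tool_call_chunks are appended and chunks sharing
    # the same `index` are merged (see #18947), so one coherent call remains.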
# Test streaming
|
|
|
|
first = True
|
|
|
|
for chunk in llm_with_tools.stream("what's the weather in san francisco, ca"):
|
|
|
|
if first:
|
|
|
|
gathered = chunk
|
|
|
|
first = False
|
|
|
|
else:
|
|
|
|
gathered = gathered + chunk # type: ignore
|
|
|
|
assert isinstance(gathered, AIMessageChunk)
|
|
|
|
assert isinstance(gathered.tool_call_chunks, list)
|
|
|
|
assert len(gathered.tool_call_chunks) == 1
|
|
|
|
tool_call_chunk = gathered.tool_call_chunks[0]
|
|
|
|
assert tool_call_chunk["name"] == "get_weather"
|
|
|
|
assert isinstance(tool_call_chunk["args"], str)
|
|
|
|
assert "location" in json.loads(tool_call_chunk["args"])


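# Regression test: replaying a history whose AIMessage content includes an
# empty text block should round-trip through invoke without raising.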
def test_anthropic_with_empty_text_block() -> None:
    """Anthropic SDK can return an empty text block."""

    @tool
    def type_letter(letter: str) -> str:
        """Type the given letter."""
        return "OK"

    model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0).bind_tools(
        [type_letter]
    )

    messages = [
        SystemMessage(
            content="Repeat the given string using the provided tools. Do not write "
            "anything else or provide any explanations. For example, "
            "if the string is 'abc', you must print the "
            "letters 'a', 'b', and 'c' one at a time and in that order. "
        ),
        HumanMessage(content="dog"),
        AIMessage(
            content=[
                {"text": "", "type": "text"},
                {
                    "id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
                    "input": {"letter": "d"},
                    "name": "type_letter",
                    "type": "tool_use",
                },
            ],
            tool_calls=[
                {
                    "name": "type_letter",
                    "args": {"letter": "d"},
                    "id": "toolu_01V6d6W32QGGSmQm4BT98EKk",
                },
            ],
        ),
        ToolMessage(content="OK", tool_call_id="toolu_01V6d6W32QGGSmQm4BT98EKk"),
    ]

    model.invoke(messages)


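# with_structured_output with a dict (Anthropic tool) schema returns the
# parsed tool arguments directly as a dict rather than an AIMessage.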
def test_with_structured_output() -> None:
    llm = ChatAnthropic(
        model="claude-3-opus-20240229",
    )

    structured_llm = llm.with_structured_output(
        {
            "name": "get_weather",
            "description": "Get weather report for a city",
            "input_schema": {
                "type": "object",
                "properties": {"location": {"type": "string"}},
            },
        }
    )
    response = structured_llm.invoke("what's the weather in san francisco, ca")
    assert isinstance(response, dict)
    assert response["location"]