from typing import Any, List, Optional, Type, Union, cast
from langchain_core.messages import AIMessage, ToolCall
from langchain_core.messages.tool import tool_call
from langchain_core.output_parsers import BaseGenerationOutputParser
from langchain_core.outputs import ChatGeneration, Generation
from langchain_core.pydantic_v1 import BaseModel


class ToolsOutputParser(BaseGenerationOutputParser):
    """Output parser for tool calls."""

    first_tool_only: bool = False
    """Whether to return only the first tool call."""
    args_only: bool = False
    """Whether to return only the arguments of the tool calls."""
    pydantic_schemas: Optional[List[Type[BaseModel]]] = None
    """Pydantic schemas to parse tool calls into."""

    class Config:
        extra = "forbid"

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        """Parse a list of candidate model Generations into a specific format.

        Args:
            result: A list of Generations to be parsed. The Generations are assumed
                to be different candidate outputs for a single model input.

        Returns:
            Structured output.
        """
        if not result or not isinstance(result[0], ChatGeneration):
            return None if self.first_tool_only else []
        message = cast(AIMessage, result[0].message)
        # Prefer tool calls already parsed onto the message; otherwise pull them
        # out of the raw content blocks.
        tool_calls: List = [
            dict(tc) for tc in _extract_tool_calls_from_message(message)
        ]
        if isinstance(message.content, list):
            # Map tool call id to index
            id_to_index = {
                block["id"]: i
                for i, block in enumerate(message.content)
                if isinstance(block, dict) and block["type"] == "tool_use"
            }
            tool_calls = [{**tc, "index": id_to_index[tc["id"]]} for tc in tool_calls]
        if self.pydantic_schemas:
            tool_calls = [self._pydantic_parse(tc) for tc in tool_calls]
        elif self.args_only:
            tool_calls = [tc["args"] for tc in tool_calls]
        else:
            pass

        if self.first_tool_only:
            return tool_calls[0] if tool_calls else None
        else:
            return [tool_call for tool_call in tool_calls]

    def _pydantic_parse(self, tool_call: dict) -> BaseModel:
        cls_ = {schema.__name__: schema for schema in self.pydantic_schemas or []}[
            tool_call["name"]
        ]
        return cls_(**tool_call["args"])
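

# Usage sketch for ToolsOutputParser (illustrative only; ``GetWeather`` is a
# hypothetical pydantic schema, not something defined in this module):
#
#     ToolsOutputParser()                              -> list of tool-call dicts
#     ToolsOutputParser(first_tool_only=True)          -> first tool-call dict, or None
#     ToolsOutputParser(args_only=True)                -> list of args dicts
#     ToolsOutputParser(pydantic_schemas=[GetWeather]) -> list of GetWeather instances
#
# When pydantic_schemas is set it takes precedence over args_only, and schemas are
# matched to tool calls by class name (schema.__name__ == the tool call's "name").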
def _extract_tool_calls_from_message(message: AIMessage) -> List[ToolCall]:
    """Extract tool calls from a message, falling back to its content blocks."""
    if message.tool_calls:
        return message.tool_calls
    return extract_tool_calls(message.content)


def extract_tool_calls(content: Union[str, List[Union[str, dict]]]) -> List[ToolCall]:
    """Extract tool calls from a list of content blocks."""
    if isinstance(content, list):
        tool_calls = []
        for block in content:
            if isinstance(block, str):
                continue
            if block["type"] != "tool_use":
                continue
            tool_calls.append(
                tool_call(name=block["name"], args=block["input"], id=block["id"])
            )
        return tool_calls
    else:
        return []
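

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the library API: the content blocks
    # below mimic the shape of an Anthropic tool_use response, and the tool name,
    # id, and arguments are made up for illustration.
    _blocks: List[Union[str, dict]] = [
        {"type": "text", "text": "Let me check the weather."},
        {
            "type": "tool_use",
            "id": "toolu_01",
            "name": "get_weather",
            "input": {"city": "Paris"},
        },
    ]

    # Pull tool calls straight out of the raw content blocks.
    print(extract_tool_calls(_blocks))
    # expected: one ToolCall dict with name="get_weather", args={"city": "Paris"},
    # id="toolu_01"

    # Run the full parser over a ChatGeneration; args_only keeps just the args.
    _parser = ToolsOutputParser(args_only=True)
    _generation = ChatGeneration(message=AIMessage(content=_blocks))
    print(_parser.parse_result([_generation]))
    # expected: [{"city": "Paris"}]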