core[patch], community[patch], openai[patch]: consolidate openai tool… (#16485)

… converters

One way to convert anything to an OAI function:
convert_to_openai_function
One way to convert anything to an OAI tool: convert_to_openai_tool
Corresponding bind functions on OAI models: bind_functions, bind_tools
pull/16588/head
Bagatur 5 months ago committed by GitHub
parent 148347e858
commit ef42d9d559
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

File diff suppressed because one or more lines are too long

@ -129,7 +129,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"id": "a0fbfbba-3c82-4298-a312-9cec016d9d2e",
"metadata": {},
"outputs": [],
@ -138,8 +138,7 @@
"from langchain.agents import AgentExecutor\n",
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"from langchain.tools import DuckDuckGoSearchResults\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n",
"from langchain_community.tools import DuckDuckGoSearchResults\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Fetches the latest version of this prompt\n",
@ -156,7 +155,7 @@
" ), # General internet search using DuckDuckGo\n",
"]\n",
"\n",
"llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])\n",
"llm_with_tools = llm.bind_functions(tools)\n",
"\n",
"runnable_agent = (\n",
" {\n",
@ -334,7 +333,6 @@
"from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n",
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
@ -1345,9 +1343,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv",
"language": "python",
"name": "python3"
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
@ -1359,7 +1357,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -43,7 +43,7 @@
"metadata": {},
"outputs": [],
"source": [
"# pip install chromadb"
"%pip install -qU chromadb langchain langchain-community langchain-openai"
]
},
{
@ -61,7 +61,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 2,
"id": "e3002ed7",
"metadata": {},
"outputs": [],
@ -96,14 +96,12 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "204ef7ca",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.agent_toolkits.conversational_retrieval.tool import (\n",
" create_retriever_tool,\n",
")\n",
"from langchain.tools.retriever import create_retriever_tool\n",
"\n",
"retriever_tool = create_retriever_tool(\n",
" retriever,\n",
@ -124,15 +122,14 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"id": "2df91723",
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n",
"from pydantic import BaseModel, Field\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class Response(BaseModel):\n",
@ -169,7 +166,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "dfb73fe3",
"metadata": {},
"outputs": [],
@ -181,7 +178,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 6,
"id": "5b46cdb2",
"metadata": {},
"outputs": [],
@ -224,14 +221,13 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "73c785f9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor\n",
"from langchain.agents.format_scratchpad import format_to_openai_function_messages\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n",
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"from langchain_openai import ChatOpenAI"
]
@ -269,14 +265,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm_with_tools = llm.bind(\n",
" functions=[\n",
" # The retriever tool\n",
" format_tool_to_openai_function(retriever_tool),\n",
" # Response schema\n",
" convert_pydantic_to_openai_function(Response),\n",
" ]\n",
")"
"llm_with_tools = llm.bind_functions([retriever_tool, Response])"
]
},
{
@ -302,7 +291,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 12,
"id": "2cfd783e",
"metadata": {},
"outputs": [],
@ -322,7 +311,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 20,
"id": "2667c9a4",
"metadata": {},
"outputs": [
@ -333,7 +322,55 @@
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\u001b[0m\u001b[36;1m\u001b[1;3m[Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, Id like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence.', metadata={'page_chunk': 31, 'source': '../../state_of_the_union.txt'}), Document(page_content='One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \\n\\nWhen they came home, many of the worlds fittest and best trained warriors were never the same. \\n\\nHeadaches. Numbness. Dizziness. \\n\\nA cancer that would put them in a flag-draped coffin. \\n\\nI know. \\n\\nOne of those soldiers was my son Major Beau Biden. \\n\\nWe dont know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \\n\\nBut Im committed to finding out everything we can. \\n\\nCommitted to military families like Danielle Robinson from Ohio. \\n\\nThe widow of Sergeant First Class Heath Robinson. \\n\\nHe was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \\n\\nStationed near Baghdad, just yards from burn pits the size of football fields. \\n\\nHeaths widow Danielle is here with us tonight. They loved going to Ohio State football games. 
He loved building Legos with their daughter.', metadata={'page_chunk': 37, 'source': '../../state_of_the_union.txt'}), Document(page_content='A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since shes been nominated, shes received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \\n\\nAnd if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \\n\\nWe can do both. At our border, weve installed new technology like cutting-edge scanners to better detect drug smuggling. \\n\\nWeve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \\n\\nWere putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \\n\\nWere securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.', metadata={'page_chunk': 32, 'source': '../../state_of_the_union.txt'}), Document(page_content='But cancer from prolonged exposure to burn pits ravaged Heaths lungs and body. \\n\\nDanielle says Heath was a fighter to the very end. \\n\\nHe didnt know how to stop fighting, and neither did she. \\n\\nThrough her pain she found purpose to demand we do better. \\n\\nTonight, Danielle—we are. \\n\\nThe VA is pioneering new ways of linking toxic exposures to diseases, already helping more veterans get benefits. \\n\\nAnd tonight, Im announcing were expanding eligibility to veterans suffering from nine respiratory cancers. \\n\\nIm also calling on Congress: pass a law to make sure veterans devastated by toxic exposures in Iraq and Afghanistan finally get the benefits and comprehensive health care they deserve. \\n\\nAnd fourth, lets end cancer as we know it. 
\\n\\nThis is personal to me and Jill, to Kamala, and to so many of you. \\n\\nCancer is the #2 cause of death in Americasecond only to heart disease.', metadata={'page_chunk': 38, 'source': '../../state_of_the_union.txt'})]\u001b[0m\u001b[32;1m\u001b[1;3m{'name': 'Response', 'arguments': '{\\n \"answer\": \"President mentioned Ketanji Brown Jackson as a nominee for the United States Supreme Court and praised her as one of the nation\\'s top legal minds.\",\\n \"sources\": [31]\\n}'}\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\u001b[0m\u001b[36;1m\u001b[1;3mTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while youre at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, Id like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nations top legal minds, who will continue Justice Breyers legacy of excellence.\n",
"\n",
"And for our LGBTQ+ Americans, lets finally get the bipartisan Equality Act to my desk. The onslaught of state laws targeting transgender Americans and their families is wrong. \n",
"\n",
"As I said last year, especially to our younger transgender Americans, I will always have your back as your President, so you can be yourself and reach your God-given potential. \n",
"\n",
"While it often appears that we never agree, that isnt true. I signed 80 bipartisan bills into law last year. From preventing government shutdowns to protecting Asian-Americans from still-too-common hate crimes to reforming military justice. \n",
"\n",
"And soon, well strengthen the Violence Against Women Act that I first wrote three decades ago. It is important for us to show the nation that we can come together and do big things. \n",
"\n",
"So tonight Im offering a Unity Agenda for the Nation. Four big things we can do together. \n",
"\n",
"First, beat the opioid epidemic.\n",
"\n",
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n",
"\n",
"Last year COVID-19 kept us apart. This year we are finally together again. \n",
"\n",
"Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n",
"\n",
"With a duty to one another to the American people to the Constitution. \n",
"\n",
"And with an unwavering resolve that freedom will always triumph over tyranny. \n",
"\n",
"Six days ago, Russias Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n",
"\n",
"He thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n",
"\n",
"He met the Ukrainian people. \n",
"\n",
"From President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world.\n",
"\n",
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since shes been nominated, shes received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
"\n",
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
"\n",
"We can do both. At our border, weve installed new technology like cutting-edge scanners to better detect drug smuggling. \n",
"\n",
"Weve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n",
"\n",
"Were putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n",
"\n",
"Were securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\u001b[0m\u001b[32;1m\u001b[1;3m{'arguments': '{\\n\"answer\": \"President Biden nominated Ketanji Brown Jackson for the United States Supreme Court and described her as one of our nation\\'s top legal minds who will continue Justice Breyer\\'s legacy of excellence.\",\\n\"sources\": [6]\\n}', 'name': 'Response'}\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@ -341,18 +378,18 @@
{
"data": {
"text/plain": [
"{'answer': \"President mentioned Ketanji Brown Jackson as a nominee for the United States Supreme Court and praised her as one of the nation's top legal minds.\",\n",
" 'sources': [31]}"
"{'answer': \"President Biden nominated Ketanji Brown Jackson for the United States Supreme Court and described her as one of our nation's top legal minds who will continue Justice Breyer's legacy of excellence.\",\n",
" 'sources': [6]}"
]
},
"execution_count": 18,
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke(\n",
" {\"input\": \"what did the president say about kentaji brown jackson\"},\n",
" {\"input\": \"what did the president say about ketanji brown jackson\"},\n",
" return_only_outputs=True,\n",
")"
]
@ -368,9 +405,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv",
"language": "python",
"name": "python3"
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
@ -382,7 +419,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -152,9 +152,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_tool\n",
"\n",
"llm_with_tools = llm.bind(tools=[format_tool_to_openai_tool(tool) for tool in tools])"
"llm_with_tools = llm.bind_tools(tools)"
]
},
{
@ -229,9 +227,9 @@
{
"data": {
"text/plain": [
"[{'actions': [OpenAIToolAgentAction(tool='get_word_length', tool_input={'word': 'eudca'}, log=\"\\nInvoking: `get_word_length` with `{'word': 'eudca'}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_U9SR78eT398r9UbzID2N9LXh', 'function': {'arguments': '{\\n \"word\": \"eudca\"\\n}', 'name': 'get_word_length'}, 'type': 'function'}]})], tool_call_id='call_U9SR78eT398r9UbzID2N9LXh')],\n",
" 'messages': [AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_U9SR78eT398r9UbzID2N9LXh', 'function': {'arguments': '{\\n \"word\": \"eudca\"\\n}', 'name': 'get_word_length'}, 'type': 'function'}]})]},\n",
" {'steps': [AgentStep(action=OpenAIToolAgentAction(tool='get_word_length', tool_input={'word': 'eudca'}, log=\"\\nInvoking: `get_word_length` with `{'word': 'eudca'}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_U9SR78eT398r9UbzID2N9LXh', 'function': {'arguments': '{\\n \"word\": \"eudca\"\\n}', 'name': 'get_word_length'}, 'type': 'function'}]})], tool_call_id='call_U9SR78eT398r9UbzID2N9LXh'), observation=5)],\n",
"[{'actions': [OpenAIToolAgentAction(tool='get_word_length', tool_input={'word': 'eudca'}, log=\"\\nInvoking: `get_word_length` with `{'word': 'eudca'}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_A07D5TuyqcNIL0DIEVRPpZkg', 'function': {'arguments': '{\\n \"word\": \"eudca\"\\n}', 'name': 'get_word_length'}, 'type': 'function'}]})], tool_call_id='call_A07D5TuyqcNIL0DIEVRPpZkg')],\n",
" 'messages': [AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_A07D5TuyqcNIL0DIEVRPpZkg', 'function': {'arguments': '{\\n \"word\": \"eudca\"\\n}', 'name': 'get_word_length'}, 'type': 'function'}]})]},\n",
" {'steps': [AgentStep(action=OpenAIToolAgentAction(tool='get_word_length', tool_input={'word': 'eudca'}, log=\"\\nInvoking: `get_word_length` with `{'word': 'eudca'}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_A07D5TuyqcNIL0DIEVRPpZkg', 'function': {'arguments': '{\\n \"word\": \"eudca\"\\n}', 'name': 'get_word_length'}, 'type': 'function'}]})], tool_call_id='call_A07D5TuyqcNIL0DIEVRPpZkg'), observation=5)],\n",
" 'messages': [FunctionMessage(content='5', name='get_word_length')]},\n",
" {'output': 'There are 5 letters in the word \"eudca\".',\n",
" 'messages': [AIMessage(content='There are 5 letters in the word \"eudca\".')]}]"
@ -449,7 +447,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.9.1"
},
"vscode": {
"interpreter": {

@ -12,71 +12,101 @@
},
{
"cell_type": "code",
"execution_count": 1,
"id": "d65d8a60",
"execution_count": null,
"id": "bb220019-4012-4da4-bfee-01fb8189aa49",
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import HumanMessage\n",
"from langchain_openai import ChatOpenAI"
"%pip install -qU langchain-community langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "abd8dc72",
"execution_count": 19,
"id": "d65d8a60",
"metadata": {},
"outputs": [],
"source": [
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0613\")"
"from langchain_community.tools import MoveFileTool\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.utils.function_calling import convert_to_openai_function\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "dce2cdb7",
"execution_count": 20,
"id": "abd8dc72",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import MoveFileTool, format_tool_to_openai_function"
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 21,
"id": "3b3dc766",
"metadata": {},
"outputs": [],
"source": [
"tools = [MoveFileTool()]\n",
"functions = [format_tool_to_openai_function(t) for t in tools]"
"functions = [convert_to_openai_function(t) for t in tools]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 12,
"id": "d38c4a22-2e9e-4d15-a9e1-bf8103c6303b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'name': 'move_file',\n",
" 'description': 'Move or rename a file from one location to another',\n",
" 'parameters': {'type': 'object',\n",
" 'properties': {'source_path': {'description': 'Path of the file to move',\n",
" 'type': 'string'},\n",
" 'destination_path': {'description': 'New path for the moved file',\n",
" 'type': 'string'}},\n",
" 'required': ['source_path', 'destination_path']}}"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"functions[0]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "230a7939",
"metadata": {},
"outputs": [],
"source": [
"message = model.predict_messages(\n",
"message = model.invoke(\n",
" [HumanMessage(content=\"move file foo to bar\")], functions=functions\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 16,
"id": "c118c940",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'move_file', 'arguments': '{\\n \"source_path\": \"foo\",\\n \"destination_path\": \"bar\"\\n}'}}, example=False)"
"AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\\n \"source_path\": \"foo\",\\n \"destination_path\": \"bar\"\\n}', 'name': 'move_file'}})"
]
},
"execution_count": 6,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@ -107,13 +137,65 @@
"message.additional_kwargs[\"function_call\"]"
]
},
{
"cell_type": "markdown",
"id": "77dd0d9f-2f24-4535-a658-a061f91e009a",
"metadata": {},
"source": [
"With OpenAI chat models we can also automatically bind and convert function-like objects with `bind_functions`"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "751da79f",
"execution_count": 17,
"id": "24bb1518-8100-4ac3-acea-04acfac963d1",
"metadata": {},
"outputs": [],
"source": []
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\\n \"source_path\": \"foo\",\\n \"destination_path\": \"bar\"\\n}', 'name': 'move_file'}})"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_with_functions = model.bind_functions(tools)\n",
"model_with_functions.invoke([HumanMessage(content=\"move file foo to bar\")])"
]
},
{
"cell_type": "markdown",
"id": "000ec6ff-ca67-4206-ba56-cc2a91b85ce6",
"metadata": {},
"source": [
"Or we can use the update OpenAI API that uses `tools` and `tool_choice` instead of `functions` and `function_call` by using `ChatOpenAI.bind_tools`:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "1a333e4e-df55-4e15-9d2e-4fd142d969f3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_btkY3xV71cEVAOHnNa5qwo44', 'function': {'arguments': '{\\n \"source_path\": \"foo\",\\n \"destination_path\": \"bar\"\\n}', 'name': 'move_file'}, 'type': 'function'}]})"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_with_tools = model.bind_tools(tools)\n",
"model_with_tools.invoke([HumanMessage(content=\"move file foo to bar\")])"
]
}
],
"metadata": {

@ -0,0 +1,492 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "dae8d4ed-9150-45da-b494-7717ab0a2960",
"metadata": {},
"source": [
"# Function calling\n",
"\n",
"Certain chat models, like [OpenAI's](https://platform.openai.com/docs/guides/function-calling), have a function-calling API that lets you describe functions and their arguments, and have the model return a JSON object with a function to invoke and the inputs to that function. Function-calling is extremely useful for building [tool-using chains and agents](/docs/use_cases/tool_use/), and for getting structured outputs from models more generally.\n",
"\n",
"LangChain comes with a number of utilities to make function-calling easy. Namely, it comes with\n",
"\n",
"* simple syntax for binding functions to models\n",
"* converters for formatting various types of objects to the expected function schemas\n",
"* output parsers for extracting the function invocations from API responses\n",
"\n",
"We'll focus here on the first two bullets. To see how output parsing works as well check out the [OpenAI Tools output parsers](/docs/modules/model_io/output_parsers/types/openai_tools)."
]
},
{
"cell_type": "markdown",
"id": "a177c64b-7c99-495c-b362-5ed3b40aa26a",
"metadata": {},
"source": [
"## Defining functions\n",
"\n",
"We'll focus on the [OpenAI function format](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools) here since as of this writing that is the main model provider that supports function calling. LangChain has a built-in converter that can turn Python functions, Pydantic classes, and LangChain Tools into the OpenAI function format:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "f6d1dc0c-6170-4977-809f-365099f628ea",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.3.2\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain-core langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "6bd290bd-7621-466b-a73e-fc8480f879ec",
"metadata": {},
"source": [
"### Python function"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "41ebab5c-0e9f-4b49-86ee-9290ced2fe96",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"multiply\",\n",
" \"description\": \"Multiply two integers together.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"a\": {\n",
" \"type\": \"integer\",\n",
" \"description\": \"First integer\"\n",
" },\n",
" \"b\": {\n",
" \"type\": \"integer\",\n",
" \"description\": \"Second integer\"\n",
" }\n",
" },\n",
" \"required\": [\n",
" \"a\",\n",
" \"b\"\n",
" ]\n",
" }\n",
" }\n",
"}\n"
]
}
],
"source": [
"import json\n",
"\n",
"from langchain_core.utils.function_calling import convert_to_openai_tool\n",
"\n",
"\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two integers together.\n",
"\n",
" Args:\n",
" a: First integer\n",
" b: Second integer\n",
" \"\"\"\n",
" return a * b\n",
"\n",
"\n",
"print(json.dumps(convert_to_openai_tool(multiply), indent=2))"
]
},
{
"cell_type": "markdown",
"id": "ecf22577-38ab-48f1-ba0b-371aaba1bacc",
"metadata": {},
"source": [
"### Pydantic class"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ecc8ffd4-aed3-4f47-892d-1896cc1ca4dc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"multiply\",\n",
" \"description\": \"Multiply two integers together.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"a\": {\n",
" \"description\": \"First integer\",\n",
" \"type\": \"integer\"\n",
" },\n",
" \"b\": {\n",
" \"description\": \"Second integer\",\n",
" \"type\": \"integer\"\n",
" }\n",
" },\n",
" \"required\": [\n",
" \"a\",\n",
" \"b\"\n",
" ]\n",
" }\n",
" }\n",
"}\n"
]
}
],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class multiply(BaseModel):\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
"\n",
" a: int = Field(..., description=\"First integer\")\n",
" b: int = Field(..., description=\"Second integer\")\n",
"\n",
"\n",
"print(json.dumps(convert_to_openai_tool(multiply), indent=2))"
]
},
{
"cell_type": "markdown",
"id": "b83d5a88-50ed-4ae4-85cf-8b895617496f",
"metadata": {},
"source": [
"### LangChain Tool"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "696c7dd6-660c-4797-909f-bf878b3acf93",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"type\": \"function\",\n",
" \"function\": {\n",
" \"name\": \"multiply\",\n",
" \"description\": \"Multiply two integers together.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"a\": {\n",
" \"description\": \"First integer\",\n",
" \"type\": \"integer\"\n",
" },\n",
" \"b\": {\n",
" \"description\": \"Second integer\",\n",
" \"type\": \"integer\"\n",
" }\n",
" },\n",
" \"required\": [\n",
" \"a\",\n",
" \"b\"\n",
" ]\n",
" }\n",
" }\n",
"}\n"
]
}
],
"source": [
"from typing import Any, Type\n",
"\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class MultiplySchema(BaseModel):\n",
" \"\"\"Multiply tool schema.\"\"\"\n",
"\n",
" a: int = Field(..., description=\"First integer\")\n",
" b: int = Field(..., description=\"Second integer\")\n",
"\n",
"\n",
"class Multiply(BaseTool):\n",
" args_schema: Type[BaseModel] = MultiplySchema\n",
" name: str = \"multiply\"\n",
" description: str = \"Multiply two integers together.\"\n",
"\n",
" def _run(self, a: int, b: int, **kwargs: Any) -> Any:\n",
" return a * b\n",
"\n",
"\n",
"# Note: we're passing in a Multiply object not the class itself.\n",
"print(json.dumps(convert_to_openai_tool(Multiply()), indent=2))"
]
},
{
"cell_type": "markdown",
"id": "04bda177-202f-4811-bb74-f3fa7094a15b",
"metadata": {},
"source": [
"## Binding functions\n",
"\n",
"Now that we've defined a function, we'll want to pass it in to our model."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "a5aa93a7-6859-43e8-be85-619d975b908c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_JvOu9oUwMrQHiDekZTbpNCHY', 'function': {'arguments': '{\\n \"a\": 5,\\n \"b\": 3\\n}', 'name': 'multiply'}, 'type': 'function'}]})"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"llm.invoke(\"what's 5 times three\", tools=[convert_to_openai_tool(multiply)])"
]
},
{
"cell_type": "markdown",
"id": "dd0e7365-32d0-46a3-b8f2-caf27d5d9262",
"metadata": {},
"source": [
"And if we wanted this function to be passed in every time we call the tool, we could bind it to the tool:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "87165d64-31a7-4332-965e-18fa939fda50",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_cwRoTnD1ux1SnWXLrTj2KlWH', 'function': {'arguments': '{\\n \"a\": 5,\\n \"b\": 3\\n}', 'name': 'multiply'}, 'type': 'function'}]})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tool = llm.bind(tools=[convert_to_openai_tool(multiply)])\n",
"llm_with_tool.invoke(\"what's 5 times three\")"
]
},
{
"cell_type": "markdown",
"id": "21b4d000-3828-4e32-9226-55119f47ee67",
"metadata": {},
"source": [
"We can also enforce that a tool is called using the [tool_choice](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tools) parameter."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "2daa354c-cc85-4a60-a9b2-b681ec22ca33",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_sWjLyioSZAtYMQRLMTzncz1v', 'function': {'arguments': '{\\n \"a\": 5,\\n \"b\": 4\\n}', 'name': 'multiply'}, 'type': 'function'}]})"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tool = llm.bind(\n",
" tools=[convert_to_openai_tool(multiply)],\n",
" tool_choice={\"type\": \"function\", \"function\": {\"name\": \"multiply\"}},\n",
")\n",
"llm_with_tool.invoke(\n",
" \"don't answer my question. no do answer my question. no don't. what's five times four\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ce013d11-49ea-4de9-8bbc-bc9ae203002c",
"metadata": {},
"source": [
"The [ChatOpenAI](https://api.python.langchain.com/en/latest/chat_models/langchain_openai.chat_models.base.ChatOpenAI.html#langchain_openai.chat_models.base.ChatOpenAI) class even comes with a `bind_tools` helper function that handles converting function-like objects to the OpenAI format and binding them for you:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "842c9914-ac28-428f-9fcc-556177e8e715",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_LCdBa4cbhMJPRdtkhDzpRh7x', 'function': {'arguments': '{\\n \"a\": 5,\\n \"b\": 3\\n}', 'name': 'multiply'}, 'type': 'function'}]})"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_tool = llm.bind_tools([multiply], tool_choice=\"multiply\")\n",
"llm_with_tool.invoke(\"what's 5 times three\")"
]
},
{
"cell_type": "markdown",
"id": "7d6e22d8-9f33-4845-9364-0d276df35ff5",
"metadata": {},
"source": [
"## Legacy args `functions` and `function_call`\n",
"\n",
"Until Fall of 2023 the OpenAI API expected arguments `functions` and `funtion_call` instead of `tools` and `tool_choice`, and they had a slightly different format than `tools` and `tool_choice`. LangChain maintains utilities for using the old API if you need to use that as well:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "a317f71e-177e-404b-b09c-8fb365a4d8a2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'name': 'multiply',\n",
" 'description': 'Multiply two integers together.',\n",
" 'parameters': {'type': 'object',\n",
" 'properties': {'a': {'description': 'First integer', 'type': 'integer'},\n",
" 'b': {'description': 'Second integer', 'type': 'integer'}},\n",
" 'required': ['a', 'b']}}"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.utils.function_calling import convert_to_openai_function\n",
"\n",
"convert_to_openai_function(multiply)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "dd124259-75e2-4704-9f57-824d3e463bfa",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\\n \"a\": 3,\\n \"b\": 1000000\\n}', 'name': 'multiply'}})"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_functions = llm.bind(\n",
" functions=[convert_to_openai_function(multiply)], function_call={\"name\": \"multiply\"}\n",
")\n",
"llm_with_functions.invoke(\"what's 3 times a million\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "d9a90af9-1c81-4ace-b155-1589f7308a1c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\\n \"a\": 3,\\n \"b\": 1000000\\n}', 'name': 'multiply'}})"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_functions = llm.bind_functions([multiply], function_call=\"multiply\")\n",
"llm_with_functions.invoke(\"what's 3 times a million\")"
]
},
{
"cell_type": "markdown",
"id": "7779808d-d75c-4d76-890d-ba8c6c571514",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"* **Output parsing**: See [OpenAI Tools output parsers](/docs/modules/model_io/output_parsers/types/openai_tools) and [OpenAI Functions output parsers](/docs/modules/model_io/output_parsers/types/openai_functions) to learn about extracting the function calling API responses into various formats.\n",
"* **Tool use**: See how to construct chains and agents that actually call the invoked tools in [these guides](/docs/use_cases/tool_use/)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv",
"language": "python",
"name": "poetry-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -24,5 +24,6 @@ We have several how-to guides for more advanced usage of LLMs.
This includes:
- [How to cache ChatModel responses](./chat_model_caching)
- [How to use ChatModels that support function calling](./function_calling)
- [How to stream responses from a ChatModel](./streaming)
- [How to track token usage in a ChatModel call](./token_usage_tracking)

@ -32,7 +32,9 @@ LangChain has lots of different types of output parsers. This is a list of outpu
| Name | Supports Streaming | Has Format Instructions | Calls LLM | Input Type | Output Type | Description |
|-----------------|--------------------|-------------------------------|-----------|----------------------------------|----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [OpenAIFunctions](./types/openai_functions) | ✅ | (Passes `functions` to model) | | `Message` (with `function_call`) | JSON object | Uses OpenAI function calling to structure the return output. If you are using a model that supports function calling, this is generally the most reliable method. |
| [OpenAITools](./types/openai_tools) | | (Passes `tools` to model) | | `Message` (with `tool_choice`) | JSON object | Uses latest OpenAI function calling args `tools` and `tool_choice` to structure the return output. If you are using a model that supports function calling, this is generally the most reliable method. |
| [OpenAIFunctions](./types/openai_functions) | ✅ | (Passes `functions` to model) | | `Message` (with `function_call`) | JSON object | Uses legacy OpenAI function calling args `functions` and `function_call` to structure the return output. |
| [JSON](./types/json) | ✅ | ✅ | | `str \| Message` | JSON object | Returns a JSON object as specified. You can specify a Pydantic model and it will return JSON for that model. Probably the most reliable output parser for getting structured data that does NOT use function calling. |
| [XML](./types/xml) | ✅ | ✅ | | `str \| Message` | `dict` | Returns a dictionary of tags. Use when XML output is needed. Use with models that are good at writing XML (like Anthropic's). |
| [CSV](./types/csv) | ✅ | ✅ | | `str \| Message` | `List[str]` | Returns a list of comma separated values. |

@ -0,0 +1,385 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "bcbe5c87",
"metadata": {},
"source": [
"# OpenAI Tools\n",
"\n",
"These output parsers extract tool calls from OpenAI's function calling API responses. This means they are only usable with models that support function calling, and specifically the latest `tools` and `tool_choice` parameters. We recommend familiarizing yourself with [function calling](/docs/modules/model_io/chat/function_calling) before reading this gu\n",
"\n",
"There are a few different variants of output parsers:\n",
"\n",
"- [JsonOutputToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_tools.JsonOutputToolsParser.html#langchain.output_parsers.openai_tools.JsonOutputToolsParser): Returns the arguments of the function call as JSON\n",
"- [JsonOutputKeyToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_tools.JsonOutputKeyToolsParser.html#langchain.output_parsers.openai_tools.JsonOutputKeyToolsParser): Returns the value of specific key in the function call as JSON\n",
"- [PydanticToolsParser](https://api.python.langchain.com/en/latest/output_parsers/langchain.output_parsers.openai_tools.PydanticToolsParser.html#langchain.output_parsers.openai_tools.PydanticToolsParser): Returns the arguments of the function call as a Pydantic Model"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "aac4262b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.pydantic_v1 import BaseModel, Field, validator\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "52cb351d",
"metadata": {},
"outputs": [],
"source": [
"class Joke(BaseModel):\n",
" \"\"\"Joke to tell user.\"\"\"\n",
"\n",
" setup: str = Field(description=\"question to set up a joke\")\n",
" punchline: str = Field(description=\"answer to resolve the joke\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "2c3259c4",
"metadata": {},
"outputs": [],
"source": [
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0).bind_tools([Joke])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "75c33a76-ead8-43aa-ba18-c1822c38cfa9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'type': 'function',\n",
" 'function': {'name': 'Joke',\n",
" 'description': 'Joke to tell user.',\n",
" 'parameters': {'type': 'object',\n",
" 'properties': {'setup': {'description': 'question to set up a joke',\n",
" 'type': 'string'},\n",
" 'punchline': {'description': 'answer to resolve the joke',\n",
" 'type': 'string'}},\n",
" 'required': ['setup', 'punchline']}}}]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.kwargs[\"tools\"]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d3e9007c",
"metadata": {},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", \"You are helpful assistant\"), (\"user\", \"{input}\")]\n",
")"
]
},
{
"cell_type": "markdown",
"id": "87680951",
"metadata": {},
"source": [
"## JsonOutputToolsParser"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "cb065bdd",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_tools import JsonOutputToolsParser"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "6ff758c8",
"metadata": {},
"outputs": [],
"source": [
"parser = JsonOutputToolsParser()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "27a3acd1",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model | parser"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "59b59179",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'type': 'Joke',\n",
" 'args': {'setup': \"Why don't scientists trust atoms?\",\n",
" 'punchline': 'Because they make up everything!'}}]"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"tell me a joke\"})"
]
},
{
"cell_type": "markdown",
"id": "0f093b2b-ffd1-47b7-9221-b4265ae52701",
"metadata": {},
"source": [
"To include the tool call id we can specify `return_id=True`:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "d43fd620-dcdc-4ad0-a3a9-e7d2d71d6e68",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'type': 'Joke',\n",
" 'args': {'setup': \"Why don't scientists trust atoms?\",\n",
" 'punchline': 'Because they make up everything!'},\n",
" 'id': 'call_Isuoh0RTeQzzOKGg5QlQ7UqI'}]"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"parser = JsonOutputToolsParser(return_id=True)\n",
"chain = prompt | model | parser\n",
"chain.invoke({\"input\": \"tell me a joke\"})"
]
},
{
"cell_type": "markdown",
"id": "7ca55ac9",
"metadata": {},
"source": [
"## JsonOutputKeyToolsParser\n",
"\n",
"This merely extracts a single key from the returned response. This is useful for when you are passing in a single tool and just want it's arguments."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "f8bc404e",
"metadata": {},
"outputs": [],
"source": [
"from typing import List\n",
"\n",
"from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "c91c5949",
"metadata": {},
"outputs": [],
"source": [
"parser = JsonOutputKeyToolsParser(key_name=\"Joke\")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "b4583baf",
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model | parser"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "e8b766ff",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'setup': \"Why don't scientists trust atoms?\",\n",
" 'punchline': 'Because they make up everything!'}]"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"tell me a joke\"})"
]
},
{
"cell_type": "markdown",
"id": "fc5695c5-451f-482f-bde6-462d85f1a93e",
"metadata": {},
"source": [
"Certain models can return multiple tool invocations each call, so by default the output is a list. If we just want to return the first tool invocation, we can specify `return_single=True`"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "b1f3097a-5040-435e-9e26-bbdf9506aead",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'setup': \"Why don't scientists trust atoms?\",\n",
" 'punchline': 'Because they make up everything!'}"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"parser = JsonOutputKeyToolsParser(key_name=\"Joke\", return_single=True)\n",
"chain = prompt | model | parser\n",
"chain.invoke({\"input\": \"tell me a joke\"})"
]
},
{
"cell_type": "markdown",
"id": "941a3d4e",
"metadata": {},
"source": [
"## PydanticToolsParser\n",
"\n",
"This builds on top of `JsonOutputToolsParser` but passes the results to a Pydantic Model. This allows for further validation should you choose."
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "f51823fe",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_tools import PydanticToolsParser"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "3c6a5e4d",
"metadata": {},
"outputs": [],
"source": [
"class Joke(BaseModel):\n",
" \"\"\"Joke to tell user.\"\"\"\n",
"\n",
" setup: str = Field(description=\"question to set up a joke\")\n",
" punchline: str = Field(description=\"answer to resolve the joke\")\n",
"\n",
" # You can add custom validation logic easily with Pydantic.\n",
" @validator(\"setup\")\n",
" def question_ends_with_question_mark(cls, field):\n",
" if field[-1] != \"?\":\n",
" raise ValueError(\"Badly formed question!\")\n",
" return field\n",
"\n",
"\n",
"parser = PydanticToolsParser(tools=[Joke])"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "d2bbd54f",
"metadata": {},
"outputs": [],
"source": [
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0).bind_tools([Joke])\n",
"chain = prompt | model | parser"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "db1a06e8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Joke(setup=\"Why don't scientists trust atoms?\", punchline='Because they make up everything!')]"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({\"input\": \"tell me a joke\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -67,7 +67,7 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 2,
"id": "0221fdfd-2a18-4449-a123-e6b0b15bb3d9",
"metadata": {},
"outputs": [
@ -77,7 +77,7 @@
"[{'type': 'count_emails', 'args': {'last_n_days': 5}, 'output': 10}]"
]
},
"execution_count": 25,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@ -86,7 +86,6 @@
"from operator import itemgetter\n",
"\n",
"from langchain.output_parsers import JsonOutputToolsParser\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_tool\n",
"from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
@ -105,9 +104,7 @@
"\n",
"\n",
"tools = [count_emails, send_email]\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0).bind(\n",
" tools=[format_tool_to_openai_tool(t) for t in tools]\n",
")\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0).bind_tools(tools)\n",
"\n",
"\n",
"def call_tool(tool_invocation: dict) -> Runnable:\n",

@ -128,7 +128,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 5,
"id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2",
"metadata": {},
"outputs": [],
@ -137,9 +137,6 @@
"from typing import Union\n",
"\n",
"from langchain.output_parsers import JsonOutputToolsParser\n",
"from langchain_community.tools.convert_to_openai import (\n",
" format_tool_to_openai_tool,\n",
")\n",
"from langchain_core.runnables import (\n",
" Runnable,\n",
" RunnableLambda,\n",
@ -150,7 +147,7 @@
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"tools = [multiply, exponentiate, add]\n",
"model_with_tools = model.bind(tools=[format_tool_to_openai_tool(t) for t in tools])\n",
"model_with_tools = model.bind_tools(tools)\n",
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",

@ -65,7 +65,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 1,
"id": "e13ec98c-8521-4d63-b521-caf92da87b70",
"metadata": {},
"outputs": [],
@ -103,7 +103,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 2,
"id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2",
"metadata": {},
"outputs": [],
@ -112,9 +112,6 @@
"from typing import Union\n",
"\n",
"from langchain.output_parsers import JsonOutputToolsParser\n",
"from langchain_community.tools.convert_to_openai import (\n",
" format_tool_to_openai_tool,\n",
")\n",
"from langchain_core.runnables import (\n",
" Runnable,\n",
" RunnableLambda,\n",
@ -125,7 +122,7 @@
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")\n",
"tools = [multiply, exponentiate, add]\n",
"model_with_tools = model.bind(tools=[format_tool_to_openai_tool(t) for t in tools])\n",
"model_with_tools = model.bind_tools(tools)\n",
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",

@ -146,7 +146,7 @@
"![chain](../../../static/img/tool_chain.svg)\n",
"\n",
"### Function calling\n",
"One of the most reliable ways to use tools with LLMs is with function calling APIs (also sometimes called tool calling or parallel function calling). This only works with models that explicitly support function calling, like OpenAI models.\n",
"One of the most reliable ways to use tools with LLMs is with function calling APIs (also sometimes called tool calling or parallel function calling). This only works with models that explicitly support function calling, like OpenAI models. To learn more head to the [function calling guide](/docs/modules/model_io/chat/function_calling).\n",
"\n",
"First we'll define our model and tools. We'll start with just a single tool, `multiply`."
]
@ -168,13 +168,23 @@
"id": "c22e6f0f-c5ad-4c0f-9514-e626704ea51c",
"metadata": {},
"source": [
"Next we'll convert our LangChain Tool to an OpenAI format JSONSchema function, and bind this as the `tools` argument to be passed to all ChatOpenAI calls. Since we only have a single Tool and in this initial chain we want to make sure it's always used, we'll also specify `tool_choice`. See the [OpenAI chat API reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice) for more on these parameters."
"Next we'll convert our LangChain Tool to an OpenAI format JSONSchema function, and bind this as the `tools` argument to be passed to all ChatOpenAI calls. Since we only have a single Tool and in this initial chain we want to make sure it's always used, we'll also specify `tool_choice`. See the [OpenAI chat API reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice) for more on these parameters:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2babd759-bccd-4d50-95ad-365a07347926",
"id": "3bfe2cdc-7d72-457c-a9a1-5fa1e0bcde55",
"metadata": {},
"outputs": [],
"source": [
"model_with_tools = model.bind_tools([multiply], tool_choice=\"multiply\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "19f6285f-d8b1-432c-8c07-f7aee3fc0fa4",
"metadata": {},
"outputs": [
{
@ -183,39 +193,40 @@
"[{'type': 'function',\n",
" 'function': {'name': 'multiply',\n",
" 'description': 'multiply(first_int: int, second_int: int) -> int - Multiply two integers together.',\n",
" 'parameters': {'title': 'multiplySchemaSchema',\n",
" 'type': 'object',\n",
" 'properties': {'first_int': {'title': 'First Int', 'type': 'integer'},\n",
" 'second_int': {'title': 'Second Int', 'type': 'integer'}},\n",
" 'parameters': {'type': 'object',\n",
" 'properties': {'first_int': {'type': 'integer'},\n",
" 'second_int': {'type': 'integer'}},\n",
" 'required': ['first_int', 'second_int']}}}]"
]
},
"execution_count": 5,
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_community.tools.convert_to_openai import (\n",
" format_tool_to_openai_tool,\n",
")\n",
"\n",
"formatted_tools = [format_tool_to_openai_tool(multiply)]\n",
"formatted_tools"
"model_with_tools.kwargs[\"tools\"]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "3bfe2cdc-7d72-457c-a9a1-5fa1e0bcde55",
"execution_count": 8,
"id": "340c1b04-38cb-4467-83ca-8aa2b59176d8",
"metadata": {},
"outputs": [],
"outputs": [
{
"data": {
"text/plain": [
"{'type': 'function', 'function': {'name': 'multiply'}}"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_with_tools = model.bind(\n",
" tools=formatted_tools,\n",
" # We specify tool_choice to enforce that the 'multiply' function is called by the model.\n",
" tool_choice={\"type\": \"function\", \"function\": {\"name\": \"multiply\"}},\n",
")"
"model_with_tools.kwargs[\"tool_choice\"]"
]
},
{

@ -69,7 +69,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "1d20604e-c4d1-4d21-841b-23e4f61aec36",
"metadata": {},
"outputs": [],
@ -92,13 +92,12 @@
"outputs": [],
"source": [
"# Define model and bind tool\n",
"from langchain_community.tools.convert_to_openai import format_tool_to_openai_tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"model_with_tools = model.bind(\n",
" tools=[format_tool_to_openai_tool(complex_tool)],\n",
" tool_choice={\"type\": \"function\", \"function\": {\"name\": \"complex_tool\"}},\n",
"model_with_tools = model.bind_tools(\n",
" [complex_tool],\n",
" tool_choice=\"complex_tool\",\n",
")"
]
},
@ -259,9 +258,8 @@
" | JsonOutputKeyToolsParser(key_name=\"complex_tool\", return_single=True)\n",
" | complex_tool\n",
")\n",
"better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind(\n",
" tools=[format_tool_to_openai_tool(complex_tool)],\n",
" tool_choice={\"type\": \"function\", \"function\": {\"name\": \"complex_tool\"}},\n",
"better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind_tools(\n",
" [complex_tool], tool_choice=\"complex_tool\"\n",
")\n",
"better_chain = (\n",
" better_model\n",

@ -1,38 +1,6 @@
from langchain_core.tools import BaseTool
from langchain_community.utils.openai_functions import (
FunctionDescription,
ToolDescription,
convert_pydantic_to_openai_function,
from langchain_core.utils.function_calling import (
format_tool_to_openai_function,
format_tool_to_openai_tool,
)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(
tool.args_schema, name=tool.name, description=tool.description
)
else:
return {
"name": tool.name,
"description": tool.description,
"parameters": {
# This is a hack to get around the fact that some tools
# do not expose an args_schema, and expect an argument
# which is a string.
# And Open AI does not support an array type for the
# parameters.
"properties": {
"__arg1": {"title": "__arg1", "type": "string"},
},
"required": ["__arg1"],
"type": "object",
},
}
def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {"type": "function", "function": function}
__all__ = ["format_tool_to_openai_function", "format_tool_to_openai_tool"]

@ -1,44 +1,6 @@
"""Different methods for rendering Tools to be passed to LLMs.
Depending on the LLM you are using and the prompting strategy you are using,
you may want Tools to be rendered in a different way.
This module contains various ways to render tools.
"""
from langchain_core.tools import BaseTool
from langchain_community.utils.openai_functions import (
FunctionDescription,
ToolDescription,
convert_pydantic_to_openai_function,
from langchain_core.utils.function_calling import (
format_tool_to_openai_function,
format_tool_to_openai_tool,
)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(
tool.args_schema, name=tool.name, description=tool.description
)
else:
return {
"name": tool.name,
"description": tool.description,
"parameters": {
# This is a hack to get around the fact that some tools
# do not expose an args_schema, and expect an argument
# which is a string.
# And Open AI does not support an array type for the
# parameters.
"properties": {
"__arg1": {"title": "__arg1", "type": "string"},
},
"required": ["__arg1"],
"type": "object",
},
}
def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
"""Format tool into the OpenAI function API."""
function = format_tool_to_openai_function(tool)
return {"type": "function", "function": function}
__all__ = ["format_tool_to_openai_function", "format_tool_to_openai_tool"]

@ -1,7 +1,9 @@
"""Methods for creating function specs in the style of OpenAI Functions"""
from __future__ import annotations
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
@ -16,12 +18,16 @@ from typing import (
from typing_extensions import TypedDict
from langchain_core._api import deprecated
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.utils.json_schema import dereference_refs
if TYPE_CHECKING:
from langchain_core.tools import BaseTool
PYTHON_TO_JSON_TYPES = {
"str": "string",
"int": "number",
"int": "integer",
"float": "number",
"bool": "boolean",
}
@ -45,22 +51,47 @@ class ToolDescription(TypedDict):
function: FunctionDescription
def _rm_titles(kv: dict) -> dict:
new_kv = {}
for k, v in kv.items():
if k == "title":
continue
elif isinstance(v, dict):
new_kv[k] = _rm_titles(v)
else:
new_kv[k] = v
return new_kv
@deprecated(
"0.1.16",
alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
removal="0.2.0",
)
def convert_pydantic_to_openai_function(
model: Type[BaseModel],
*,
name: Optional[str] = None,
description: Optional[str] = None,
rm_titles: bool = True,
) -> FunctionDescription:
"""Converts a Pydantic model to a function description for the OpenAI API."""
schema = dereference_refs(model.schema())
schema.pop("definitions", None)
title = schema.pop("title", "")
default_description = schema.pop("description", "")
return {
"name": name or schema["title"],
"description": description or schema["description"],
"parameters": schema,
"name": name or title,
"description": description or default_description,
"parameters": _rm_titles(schema) if rm_titles else schema,
}
@deprecated(
"0.1.16",
alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
removal="0.2.0",
)
def convert_pydantic_to_openai_tool(
model: Type[BaseModel],
*,
@ -132,8 +163,19 @@ def _get_python_function_arguments(function: Callable, arg_descriptions: dict) -
# Mypy error:
# "type" has no attribute "schema"
properties[arg] = arg_type.schema() # type: ignore[attr-defined]
elif arg_type.__name__ in PYTHON_TO_JSON_TYPES:
elif (
hasattr(arg_type, "__name__")
and getattr(arg_type, "__name__") in PYTHON_TO_JSON_TYPES
):
properties[arg] = {"type": PYTHON_TO_JSON_TYPES[arg_type.__name__]}
elif (
hasattr(arg_type, "__dict__")
and getattr(arg_type, "__dict__").get("__origin__", None) == Literal
):
properties[arg] = {
"enum": list(arg_type.__args__), # type: ignore
"type": PYTHON_TO_JSON_TYPES[arg_type.__args__[0].__class__.__name__], # type: ignore
}
if arg in arg_descriptions:
if arg not in properties:
properties[arg] = {}
@ -153,6 +195,11 @@ def _get_python_function_required_args(function: Callable) -> List[str]:
return required
@deprecated(
"0.1.16",
alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
removal="0.2.0",
)
def convert_python_function_to_openai_function(
function: Callable,
) -> Dict[str, Any]:
@ -174,8 +221,49 @@ def convert_python_function_to_openai_function(
}
@deprecated(
    "0.1.16",
    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
    removal="0.2.0",
)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
    """Format tool into the OpenAI function API.

    Args:
        tool: The BaseTool to convert.

    Returns:
        A FunctionDescription dict with ``name``, ``description`` and
        ``parameters`` keys, suitable for the OpenAI function-calling API.
    """
    if tool.args_schema:
        # The tool declares a pydantic args schema: derive the JSON-schema
        # parameters from it, using the tool's own name and description.
        return convert_pydantic_to_openai_function(
            tool.args_schema, name=tool.name, description=tool.description
        )
    else:
        # No args schema: fall back to a single required string argument.
        return {
            "name": tool.name,
            "description": tool.description,
            "parameters": {
                # This is a hack to get around the fact that some tools
                # do not expose an args_schema, and expect an argument
                # which is a string.
                # And Open AI does not support an array type for the
                # parameters.
                "properties": {
                    "__arg1": {"title": "__arg1", "type": "string"},
                },
                "required": ["__arg1"],
                "type": "object",
            },
        }
@deprecated(
    "0.1.16",
    alternative="langchain_core.utils.function_calling.convert_to_openai_function()",
    removal="0.2.0",
)
def format_tool_to_openai_tool(tool: BaseTool) -> ToolDescription:
    """Wrap a tool's OpenAI function description in the OpenAI tool envelope."""
    return {
        "type": "function",
        "function": format_tool_to_openai_function(tool),
    }
def convert_to_openai_function(
function: Union[Dict[str, Any], Type[BaseModel], Callable],
function: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
) -> Dict[str, Any]:
"""Convert a raw function/class to an OpenAI function.
@ -188,15 +276,38 @@ def convert_to_openai_function(
A dict version of the passed in function which is compatible with the
OpenAI function-calling API.
"""
from langchain_core.tools import BaseTool
if isinstance(function, dict):
return function
elif isinstance(function, type) and issubclass(function, BaseModel):
return cast(Dict, convert_pydantic_to_openai_function(function))
elif isinstance(function, BaseTool):
return format_tool_to_openai_function(function)
elif callable(function):
return convert_python_function_to_openai_function(function)
else:
raise ValueError(
f"Unsupported function type {type(function)}. Functions must be passed in"
f" as Dict, pydantic.BaseModel, or Callable."
)
def convert_to_openai_tool(
    tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool],
) -> Dict[str, Any]:
    """Convert a raw function/class to an OpenAI tool.

    Args:
        tool: Either a dictionary, a pydantic.BaseModel class, Python function,
            or BaseTool. If a dictionary is passed in, it is assumed to already
            be a valid OpenAI tool or OpenAI function.

    Returns:
        A dict version of the passed in tool which is compatible with the
        OpenAI tool-calling API.
    """
    # A dict carrying a "type" key is assumed to already be in tool format.
    already_a_tool = isinstance(tool, dict) and "type" in tool
    if already_a_tool:
        return tool
    return {"type": "function", "function": convert_to_openai_function(tool)}

@ -0,0 +1,74 @@
from typing import Any, Callable, Literal, Type
import pytest
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
@pytest.fixture()
def pydantic() -> Type[BaseModel]:
    # Pydantic-model flavor of the dummy function. NOTE: the class name and
    # the docstring are runtime-significant — they become the converted
    # function's "name" and "description" respectively.
    class dummy_function(BaseModel):
        """dummy function"""

        arg1: int = Field(..., description="foo")
        arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")

    return dummy_function
@pytest.fixture()
def function() -> Callable:
    # Plain-function flavor of the dummy function. The docstring (summary plus
    # Google-style Args section) is runtime-significant: it supplies the
    # converted function's description and per-argument descriptions.
    def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None:
        """dummy function

        Args:
            arg1: foo
            arg2: one of 'bar', 'baz'
        """
        pass

    return dummy_function
@pytest.fixture()
def tool() -> BaseTool:
    # BaseTool flavor of the dummy function: "name"/"description" come from
    # the tool's fields and the parameter schema from its args_schema.
    class Schema(BaseModel):
        arg1: int = Field(..., description="foo")
        arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")

    class DummyFunction(BaseTool):
        # args_schema drives the generated "parameters" JSON schema.
        args_schema: Type[BaseModel] = Schema
        name: str = "dummy_function"
        description: str = "dummy function"

        def _run(self, *args: Any, **kwargs: Any) -> Any:
            pass

    return DummyFunction()
def test_convert_to_openai_function(
    pydantic: Type[BaseModel], function: Callable, tool: BaseTool
) -> None:
    """Every supported input kind converts to the same OpenAI function dict."""
    expected = {
        "name": "dummy_function",
        "description": "dummy function",
        "parameters": {
            "type": "object",
            "properties": {
                "arg1": {"description": "foo", "type": "integer"},
                "arg2": {
                    "description": "one of 'bar', 'baz'",
                    "enum": ["bar", "baz"],
                    "type": "string",
                },
            },
            "required": ["arg1", "arg2"],
        },
    }
    # A dict that is already a valid OpenAI function passes through unchanged,
    # so `expected` itself is also a valid input.
    for candidate in (pydantic, function, tool, expected):
        assert convert_to_openai_function(candidate) == expected  # type: ignore

@ -5,13 +5,13 @@ from json import JSONDecodeError
from time import sleep
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
from langchain_community.tools.convert_to_openai import format_tool_to_openai_tool
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import CallbackManager
from langchain_core.load import dumpd
from langchain_core.pydantic_v1 import Field
from langchain_core.runnables import RunnableConfig, RunnableSerializable, ensure_config
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
if TYPE_CHECKING:
import openai
@ -180,16 +180,10 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
OpenAIAssistantRunnable configured to run using the created assistant.
"""
client = client or _get_openai_client()
openai_tools: List = []
for tool in tools:
oai_tool = (
tool if isinstance(tool, dict) else format_tool_to_openai_tool(tool)
)
openai_tools.append(oai_tool)
assistant = client.beta.assistants.create(
name=name,
instructions=instructions,
tools=openai_tools,
tools=[convert_to_openai_tool(tool) for tool in tools],
model=model,
)
return cls(assistant_id=assistant.id, client=client, **kwargs)

@ -1,7 +1,6 @@
"""Module implements an agent that uses OpenAI's APIs function enabled API."""
from typing import Any, List, Optional, Sequence, Tuple, Type, Union
from langchain_community.tools.convert_to_openai import format_tool_to_openai_function
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackManager, Callbacks
@ -20,6 +19,7 @@ from langchain_core.prompts.chat import (
from langchain_core.pydantic_v1 import root_validator
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.agents import BaseSingleActionAgent
from langchain.agents.format_scratchpad.openai_functions import (
@ -71,7 +71,7 @@ class OpenAIFunctionsAgent(BaseSingleActionAgent):
@property
def functions(self) -> List[dict]:
return [dict(format_tool_to_openai_function(t)) for t in self.tools]
return [dict(convert_to_openai_function(t)) for t in self.tools]
def plan(
self,
@ -303,9 +303,7 @@ def create_openai_functions_agent(
"Prompt must have input variable `agent_scratchpad`, but wasn't found. "
f"Found {prompt.input_variables} instead."
)
llm_with_tools = llm.bind(
functions=[format_tool_to_openai_function(t) for t in tools]
)
llm_with_tools = llm.bind(functions=[convert_to_openai_function(t) for t in tools])
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_function_messages(

@ -1,10 +1,10 @@
from typing import Sequence

# Stale langchain_community `format_tool_to_openai_tool` import dropped:
# `convert_to_openai_tool` from langchain_core supersedes it.
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
@ -82,9 +82,7 @@ def create_openai_tools_agent(
if missing_vars:
raise ValueError(f"Prompt missing required variables: {missing_vars}")
# Bind tools via the OpenAI `tools` request parameter. The old duplicate
# assignment used the removed `format_tool_to_openai_tool` name; keep only
# the consolidated converter form.
llm_with_tools = llm.bind(tools=[convert_to_openai_tool(tool) for tool in tools])
agent = (
RunnablePassthrough.assign(

@ -1,4 +1,4 @@
# Re-export shim: `format_tool_to_openai_function` now lives in
# langchain_core.utils.function_calling; the conflicting langchain_community
# import left by the merge is removed.
from langchain_core.utils.function_calling import format_tool_to_openai_function

# For backwards compatibility
__all__ = ["format_tool_to_openai_function"]

@ -7,11 +7,11 @@ This module contains various ways to render tools.
from typing import List

from langchain_core.tools import BaseTool

# For backwards compatibility: these converters moved to langchain_core; the
# merged text left an unclosed `from langchain_community… import (` line,
# which is removed here.
from langchain_core.utils.function_calling import (
    format_tool_to_openai_function,
    format_tool_to_openai_tool,
)
__all__ = [
"render_text_description",

@ -15,13 +15,10 @@ def test_convert_pydantic_to_openai_function() -> None:
"name": "Data",
"description": "The data to return.",
"parameters": {
"title": "Data",
"description": "The data to return.",
"type": "object",
"properties": {
"key": {"title": "Key", "description": "API key", "type": "string"},
"key": {"description": "API key", "type": "string"},
"days": {
"title": "Days",
"description": "Number of days to forecast",
"default": 0,
"type": "integer",
@ -50,22 +47,17 @@ def test_convert_pydantic_to_openai_function_nested() -> None:
"name": "Model",
"description": "The model to return.",
"parameters": {
"title": "Model",
"description": "The model to return.",
"type": "object",
"properties": {
"data": {
"title": "Data",
"description": "The data to return.",
"type": "object",
"properties": {
"key": {
"title": "Key",
"description": "API key",
"type": "string",
},
"days": {
"title": "Days",
"description": "Number of days to forecast",
"default": 0,
"type": "integer",

@ -12,6 +12,7 @@ from typing import (
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Sequence,
@ -52,11 +53,15 @@ from langchain_core.messages import (
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import (
get_from_dict_or_env,
get_pydantic_field_names,
)
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_core.utils.function_calling import (
convert_to_openai_function,
convert_to_openai_tool,
)
logger = logging.getLogger(__name__)
@ -626,12 +631,18 @@ class ChatOpenAI(BaseChatModel):
def bind_functions(
self,
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],
functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
function_call: Optional[str] = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind functions (and other objects) to this chat model.
Assumes model is compatible with OpenAI function-calling API.
NOTE: Using bind_tools is recommended instead, as the `functions` and
`function_call` request parameters are officially marked as deprecated by
OpenAI.
Args:
functions: A list of function definitions to bind to this chat model.
Can be a dictionary, pydantic model, or callable. Pydantic
@ -663,3 +674,51 @@ class ChatOpenAI(BaseChatModel):
functions=formatted_functions,
**kwargs,
)
def bind_tools(
    self,
    tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
    tool_choice: Optional[Union[dict, str, Literal["auto", "none"]]] = None,
    **kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
    """Bind tool-like objects to this chat model.

    Assumes the model supports the OpenAI tool-calling API.

    Args:
        tools: Tool definitions to bind to this chat model. Each may be a
            dictionary, pydantic model, callable, or BaseTool; non-dicts are
            converted to their OpenAI tool-schema dict representation.
        tool_choice: Which tool to require the model to call. Either the name
            of the single provided tool, "auto" to let the model decide
            (if any), or a dict of the form:
            {"type": "function", "function": {"name": <<tool_name>>}}.
        kwargs: Any additional parameters to pass to the
            :class:`~langchain.runnable.Runnable` constructor.
    """
    oai_tools = [convert_to_openai_tool(t) for t in tools]
    if tool_choice is not None:
        # A bare tool name is shorthand for the full tool_choice dict;
        # the sentinel strings "auto"/"none" pass through unchanged.
        if isinstance(tool_choice, str) and tool_choice not in ("auto", "none"):
            tool_choice = {"type": "function", "function": {"name": tool_choice}}
        if isinstance(tool_choice, dict):
            # Forcing a specific tool only makes sense when exactly one tool
            # is bound, and its name must match the forced one.
            if len(oai_tools) != 1:
                raise ValueError(
                    "When specifying `tool_choice`, you must provide exactly one "
                    f"tool. Received {len(oai_tools)} tools."
                )
            bound_name = oai_tools[0]["function"]["name"]
            if bound_name != tool_choice["function"]["name"]:
                raise ValueError(
                    f"Tool choice {tool_choice} was specified, but the only "
                    f"provided tool was {bound_name}."
                )
        kwargs["tool_choice"] = tool_choice
    return super().bind(
        tools=oai_tools,
        **kwargs,
    )

Loading…
Cancel
Save