Update example cookbook for Anthropic tool use (#20029)

pull/20030/head
Lance Martin 1 month ago committed by GitHub
parent 3856dedff4
commit e76b9210dd

@@ -60,7 +60,9 @@
"cell_type": "code",
"execution_count": null,
"id": "9caa2aaf-1918-4a8a-982d-f8052b92ed44",
"metadata": {},
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
@@ -92,18 +94,18 @@
},
{
"cell_type": "code",
"execution_count": 197,
"execution_count": 2,
"id": "9025bfdc-6060-4042-9a61-4e361dda7087",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'text': \"<thinking>\\n- To answer this request, we need to use the `code` function, which takes 3 required parameters: `prefix`, `imports`, and `code`.\\n- The user has provided enough context to generate the full code snippet and explanation, so we have values for all the required parameters:\\n - `prefix`: A brief description of what the code does\\n - `imports`: No imports needed for this simple program \\n - `code`: A Python print statement to print 'hello world'\\n</thinking>\",\n",
"{'text': \"<thinking>\\nThe tool 'code' is relevant for writing a Python program to print a string.\\n\\nTo use the 'code' tool, I need values for these required parameters:\\nprefix: A description of the problem and approach. I can provide this based on the request.\\nimports: The import statements needed for the code. For this simple program, no imports are needed, so I can leave this blank.\\ncode: The actual Python code, not including imports. I can write a simple print statement to output the string.\\n\\nI have all the required parameters, so I can proceed with calling the 'code' tool.\\n</thinking>\",\n",
" 'type': 'text'}"
]
},
"execution_count": 197,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -115,7 +117,7 @@
},
{
"cell_type": "code",
"execution_count": 198,
"execution_count": 3,
"id": "2393d9b6-67a2-41ea-ac01-dc038b4800f5",
"metadata": {},
"outputs": [
@@ -124,14 +126,14 @@
"text/plain": [
"{'text': None,\n",
" 'type': 'tool_use',\n",
" 'id': 'toolu_012Dsij8yn87daCuGPpgkwas',\n",
" 'id': 'toolu_01UwZVQub6vL36wiBww6CU7a',\n",
" 'name': 'code',\n",
" 'input': {'prefix': \"This Python program prints the string 'hello world' to the console.\",\n",
" 'input': {'prefix': \"To print the string 'hello world' in Python:\",\n",
" 'imports': '',\n",
" 'code': \"print('hello world')\"}}"
]
},
"execution_count": 198,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@@ -143,19 +145,19 @@
},
{
"cell_type": "code",
"execution_count": 199,
"execution_count": 4,
"id": "f4f390ac-fbda-4173-892a-ffd12844228c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'prefix': \"This Python program prints the string 'hello world' to the console.\",\n",
"{'prefix': \"To print the string 'hello world' in Python:\",\n",
" 'imports': '',\n",
" 'code': \"print('hello world')\"}"
]
},
"execution_count": 199,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -167,7 +169,7 @@
},
{
"cell_type": "code",
"execution_count": 200,
"execution_count": 5,
"id": "ba77d0f8-f79b-4656-9023-085ffdaf35f5",
"metadata": {},
"outputs": [],
@@ -179,7 +181,7 @@
},
{
"cell_type": "code",
"execution_count": 201,
"execution_count": 6,
"id": "cd854451-68d7-43df-bcae-4f3c3565536a",
"metadata": {},
"outputs": [],
@@ -190,17 +192,17 @@
},
{
"cell_type": "code",
"execution_count": 202,
"execution_count": 7,
"id": "47b3405f-0aea-460e-8603-f6092019fcd4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"This Python program prints the string 'hello world' to the console.\""
"\"To print the string 'hello world' in Python:\""
]
},
"execution_count": 202,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -211,7 +213,7 @@
},
{
"cell_type": "code",
"execution_count": 203,
"execution_count": 8,
"id": "85b16b62-1b72-4b6e-81fa-b1d707b728fa",
"metadata": {},
"outputs": [
@@ -221,7 +223,7 @@
"''"
]
},
"execution_count": 203,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -232,7 +234,7 @@
},
{
"cell_type": "code",
"execution_count": 204,
"execution_count": 9,
"id": "23857441-3e67-460c-b6be-b57cf0dd17ad",
"metadata": {},
"outputs": [
@@ -242,7 +244,7 @@
"\"print('hello world')\""
]
},
"execution_count": 204,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -278,7 +280,7 @@
},
{
"cell_type": "code",
"execution_count": 205,
"execution_count": 10,
"id": "97dd1b8c-724a-436a-88b1-b38204fc81f5",
"metadata": {},
"outputs": [],
@@ -310,18 +312,22 @@
"\n",
"`What if we want to enforce tool use?`\n",
"\n",
"We can use fallbacks."
"We can use fallbacks.\n",
"\n",
"Let's select a code gen prompt that -- from some of my testing -- does not correctly invoke the tool.\n",
"\n",
"We can see if we can correct from this."
]
},
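For reference, here is a minimal, self-contained sketch of the fallback-and-retry pattern that the cells below assemble piece by piece. It is an illustration under assumptions, not the notebook's exact code: the model name, the prompt text, and the `docs_text` context variable are placeholders.

from langchain_anthropic import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import RunnableLambda


class code(BaseModel):
    """Schema the model is asked to fill in via tool use."""

    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Import statements")
    code: str = Field(description="Code block, not including import statements")


# Assumed model name; include_raw=True returns {"raw", "parsed", "parsing_error"}
llm = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = llm.with_structured_output(code, include_raw=True)

# Placeholder prompt; the notebook's prompts inject the LCEL docs as {context}
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Answer the user question using the `code` tool.\n{context}"),
        ("placeholder", "{messages}"),
    ]
)


def check_claude_output(tool_output):
    """Raise if the tool call failed to parse or was never made."""
    if tool_output["parsing_error"]:
        raise ValueError(f"Parse error: {tool_output['parsing_error']}")
    if not tool_output["parsed"]:
        raise ValueError("You did not use the provided tool!")
    return tool_output


chain = prompt | structured_llm | check_claude_output


def insert_errors(inputs):
    """Append the raised error as a new user turn so the retry can self-correct."""
    messages = inputs["messages"] + [
        ("user", f"Retry. You must invoke the tool. Error: {inputs['error']}")
    ]
    return {"messages": messages, "context": inputs["context"]}


# On failure, the exception is injected under "error", insert_errors adds it to the
# conversation, and the chain runs again; this repeats up to three times.
fallback_chain = RunnableLambda(insert_errors) | chain
chain_with_retry = chain.with_fallbacks(fallbacks=[fallback_chain] * 3, exception_key="error")

# Usage (docs_text is an assumed placeholder for the concatenated documentation string):
# result = chain_with_retry.invoke(
#     {"context": docs_text, "messages": [("user", "How do I build a RAG chain in LCEL?")]}
# )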
{
"cell_type": "code",
"execution_count": 206,
"execution_count": 12,
"id": "94e77be5-dddb-4386-b523-6f1136150bbd",
"metadata": {},
"outputs": [],
"source": [
"# Code solution prompt\n",
"code_gen_prompt = ChatPromptTemplate.from_messages(\n",
"# This code gen prompt invokes tool use\n",
"code_gen_prompt_working = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
@@ -335,6 +341,21 @@
" ]\n",
")\n",
"\n",
"# This code gen prompt does not invoke tool use\n",
"code_gen_prompt_bad = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"\"\"You are a coding assistant with expertise in LCEL, LangChain expression language. \\n \n",
" Here is a full set of LCEL documentation: \\n ------- \\n {context} \\n ------- \\n Answer the user \n",
" question based on the above provided documentation. Ensure any code you provide can be executed \\n \n",
" with all required imports and variables defined. Structure your answer with a description of the code solution. \\n\n",
" Then list the imports. And finally list the functioning code block. Here is the user question:\"\"\",\n",
" ),\n",
" (\"placeholder\", \"{messages}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"# Data model\n",
"class code(BaseModel):\n",
@@ -358,21 +379,30 @@
"\n",
"\n",
"# Check for errors\n",
"def check(tool_output):\n",
" \"\"\"Check for errors\"\"\"\n",
"def check_claude_output(tool_output):\n",
" \"\"\"Check for parse error or failure to call the tool\"\"\"\n",
"\n",
" # Error with parsing\n",
" if tool_output[\"parsing_error\"]:\n",
" # Report back output and parsing errors\n",
" print(\"Parsing error!\")\n",
" raw_output = str(code_output[\"raw\"].content)\n",
" error = tool_output[\"parsing_error\"]\n",
" raise ValueError(\n",
" f\"Error parsing your output! Be sure to invoke the tool. Output: {raw_output}. \\n Parse error: {error}\"\n",
" )\n",
"\n",
" # Tool was not invoked\n",
" elif not tool_output[\"parsed\"]:\n",
" print(\"Failed to invoke tool!\")\n",
" raise ValueError(\n",
" \"You did not use the provided tool! Be sure to invoke the tool to structure the output.\"\n",
" )\n",
" return tool_output\n",
"\n",
"\n",
"# Chain with output check\n",
"code_chain = code_gen_prompt | structured_llm | check"
"code_chain = code_gen_prompt_bad | structured_llm | check_claude_output"
]
},
{
@@ -385,12 +415,15 @@
},
{
"cell_type": "code",
"execution_count": 207,
"execution_count": 13,
"id": "efae1ff7-4413-4c47-a403-1630dd453219",
"metadata": {},
"outputs": [],
"source": [
"def insert_errors(inputs):\n",
" \"\"\"Insert errors in the messages\"\"\"\n",
"\n",
" # Get errors\n",
" error = inputs[\"error\"]\n",
" messages = inputs[\"messages\"]\n",
" messages += [\n",
@@ -401,8 +434,7 @@
" ]\n",
" return {\n",
" \"messages\": messages,\n",
" \"document\": inputs[\"document\"],\n",
" \"question\": inputs[\"question\"],\n",
" \"context\": inputs[\"context\"],\n",
" }\n",
"\n",
"\n",
@@ -416,13 +448,21 @@
},
{
"cell_type": "code",
"execution_count": 208,
"execution_count": 14,
"id": "c7712c49-ee8c-4a61-927e-3c0beb83782b",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Failed to invoke tool!\n"
]
}
],
"source": [
"# Test\n",
"messages = [(\"user\", \"Write a RAG chain in LCEL.\")]\n",
"messages = [(\"user\", \"How do I build a RAG chain in LCEL?\")]\n",
"code_output_lcel = code_chain_re_try.invoke(\n",
" {\"context\": concatenated_content, \"messages\": messages}\n",
")"
@@ -430,7 +470,7 @@
},
{
"cell_type": "code",
"execution_count": 209,
"execution_count": 15,
"id": "c8027a6f-6992-4bb4-9d6e-9d0778b04e28",
"metadata": {},
"outputs": [],
@@ -440,17 +480,17 @@
},
{
"cell_type": "code",
"execution_count": 210,
"execution_count": 16,
"id": "209186ac-3121-43a9-8358-86ace7e07f61",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Here's how you can write a retrieval-augmented generation (RAG) chain in LCEL:\""
"\"To build a RAG chain using LCEL, we'll use a vector store to retrieve relevant documents, a prompt template that incorporates the retrieved context, a chat model (like OpenAI) to generate a response based on the prompt, and an output parser to clean up the model output.\""
]
},
"execution_count": 210,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -461,17 +501,17 @@
},
{
"cell_type": "code",
"execution_count": 211,
"execution_count": 17,
"id": "b8d6d189-e5df-49b6-ada8-83f6c0b26886",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'from langchain_community.vectorstores import DocArrayInMemorySearch\\nfrom langchain_core.output_parsers import StrOutputParser\\nfrom langchain_core.prompts import ChatPromptTemplate\\nfrom langchain_core.runnables import RunnableParallel, RunnablePassthrough\\nfrom langchain_openai import OpenAIEmbeddings'"
"'from langchain_community.vectorstores import DocArrayInMemorySearch\\nfrom langchain_core.output_parsers import StrOutputParser\\nfrom langchain_core.prompts import ChatPromptTemplate\\nfrom langchain_core.runnables import RunnablePassthrough\\nfrom langchain_openai import ChatOpenAI, OpenAIEmbeddings'"
]
},
"execution_count": 211,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -482,17 +522,17 @@
},
{
"cell_type": "code",
"execution_count": 212,
"execution_count": 18,
"id": "e3822253-d28b-4f7e-9364-79974d04eff1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'# Create a vector store retriever\\nvectorstore = DocArrayInMemorySearch.from_texts(\\n [\"harrison worked at kensho\", \"bears like to eat honey\"], \\n embedding=OpenAIEmbeddings(),\\n)\\nretriever = vectorstore.as_retriever()\\n\\n# Define a prompt template\\ntemplate = \"\"\"Answer the question based only on the following context:\\n{context}\\n\\nQuestion: {question}\"\"\"\\nprompt = ChatPromptTemplate.from_template(template)\\n\\n# Define the model and output parser \\nmodel = ChatOpenAI()\\noutput_parser = StrOutputParser()\\n\\n# Compose the chain using LCEL\\nsetup_and_retrieval = RunnableParallel(\\n {\"context\": retriever, \"question\": RunnablePassthrough()}\\n)\\nchain = setup_and_retrieval | prompt | model | output_parser\\n\\n# Test the chain\\nchain.invoke(\"where did harrison work?\")'"
"'vectorstore = DocArrayInMemorySearch.from_texts(\\n [\"harrison worked at kensho\", \"bears like to eat honey\"], \\n embedding=OpenAIEmbeddings(),\\n)\\n\\nretriever = vectorstore.as_retriever()\\n\\ntemplate = \"\"\"Answer the question based only on the following context:\\n{context}\\nQuestion: {question}\"\"\"\\nprompt = ChatPromptTemplate.from_template(template)\\n\\noutput_parser = StrOutputParser()\\n\\nrag_chain = (\\n {\"context\": retriever, \"question\": RunnablePassthrough()} \\n | prompt \\n | ChatOpenAI()\\n | output_parser\\n)\\n\\nprint(rag_chain.invoke(\"where did harrison work?\"))'"
]
},
"execution_count": 212,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
@@ -502,19 +542,19 @@
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e27cd768-a274-4ded-b7ba-14aaf6c57164",
"cell_type": "markdown",
"id": "80d63a3d-bad8-4385-bd85-40ca95c260c6",
"metadata": {},
"outputs": [],
"source": [
"exec(parsed_result_lcel.code)"
"Example trace catching an error and correcting:\n",
"\n",
"https://smith.langchain.com/public/f06e62cb-2fac-46ae-80cd-0470b3155eae/r"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "17b44e14-af01-45cd-9c37-c4105e2ddd40",
"id": "5f70e45c-eb68-4679-979c-0c04502affd1",
"metadata": {},
"outputs": [],
"source": []
