langchain[patch]: add deprecations (#24792)

ccurme 2024-08-09 10:34:43 -04:00 committed by GitHub
parent 02300471be
commit 4825dc0d76
GPG Key ID: B5690EEEBB952194
38 changed files with 3314 additions and 108 deletions

View File

@ -54,12 +54,9 @@
"id": "00df631d-5121-4918-94aa-b88acce9b769",
"metadata": {},
"source": [
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
"## Legacy\n",
"\n",
"<ColumnContainer>\n",
"<Column>\n",
"\n",
"#### Legacy\n"
"<details open>"
]
},
{
@ -111,12 +108,11 @@
"id": "f8e36b0e-c7dc-4130-a51b-189d4b756c7f",
"metadata": {},
"source": [
"</Column>\n",
"</details>\n",
"\n",
"<Column>\n",
"## LCEL\n",
"\n",
"#### LCEL\n",
"\n"
"<details open>"
]
},
{
@ -174,10 +170,6 @@
"id": "6b386ce6-895e-442c-88f3-7bec0ab9f401",
"metadata": {},
"source": [
"\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"The above example uses the same `history` for all sessions. The example below shows how to use a different chat history for each session."
]
},
@ -230,6 +222,8 @@
"id": "b2717810",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"See [this tutorial](/docs/tutorials/chatbot) for a more end-to-end guide on building with [`RunnableWithMessageHistory`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html).\n",

View File

@ -83,13 +83,9 @@
"id": "8bc06416",
"metadata": {},
"source": [
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
"## Legacy\n",
"\n",
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"#### Legacy"
"<details open>"
]
},
{
@ -165,12 +161,11 @@
"id": "43a8a23c",
"metadata": {},
"source": [
"</Column>\n",
"</details>\n",
"\n",
"<Column>\n",
"## LCEL\n",
"\n",
"#### LCEL\n",
"\n"
"<details open>"
]
},
{
@ -253,9 +248,7 @@
"id": "b2717810",
"metadata": {},
"source": [
"</Column>\n",
"\n",
"</ColumnContainer>\n",
"</details>\n",
"\n",
"## Next steps\n",
"\n",
@ -263,6 +256,14 @@
"\n",
"Next, check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7bfc38bd-0ff8-40ee-83a3-9d7553364fd7",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {

View File

@ -2,33 +2,48 @@
sidebar_position: 1
---
# How to migrate chains to LCEL
# How to migrate from v0.0 chains
:::info Prerequisites
This guide assumes familiarity with the following concepts:
- [LangChain Expression Language](/docs/concepts#langchain-expression-language-lcel)
- [LangGraph](https://langchain-ai.github.io/langgraph/)
:::
LCEL is designed to streamline the process of building useful apps with LLMs and combining related components. It does this by providing:
LangChain maintains a number of legacy abstractions. Many of these can be reimplemented via short combinations of LCEL and LangGraph primitives.
### LCEL
[LCEL](/docs/concepts/#langchain-expression-language-lcel) is designed to streamline the process of building useful apps with LLMs and combining related components. It does this by providing:
1. **A unified interface**: Every LCEL object implements the `Runnable` interface, which defines a common set of invocation methods (`invoke`, `batch`, `stream`, `ainvoke`, ...). This makes it possible to also automatically and consistently support useful operations like streaming of intermediate steps and batching, since every chain composed of LCEL objects is itself an LCEL object.
2. **Composition primitives**: LCEL provides a number of primitives that make it easy to compose chains, parallelize components, add fallbacks, dynamically configure chain internals, and more.
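For illustration, a minimal sketch of this composition (assuming `langchain-openai` is installed and an OpenAI API key is configured; any chat model would work the same way):

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
llm = ChatOpenAI(model="gpt-4o-mini")

# Composing runnables with `|` yields another runnable, so the chain
# itself supports `invoke`, `batch`, `stream`, `ainvoke`, and friends.
chain = prompt | llm | StrOutputParser()

chain.invoke({"topic": "bears"})

# Streaming works on the composed chain with no extra wiring:
for chunk in chain.stream({"topic": "bears"}):
    print(chunk, end="")
```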
LangChain maintains a number of legacy abstractions. Many of these can be reimplemented via short combinations of LCEL primitives. Doing so confers some general advantages:
### LangGraph
[LangGraph](https://langchain-ai.github.io/langgraph/), built on top of LCEL, allows for performant orchestration of application components while maintaining concise and readable code. It includes built-in persistence, support for cycles, and prioritizes controllability.
If LCEL grows unwieldy for larger or more complex chains, those chains may benefit from a LangGraph implementation.
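As a minimal sketch of a LangGraph chain with built-in persistence (this assumes the `MemorySaver` checkpointer from `langgraph.checkpoint.memory`; import paths may vary across LangGraph versions):

```python
from typing import Annotated, Sequence

from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import TypedDict

llm = ChatOpenAI(model="gpt-4o-mini")


class State(TypedDict):
    # add_messages appends new messages to the running history.
    messages: Annotated[Sequence[BaseMessage], add_messages]


def call_model(state: State):
    return {"messages": [llm.invoke(state["messages"])]}


graph_builder = StateGraph(State)
graph_builder.add_node("model", call_model)
graph_builder.set_entry_point("model")
graph_builder.add_edge("model", END)

# The checkpointer provides the built-in persistence: invocations that
# share a thread_id see a common chat history ("memory").
chain = graph_builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": "1"}}
chain.invoke({"messages": [("human", "Hi, I'm Bob.")]}, config)
chain.invoke({"messages": [("human", "What is my name?")]}, config)
```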
### Advantages
Using these frameworks for existing v0.0 chains confers some advantages:
- The resulting chains typically implement the full `Runnable` interface, including streaming and asynchronous support where appropriate;
- The chains may be more easily extended or modified;
- The parameters of the chain (e.g., prompts) are surfaced for easier customization; previous versions tended to be subclasses with opaque parameters and internals;
- If using LangGraph, the chain supports built-in persistence, allowing for conversational experiences via a "memory" of the chat history;
- If using LangGraph, the steps of the chain can be streamed, allowing for greater control and customizability.
The LCEL implementations can be slightly more verbose, but there are significant benefits in transparency and customizability.
The below pages assist with migration from various specific chains to LCEL:
The pages below assist with migration from various specific chains to LCEL and LangGraph:
- [LLMChain](/docs/versions/migrating_chains/llm_chain)
- [ConversationChain](/docs/versions/migrating_chains/conversation_chain)
- [RetrievalQA](/docs/versions/migrating_chains/retrieval_qa)
- [ConversationalRetrievalChain](/docs/versions/migrating_chains/conversation_retrieval_chain)
- [StuffDocumentsChain](/docs/versions/migrating_chains/stuff_docs_chain)
- [MapReduceDocumentsChain](/docs/versions/migrating_chains/map_reduce_chain)
- [MapRerankDocumentsChain](/docs/versions/migrating_chains/map_rerank_docs_chain)
- [RefineDocumentsChain](/docs/versions/migrating_chains/refine_docs_chain)
- [LLMRouterChain](/docs/versions/migrating_chains/llm_router_chain)
- [MultiPromptChain](/docs/versions/migrating_chains/multi_prompt_chain)
Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information.
Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) and [LangGraph docs](https://langchain-ai.github.io/langgraph/) for more background information.

View File

@ -52,13 +52,9 @@
"id": "e3621b62-a037-42b8-8faa-59575608bb8b",
"metadata": {},
"source": [
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
"## Legacy\n",
"\n",
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"#### Legacy\n"
"<details open>"
]
},
{
@ -98,13 +94,11 @@
"id": "cdc3b527-c09e-4c77-9711-c3cc4506cd95",
"metadata": {},
"source": [
"</details>\n",
"\n",
"</Column>\n",
"## LCEL\n",
"\n",
"<Column>\n",
"\n",
"#### LCEL\n",
"\n"
"<details open>"
]
},
{
@ -143,10 +137,6 @@
"id": "3c0b0513-77b8-4371-a20e-3e487cec7e7f",
"metadata": {},
"source": [
"\n",
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"Note that `LLMChain` by default returns a `dict` containing both the input and the output. If this behavior is desired, we can replicate it using another LCEL primitive, [`RunnablePassthrough`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.passthrough.RunnablePassthrough.html):"
]
},
@ -181,6 +171,8 @@
"id": "b2717810",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers.\n",

View File

@ -0,0 +1,283 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "575befea-4d98-4941-8e55-1581b169a674",
"metadata": {},
"source": [
"---\n",
"title: Migrating from LLMRouterChain\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "14625d35-efca-41cf-b203-be9f4c375700",
"metadata": {},
"source": [
"The [`LLMRouterChain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html) routed an input query to one of multiple destinations-- that is, given an input query, it used a LLM to select from a list of destination chains, and passed its inputs to the selected chain.\n",
"\n",
"`LLMRouterChain` does not support common [chat model](/docs/concepts/#chat-models) features, such as message roles and [tool calling](/docs/concepts/#functiontool-calling). Under the hood, `LLMRouterChain` routes a query by instructing the LLM to generate JSON-formatted text, and parsing out the intended destination.\n",
"\n",
"Consider an example from a [MultiPromptChain](/docs/versions/migrating_chains/multi_prompt_chain), which uses `LLMRouterChain`. Below is an (example) default prompt:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "364814a5-d15c-41bb-bf3f-581df51a4721",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Given a raw text input to a language model select the model prompt best suited for the input. You will be given the names of the available prompts and a description of what the prompt is best suited for. You may also revise the original input if you think that revising it will ultimately lead to a better response from the language model.\n",
"\n",
"<< FORMATTING >>\n",
"Return a markdown code snippet with a JSON object formatted to look like:\n",
"'''json\n",
"{{\n",
" \"destination\": string \\ name of the prompt to use or \"DEFAULT\"\n",
" \"next_inputs\": string \\ a potentially modified version of the original input\n",
"}}\n",
"'''\n",
"\n",
"REMEMBER: \"destination\" MUST be one of the candidate prompt names specified below OR it can be \"DEFAULT\" if the input is not well suited for any of the candidate prompts.\n",
"REMEMBER: \"next_inputs\" can just be the original input if you don't think any modifications are needed.\n",
"\n",
"<< CANDIDATE PROMPTS >>\n",
"\n",
"animals: prompt for animal expert\n",
"vegetables: prompt for a vegetable expert\n",
"\n",
"\n",
"<< INPUT >>\n",
"{input}\n",
"\n",
"<< OUTPUT (must include '''json at the start of the response) >>\n",
"<< OUTPUT (must end with ''') >>\n",
"\n"
]
}
],
"source": [
"from langchain.chains.router.multi_prompt import MULTI_PROMPT_ROUTER_TEMPLATE\n",
"\n",
"destinations = \"\"\"\n",
"animals: prompt for animal expert\n",
"vegetables: prompt for a vegetable expert\n",
"\"\"\"\n",
"\n",
"router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format(destinations=destinations)\n",
"\n",
"print(router_template.replace(\"`\", \"'\")) # for rendering purposes"
]
},
{
"cell_type": "markdown",
"id": "934937d1-fc0a-4d3f-b297-29f96e6a8f5e",
"metadata": {},
"source": [
"Most of the behavior is determined via a single natural language prompt. Chat models that support [tool calling](/docs/how_to/tool_calling/) features confer a number of advantages for this task:\n",
"\n",
"- Supports chat prompt templates, including messages with `system` and other roles;\n",
"- Tool-calling models are fine-tuned to generate structured output;\n",
"- Support for runnable methods like streaming and async operations.\n",
"\n",
"Now let's look at `LLMRouterChain` side-by-side with an LCEL implementation that uses tool-calling. Note that for this guide we will `langchain-openai >= 0.1.20`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ed12b22b-5452-4776-aee3-b67d9f965082",
"metadata": {},
"outputs": [],
"source": [
"%pip install -qU langchain-core langchain-openai"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0edbba1-a497-49ef-ade7-4fe7967360eb",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()"
]
},
{
"cell_type": "markdown",
"id": "5d4dc41c-3fdc-4093-ba5e-31a9ebb54e13",
"metadata": {},
"source": [
"## Legacy\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "c58c9269-5a1d-4234-88b5-7168944618bf",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParser\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"router_prompt = PromptTemplate(\n",
" # Note: here we use the prompt template from above. Generally this would need\n",
" # to be customized.\n",
" template=router_template,\n",
" input_variables=[\"input\"],\n",
" output_parser=RouterOutputParser(),\n",
")\n",
"\n",
"chain = LLMRouterChain.from_llm(llm, router_prompt)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a22ebdca-5f53-459e-9cff-a97b2354ffe0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"vegetables\n"
]
}
],
"source": [
"result = chain.invoke({\"input\": \"What color are carrots?\"})\n",
"\n",
"print(result[\"destination\"])"
]
},
{
"cell_type": "markdown",
"id": "6fd48120-056f-4c58-a04f-da5198c23068",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## LCEL\n",
"\n",
"<details open>"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "5bbebac2-df19-4f59-8a69-f61cd7286e59",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"from typing import Literal\n",
"\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"from typing_extensions import TypedDict\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-4o-mini\")\n",
"\n",
"route_system = \"Route the user's query to either the animal or vegetable expert.\"\n",
"route_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", route_system),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"# Define schema for output:\n",
"class RouteQuery(TypedDict):\n",
" \"\"\"Route query to destination expert.\"\"\"\n",
"\n",
" destination: Literal[\"animal\", \"vegetable\"]\n",
"\n",
"\n",
"# Instead of writing formatting instructions into the prompt, we\n",
"# leverage .with_structured_output to coerce the output into a simple\n",
"# schema.\n",
"chain = route_prompt | llm.with_structured_output(RouteQuery)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "88012e10-8def-44fa-833f-989935824182",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"vegetable\n"
]
}
],
"source": [
"result = chain.invoke({\"input\": \"What color are carrots?\"})\n",
"\n",
"print(result[\"destination\"])"
]
},
{
"cell_type": "markdown",
"id": "baf7ba9e-65b4-48af-8a39-453c01a7b7cb",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"See [this tutorial](/docs/tutorials/llm_chain) for more detail on building with prompt templates, LLMs, and output parsers.\n",
"\n",
"Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "353e4bab-3b8a-4e89-89e2-200a8d8eb8dd",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -82,13 +82,9 @@
"id": "c7e16438",
"metadata": {},
"source": [
"import { ColumnContainer, Column } from \"@theme/Columns\";\n",
"## Legacy\n",
"\n",
"<ColumnContainer>\n",
"\n",
"<Column>\n",
"\n",
"#### Legacy"
"<details open>"
]
},
{
@ -128,12 +124,11 @@
"id": "081948e5",
"metadata": {},
"source": [
"</Column>\n",
"</details>\n",
"\n",
"<Column>\n",
"## LCEL\n",
"\n",
"#### LCEL\n",
"\n"
"<details open>"
]
},
{
@ -184,9 +179,6 @@
"id": "d6f44fe8",
"metadata": {},
"source": [
"</Column>\n",
"</ColumnContainer>\n",
"\n",
"The LCEL implementation exposes the internals of what's happening around retrieving, formatting documents, and passing them through a prompt to the LLM, but it is more verbose. You can customize and wrap this composition logic in a helper function, or use the higher-level [`create_retrieval_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.retrieval.create_retrieval_chain.html) and [`create_stuff_documents_chain`](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) helper method:"
]
},
@ -231,6 +223,8 @@
"id": "b2717810",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information."

View File

@ -0,0 +1,281 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "ed78c53c-55ad-4ea2-9cc2-a39a1963c098",
"metadata": {},
"source": [
"---\n",
"title: Migrating from StuffDocumentsChain\n",
"---\n",
"\n",
"[StuffDocumentsChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.StuffDocumentsChain.html) combines documents by concatenating them into a single context window. It is a straightforward and effective strategy for combining documents for question-answering, summarization, and other purposes.\n",
"\n",
"[create_stuff_documents_chain](https://api.python.langchain.com/en/latest/chains/langchain.chains.combine_documents.stuff.create_stuff_documents_chain.html) is the recommended alternative. It functions the same as `StuffDocumentsChain`, with better support for streaming and batch functionality. Because it is a simple combination of [LCEL primitives](/docs/concepts/#langchain-expression-language-lcel), it is also easier to extend and incorporate into other LangChain applications.\n",
"\n",
"Below we will go through both `StuffDocumentsChain` and `create_stuff_documents_chain` on a simple example for illustrative purposes.\n",
"\n",
"Let's first load a chat model:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "dac0bef2-9453-46f2-a893-f7569b6a0170",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "d4022d03-7b5e-4c81-98ff-5b82a2a4eaae",
"metadata": {},
"source": [
"## Example\n",
"\n",
"Let's go through an example where we analyze a set of documents. We first generate some simple documents for illustrative purposes:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "24fa0ba9-e245-47d1-bc2e-6286dd884117",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"\n",
"documents = [\n",
" Document(page_content=\"Apples are red\", metadata={\"title\": \"apple_book\"}),\n",
" Document(page_content=\"Blueberries are blue\", metadata={\"title\": \"blueberry_book\"}),\n",
" Document(page_content=\"Bananas are yelow\", metadata={\"title\": \"banana_book\"}),\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "3a769128-205f-417d-a25d-519e7cb03be7",
"metadata": {},
"source": [
"### Legacy\n",
"\n",
"<details open>\n",
"\n",
"Below we show an implementation with `StuffDocumentsChain`. We define the prompt template for a summarization task and instantiate a [LLMChain](https://api.python.langchain.com/en/latest/chains/langchain.chains.llm.LLMChain.html) object for this purpose. We define how documents are formatted into the prompt and ensure consistency among the keys in the various prompts."
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "9734c0f3-64e7-4ae6-8578-df03b3dabb26",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import LLMChain, StuffDocumentsChain\n",
"from langchain_core.prompts import ChatPromptTemplate, PromptTemplate\n",
"\n",
"# This controls how each document will be formatted. Specifically,\n",
"# it will be passed to `format_document` - see that function for more\n",
"# details.\n",
"document_prompt = PromptTemplate(\n",
" input_variables=[\"page_content\"], template=\"{page_content}\"\n",
")\n",
"document_variable_name = \"context\"\n",
"# The prompt here should take as an input variable the\n",
"# `document_variable_name`\n",
"prompt = ChatPromptTemplate.from_template(\"Summarize this content: {context}\")\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"chain = StuffDocumentsChain(\n",
" llm_chain=llm_chain,\n",
" document_prompt=document_prompt,\n",
" document_variable_name=document_variable_name,\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0cb733bf-eb71-4fae-a8f4-d522924020cb",
"metadata": {},
"source": [
"We can now invoke our chain:"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "d7d1ce10-bbee-4cb0-879d-7de4f69191c4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'This content describes the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"result = chain.invoke(documents)\n",
"result[\"output_text\"]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "79b10d40-1521-433b-9026-6ec836ffeeb3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'input_documents': [Document(metadata={'title': 'apple_book'}, page_content='Apples are red'), Document(metadata={'title': 'blueberry_book'}, page_content='Blueberries are blue'), Document(metadata={'title': 'banana_book'}, page_content='Bananas are yelow')], 'output_text': 'This content describes the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'}\n"
]
}
],
"source": [
"for chunk in chain.stream(documents):\n",
" print(chunk)"
]
},
{
"cell_type": "markdown",
"id": "b4cb6a5b-37ea-48cc-a096-b948d3ff7e9f",
"metadata": {},
"source": [
"</details>\n",
"\n",
"### LCEL\n",
"\n",
"<details open>\n",
"\n",
"Below we show an implementation using `create_stuff_documents_chain`:"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "de38f27a-c648-44be-8c37-0a458c2920a9",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"prompt = ChatPromptTemplate.from_template(\"Summarize this content: {context}\")\n",
"chain = create_stuff_documents_chain(llm, prompt)"
]
},
{
"cell_type": "markdown",
"id": "9d0e6996-9bf8-4097-9c1a-1c539eac3ed1",
"metadata": {},
"source": [
"Invoking the chain, we obtain a similar result as before:"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "f2d2bdfb-3a6a-464b-b4c2-e4252b2e53a0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'This content describes the colors of different fruits: apples are red, blueberries are blue, and bananas are yellow.'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"result = chain.invoke({\"context\": documents})\n",
"result"
]
},
{
"cell_type": "markdown",
"id": "493e6270-c61d-46c5-91b3-0cf7740a88f9",
"metadata": {},
"source": [
"Note that this implementation supports streaming of output tokens:"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "b5adcabd-9bc1-4c91-a12b-7be82d64e457",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" | This | content | describes | the | colors | of | different | fruits | : | apples | are | red | , | blue | berries | are | blue | , | and | bananas | are | yellow | . | | "
]
}
],
"source": [
"for chunk in chain.stream({\"context\": documents}):\n",
" print(chunk, end=\" | \")"
]
},
{
"cell_type": "markdown",
"id": "181c5633-38ea-4692-a869-32f4f78398e4",
"metadata": {},
"source": [
"</details>\n",
"\n",
"## Next steps\n",
"\n",
"Check out the [LCEL conceptual docs](/docs/concepts/#langchain-expression-language-lcel) for more background information.\n",
"\n",
"See these [how-to guides](/docs/how_to/#qa-with-rag) for more on question-answering tasks with RAG.\n",
"\n",
"See [this tutorial](/docs/tutorials/summarization/) for more LLM-based summarization strategies."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -93,7 +93,7 @@ module.exports = {
},
{
type: "category",
label: "Migrating to LCEL",
label: "Migrating from v0.0 chains",
link: {type: 'doc', id: 'versions/migrating_chains/index'},
collapsible: false,
collapsed: false,

View File

@ -2,6 +2,7 @@
from typing import Any, Dict, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
@ -15,6 +16,16 @@ from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"See API reference for this function for a replacement implementation: "
"https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_agent.html " # noqa: E501
"Read more here on how to create agents that query vector stores: "
"https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
),
)
def create_vectorstore_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreToolkit,
@ -26,6 +37,44 @@ def create_vectorstore_agent(
) -> AgentExecutor:
"""Construct a VectorStore agent from an LLM and tools.
Note: this function is deprecated. See below for a replacement that uses tool
calling methods and LangGraph. Install LangGraph with:
.. code-block:: bash
pip install -U langgraph
.. code-block:: python
from langchain_core.tools import create_retriever_tool
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langgraph.prebuilt import create_react_agent
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
vector_store = InMemoryVectorStore.from_texts(
[
"Dogs are great companions, known for their loyalty and friendliness.",
"Cats are independent pets that often enjoy their own space.",
],
OpenAIEmbeddings(),
)
tool = create_retriever_tool(
vector_store.as_retriever(),
"pet_information_retriever",
"Fetches information about pets.",
)
agent = create_react_agent(llm, [tool])
for step in agent.stream(
{"messages": [("human", "What are dogs known for?")]},
stream_mode="values",
):
step["messages"][-1].pretty_print()
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreToolkit): Set of tools for the agent
@ -56,6 +105,16 @@ def create_vectorstore_agent(
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"See API reference for this function for a replacement implementation: "
"https://api.python.langchain.com/en/latest/agents/langchain.agents.agent_toolkits.vectorstore.base.create_vectorstore_router_agent.html " # noqa: E501
"Read more here on how to create agents that query vector stores: "
"https://python.langchain.com/v0.2/docs/how_to/qa_chat_history_how_to/#agents"
),
)
def create_vectorstore_router_agent(
llm: BaseLanguageModel,
toolkit: VectorStoreRouterToolkit,
@ -67,6 +126,59 @@ def create_vectorstore_router_agent(
) -> AgentExecutor:
"""Construct a VectorStore router agent from an LLM and tools.
Note: this function is deprecated. See below for a replacement that uses tool
calling methods and LangGraph. Install LangGraph with:
.. code-block:: bash
pip install -U langgraph
.. code-block:: python
from langchain_core.tools import create_retriever_tool
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langgraph.prebuilt import create_react_agent
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
pet_vector_store = InMemoryVectorStore.from_texts(
[
"Dogs are great companions, known for their loyalty and friendliness.",
"Cats are independent pets that often enjoy their own space.",
],
OpenAIEmbeddings(),
)
food_vector_store = InMemoryVectorStore.from_texts(
[
"Carrots are orange and delicious.",
"Apples are red and delicious.",
],
OpenAIEmbeddings(),
)
tools = [
create_retriever_tool(
pet_vector_store.as_retriever(),
"pet_information_retriever",
"Fetches information about pets.",
),
create_retriever_tool(
food_vector_store.as_retriever(),
"food_information_retriever",
"Fetches information about food.",
)
]
agent = create_react_agent(llm, tools)
for step in agent.stream(
{"messages": [("human", "Tell me about carrots.")]},
stream_mode="values",
):
step["messages"][-1].pretty_print()
Args:
llm (BaseLanguageModel): LLM that will be used by the agent
toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores

View File

@ -59,6 +59,7 @@ _module_lookup = {
"OpenAIModerationChain": "langchain.chains.moderation",
"NatBotChain": "langchain.chains.natbot.base",
"create_citation_fuzzy_match_chain": "langchain.chains.openai_functions",
"create_citation_fuzzy_match_runnable": "langchain.chains.openai_functions",
"create_extraction_chain": "langchain.chains.openai_functions",
"create_extraction_chain_pydantic": "langchain.chains.openai_functions",
"create_qa_with_sources_chain": "langchain.chains.openai_functions",

View File

@ -5,6 +5,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@ -53,6 +54,15 @@ def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) -> bool:
try:
from langchain_community.utilities.requests import TextRequestsWrapper
@deprecated(
since="0.2.13",
message=(
"This class is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.api.base.APIChain.html" # noqa: E501
),
removal="1.0",
)
class APIChain(Chain):
"""Chain that makes API calls and summarizes the responses to answer a question.
@ -69,7 +79,117 @@ try:
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
Note: this class is deprecated. See below for a replacement implementation
using LangGraph. The benefits of this implementation are:
- Uses LLM tool calling features to encourage properly-formatted API requests;
- Support for both token-by-token and step-by-step streaming;
- Support for checkpointing and memory of chat history;
- Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
Install LangGraph with:
.. code-block:: bash
pip install -U langgraph
.. code-block:: python
from typing import Annotated, Sequence
from typing_extensions import TypedDict
from langchain.chains.api.prompt import API_URL_PROMPT
from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.messages import BaseMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.runnables import RunnableConfig
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt.tool_node import ToolNode
# NOTE: There are inherent risks in giving models discretion
# to execute real-world actions. We must "opt-in" to these
# risks by setting allow_dangerous_requests=True to use these tools.
# The model could issue unwanted requests. Please make
# sure your custom OpenAPI spec (yaml) is safe and that permissions
# associated with the tools are narrowly-scoped.
ALLOW_DANGEROUS_REQUESTS = True
# Subset of spec for https://jsonplaceholder.typicode.com
api_spec = \"\"\"
openapi: 3.0.0
info:
title: JSONPlaceholder API
version: 1.0.0
servers:
- url: https://jsonplaceholder.typicode.com
paths:
/posts:
get:
summary: Get posts
parameters: &id001
- name: _limit
in: query
required: false
schema:
type: integer
example: 2
description: Limit the number of results
\"\"\"
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
toolkit = RequestsToolkit(
requests_wrapper=TextRequestsWrapper(headers={}), # no auth required
allow_dangerous_requests=ALLOW_DANGEROUS_REQUESTS,
)
tools = toolkit.get_tools()
api_request_chain = (
API_URL_PROMPT.partial(api_docs=api_spec)
| llm.bind_tools(tools, tool_choice="any")
)
class ChainState(TypedDict):
\"\"\"LangGraph state.\"\"\"
messages: Annotated[Sequence[BaseMessage], add_messages]
async def acall_request_chain(state: ChainState, config: RunnableConfig):
last_message = state["messages"][-1]
response = await api_request_chain.ainvoke(
{"question": last_message.content}, config
)
return {"messages": [response]}
async def acall_model(state: ChainState, config: RunnableConfig):
response = await llm.ainvoke(state["messages"], config)
return {"messages": [response]}
graph_builder = StateGraph(ChainState)
graph_builder.add_node("call_tool", acall_request_chain)
graph_builder.add_node("execute_tool", ToolNode(tools))
graph_builder.add_node("call_model", acall_model)
graph_builder.set_entry_point("call_tool")
graph_builder.add_edge("call_tool", "execute_tool")
graph_builder.add_edge("execute_tool", "call_model")
graph_builder.add_edge("call_model", END)
chain = graph_builder.compile()
.. code-block:: python
example_query = "Fetch the top two posts. What are their titles?"
events = chain.astream(
{"messages": [("user", example_query)]},
stream_mode="values",
)
async for event in events:
event["messages"][-1].pretty_print()
""" # noqa: E501
api_request_chain: LLMChain
api_answer_chain: LLMChain

View File

@ -25,7 +25,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
Example:
.. code-block:: python
from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.chains import MapRerankDocumentsChain, LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_community.llms import OpenAI
from langchain.output_parsers.regex import RegexParser
@ -39,7 +39,7 @@ class MapRerankDocumentsChain(BaseCombineDocumentsChain):
prompt_template = (
"Use the following context to tell me the chemical formula "
"for water. Output both your answer and a score of how confident "
"you are. Context: {content}"
"you are. Context: {context}"
)
output_parser = RegexParser(
regex=r"(.*?)\nScore: (.*)",

View File

@ -2,6 +2,7 @@
from typing import Any, Dict, List, Optional, Tuple
from langchain_core._api import deprecated
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import LanguageModelLike
@ -95,6 +96,15 @@ def create_stuff_documents_chain(
).with_config(run_name="stuff_documents_chain")
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_stuff_documents_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain/" # noqa: E501
),
)
class StuffDocumentsChain(BaseCombineDocumentsChain):
"""Chain that combines documents by stuffing into context.

View File

@ -6,14 +6,14 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
from langchain_core.output_parsers.json import SimpleJsonOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import root_validator
from langchain_core.runnables import Runnable
from langchain.chains.base import Chain
from langchain.chains.elasticsearch_database.prompts import ANSWER_PROMPT, DSL_PROMPT
from langchain.chains.llm import LLMChain
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
@ -35,9 +35,9 @@ class ElasticsearchDatabaseChain(Chain):
db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database)
"""
query_chain: LLMChain
query_chain: Runnable
"""Chain for creating the ES query."""
answer_chain: LLMChain
answer_chain: Runnable
"""Chain for answering the user question."""
database: Any
"""Elasticsearch database to connect to of type elasticsearch.Elasticsearch."""
@ -135,9 +135,9 @@ class ElasticsearchDatabaseChain(Chain):
intermediate_steps: List = []
try:
intermediate_steps.append(query_inputs) # input: es generation
es_cmd = self.query_chain.run(
callbacks=_run_manager.get_child(),
**query_inputs,
es_cmd = self.query_chain.invoke(
query_inputs,
config={"callbacks": _run_manager.get_child()},
)
_run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
@ -154,9 +154,9 @@ class ElasticsearchDatabaseChain(Chain):
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
answer_inputs: dict = {"data": result, "input": input_text}
intermediate_steps.append(answer_inputs) # input: final answer
final_result = self.answer_chain.run(
callbacks=_run_manager.get_child(),
**answer_inputs,
final_result = self.answer_chain.invoke(
answer_inputs,
config={"callbacks": _run_manager.get_child()},
)
intermediate_steps.append(final_result) # output: final answer
@ -183,7 +183,7 @@ class ElasticsearchDatabaseChain(Chain):
*,
query_prompt: Optional[BasePromptTemplate] = None,
answer_prompt: Optional[BasePromptTemplate] = None,
query_output_parser: Optional[BaseLLMOutputParser] = None,
query_output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> ElasticsearchDatabaseChain:
"""Convenience method to construct ElasticsearchDatabaseChain from an LLM.
@ -199,11 +199,9 @@ class ElasticsearchDatabaseChain(Chain):
"""
query_prompt = query_prompt or DSL_PROMPT
query_output_parser = query_output_parser or SimpleJsonOutputParser()
query_chain = LLMChain(
llm=llm, prompt=query_prompt, output_parser=query_output_parser
)
query_chain = query_prompt | llm | query_output_parser
answer_prompt = answer_prompt or ANSWER_PROMPT
answer_chain = LLMChain(llm=llm, prompt=answer_prompt)
answer_chain = answer_prompt | llm | StrOutputParser()
return cls(
query_chain=query_chain,
answer_chain=answer_chain,

View File

@ -1,11 +1,10 @@
from typing import List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.llm import LLMChain
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
@ -19,5 +18,5 @@ def generate_example(
input_variables=[],
example_prompt=prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict()
chain = prompt | llm | StrOutputParser()
return chain.invoke({})

View File

@ -5,6 +5,7 @@ from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
@ -63,6 +64,15 @@ def _load_question_to_checked_assertions_chain(
return question_to_checked_assertions_chain
@deprecated(
since="0.2.13",
message=(
"See LangGraph guides for a variety of self-reflection and corrective "
"strategies for question-answering and other tasks: "
"https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/"
),
removal="1.0",
)
class LLMCheckerChain(Chain):
"""Chain for question-answering with self-verification.

View File

@ -6,6 +6,7 @@ import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.prompt import PromptTemplate
@ -65,6 +66,15 @@ def _load_sequential_chain(
return chain
@deprecated(
since="0.2.13",
message=(
"See LangGraph guides for a variety of self-reflection and corrective "
"strategies for question-answering and other tasks: "
"https://langchain-ai.github.io/langgraph/tutorials/rag/langgraph_self_rag/"
),
removal="1.0",
)
class LLMSummarizationCheckerChain(Chain):
"""Chain for question-answering with self-verification.

View File

@ -7,6 +7,7 @@ from pathlib import Path
from typing import TYPE_CHECKING, Any, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.prompts.loading import (
_load_output_parser,
load_prompt,
@ -649,6 +650,14 @@ type_to_loader_dict = {
}
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in langchain 1.0. "
"At that point chains must be imported from their respective modules."
),
removal="1.0",
)
def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
"""Load chain from Config Dict."""
if "_type" not in config:
@ -662,6 +671,14 @@ def load_chain_from_config(config: dict, **kwargs: Any) -> Chain:
return chain_loader(config, **kwargs)
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in langchain 1.0. "
"At that point chains must be imported from their respective modules."
),
removal="1.0",
)
def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
"""Unified method for loading a chain from LangChainHub or local fs."""
if isinstance(path, str) and path.startswith("lc://"):

View File

@ -8,6 +8,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun, Callbacks
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
@ -22,6 +23,16 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"Refer here for a recommended map-reduce implementation using langgraph: "
"https://langchain-ai.github.io/langgraph/how-tos/map-reduce/. See also "
"migration guide: "
"https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain/" # noqa: E501
),
)
class MapReduceChain(Chain):
"""Map-reduce chain."""

View File

@ -6,6 +6,7 @@ from langchain.chains.openai_functions.base import (
)
from langchain.chains.openai_functions.citation_fuzzy_match import (
create_citation_fuzzy_match_chain,
create_citation_fuzzy_match_runnable,
)
from langchain.chains.openai_functions.extraction import (
create_extraction_chain,
@ -32,6 +33,7 @@ __all__ = [
"create_extraction_chain_pydantic",
"create_extraction_chain",
"create_citation_fuzzy_match_chain",
"create_citation_fuzzy_match_runnable",
"create_qa_with_structure_chain",
"create_qa_with_sources_chain",
"create_structured_output_chain",

View File

@ -1,10 +1,12 @@
from typing import Iterator, List
from langchain_core.language_models import BaseLanguageModel
from langchain_core._api import deprecated
from langchain_core.language_models import BaseChatModel, BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import Runnable
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
@ -61,6 +63,57 @@ class QuestionAnswer(BaseModel):
)
def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:
"""Create a citation fuzzy match Runnable.
Example usage:
.. code-block:: python
from langchain.chains import create_citation_fuzzy_match_runnable
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o-mini")
context = "Alice has blue eyes. Bob has brown eyes. Charlie has green eyes."
question = "What color are Bob's eyes?"
chain = create_citation_fuzzy_match_runnable(llm)
chain.invoke({"question": question, "context": context})
Args:
llm: Language model to use for the chain. Must implement bind_tools.
Returns:
Runnable that can be used to answer questions with citations.
"""
if type(llm).bind_tools is BaseChatModel.bind_tools:
raise ValueError(
"Language model must implement bind_tools to use this function."
)
prompt = ChatPromptTemplate(
[
SystemMessage(
"You are a world class algorithm to answer "
"questions with correct and exact citations."
),
HumanMessagePromptTemplate.from_template(
"Answer question using the following context."
"\n\n{context}"
"\n\nQuestion: {question}"
"\n\nTips: Make sure to cite your sources, "
"and use the exact words from the context."
),
]
)
return prompt | llm.with_structured_output(QuestionAnswer)
@deprecated(
since="0.2.13",
removal="1.0",
alternative="create_citation_fuzzy_match_runnable",
)
def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
"""Create a citation fuzzy match chain.

View File

@ -6,6 +6,7 @@ from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import requests
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
@ -242,6 +243,15 @@ class SimpleRequestChain(Chain):
return {self.output_key: response}
@deprecated(
since="0.2.13",
message=(
"This function is deprecated and will be removed in langchain 1.0. "
"See API reference for replacement: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html" # noqa: E501
),
removal="1.0",
)
def get_openapi_chain(
spec: Union[OpenAPISpec, str],
llm: Optional[BaseLanguageModel] = None,
@ -255,13 +265,90 @@ def get_openapi_chain(
) -> SequentialChain:
"""Create a chain for querying an API from a OpenAPI spec.
Note: this class is deprecated. See below for a replacement implementation.
The benefits of this implementation are:
- Uses LLM tool calling features to encourage properly-formatted API requests;
- Includes async support.
.. code-block:: python
from typing import Any
from langchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn
from langchain_community.utilities.openapi import OpenAPISpec
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
# Define API spec. Can be JSON or YAML
api_spec = \"\"\"
{
"openapi": "3.1.0",
"info": {
"title": "JSONPlaceholder API",
"version": "1.0.0"
},
"servers": [
{
"url": "https://jsonplaceholder.typicode.com"
}
],
"paths": {
"/posts": {
"get": {
"summary": "Get posts",
"parameters": [
{
"name": "_limit",
"in": "query",
"required": false,
"schema": {
"type": "integer",
"example": 2
},
"description": "Limit the number of results"
}
]
}
}
}
}
\"\"\"
parsed_spec = OpenAPISpec.from_text(api_spec)
openai_fns, call_api_fn = openapi_spec_to_openai_fn(parsed_spec)
tools = [
{"type": "function", "function": fn}
for fn in openai_fns
]
prompt = ChatPromptTemplate.from_template(
"Use the provided APIs to respond to this user query:\\n\\n{query}"
)
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0).bind_tools(tools)
def _execute_tool(message) -> Any:
if tool_calls := message.tool_calls:
tool_call = message.tool_calls[0]
response = call_api_fn(name=tool_call["name"], fn_args=tool_call["args"])
response.raise_for_status()
return response.json()
else:
return message.content
chain = prompt | llm | _execute_tool
.. code-block:: python
response = chain.invoke({"query": "Get me top two posts."})
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
"""
""" # noqa: E501
try:
from langchain_community.utilities.openapi import OpenAPISpec
except ImportError as e:

View File

@ -1,5 +1,6 @@
from typing import Any, List, Optional, Type, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
@ -25,6 +26,15 @@ class AnswerWithSources(BaseModel):
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, Type[BaseModel]],
@ -95,6 +105,15 @@ def create_qa_with_structure_chain(
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/v0.2/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:

View File

@ -1,5 +1,6 @@
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_functions import (
JsonOutputFunctionsParser,
@ -29,6 +30,21 @@ Passage:
"""
@deprecated(
since="0.2.13",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"See API reference for this function for replacement: "
"<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain.html> " # noqa: E501
"You can read more about `with_structured_output` here: "
"<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
)
def create_tagging_chain(
schema: dict,
llm: BaseLanguageModel,
@ -38,6 +54,32 @@ def create_tagging_chain(
"""Create a chain that extracts information from a passage
based on a schema.
This function is deprecated. Please use `with_structured_output` instead.
See example usage below:
.. code-block:: python
from typing_extensions import Annotated, TypedDict
from langchain_anthropic import ChatAnthropic
class Joke(TypedDict):
\"\"\"Tagged joke.\"\"\"
setup: Annotated[str, ..., "The setup of the joke"]
punchline: Annotated[str, ..., "The punchline of the joke"]
# Or any other chat model that supports tools.
# Please refer to the structured output documentation to see an
# up-to-date list of which models support with_structured_output.
model = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke(
"Why did the cat cross the road? To get to the other "
"side... and then lay down in the middle of it!"
)
Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
Args:
schema: The schema of the entities to extract.
llm: The language model to use.
@ -59,6 +101,21 @@ def create_tagging_chain(
return chain
@deprecated(
since="0.2.13",
message=(
"LangChain has introduced a method called `with_structured_output` that "
"is available on ChatModels capable of tool calling. "
"See API reference for this function for replacement: "
"<https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.tagging.create_tagging_chain_pydantic.html> " # noqa: E501
"You can read more about `with_structured_output` here: "
"<https://python.langchain.com/v0.2/docs/how_to/structured_output/>. "
"If you notice other issues, please provide "
"feedback here: "
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
)
def create_tagging_chain_pydantic(
pydantic_schema: Any,
llm: BaseLanguageModel,
@ -68,6 +125,30 @@ def create_tagging_chain_pydantic(
"""Create a chain that extracts information from a passage
based on a pydantic schema.
This function is deprecated. Please use `with_structured_output` instead.
See example usage below:
.. code-block:: python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
# Please refer to the structured output documentation to see an
# up-to-date list of which models support with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke(
"Why did the cat cross the road? To get to the other "
"side... and then lay down in the middle of it!"
)
Read more here: https://python.langchain.com/v0.2/docs/how_to/structured_output/
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.

View File

@ -7,6 +7,7 @@ import re
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@ -30,6 +31,15 @@ from langchain.chains.qa_with_sources.map_reduce_prompt import (
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
),
)
class BaseQAWithSourcesChain(Chain, ABC):
"""Question answering chain with sources over documents."""
@ -198,6 +208,15 @@ class BaseQAWithSourcesChain(Chain, ABC):
return result
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
),
)
class QAWithSourcesChain(BaseQAWithSourcesChain):
"""Question answering with sources over documents."""

View File

@ -4,6 +4,7 @@ from __future__ import annotations
from typing import Any, Mapping, Optional, Protocol
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@ -151,6 +152,21 @@ def _load_refine_chain(
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/v0.2/docs/how_to/qa_sources/"
"\nSee also the following migration guides for replacements "
"based on `chain_type`:\n"
"stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
"map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
"refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
"map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
),
)
def load_qa_with_sources_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",

View File

@ -5,6 +5,7 @@ from __future__ import annotations
import json
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
from langchain_core._api import deprecated
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
@ -257,6 +258,11 @@ def get_query_constructor_prompt(
)
@deprecated(
since="0.2.13",
alternative="load_query_constructor_runnable",
removal="1.0",
)
def load_query_constructor_chain(
llm: BaseLanguageModel,
document_contents: str,
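The `alternative` named in the decorator can be used in much the same way; a minimal sketch (the document description and attributes below are illustrative):

.. code-block:: python

    from langchain.chains.query_constructor.base import load_query_constructor_runnable
    from langchain.chains.query_constructor.schema import AttributeInfo
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    attribute_info = [
        AttributeInfo(name="year", description="Release year", type="integer"),
        AttributeInfo(name="genre", description="Movie genre", type="string"),
    ]
    query_constructor = load_query_constructor_runnable(
        llm, "Brief summaries of movies", attribute_info
    )
    # Returns a StructuredQuery with the parsed filter and rewritten query:
    query_constructor.invoke({"query": "comedies released after 2010"})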

View File

@ -2,6 +2,7 @@
from typing import Any, Mapping, Optional, Protocol
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager, Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
@ -216,6 +217,20 @@ def _load_refine_chain(
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. See the following migration guides for replacements "
"based on `chain_type`:\n"
"stuff: https://python.langchain.com/v0.2/docs/versions/migrating_chains/stuff_docs_chain\n" # noqa: E501
"map_reduce: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_reduce_chain\n" # noqa: E501
"refine: https://python.langchain.com/v0.2/docs/versions/migrating_chains/refine_chain\n" # noqa: E501
"map_rerank: https://python.langchain.com/v0.2/docs/versions/migrating_chains/map_rerank_docs_chain\n" # noqa: E501
"\nSee also guides on retrieval and question-answering here: "
"https://python.langchain.com/v0.2/docs/how_to/#qa-with-rag"
),
)
def load_qa_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
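For the default `chain_type="stuff"`, the replacement named in the linked guide is roughly `create_stuff_documents_chain`, which accepts the documents directly. A minimal sketch (prompt wording and the document are illustrative):

.. code-block:: python

    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain_core.documents import Document
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-4o-mini")
    prompt = ChatPromptTemplate.from_template(
        "Answer based only on the context:\n\n{context}\n\nQuestion: {question}"
    )
    chain = create_stuff_documents_chain(llm, prompt)
    chain.invoke(
        {
            "context": [Document(page_content="Harrison worked at Kensho.")],
            "question": "Where did Harrison work?",
        }
    )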

View File

@ -28,6 +28,15 @@ from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
),
)
class BaseRetrievalQA(Chain):
"""Base class for question-answering chains."""
@ -194,7 +203,15 @@ class BaseRetrievalQA(Chain):
return {self.output_key: answer}
@deprecated(since="0.1.17", alternative="create_retrieval_chain", removal="0.3.0")
@deprecated(
since="0.1.17",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
),
)
class RetrievalQA(BaseRetrievalQA):
"""Chain for question-answering against an index.
@ -271,6 +288,15 @@ class RetrievalQA(BaseRetrievalQA):
return "retrieval_qa"
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This class is deprecated. Use the `create_retrieval_chain` constructor "
"instead. See migration guide here: "
"https://python.langchain.com/v0.2/docs/versions/migrating_chains/retrieval_qa/"
),
)
class VectorDBQA(BaseRetrievalQA):
"""Chain for question-answering against a vector database."""

View File

@ -4,6 +4,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional, Type, cast
from langchain_core._api import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
@ -19,8 +20,82 @@ from langchain.chains import LLMChain
from langchain.chains.router.base import RouterChain
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Use RunnableLambda to select from multiple prompt templates. See example "
"in API reference: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.router.llm_router.LLMRouterChain.html" # noqa: E501
),
)
class LLMRouterChain(RouterChain):
"""A router chain that uses an LLM chain to perform routing."""
"""A router chain that uses an LLM chain to perform routing.
This class is deprecated. See below for a replacement, which offers several
benefits, including streaming and batch support.
Below is an example implementation:
.. code-block:: python
from operator import itemgetter
from typing import Literal
from typing_extensions import TypedDict
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o-mini")
prompt_1 = ChatPromptTemplate.from_messages(
[
("system", "You are an expert on animals."),
("human", "{query}"),
]
)
prompt_2 = ChatPromptTemplate.from_messages(
[
("system", "You are an expert on vegetables."),
("human", "{query}"),
]
)
chain_1 = prompt_1 | llm | StrOutputParser()
chain_2 = prompt_2 | llm | StrOutputParser()
route_system = "Route the user's query to either the animal or vegetable expert."
route_prompt = ChatPromptTemplate.from_messages(
[
("system", route_system),
("human", "{query}"),
]
)
class RouteQuery(TypedDict):
\"\"\"Route query to destination.\"\"\"
destination: Literal["animal", "vegetable"]
route_chain = (
route_prompt
| llm.with_structured_output(RouteQuery)
| itemgetter("destination")
)
chain = {
"destination": route_chain, # "animal" or "vegetable"
"query": lambda x: x["query"], # pass through input query
} | RunnableLambda(
# if animal, chain_1. otherwise, chain_2.
lambda x: chain_1 if x["destination"] == "animal" else chain_2,
)
chain.invoke({"query": "what color are carrots"})
""" # noqa: E501
llm_chain: LLMChain
"""LLM chain used to perform routing"""

View File

@ -4,6 +4,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import PromptTemplate
@ -15,8 +16,82 @@ from langchain.chains.router.llm_router import LLMRouterChain, RouterOutputParse
from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Use RunnableLambda to select from multiple prompt templates. See example "
"in API reference: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.router.multi_prompt.MultiPromptChain.html" # noqa: E501
),
)
class MultiPromptChain(MultiRouteChain):
"""A multi-route chain that uses an LLM router chain to choose amongst prompts."""
"""A multi-route chain that uses an LLM router chain to choose amongst prompts.
This class is deprecated. See below for a replacement, which offers several
benefits, including streaming and batch support.
Below is an example implementation:
.. code-block:: python
from operator import itemgetter
from typing import Literal
from typing_extensions import TypedDict
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o-mini")
prompt_1 = ChatPromptTemplate.from_messages(
[
("system", "You are an expert on animals."),
("human", "{query}"),
]
)
prompt_2 = ChatPromptTemplate.from_messages(
[
("system", "You are an expert on vegetables."),
("human", "{query}"),
]
)
chain_1 = prompt_1 | llm | StrOutputParser()
chain_2 = prompt_2 | llm | StrOutputParser()
route_system = "Route the user's query to either the animal or vegetable expert."
route_prompt = ChatPromptTemplate.from_messages(
[
("system", route_system),
("human", "{query}"),
]
)
class RouteQuery(TypedDict):
\"\"\"Route query to destination.\"\"\"
destination: Literal["animal", "vegetable"]
route_chain = (
route_prompt
| llm.with_structured_output(RouteQuery)
| itemgetter("destination")
)
chain = {
"destination": route_chain, # "animal" or "vegetable"
"query": lambda x: x["query"], # pass through input query
} | RunnableLambda(
# if animal, chain_1. otherwise, chain_2.
lambda x: chain_1 if x["destination"] == "animal" else chain_2,
)
chain.invoke({"query": "what color are carrots"})
""" # noqa: E501
@property
def output_keys(self) -> List[str]:

View File

@ -2,6 +2,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Type
from langchain_core._api import deprecated
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, SystemMessage, get_buffer_string
@ -14,6 +15,14 @@ from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import SUMMARY_PROMPT
@deprecated(
since="0.2.12",
removal="1.0",
message=(
"Refer here for how to incorporate summaries of conversation history: "
"https://langchain-ai.github.io/langgraph/how-tos/memory/add-summary-conversation-history/" # noqa: E501
),
)
class SummarizerMixin(BaseModel):
"""Mixin for summarizer."""

View File

@ -1,25 +1,38 @@
import os
from pathlib import Path
import json
import pytest
from langchain.chains.openai_functions.openapi import get_openapi_chain
def test_openai_opeanapi() -> None:
chain = get_openapi_chain(
"https://www.klarna.com/us/shopping/public/openai/v0/api-docs/"
)
output = chain.run("What are some options for a men's large blue button down shirt")
assert isinstance(output, dict)
api_spec = {
"openapi": "3.0.0",
"info": {"title": "JSONPlaceholder API", "version": "1.0.0"},
"servers": [{"url": "https://jsonplaceholder.typicode.com"}],
"paths": {
"/posts": {
"get": {
"summary": "Get posts",
"parameters": [
{
"name": "_limit",
"in": "query",
"required": False,
"schema": {"type": "integer", "example": 2},
"description": "Limit the number of results",
},
],
}
}
},
}
def test_openai_opeanapi_headers() -> None:
    BRANDFETCH_API_KEY = os.environ.get("BRANDFETCH_API_KEY")
    headers = {"Authorization": f"Bearer {BRANDFETCH_API_KEY}"}
    file_path = str(
        Path(__file__).parents[2] / "examples/brandfetch-brandfetch-2.0.0-resolved.json"
    )
    chain = get_openapi_chain(file_path, headers=headers)
    output = chain.run("I want to know about nike.comgg")
    assert isinstance(output, str)
@pytest.mark.requires("openapi_pydantic")
@pytest.mark.requires("langchain_openai")
def test_openai_openapi_chain() -> None:
    from langchain_openai import ChatOpenAI
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    chain = get_openapi_chain(json.dumps(api_spec), llm)
    output = chain.invoke({"query": "Fetch the top two posts."})
    assert len(output["response"]) == 2

View File

@ -49,6 +49,7 @@ EXPECTED_ALL = [
"VectorDBQA",
"VectorDBQAWithSourcesChain",
"create_citation_fuzzy_match_chain",
"create_citation_fuzzy_match_runnable",
"create_extraction_chain",
"create_extraction_chain_pydantic",
"create_qa_with_sources_chain",