From 7de6a1b78e9e86ebe7ee99c3194cfd97022ce789 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Tue, 8 Aug 2023 22:39:08 -0700 Subject: [PATCH] parent document retriever (#8941) --- .../parent_document_retriever.ipynb | 440 ++++++++++++++++++ .../langchain/retrievers/__init__.py | 2 + .../retrievers/parent_document_retriever.py | 139 ++++++ 3 files changed, 581 insertions(+) create mode 100644 docs/extras/modules/data_connection/retrievers/parent_document_retriever.ipynb create mode 100644 libs/langchain/langchain/retrievers/parent_document_retriever.py diff --git a/docs/extras/modules/data_connection/retrievers/parent_document_retriever.ipynb b/docs/extras/modules/data_connection/retrievers/parent_document_retriever.ipynb new file mode 100644 index 0000000000..dba7e728a8 --- /dev/null +++ b/docs/extras/modules/data_connection/retrievers/parent_document_retriever.ipynb @@ -0,0 +1,440 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "34883374", + "metadata": {}, + "source": [ + "# Parent Document Retriever\n", + "\n", + "When splitting documents for retrieval, there are often conflicting desires:\n", + "\n", + "1. You may want to have small documents, so that their embeddings can most\n", + " accurately reflect their meaning. If too long, then the embeddings can\n", + " lose meaning.\n", + "2. You want to have long enough documents that the context of each chunk is\n", + " retained.\n", + "\n", + "The ParentDocumentRetriever strikes that balance by splitting and storing\n", + "small chunks of data. During retrieval, it first fetches the small chunks\n", + "but then looks up the parent ids for those chunks and returns those larger\n", + "documents.\n", + "\n", + "Note that \"parent document\" refers to the document that a small chunk\n", + "originated from. This can either be the whole raw document OR a larger\n", + "chunk." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "8b6e74b2", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.retrievers import ParentDocumentRetriever" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "1d17af96", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.vectorstores import Chroma\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain.storage import InMemoryStore\n", + "from langchain.document_loaders import TextLoader" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "604ff981", + "metadata": {}, + "outputs": [], + "source": [ + "loaders = [\n", + " TextLoader('../../paul_graham_essay.txt'),\n", + " TextLoader('../../state_of_the_union.txt'),\n", + "]\n", + "docs = []\n", + "for loader in loaders:\n", + " docs.extend(loader.load())" + ] + }, + { + "cell_type": "markdown", + "id": "d3943f72", + "metadata": {}, + "source": [ + "## Retrieving Full Documents\n", + "\n", + "In this mode, we want to retrieve the full documents. Therefore, we only specify a child splitter." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1a8b2e5f", + "metadata": {}, + "outputs": [], + "source": [ + "# This text splitter is used to create the child documents\n", + "\n", + "child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n", + "# The vectorstore to use to index the child chunks\n", + "vectorstore = Chroma(\n", + " collection_name=\"full_documents\",\n", + " embedding_function=OpenAIEmbeddings()\n", + ")\n", + "# The storage layer for the parent documents\n", + "store = InMemoryStore()\n", + "retriever = ParentDocumentRetriever(\n", + " vectorstore=vectorstore, \n", + " docstore=store, \n", + " child_splitter=child_splitter,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2b107935", + "metadata": {}, + "outputs": [], + "source": [ + "retriever.add_documents(docs)" + ] + }, + { + "cell_type": "markdown", + "id": "d05b97b7", + "metadata": {}, + "source": [ + "This should yield two keys, because we added two documents." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "30e3812b", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['05fe8d8a-bf60-4f87-b576-4351b23df266',\n", + " '571cc9e5-9ef7-4f6c-b800-835c83a1858b']" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list(store.yield_keys())" + ] + }, + { + "cell_type": "markdown", + "id": "f895d62b", + "metadata": {}, + "source": [ + "Let's now call the vectorstore search functionality - we should see that it returns small chunks (since we're storing the small chunks)." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "b261c02c", + "metadata": {}, + "outputs": [], + "source": [ + "sub_docs = vectorstore.similarity_search(\"justice breyer\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "5108222f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.\n" + ] + } + ], + "source": [ + "print(sub_docs[0].page_content)" + ] + }, + { + "cell_type": "markdown", + "id": "bda8ed5a", + "metadata": {}, + "source": [ + "Let's now retrieve from the overall retriever. This should return the large documents, since it returns the documents where the smaller chunks are located." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "419a91c4", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "cf10d250", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "38539" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(retrieved_docs[0].page_content)" + ] + }, + { + "cell_type": "markdown", + "id": "14f813a5", + "metadata": {}, + "source": [ + "## Retrieving Larger Chunks\n", + "\n", + "Sometimes, the full documents can be too big to retrieve as they are. 
In that case, what we really want to do is first split the raw documents into larger chunks, and then split those larger chunks into smaller ones. We then index the smaller chunks, but on retrieval we retrieve the larger chunks (but still not the full documents)." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "b6f9a4f0", + "metadata": {}, + "outputs": [], + "source": [ + "# This text splitter is used to create the parent documents\n", + "parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)\n", + "# This text splitter is used to create the child documents\n", + "# It should create documents smaller than the parent\n", + "child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n", + "# The vectorstore to use to index the child chunks\n", + "vectorstore = Chroma(collection_name=\"split_parents\", embedding_function=OpenAIEmbeddings())\n", + "# The storage layer for the parent documents\n", + "store = InMemoryStore()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "19478ff3", + "metadata": {}, + "outputs": [], + "source": [ + "retriever = ParentDocumentRetriever(\n", + " vectorstore=vectorstore, \n", + " docstore=store, \n", + " child_splitter=child_splitter,\n", + " parent_splitter=parent_splitter,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "fe16e620", + "metadata": {}, + "outputs": [], + "source": [ + "retriever.add_documents(docs)" + ] + }, + { + "cell_type": "markdown", + "id": "64ad3c8c", + "metadata": {}, + "source": [ + "We can see that there are many more than two documents now - these are the larger chunks." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "24d81886", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "66" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(list(store.yield_keys()))" + ] + }, + { + "cell_type": "markdown", + "id": "baaef673", + "metadata": {}, + "source": [ + "Let's make sure the underlying vectorstore still retrieves the small chunks." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "b1c859de", + "metadata": {}, + "outputs": [], + "source": [ + "sub_docs = vectorstore.similarity_search(\"justice breyer\")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "6fffa2eb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
\n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.\n" + ] + } + ], + "source": [ + "print(sub_docs[0].page_content)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "3a3202df", + "metadata": {}, + "outputs": [], + "source": [ + "retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "684fdb2c", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1849" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(retrieved_docs[0].page_content)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "9f17f662", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n", + "\n", + "We cannot let this happen. \n", + "\n", + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n", + "\n", + "A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n", + "\n", + "And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n", + "\n", + "We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n", + "\n", + "We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n", + "\n", + "We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. 
\n", + "\n", + "We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n" + ] + } + ], + "source": [ + "print(retrieved_docs[0].page_content)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "facfdacb", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/libs/langchain/langchain/retrievers/__init__.py b/libs/langchain/langchain/retrievers/__init__.py index f0922460a1..1e5497c93c 100644 --- a/libs/langchain/langchain/retrievers/__init__.py +++ b/libs/langchain/langchain/retrievers/__init__.py @@ -40,6 +40,7 @@ from langchain.retrievers.merger_retriever import MergerRetriever from langchain.retrievers.metal import MetalRetriever from langchain.retrievers.milvus import MilvusRetriever from langchain.retrievers.multi_query import MultiQueryRetriever +from langchain.retrievers.parent_document_retriever import ParentDocumentRetriever from langchain.retrievers.pinecone_hybrid_search import PineconeHybridSearchRetriever from langchain.retrievers.pubmed import PubMedRetriever from langchain.retrievers.re_phraser import RePhraseQueryRetriever @@ -90,4 +91,5 @@ __all__ = [ "RePhraseQueryRetriever", "WebResearchRetriever", "EnsembleRetriever", + "ParentDocumentRetriever", ] diff --git a/libs/langchain/langchain/retrievers/parent_document_retriever.py b/libs/langchain/langchain/retrievers/parent_document_retriever.py new file mode 100644 index 0000000000..ce69067134 --- /dev/null +++ b/libs/langchain/langchain/retrievers/parent_document_retriever.py @@ -0,0 +1,139 @@ +import uuid +from typing import Any, Dict, List, Optional + +from langchain.callbacks.base import Callbacks +from langchain.schema.document import Document +from langchain.schema.retriever import BaseRetriever +from langchain.schema.storage import BaseStore +from langchain.text_splitter import TextSplitter +from langchain.vectorstores.base import VectorStore + + +class ParentDocumentRetriever(BaseRetriever): + """Fetches small chunks, then fetches their parent documents. + + When splitting documents for retrieval, there are often conflicting desires: + + 1. You may want to have small documents, so that their embeddings can most + accurately reflect their meaning. If too long, then the embeddings can + lose meaning. + 2. You want to have long enough documents that the context of each chunk is + retained. + + The ParentDocumentRetriever strikes that balance by splitting and storing + small chunks of data. During retrieval, it first fetches the small chunks + but then looks up the parent ids for those chunks and returns those larger + documents. + + Note that "parent document" refers to the document that a small chunk + originated from. This can either be the whole raw document OR a larger + chunk. + + Examples: + ... 
code-block:: python + + # Imports + from langchain.vectorstores import Chroma + from langchain.embeddings import OpenAIEmbeddings + from langchain.text_splitter import RecursiveCharacterTextSplitter + from langchain.storage import InMemoryStore + + # This text splitter is used to create the parent documents + parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000) + # This text splitter is used to create the child documents + # It should create documents smaller than the parent + child_splitter = RecursiveCharacterTextSplitter(chunk_size=400) + # The vectorstore to use to index the child chunks + vectorstore = Chroma(embedding_function=OpenAIEmbeddings()) + # The storage layer for the parent documents + store = InMemoryStore() + + # Initialize the retriever + retriever = ParentDocumentRetriever( + vectorstore=vectorstore, + docstore=store, + child_splitter=child_splitter, + parent_splitter=parent_splitter, + ) + """ + + vectorstore: VectorStore + """The underlying vectorstore to use to store small chunks + and their embedding vectors""" + docstore: BaseStore[str, Document] + """The storage layer for the parent documents""" + child_splitter: TextSplitter + """The text splitter to use to create child documents.""" + id_key: str = "doc_id" + """The key to use to track the parent id. This will be stored in the + metadata of child documents.""" + parent_splitter: Optional[TextSplitter] = None + """The text splitter to use to create parent documents. + If None, then the parent documents will be the raw documents passed in.""" + + def get_relevant_documents( + self, + query: str, + *, + callbacks: Callbacks = None, + tags: Optional[List[str]] = None, + metadata: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ) -> List[Document]: + sub_docs = self.vectorstore.similarity_search(query) + # We do this to maintain the order of the ids that are returned + ids = [] + for d in sub_docs: + if d.metadata[self.id_key] not in ids: + ids.append(d.metadata[self.id_key]) + docs = self.docstore.mget(ids) + return [d for d in docs if d is not None] + + def add_documents( + self, + documents: List[Document], + ids: Optional[List[str]] = None, + add_to_docstore: bool = True, + ) -> None: + """Adds documents to the docstore and vectorstore. + + Args: + documents: List of documents to add + ids: Optional list of ids for documents. If provided, should be the same + length as the list of documents. Can be provided if parent documents + are already in the document store and you don't want to re-add + to the docstore. If not provided, random UUIDs will be used as + ids. + add_to_docstore: Boolean of whether to add documents to docstore. + This can be False if and only if `ids` are provided. You may want + to set this to False if the documents are already in the docstore + and you don't want to re-add them. + """ + if self.parent_splitter is not None: + documents = self.parent_splitter.split_documents(documents) + if ids is None: + doc_ids = [str(uuid.uuid4()) for _ in documents] + if not add_to_docstore: + raise ValueError( + "If ids are not passed in, `add_to_docstore` MUST be True" + ) + else: + if len(documents) != len(ids): + raise ValueError( + "Got uneven list of documents and ids. " + "If `ids` is provided, should be same length as `documents`." 
+ ) + doc_ids = ids + + docs = [] + full_docs = [] + for i, doc in enumerate(documents): + _id = doc_ids[i] + sub_docs = self.child_splitter.split_documents([doc]) + for _doc in sub_docs: + _doc.metadata[self.id_key] = _id + docs.extend(sub_docs) + full_docs.append((_id, doc)) + self.vectorstore.add_documents(docs) + if add_to_docstore: + self.docstore.mset(full_docs)
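A usage note on `add_documents`: as the docstring above describes, you can pass your own `ids` and set `add_to_docstore=False` when the parent documents are already present in the document store, so that only the child chunks get (re)indexed in the vectorstore. Below is a minimal sketch of that path, assuming the full-document setup from the notebook (a `retriever` with no `parent_splitter`, plus the `store` and `docs` objects defined there); the ids are generated on the spot purely for illustration.

.. code-block:: python

    import uuid

    # Suppose the parent documents are already in the docstore under known ids
    # (these ids are illustrative; any stable strings would do).
    doc_ids = [str(uuid.uuid4()) for _ in docs]
    store.mset(list(zip(doc_ids, docs)))

    # Index only the child chunks; skip re-writing the parents to the docstore.
    retriever.add_documents(docs, ids=doc_ids, add_to_docstore=False)

    # Retrieval still maps matching child chunks back to the stored parents.
    retrieved_docs = retriever.get_relevant_documents("justice breyer")

Note that when a `parent_splitter` is set, the length check on `ids` runs after the parent split, so the ids would have to correspond to the split parent chunks rather than to the raw input documents.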