diff --git a/docs/ecosystem/deeplake.md b/docs/ecosystem/deeplake.md new file mode 100644 index 0000000000..ca51051d48 --- /dev/null +++ b/docs/ecosystem/deeplake.md @@ -0,0 +1,25 @@ +# Deep Lake + +This page covers how to use the Deep Lake ecosystem within LangChain. +It is broken into two parts: installation and setup, and then references to specific Deep Lake wrappers. For more information: + +1. Here are the [whitepaper](https://www.deeplake.ai/whitepaper) and [academic paper](https://arxiv.org/pdf/2209.10785.pdf) for Deep Lake + +2. Here is a set of additional resources available for review: [Deep Lake](https://github.com/activeloopai/deeplake), [Getting Started](https://docs.activeloop.ai/getting-started) and [Tutorials](https://docs.activeloop.ai/hub-tutorials) + +## Installation and Setup +- Install the Python package with `pip install deeplake` + +## Wrappers + +### VectorStore + +There exists a wrapper around Deep Lake, a data lake for deep learning applications, allowing you to use it as a vectorstore (for now), whether for semantic search or example selection. + +To import this vectorstore: +```python +from langchain.vectorstores import DeepLake +``` + + +For a more detailed walkthrough of the Deep Lake wrapper, see [this notebook](../modules/indexes/vectorstore_examples/deeplake.ipynb) diff --git a/docs/modules/indexes/how_to_guides.rst b/docs/modules/indexes/how_to_guides.rst index a68c991acb..de82bc0ed6 100644 --- a/docs/modules/indexes/how_to_guides.rst +++ b/docs/modules/indexes/how_to_guides.rst @@ -36,6 +36,8 @@ In the below guides, we cover different types of vectorstores and how to use the `Chroma <./vectorstore_examples/chroma.html>`_: A walkthrough of how to use the Chroma vectorstore wrapper. +`DeepLake <./vectorstore_examples/deeplake.html>`_: A walkthrough of how to use the Deep Lake data lake as a vectorstore wrapper. + `FAISS <./vectorstore_examples/faiss.html>`_: A walkthrough of how to use the FAISS vectorstore wrapper. `Elastic Search <./vectorstore_examples/elasticsearch.html>`_: A walkthrough of how to use the ElasticSearch wrapper. diff --git a/docs/modules/indexes/vectorstore_examples/deeplake.ipynb b/docs/modules/indexes/vectorstore_examples/deeplake.ipynb new file mode 100644 index 0000000000..3db7619c46 --- /dev/null +++ b/docs/modules/indexes/vectorstore_examples/deeplake.ipynb @@ -0,0 +1,234 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Deep Lake\n", + "\n", + "This notebook showcases basic functionality related to Deep Lake. While Deep Lake can store embeddings, it is capable of storing any type of data. It is a fully fledged serverless data lake with version control, a query engine, and streaming dataloaders for deep learning frameworks. 
\n", + "\n", + "For more information, please see the Deep Lake [documentation](https://docs.activeloop.ai) or [API reference](https://docs.deeplake.ai)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import DeepLake\n", + "from langchain.document_loaders import TextLoader" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.document_loaders import TextLoader\n", + "loader = TextLoader('../../state_of_the_union.txt')\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating ingest: 100%|██████████| 41/41 [00:00<00:00\n" + ] + } + ], + "source": [ + "db = DeepLake.from_documents(docs, embeddings)\n", + "\n", + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs = db.similarity_search(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n", + "\n", + "We cannot let this happen. \n", + "\n", + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" + ] + } + ], + "source": [ + "print(docs[0].page_content)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deep Lake datasets on cloud or local\n", + "By default, Deep Lake datasets are stored in memory. To persist a dataset locally or to any object storage, simply provide a path to the dataset. 
You can retrieve your token from [app.activeloop.ai](https://app.activeloop.ai/)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/bin/bash: -c: line 0: syntax error near unexpected token `newline'\n", + "/bin/bash: -c: line 0: `activeloop login -t '\n" + ] + } + ], + "source": [ + "!activeloop login -t " + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Evaluating ingest: 100%|██████████| 4/4 [00:00<00:00\n" + ] + } + ], + "source": [ + "# Embed and store the texts\n", + "dataset_path = \"hub://{username}/{dataset_name}\" # can also be ./local/path (much faster locally), s3://bucket/path/to/dataset, gcs://, etc.\n", + "\n", + "embedding = OpenAIEmbeddings()\n", + "vectordb = DeepLake.from_documents(documents=docs, embedding=embedding, dataset_path=dataset_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n", + "\n", + "We cannot let this happen. \n", + "\n", + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" + ] + } + ], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs = vectordb.similarity_search(query)\n", + "print(docs[0].page_content)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Dataset(path='./local/path', tensors=['embedding', 'ids', 'metadata', 'text'])\n", + "\n", + " tensor htype shape dtype compression\n", + " ------- ------- ------- ------- ------- \n", + " embedding generic (4, 1536) None None \n", + " ids text (4, 1) str None \n", + " metadata json (4, 1) str None \n", + " text text (4, 1) str None \n" + ] + } + ], + "source": [ + "vectordb.ds.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "embeddings = vectordb.ds.embedding.numpy()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "7b14174bb6f9d4680b62ac2a6390e1ce94fbfabf172a10844870451d539c58d6" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md index 8d7e6b2d7c..2691358b72 100644 --- a/docs/reference/integrations.md +++ b/docs/reference/integrations.md @@ -50,6 +50,8 @@ The following use cases require specific installs and api keys: - _OpenSearch_: - Install requirements with `pip install opensearch-py` - If you want to set up OpenSearch on your local, [here](https://opensearch.org/docs/latest/) +- _DeepLake_: + - Install requirements with `pip install deeplake` If you are using the `NLTKTextSplitter` or the `SpacyTextSplitter`, you will also need to install the appropriate models. For example, if you want to use the `SpacyTextSplitter`, you will need to install the `en_core_web_sm` model with `python -m spacy download en_core_web_sm`. Similarly, if you want to use the `NLTKTextSplitter`, you will need to install the `punkt` model with `python -m nltk.downloader punkt`. 
diff --git a/langchain/vectorstores/__init__.py b/langchain/vectorstores/__init__.py index d250980ce2..4f86ab6e1b 100644 --- a/langchain/vectorstores/__init__.py +++ b/langchain/vectorstores/__init__.py @@ -2,6 +2,7 @@ from langchain.vectorstores.atlas import AtlasDB from langchain.vectorstores.base import VectorStore from langchain.vectorstores.chroma import Chroma +from langchain.vectorstores.deeplake import DeepLake from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch from langchain.vectorstores.faiss import FAISS from langchain.vectorstores.milvus import Milvus @@ -21,4 +22,5 @@ __all__ = [ "Chroma", "OpenSearchVectorSearch", "AtlasDB", + "DeepLake", ] diff --git a/langchain/vectorstores/deeplake.py b/langchain/vectorstores/deeplake.py new file mode 100644 index 0000000000..c003b7df42 --- /dev/null +++ b/langchain/vectorstores/deeplake.py @@ -0,0 +1,211 @@ +"""Wrapper around Activeloop Deep Lake.""" +from __future__ import annotations + +import logging +import uuid +from typing import Any, Iterable, List, Optional, Sequence + +import numpy as np + +from langchain.docstore.document import Document +from langchain.embeddings.base import Embeddings +from langchain.vectorstores.base import VectorStore + +logger = logging.getLogger() + + +def L2_search( + query_embedding: np.ndarray, data_vectors: np.ndarray, k: int = 4 +) -> list: + """Naive L2 search for nearest neighbors.""" + # Calculate the L2 distance between the query_vector and all data_vectors + distances = np.linalg.norm(data_vectors - query_embedding, axis=1) + + # Sort the distances and return the indices of the k nearest vectors + nearest_indices = np.argsort(distances)[:k] + return nearest_indices.tolist() + + +class DeepLake(VectorStore): + """Wrapper around Deep Lake, a data lake for deep learning applications. + + It not only stores embeddings, but also the original data and queries with + version control automatically enabled. + + It is more than just a vector store. You can use the dataset to fine-tune + your own LLMs or use it for other downstream tasks. + + We implement a naive similarity search, but it can be extended with Tensor + Query Language (TQL) for production use cases over billions of rows. + + To use, you should have the ``deeplake`` python package installed. + + Example: + .. code-block:: python + + from langchain.vectorstores import DeepLake + from langchain.embeddings.openai import OpenAIEmbeddings + + embeddings = OpenAIEmbeddings() + vectorstore = DeepLake("langchain_store", embedding_function=embeddings) + """ + + _LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "mem://langchain" + + def __init__( + self, + dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, + token: Optional[str] = None, + embedding_function: Optional[Embeddings] = None, + ) -> None: + """Initialize with Deep Lake client.""" + + try: + import deeplake + except ImportError: + raise ValueError( + "Could not import deeplake python package. " + "Please install it with `pip install deeplake`." 
+ ) + self._deeplake = deeplake + + if deeplake.exists(dataset_path, token=token): + self.ds = deeplake.load(dataset_path, token=token) + logger.warning( + f"Deep Lake Dataset in {dataset_path} already exists, " + f"loading it from storage" + ) + self.ds.summary() + else: + self.ds = deeplake.empty(dataset_path, token=token, overwrite=True) + with self.ds: + self.ds.create_tensor("text", htype="text") + self.ds.create_tensor("metadata", htype="json") + self.ds.create_tensor("embedding", htype="generic") + self.ds.create_tensor("ids", htype="text") + + self._embedding_function = embedding_function + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + """Run more texts through the embeddings and add to the vectorstore. + + Args: + texts (Iterable[str]): Texts to add to the vectorstore. + metadatas (Optional[List[dict]], optional): Optional list of metadatas. + ids (Optional[List[str]], optional): Optional list of IDs. + + Returns: + List[str]: List of IDs of the added texts. + """ + + if ids is None: + ids = [str(uuid.uuid1()) for _ in texts] + + text_list = list(texts) + + if self._embedding_function is None: + embeddings: Sequence[Optional[List[float]]] = [None] * len(text_list) + else: + embeddings = self._embedding_function.embed_documents(text_list) + + if metadatas is None: + metadatas_to_use: Sequence[Optional[dict]] = [None] * len(text_list) + else: + metadatas_to_use = metadatas + + elements = zip(text_list, embeddings, metadatas_to_use, ids) + + @self._deeplake.compute + def ingest(sample_in: list, sample_out: list) -> None: + s = { + "text": sample_in[0], + "embedding": sample_in[1], + "metadata": sample_in[2], + "ids": sample_in[3], + } + sample_out.append(s) + + ingest().eval(list(elements), self.ds) + self.ds.commit() + + return ids + + def similarity_search( + self, query: str, k: int = 4, **kwargs: Any + ) -> List[Document]: + """Return docs most similar to query.""" + if self._embedding_function is None: + self.ds.summary() + ds_view = self.ds.filter(lambda x: query in x["text"].data()["value"]) + else: + query_emb = np.array(self._embedding_function.embed_query(query)) + embeddings = self.ds.embedding.numpy() + indices = L2_search(query_emb, embeddings, k=k) + ds_view = self.ds[indices] + + docs = [ + Document( + page_content=el["text"].data()["value"], + metadata=el["metadata"].data()["value"], + ) + for el in ds_view + ] + return docs + + @classmethod + def from_texts( + cls, + texts: List[str], + embedding: Optional[Embeddings] = None, + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, + **kwargs: Any, + ) -> DeepLake: + """Create a Deep Lake dataset from a list of raw documents. + + If a dataset_path is specified (other than the default in-memory path), + the dataset will be persisted there. Otherwise, the data will be ephemeral in-memory. + + Args: + dataset_path (str): The full path to the dataset. Can be: + - a Deep Lake cloud path of the form ``hub://username/datasetname``. + To write to Deep Lake cloud datasets, + ensure that you are logged in to Deep Lake + (use 'activeloop login' from command line) + - an s3 path of the form ``s3://bucketname/path/to/dataset``. + Credentials are required in either the environment or + passed to the creds argument. + - a local file system path of the form ``./path/to/dataset`` or + ``~/path/to/dataset`` or ``path/to/dataset``. 
+ - a memory path of the form ``mem://path/to/dataset`` which doesn't + save the dataset but keeps it in memory instead. + Should be used only for testing as it does not persist. + documents (List[Document]): List of documents to add. + embedding (Optional[Embeddings]): Embedding function. Defaults to None. + metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. + ids (Optional[List[str]]): List of document IDs. Defaults to None. + + Returns: + DeepLake: Deep Lake dataset. + """ + deeplake_dataset = cls( + dataset_path=dataset_path, + embedding_function=embedding, + ) + deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids) + return deeplake_dataset + + def delete_dataset(self) -> None: + """Delete the collection.""" + self.ds.delete() + + def persist(self) -> None: + """Persist the collection.""" + self.ds.flush() diff --git a/poetry.lock b/poetry.lock index 1eb608e5ea..18aa9dd091 100644 --- a/poetry.lock +++ b/poetry.lock @@ -602,6 +602,46 @@ lxml = ">=4.9,<5.0" pycryptodomex = ">=3.8,<4.0" urllib3 = ">=1.25.3,<3" +[[package]] +name = "boto3" +version = "1.26.79" +description = "The AWS SDK for Python" +category = "main" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "boto3-1.26.79-py3-none-any.whl", hash = "sha256:049de631cc03726a14b8eb24ac9ec2a48b0624197796f36166da809fdc9b9a7f"}, + {file = "boto3-1.26.79.tar.gz", hash = "sha256:73d7bd1f16118ef0dfe936e0420cd76b02d1aedb75330ebda51168458ab752ac"}, +] + +[package.dependencies] +botocore = ">=1.29.79,<1.30.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.6.0,<0.7.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.29.79" +description = "Low-level, data-driven core of boto 3." +category = "main" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "botocore-1.29.79-py3-none-any.whl", hash = "sha256:5f254f019e8641f8b2ba6dddc1f7541e8c6d25d976802392710b2fc4bac925b1"}, + {file = "botocore-1.29.79.tar.gz", hash = "sha256:c7ded44062bed3b928944cfb09e1578ed3fed0e4c98de4f233f3c2056a8d491e"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = ">=1.25.4,<1.27" + +[package.extras] +crt = ["awscrt (==0.16.9)"] + [[package]] name = "cachetools" version = "5.3.0" @@ -1016,6 +1056,42 @@ files = [ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +[[package]] +name = "deeplake" +version = "3.2.9" +description = "Activeloop Deep Lake" +category = "main" +optional = true +python-versions = "*" +files = [ + {file = "deeplake-3.2.9.tar.gz", hash = "sha256:c1c4de39ea4c3e9cf822daf882bcd480638c042f2d99e0a50325588f15e41c6f"}, +] + +[package.dependencies] +boto3 = "*" +click = "*" +hub = ">=2.8.7" +humbug = ">=0.2.6" +numcodecs = "*" +numpy = "*" +pathos = "*" +pillow = "*" +pyjwt = "*" +tqdm = "*" + +[package.extras] +all = ["IPython", "av (>=8.1.0)", "flask", "google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)", "laspy", "libdeeplake (==0.0.37)", "nibabel", "oauth2client (>=4.1.3,<4.2.0)", "pydicom"] +audio = ["av (>=8.1.0)"] +av = ["av (>=8.1.0)"] +dicom = ["nibabel", "pydicom"] +enterprise = ["libdeeplake (==0.0.37)", "pyjwt"] +gcp = ["google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)"] +gdrive = ["google-api-python-client (>=2.31.0,<2.32.0)", 
"google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "oauth2client (>=4.1.3,<4.2.0)"] +medical = ["nibabel", "pydicom"] +point-cloud = ["laspy"] +video = ["av (>=8.1.0)"] +visualizer = ["IPython", "flask"] + [[package]] name = "defusedxml" version = "0.7.1" @@ -1191,6 +1267,18 @@ elastic-transport = ">=8,<9" async = ["aiohttp (>=3,<4)"] requests = ["requests (>=2.4.0,<3.0.0)"] +[[package]] +name = "entrypoints" +version = "0.4" +description = "Discover and load entry points from installed packages." +category = "main" +optional = true +python-versions = ">=3.6" +files = [ + {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, + {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"}, +] + [[package]] name = "exceptiongroup" version = "1.1.0" @@ -1906,6 +1994,21 @@ cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<13)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (>=1.0.0,<2.0.0)"] +[[package]] +name = "hub" +version = "3.0.1" +description = "Activeloop Deep Lake" +category = "main" +optional = true +python-versions = "*" +files = [ + {file = "hub-3.0.1-py3-none-any.whl", hash = "sha256:16d20fbf44700b438dc372d697ee683f0d7c58b178ad01d2daf81efe88bc692d"}, + {file = "hub-3.0.1.tar.gz", hash = "sha256:3866425914ed522090f0634887f06ff77517e8e6d7b9370e42009d774b725514"}, +] + +[package.dependencies] +deeplake = "*" + [[package]] name = "huggingface-hub" version = "0.12.0" @@ -1937,6 +2040,25 @@ testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "isort (>=5.5.4)", "jedi" torch = ["torch"] typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] +[[package]] +name = "humbug" +version = "0.2.8" +description = "Humbug: Do you build developer tools? Humbug helps you know your users." 
+category = "main" +optional = true +python-versions = "*" +files = [ + {file = "humbug-0.2.8-py3-none-any.whl", hash = "sha256:e6f0b69843af2e17bf4fdd6ca0537c062889ec8ced5e5dd5b19919e0736d10d2"}, + {file = "humbug-0.2.8.tar.gz", hash = "sha256:efa09dce7e88a49ae14d6456e4b754c11fbe69ccd9c9cf6f8c6bec245e4b88b2"}, +] + +[package.dependencies] +requests = "*" + +[package.extras] +dev = ["black", "mypy", "types-dataclasses", "types-pkg-resources", "types-requests", "wheel"] +distribute = ["setuptools", "twine", "wheel"] + [[package]] name = "hyperframe" version = "6.0.1" @@ -2200,6 +2322,18 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + [[package]] name = "joblib" version = "1.2.0" @@ -2997,6 +3131,33 @@ files = [ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] +[[package]] +name = "multiprocess" +version = "0.70.14" +description = "better multiprocessing and multithreading in python" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "multiprocess-0.70.14-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:560a27540daef4ce8b24ed3cc2496a3c670df66c96d02461a4da67473685adf3"}, + {file = "multiprocess-0.70.14-pp37-pypy37_pp73-manylinux_2_24_i686.whl", hash = "sha256:bfbbfa36f400b81d1978c940616bc77776424e5e34cb0c94974b178d727cfcd5"}, + {file = "multiprocess-0.70.14-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:89fed99553a04ec4f9067031f83a886d7fdec5952005551a896a4b6a59575bb9"}, + {file = "multiprocess-0.70.14-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:40a5e3685462079e5fdee7c6789e3ef270595e1755199f0d50685e72523e1d2a"}, + {file = "multiprocess-0.70.14-pp38-pypy38_pp73-manylinux_2_24_i686.whl", hash = "sha256:44936b2978d3f2648727b3eaeab6d7fa0bedf072dc5207bf35a96d5ee7c004cf"}, + {file = "multiprocess-0.70.14-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:e628503187b5d494bf29ffc52d3e1e57bb770ce7ce05d67c4bbdb3a0c7d3b05f"}, + {file = "multiprocess-0.70.14-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d5da0fc84aacb0e4bd69c41b31edbf71b39fe2fb32a54eaedcaea241050855c"}, + {file = "multiprocess-0.70.14-pp39-pypy39_pp73-manylinux_2_24_i686.whl", hash = "sha256:6a7b03a5b98e911a7785b9116805bd782815c5e2bd6c91c6a320f26fd3e7b7ad"}, + {file = "multiprocess-0.70.14-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:cea5bdedd10aace3c660fedeac8b087136b4366d4ee49a30f1ebf7409bce00ae"}, + {file = "multiprocess-0.70.14-py310-none-any.whl", hash = "sha256:7dc1f2f6a1d34894c8a9a013fbc807971e336e7cc3f3ff233e61b9dc679b3b5c"}, + {file = "multiprocess-0.70.14-py37-none-any.whl", hash = "sha256:93a8208ca0926d05cdbb5b9250a604c401bed677579e96c14da3090beb798193"}, + {file = "multiprocess-0.70.14-py38-none-any.whl", hash = "sha256:6725bc79666bbd29a73ca148a0fb5f4ea22eed4a8f22fce58296492a02d18a7b"}, + {file = "multiprocess-0.70.14-py39-none-any.whl", hash = "sha256:63cee628b74a2c0631ef15da5534c8aedbc10c38910b9c8b18dcd327528d1ec7"}, + {file = "multiprocess-0.70.14.tar.gz", hash = 
"sha256:3eddafc12f2260d27ae03fe6069b12570ab4764ab59a75e81624fac453fbf46a"}, +] + +[package.dependencies] +dill = ">=0.3.6" + [[package]] name = "murmurhash" version = "1.0.9" @@ -3445,6 +3606,39 @@ jupyter-server = ">=1.8,<3" [package.extras] test = ["pytest", "pytest-console-scripts", "pytest-tornasync"] +[[package]] +name = "numcodecs" +version = "0.11.0" +description = "A Python package providing buffer compression and transformation codecs for use" +category = "main" +optional = true +python-versions = ">=3.8" +files = [ + {file = "numcodecs-0.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bc116752be45b4f9dca4315e5a2b4185e3b46f68c997dbb84aef334ceb5a1d"}, + {file = "numcodecs-0.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c27dfca402f69fbfa01c46fb572086e77f38121192160cc8ed1177dc30702c52"}, + {file = "numcodecs-0.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:0fabc7dfdf64a9555bf8a34911e05b415793c67a1377207dc79cd96342291fa1"}, + {file = "numcodecs-0.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7dae3f5678f247336c84e7315a0c59a4fec7c33eb7db72d78ff5c776479a812e"}, + {file = "numcodecs-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32697785b786bb0039d3feeaabdc10f25eda6c149700cde954653aaa47637832"}, + {file = "numcodecs-0.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:8c2f36b21162c6ebccc05d3fe896f86b91dcf8709946809f730cc23a37f8234d"}, + {file = "numcodecs-0.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c240858bf29e0ff254b1db60430e8b2658b8c8328b684f80033289d94807a7c"}, + {file = "numcodecs-0.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee5bda16e9d26a7a39fc20b6c1cec23b4debc314df5cfae3ed505149c2eeafc4"}, + {file = "numcodecs-0.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:bd05cdb853c7bcfde2efc809a9df2c5e205b96f70405b810e5788b45d0d81f73"}, + {file = "numcodecs-0.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:694dc2e80b1f169b7deb14bdd0a04b20e5f17ef32cb0f81b71ab690406ec6bd9"}, + {file = "numcodecs-0.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf3925eeb37aed0e6c04d7fb9614133a3c8426dc77f8bda54c99c601a44b3bd3"}, + {file = "numcodecs-0.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:11596b71267417425ea8afb407477a67d684f434c8b07b1dd59c25a97d5c3ccb"}, + {file = "numcodecs-0.11.0.tar.gz", hash = "sha256:6c058b321de84a1729299b0eae4d652b2e48ea1ca7f9df0da65cb13470e635eb"}, +] + +[package.dependencies] +entrypoints = "*" +numpy = ">=1.7" + +[package.extras] +docs = ["mock", "numpydoc", "sphinx", "sphinx-issues"] +msgpack = ["msgpack"] +test = ["coverage", "flake8", "pytest", "pytest-cov"] +zfpy = ["zfpy (>=1.0.0)"] + [[package]] name = "numpy" version = "1.24.2" @@ -3669,6 +3863,24 @@ files = [ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] testing = ["docopt", "pytest (<6.0.0)"] +[[package]] +name = "pathos" +version = "0.3.0" +description = "parallel graph management and execution in heterogeneous computing" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "pathos-0.3.0-py3-none-any.whl", hash = "sha256:b1f5a79b1c79a594330d451832642ee5bb61dd77dc75ba9e5c72087c77e8994c"}, + {file = "pathos-0.3.0.tar.gz", hash = "sha256:24fa8db51fbd9284da8e191794097c4bb2aa3fce411090e57af6385e61b97e09"}, +] + +[package.dependencies] +dill = ">=0.3.6" +multiprocess = ">=0.70.14" +pox = ">=0.3.2" +ppft = ">=1.7.6.6" + [[package]] name = "pathspec" version = "0.11.0" @@ -3913,6 +4125,33 @@ files = [ dev = ["pre-commit", 
"tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pox" +version = "0.3.2" +description = "utilities for filesystem exploration and automated builds" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "pox-0.3.2-py3-none-any.whl", hash = "sha256:56fe2f099ecd8a557b8948082504492de90e8598c34733c9b1fdeca8f7b6de61"}, + {file = "pox-0.3.2.tar.gz", hash = "sha256:e825225297638d6e3d49415f8cfb65407a5d15e56f2fb7fe9d9b9e3050c65ee1"}, +] + +[[package]] +name = "ppft" +version = "1.7.6.6" +description = "distributed and parallel python" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "ppft-1.7.6.6-py3-none-any.whl", hash = "sha256:f355d2caeed8bd7c9e4a860c471f31f7e66d1ada2791ab5458ea7dca15a51e41"}, + {file = "ppft-1.7.6.6.tar.gz", hash = "sha256:f933f0404f3e808bc860745acb3b79cd4fe31ea19a20889a645f900415be60f1"}, +] + +[package.extras] +dill = ["dill (>=0.3.6)"] + [[package]] name = "preshed" version = "3.0.8" @@ -4369,6 +4608,24 @@ files = [ [package.extras] plugins = ["importlib-metadata"] +[[package]] +name = "pyjwt" +version = "2.6.0" +description = "JSON Web Token implementation in Python" +category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "PyJWT-2.6.0-py3-none-any.whl", hash = "sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14"}, + {file = "PyJWT-2.6.0.tar.gz", hash = "sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd"}, +] + +[package.extras] +crypto = ["cryptography (>=3.4.0)"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] + [[package]] name = "pyparsing" version = "3.0.9" @@ -5120,6 +5377,24 @@ files = [ {file = "ruff-0.0.249.tar.gz", hash = "sha256:b590689f08ecef971c45555cbda6854cdf48f3828fc326802828e851b1a14b3d"}, ] +[[package]] +name = "s3transfer" +version = "0.6.0" +description = "An Amazon S3 Transfer Manager" +category = "main" +optional = true +python-versions = ">= 3.7" +files = [ + {file = "s3transfer-0.6.0-py3-none-any.whl", hash = "sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd"}, + {file = "s3transfer-0.6.0.tar.gz", hash = "sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947"}, +] + +[package.dependencies] +botocore = ">=1.12.36,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] + [[package]] name = "scikit-learn" version = "1.2.1" @@ -7229,4 +7504,4 @@ llms = ["anthropic", "cohere", "openai", "nlpcloud", "huggingface_hub", "manifes [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "2eb321893be595ceba7c7965ef3bc999dcc0941867c46bde6732eeb2c039a6d6" +content-hash = "449d9958004f9b0af5667b02f866313913f9bd9c939870898873c0e3198a9cb4" diff --git a/pyproject.toml b/pyproject.toml index 01d628d817..33c2a20339 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -50,6 +50,7 @@ aiohttp = "^3.8.3" pypdf = {version = "^3.4.0", optional = true} networkx = {version="^2.6.3", optional = true} aleph-alpha-client = {version="^2.15.0", optional = true} +deeplake = {version = "^3.2.9", optional = true} [tool.poetry.group.docs.dependencies] autodoc_pydantic = "^1.8.0" diff --git a/tests/integration_tests/vectorstores/test_deeplake.py b/tests/integration_tests/vectorstores/test_deeplake.py 
new file mode 100644 index 0000000000..a8316f2167 --- /dev/null +++ b/tests/integration_tests/vectorstores/test_deeplake.py @@ -0,0 +1,58 @@ +"""Test Deep Lake functionality.""" +from langchain.docstore.document import Document +from langchain.vectorstores import DeepLake +from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings + + +def test_deeplake() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + docsearch = DeepLake.from_texts( + dataset_path="mem://test_path", texts=texts, embedding=FakeEmbeddings() + ) + output = docsearch.similarity_search("foo", k=1) + assert output == [Document(page_content="foo")] + + +def test_deeplake_with_metadatas() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = DeepLake.from_texts( + dataset_path="mem://test_path", + texts=texts, + embedding=FakeEmbeddings(), + metadatas=metadatas, + ) + output = docsearch.similarity_search("foo", k=1) + assert output == [Document(page_content="foo", metadata={"page": "0"})] + + +def test_deeplake_with_persistence() -> None: + """Test end to end construction and search, with persistence.""" + dataset_path = "./tests/persist_dir" + texts = ["foo", "bar", "baz"] + docsearch = DeepLake.from_texts( + dataset_path=dataset_path, + texts=texts, + embedding=FakeEmbeddings(), + ) + + output = docsearch.similarity_search("foo", k=1) + assert output == [Document(page_content="foo")] + + docsearch.persist() + + # Get a new VectorStore from the persisted directory + docsearch = DeepLake( + dataset_path=dataset_path, + embedding_function=FakeEmbeddings(), + ) + output = docsearch.similarity_search("foo", k=1) + + # Clean up + docsearch.delete_dataset() + + # Persist doesn't need to be called again + # Data will be automatically persisted on object deletion + # Or on program exit
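Taken together, the wrapper, docs, and tests above can be exercised end to end. A minimal usage sketch of the new `DeepLake` vectorstore (assuming `deeplake` is installed and an OpenAI API key is configured for `OpenAIEmbeddings`; the local dataset path shown is illustrative):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import DeepLake

# Embed a few texts into the default in-memory dataset ("mem://langchain").
embeddings = OpenAIEmbeddings()
db = DeepLake.from_texts(
    texts=["foo", "bar", "baz"],
    embedding=embeddings,
)

# Naive L2 similarity search over the stored embeddings.
docs = db.similarity_search("foo", k=1)
print(docs[0].page_content)

# For a persistent dataset, pass e.g. dataset_path="./my_deeplake" (or an
# s3:// or hub:// path) to from_texts and call db.persist() to flush it to storage.
```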