From 1f9124ceaa41582b53a8c77fc32657d1f618646d Mon Sep 17 00:00:00 2001
From: Ash Vardanian <1983160+ashvardanian@users.noreply.github.com>
Date: Tue, 8 Aug 2023 04:41:00 +0100
Subject: [PATCH] Add: USearch Vector Store (#8835)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Description

I am excited to propose an integration with USearch, a lightweight vector-search engine available for both Python and JavaScript, among other languages.

## Dependencies

It introduces a new PyPI dependency, `usearch`. I am unsure whether it should also be added to the Poetry file, as that would make the PR too clunky. Please let me know.

## Profiles

- Maintainers: @ashvardanian @davvard
- Twitter handles: @ashvardanian @unum_cloud
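
## Example

A minimal sketch of the proposed API (assuming `OPENAI_API_KEY` is set; any other `Embeddings` implementation works the same way):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import USearch

embeddings = OpenAIEmbeddings()
db = USearch.from_texts(["foo", "bar", "baz"], embeddings)
docs_and_scores = db.similarity_search_with_score("foo", k=1)
```
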
" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "47f9b495-88f1-4286-8d5d-1416103931a7", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "aac9563e", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import USearch\n", + "from langchain.document_loaders import TextLoader" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "a3c3999a", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.document_loaders import TextLoader\n", + "\n", + "loader = TextLoader(\"../../../extras/modules/state_of_the_union.txt\")\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "5eabdb75", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "db = USearch.from_documents(docs, embeddings)\n", + "\n", + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs = db.similarity_search(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "4b172de8", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" + ] + } + ], + "source": [ + "print(docs[0].page_content)" + ] + }, + { + "cell_type": "markdown", + "id": "f13473b5", + "metadata": {}, + "source": [ + "## Similarity Search with score\n", + "The `similarity_search_with_score` method allows you to return not only the documents but also the distance score of the query to them. The returned distance score is L2 distance. Therefore, a lower score is better." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "186ee1d8", + "metadata": {}, + "outputs": [], + "source": [ + "docs_and_scores = db.similarity_search_with_score(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "284e04b5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. 
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "483f6013-fb32-4756-a9e2-3d529fb81f68",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/libs/langchain/langchain/vectorstores/__init__.py b/libs/langchain/langchain/vectorstores/__init__.py
index bd5ed839c9..aefd5e69ca 100644
--- a/libs/langchain/langchain/vectorstores/__init__.py
+++ b/libs/langchain/langchain/vectorstores/__init__.py
@@ -62,6 +62,7 @@ from langchain.vectorstores.supabase import SupabaseVectorStore
 from langchain.vectorstores.tair import Tair
 from langchain.vectorstores.tigris import Tigris
 from langchain.vectorstores.typesense import Typesense
+from langchain.vectorstores.usearch import USearch
 from langchain.vectorstores.vectara import Vectara
 from langchain.vectorstores.weaviate import Weaviate
 from langchain.vectorstores.zilliz import Zilliz
@@ -120,4 +121,5 @@ __all__ = [
     "Weaviate",
     "Zilliz",
     "PGVector",
+    "USearch",
 ]
diff --git a/libs/langchain/langchain/vectorstores/usearch.py b/libs/langchain/langchain/vectorstores/usearch.py
new file mode 100644
index 0000000000..1d0e754e1d
--- /dev/null
+++ b/libs/langchain/langchain/vectorstores/usearch.py
@@ -0,0 +1,176 @@
+"""Wrapper around USearch vector database."""
+from __future__ import annotations
+
+from typing import Any, Dict, Iterable, List, Optional, Tuple
+
+import numpy as np
+
+from langchain.docstore.base import AddableMixin, Docstore
+from langchain.docstore.document import Document
+from langchain.docstore.in_memory import InMemoryDocstore
+from langchain.embeddings.base import Embeddings
+from langchain.vectorstores.base import VectorStore
+
+
+def dependable_usearch_import() -> Any:
+    """
+    Import usearch if available, otherwise raise an error.
+    """
+    try:
+        import usearch.index
+    except ImportError:
+        raise ImportError(
+            "Could not import usearch python package. "
+            "Please install it with `pip install usearch`."
+        )
+    return usearch.index
+
+
+class USearch(VectorStore):
+    """Wrapper around USearch vector database.
+
+    To use, you should have the ``usearch`` python package installed.
+ """ + + def __init__( + self, + embedding: Embeddings, + index: Any, + docstore: Docstore, + ids: List[str], + ): + """Initialize with necessary components.""" + self.embedding = embedding + self.index = index + self.docstore = docstore + self.ids = ids + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[Dict]] = None, + ids: Optional[np.ndarray] = None, + **kwargs: Any, + ) -> List[str]: + """Run more texts through the embeddings and add to the vectorstore. + + Args: + texts: Iterable of strings to add to the vectorstore. + metadatas: Optional list of metadatas associated with the texts. + ids: Optional list of unique IDs. + + Returns: + List of ids from adding the texts into the vectorstore. + """ + if not isinstance(self.docstore, AddableMixin): + raise ValueError( + "If trying to add texts, the underlying docstore should support " + f"adding items, which {self.docstore} does not" + ) + + embeddings = self.embedding.embed_documents(list(texts)) + documents = [] + for i, text in enumerate(texts): + metadata = metadatas[i] if metadatas else {} + documents.append(Document(page_content=text, metadata=metadata)) + last_id = int(self.ids[-1]) + 1 + if ids is None: + ids = np.array([str(last_id + id) for id, _ in enumerate(texts)]) + + self.index.add(np.array(ids), np.array(embeddings)) + self.docstore.add(dict(zip(ids, documents))) + self.ids.extend(ids) + return ids.tolist() + + def similarity_search_with_score( + self, + query: str, + k: int = 4, + ) -> List[Tuple[Document, float]]: + """Return docs most similar to query. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + + Returns: + List of documents most similar to the query with distance. + """ + query_embedding = self.embedding.embed_query(query) + matches = self.index.search(np.array(query_embedding), k) + + docs_with_scores: List[Tuple[Document, float]] = [] + for id, score in zip(matches.keys, matches.distances): + doc = self.docstore.search(str(id)) + if not isinstance(doc, Document): + raise ValueError(f"Could not find document for id {id}, got {doc}") + docs_with_scores.append((doc, score)) + + return docs_with_scores + + def similarity_search( + self, + query: str, + k: int = 4, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to query. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + + Returns: + List of Documents most similar to the query. + """ + query_embedding = self.embedding.embed_query(query) + matches = self.index.search(np.array(query_embedding), k) + + docs: List[Document] = [] + for id in matches.keys: + doc = self.docstore.search(str(id)) + if not isinstance(doc, Document): + raise ValueError(f"Could not find document for id {id}, got {doc}") + docs.append(doc) + + return docs + + @classmethod + def from_texts( + cls, + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[Dict]] = None, + ids: Optional[np.ndarray] = None, + metric: str = "cos", + **kwargs: Any, + ) -> USearch: + """Construct USearch wrapper from raw documents. + This is a user friendly interface that: + 1. Embeds documents. + 2. Creates an in memory docstore + 3. Initializes the USearch database + This is intended to be a quick way to get started. + + Example: + .. 
+            .. code-block:: python
+
+                from langchain.vectorstores import USearch
+                from langchain.embeddings import OpenAIEmbeddings
+
+                embeddings = OpenAIEmbeddings()
+                usearch = USearch.from_texts(texts, embeddings)
+        """
+        embeddings = embedding.embed_documents(texts)
+
+        documents: List[Document] = []
+        if ids is None:
+            ids = np.array([str(id) for id, _ in enumerate(texts)])
+        for i, text in enumerate(texts):
+            metadata = metadatas[i] if metadatas else {}
+            documents.append(Document(page_content=text, metadata=metadata))
+
+        docstore = InMemoryDocstore(dict(zip(ids, documents)))
+        usearch = dependable_usearch_import()
+        index = usearch.Index(ndim=len(embeddings[0]), metric=metric)
+        index.add(np.array(ids), np.array(embeddings))
+        return cls(embedding, index, docstore, ids.tolist())
diff --git a/libs/langchain/tests/integration_tests/vectorstores/test_usearch.py b/libs/langchain/tests/integration_tests/vectorstores/test_usearch.py
new file mode 100644
index 0000000000..88a7764508
--- /dev/null
+++ b/libs/langchain/tests/integration_tests/vectorstores/test_usearch.py
@@ -0,0 +1,59 @@
+"""Test USearch functionality."""
+import pytest
+
+from langchain.docstore.document import Document
+from langchain.vectorstores.usearch import USearch
+from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
+
+
+def test_usearch_from_texts() -> None:
+    """Test end-to-end construction and search."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = USearch.from_texts(texts, FakeEmbeddings())
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo")]
+
+
+def test_usearch_from_documents() -> None:
+    """Test the from_documents constructor."""
+    texts = ["foo", "bar", "baz"]
+    docs = [Document(page_content=t, metadata={"a": "b"}) for t in texts]
+    docsearch = USearch.from_documents(docs, FakeEmbeddings())
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo", metadata={"a": "b"})]
+
+
+def test_usearch_add_texts() -> None:
+    """Test adding a new document."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = USearch.from_texts(texts, FakeEmbeddings())
+    docsearch.add_texts(["foo"])
+    output = docsearch.similarity_search("foo", k=2)
+    assert output == [Document(page_content="foo"), Document(page_content="foo")]
+
+
+def test_ip() -> None:
+    """Test inner-product distance."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="ip")
+    output = docsearch.similarity_search_with_score("far", k=2)
+    _, score = output[1]
+    assert score == -8.0
+
+
+def test_l2() -> None:
+    """Test squared L2 distance."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="l2_sq")
+    output = docsearch.similarity_search_with_score("far", k=2)
+    _, score = output[1]
+    assert score == 1.0
+
+
+def test_cos() -> None:
+    """Test cosine distance."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric="cos")
+    output = docsearch.similarity_search_with_score("far", k=2)
+    _, score = output[1]
+    assert score == pytest.approx(0.05, abs=0.002)
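+
+
+def test_usearch_add_texts_with_ids() -> None:
+    """Sketch: append texts with explicit ids (assumes string keys, as in from_texts)."""
+    # `ids` is typed as an optional NumPy array; we only check that the
+    # appended document becomes retrievable under the "0", "1", ... scheme.
+    import numpy as np
+
+    texts = ["foo", "bar", "baz"]
+    docsearch = USearch.from_texts(texts, FakeEmbeddings())
+    docsearch.add_texts(["foo"], ids=np.array(["3"]))
+    output = docsearch.similarity_search("foo", k=2)
+    assert output == [Document(page_content="foo"), Document(page_content="foo")]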