diff --git a/docs/extras/integrations/vectorstores/supabase.ipynb b/docs/extras/integrations/vectorstores/supabase.ipynb index 9ba5dacd08..9a5f583adb 100644 --- a/docs/extras/integrations/vectorstores/supabase.ipynb +++ b/docs/extras/integrations/vectorstores/supabase.ipynb @@ -28,43 +28,41 @@ "The following function determines cosine similarity, but you can adjust to your needs.\n", "\n", "```sql\n", - " -- Enable the pgvector extension to work with embedding vectors\n", - " create extension vector;\n", + "-- Enable the pgvector extension to work with embedding vectors\n", + "create extension if not exists vector;\n", "\n", - " -- Create a table to store your documents\n", - " create table documents (\n", - " id uuid primary key,\n", - " content text, -- corresponds to Document.pageContent\n", - " metadata jsonb, -- corresponds to Document.metadata\n", - " embedding vector(1536) -- 1536 works for OpenAI embeddings, change if needed\n", - " );\n", + "-- Create a table to store your documents\n", + "create table\n", + " documents (\n", + " id uuid primary key,\n", + " content text, -- corresponds to Document.pageContent\n", + " metadata jsonb, -- corresponds to Document.metadata\n", + " embedding vector (1536) -- 1536 works for OpenAI embeddings, change if needed\n", + " );\n", "\n", - " CREATE FUNCTION match_documents(query_embedding vector(1536), match_count int)\n", - " RETURNS TABLE(\n", - " id uuid,\n", - " content text,\n", - " metadata jsonb,\n", - " -- we return matched vectors to enable maximal marginal relevance searches\n", - " embedding vector(1536),\n", - " similarity float)\n", - " LANGUAGE plpgsql\n", - " AS $$\n", - " # variable_conflict use_column\n", - " BEGIN\n", - " RETURN query\n", - " SELECT\n", - " id,\n", - " content,\n", - " metadata,\n", - " embedding,\n", - " 1 -(documents.embedding <=> query_embedding) AS similarity\n", - " FROM\n", - " documents\n", - " ORDER BY\n", - " documents.embedding <=> query_embedding\n", - " LIMIT match_count;\n", - " END;\n", - " $$;\n", + "-- Create a function to search for documents\n", + "create function match_documents (\n", + " query_embedding vector (1536),\n", + " filter jsonb default '{}'\n", + ") returns table (\n", + " id uuid,\n", + " content text,\n", + " metadata jsonb,\n", + " similarity float\n", + ") language plpgsql as $$\n", + "#variable_conflict use_column\n", + "begin\n", + " return query\n", + " select\n", + " id,\n", + " content,\n", + " metadata,\n", + " 1 - (documents.embedding <=> query_embedding) as similarity\n", + " from documents\n", + " where metadata @> filter\n", + " order by documents.embedding <=> query_embedding;\n", + "end;\n", + "$$;\n", "```" ] }, diff --git a/docs/extras/modules/data_connection/retrievers/self_query/supabase_self_query.ipynb b/docs/extras/modules/data_connection/retrievers/self_query/supabase_self_query.ipynb new file mode 100644 index 0000000000..1414f70d38 --- /dev/null +++ b/docs/extras/modules/data_connection/retrievers/self_query/supabase_self_query.ipynb @@ -0,0 +1,587 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "13afcae7", + "metadata": {}, + "source": [ + "# Supabase Vector self-querying \n", + "\n", + ">[Supabase](https://supabase.com/docs) is an open source `Firebase` alternative. 
\n", + "> `Supabase` is built on top of `PostgreSQL`, which offers strong `SQL` \n", + "> querying capabilities and enables a simple interface with already-existing tools and frameworks.\n", + "\n", + ">[PostgreSQL](https://en.wikipedia.org/wiki/PostgreSQL) also known as `Postgres`,\n", + "> is a free and open-source relational database management system (RDBMS) \n", + "> emphasizing extensibility and `SQL` compliance.\n", + "\n", + "In the notebook we'll demo the `SelfQueryRetriever` wrapped around a Supabase vector store.\n", + "\n", + "Specifically we will:\n", + "1. Create a Supabase database\n", + "2. Enable the `pgvector` extension\n", + "3. Create a `documents` table and `match_documents` function that will be used by `SupabaseVectorStore`\n", + "4. Load sample documents into the vector store (database table)\n", + "5. Build and test a self-querying retriever" + ] + }, + { + "cell_type": "markdown", + "id": "347935ad", + "metadata": {}, + "source": [ + "## Setup Supabase Database\n", + "\n", + "1. Head over to https://database.new to provision your Supabase database.\n", + "2. In the studio, jump to the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) and run the following script to enable `pgvector` and setup your database as a vector store:\n", + " ```sql\n", + " -- Enable the pgvector extension to work with embedding vectors\n", + " create extension if not exists vector;\n", + "\n", + " -- Create a table to store your documents\n", + " create table\n", + " documents (\n", + " id uuid primary key,\n", + " content text, -- corresponds to Document.pageContent\n", + " metadata jsonb, -- corresponds to Document.metadata\n", + " embedding vector (1536) -- 1536 works for OpenAI embeddings, change if needed\n", + " );\n", + "\n", + " -- Create a function to search for documents\n", + " create function match_documents (\n", + " query_embedding vector (1536),\n", + " filter jsonb default '{}'\n", + " ) returns table (\n", + " id uuid,\n", + " content text,\n", + " metadata jsonb,\n", + " similarity float\n", + " ) language plpgsql as $$\n", + " #variable_conflict use_column\n", + " begin\n", + " return query\n", + " select\n", + " id,\n", + " content,\n", + " metadata,\n", + " 1 - (documents.embedding <=> query_embedding) as similarity\n", + " from documents\n", + " where metadata @> filter\n", + " order by documents.embedding <=> query_embedding;\n", + " end;\n", + " $$;\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "id": "68e75fb9", + "metadata": {}, + "source": [ + "## Creating a Supabase vector store\n", + "Next we'll want to create a Supabase vector store and seed it with some data. 
We've created a small demo set of documents that contain summaries of movies.\n", + "\n", + "Be sure to install the latest version of `langchain`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78546fd7", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install langchain" + ] + }, + { + "cell_type": "markdown", + "id": "e06df198", + "metadata": {}, + "source": [ + "The self-query retriever requires you to have `lark` installed:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63a8af5b", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%pip install lark" + ] + }, + { + "cell_type": "markdown", + "id": "114f768f", + "metadata": {}, + "source": [ + "We also need the `openai` and `supabase` packages:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "434ae558", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install openai" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22431060-52c4-48a7-a97b-9f542b8b0928", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%pip install supabase==1.0.0" + ] + }, + { + "cell_type": "markdown", + "id": "83811610-7df3-4ede-b268-68a6a83ba9e2", + "metadata": {}, + "source": [ + "Since we are using `SupabaseVectorStore` and `OpenAIEmbeddings`, we have to load their API keys.\n", + "\n", + "- To find your `SUPABASE_URL` and `SUPABASE_SERVICE_KEY`, head to your Supabase project's [API settings](https://supabase.com/dashboard/project/_/settings/api).\n", + " - `SUPABASE_URL` corresponds to the Project URL\n", + " - `SUPABASE_SERVICE_KEY` corresponds to the `service_role` API key\n", + "\n", + "- To get your `OPENAI_API_KEY`, navigate to [API keys](https://platform.openai.com/account/api-keys) on your OpenAI account and create a new secret key." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "dd01b61b-7d32-4a55-85d6-b2d2d4f18840", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import getpass\n", + "\n", + "os.environ[\"SUPABASE_URL\"] = getpass.getpass(\"Supabase URL:\")\n", + "os.environ[\"SUPABASE_SERVICE_KEY\"] = getpass.getpass(\"Supabase Service Key:\")\n", + "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")" + ] + }, + { + "cell_type": "markdown", + "id": "3aaf5075", + "metadata": {}, + "source": [ + "_Optional:_ If you're storing your Supabase and OpenAI API keys in a `.env` file, you can load them with [`dotenv`](https://github.com/theskumar/python-dotenv)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0089221", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install python-dotenv" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3d56c5ef", + "metadata": {}, + "outputs": [], + "source": [ + "from dotenv import load_dotenv\n", + "\n", + "load_dotenv()" + ] + }, + { + "cell_type": "markdown", + "id": "f6dd9aef", + "metadata": {}, + "source": [ + "First we'll create a Supabase client and instantiate a OpenAI embeddings class." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "cb4a5787", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "from supabase.client import Client, create_client\n", + "from langchain.schema import Document\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.vectorstores import SupabaseVectorStore\n", + "\n", + "supabase_url = os.environ.get(\"SUPABASE_URL\")\n", + "supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n", + "supabase: Client = create_client(supabase_url, supabase_key)\n", + "\n", + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "markdown", + "id": "0fca9b0b", + "metadata": {}, + "source": [ + "Next let's create our documents." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "bcbe04d9", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "docs = [\n", + " Document(\n", + " page_content=\"A bunch of scientists bring back dinosaurs and mayhem breaks loose\",\n", + " metadata={\"year\": 1993, \"rating\": 7.7, \"genre\": \"science fiction\"},\n", + " ),\n", + " Document(\n", + " page_content=\"Leo DiCaprio gets lost in a dream within a dream within a dream within a ...\",\n", + " metadata={\"year\": 2010, \"director\": \"Christopher Nolan\", \"rating\": 8.2},\n", + " ),\n", + " Document(\n", + " page_content=\"A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea\",\n", + " metadata={\"year\": 2006, \"director\": \"Satoshi Kon\", \"rating\": 8.6},\n", + " ),\n", + " Document(\n", + " page_content=\"A bunch of normal-sized women are supremely wholesome and some men pine after them\",\n", + " metadata={\"year\": 2019, \"director\": \"Greta Gerwig\", \"rating\": 8.3},\n", + " ),\n", + " Document(\n", + " page_content=\"Toys come alive and have a blast doing so\",\n", + " metadata={\"year\": 1995, \"genre\": \"animated\"},\n", + " ),\n", + " Document(\n", + " page_content=\"Three men walk into the Zone, three men walk out of the Zone\",\n", + " metadata={\n", + " \"year\": 1979,\n", + " \"rating\": 9.9,\n", + " \"director\": \"Andrei Tarkovsky\",\n", + " \"genre\": \"science fiction\",\n", + " \"rating\": 9.9,\n", + " },\n", + " ),\n", + "]\n", + "\n", + "vectorstore = SupabaseVectorStore.from_documents(docs, embeddings, client=supabase, table_name=\"documents\", query_name=\"match_documents\")" + ] + }, + { + "cell_type": "markdown", + "id": "5ecaab6d", + "metadata": {}, + "source": [ + "## Creating our self-querying retriever\n", + "Now we can instantiate our retriever. To do this we'll need to provide some information upfront about the metadata fields that our documents support and a short description of the document contents." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "86e34dbf", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.llms import OpenAI\n", + "from langchain.retrievers.self_query.base import SelfQueryRetriever\n", + "from langchain.chains.query_constructor.base import AttributeInfo\n", + "\n", + "metadata_field_info = [\n", + " AttributeInfo(\n", + " name=\"genre\",\n", + " description=\"The genre of the movie\",\n", + " type=\"string or list[string]\",\n", + " ),\n", + " AttributeInfo(\n", + " name=\"year\",\n", + " description=\"The year the movie was released\",\n", + " type=\"integer\",\n", + " ),\n", + " AttributeInfo(\n", + " name=\"director\",\n", + " description=\"The name of the movie director\",\n", + " type=\"string\",\n", + " ),\n", + " AttributeInfo(\n", + " name=\"rating\", description=\"A 1-10 rating for the movie\", type=\"float\"\n", + " ),\n", + "]\n", + "document_content_description = \"Brief summary of a movie\"\n", + "llm = OpenAI(temperature=0)\n", + "retriever = SelfQueryRetriever.from_llm(\n", + " llm, vectorstore, document_content_description, metadata_field_info, verbose=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "ea9df8d4", + "metadata": {}, + "source": [ + "## Testing it out\n", + "And now we can try actually using our retriever!" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "38a126e9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "query='dinosaur' filter=None limit=None\n" + ] + }, + { + "data": { + "text/plain": [ + "[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'genre': 'science fiction', 'rating': 7.7}),\n", + " Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'}),\n", + " Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'}),\n", + " Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'rating': 8.6, 'director': 'Satoshi Kon'})]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This example only specifies a relevant query\n", + "retriever.get_relevant_documents(\"What are some movies about dinosaurs\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "fc3f1e6e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "query=' ' filter=Comparison(comparator=, attribute='rating', value=8.5) limit=None\n" + ] + }, + { + "data": { + "text/plain": [ + "[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'}),\n", + " Document(page_content='A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea', metadata={'year': 2006, 'rating': 8.6, 'director': 'Satoshi Kon'})]" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This example only specifies a filter\n", + "retriever.get_relevant_documents(\"I want to watch a movie rated higher than 8.5\")" + ] + }, + { + "cell_type": "code", + 
"execution_count": 9, + "id": "b19d4da0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "query='women' filter=Comparison(comparator=, attribute='director', value='Greta Gerwig') limit=None\n" + ] + }, + { + "data": { + "text/plain": [ + "[Document(page_content='A bunch of normal-sized women are supremely wholesome and some men pine after them', metadata={'year': 2019, 'rating': 8.3, 'director': 'Greta Gerwig'})]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This example specifies a query and a filter\n", + "retriever.get_relevant_documents(\"Has Greta Gerwig directed any movies about women?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "f900e40e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "query=' ' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='rating', value=8.5), Comparison(comparator=, attribute='genre', value='science fiction')]) limit=None\n" + ] + }, + { + "data": { + "text/plain": [ + "[Document(page_content='Three men walk into the Zone, three men walk out of the Zone', metadata={'year': 1979, 'genre': 'science fiction', 'rating': 9.9, 'director': 'Andrei Tarkovsky'})]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This example specifies a composite filter\n", + "retriever.get_relevant_documents(\n", + " \"What's a highly rated (above 8.5) science fiction film?\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "12a51522", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "query='toys' filter=Operation(operator=, arguments=[Comparison(comparator=, attribute='year', value=1990), Comparison(comparator=, attribute='year', value=2005), Comparison(comparator=, attribute='genre', value='animated')]) limit=None\n" + ] + }, + { + "data": { + "text/plain": [ + "[Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This example specifies a query and composite filter\n", + "retriever.get_relevant_documents(\n", + " \"What's a movie after 1990 but before (or on) 2005 that's all about toys, and preferably is animated\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "39bd1de1-b9fe-4a98-89da-58d8a7a6ae51", + "metadata": {}, + "source": [ + "## Filter k\n", + "\n", + "We can also use the self query retriever to specify `k`: the number of documents to fetch.\n", + "\n", + "We can do this by passing `enable_limit=True` to the constructor." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "bff36b88-b506-4877-9c63-e5a1a8d78e64", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "retriever = SelfQueryRetriever.from_llm(\n", + " llm,\n", + " vectorstore,\n", + " document_content_description,\n", + " metadata_field_info,\n", + " enable_limit=True,\n", + " verbose=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2758d229-4f97-499c-819f-888acaf8ee10", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "query='dinosaur' filter=None limit=2\n" + ] + }, + { + "data": { + "text/plain": [ + "[Document(page_content='A bunch of scientists bring back dinosaurs and mayhem breaks loose', metadata={'year': 1993, 'genre': 'science fiction', 'rating': 7.7}),\n", + " Document(page_content='Toys come alive and have a blast doing so', metadata={'year': 1995, 'genre': 'animated'})]" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# This example only specifies a relevant query\n", + "retriever.get_relevant_documents(\"what are two movies about dinosaurs\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/libs/langchain/langchain/retrievers/self_query/base.py b/libs/langchain/langchain/retrievers/self_query/base.py index a34c756bd4..62bce34847 100644 --- a/libs/langchain/langchain/retrievers/self_query/base.py +++ b/libs/langchain/langchain/retrievers/self_query/base.py @@ -16,6 +16,7 @@ from langchain.retrievers.self_query.milvus import MilvusTranslator from langchain.retrievers.self_query.myscale import MyScaleTranslator from langchain.retrievers.self_query.pinecone import PineconeTranslator from langchain.retrievers.self_query.qdrant import QdrantTranslator +from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator from langchain.retrievers.self_query.vectara import VectaraTranslator from langchain.retrievers.self_query.weaviate import WeaviateTranslator from langchain.schema import BaseRetriever, Document @@ -29,6 +30,7 @@ from langchain.vectorstores import ( MyScale, Pinecone, Qdrant, + SupabaseVectorStore, Vectara, VectorStore, Weaviate, @@ -49,6 +51,7 @@ def _get_builtin_translator(vectorstore: VectorStore) -> Visitor: DeepLake: DeepLakeTranslator, ElasticsearchStore: ElasticsearchTranslator, Milvus: MilvusTranslator, + SupabaseVectorStore: SupabaseVectorTranslator, } if vectorstore_cls not in BUILTIN_TRANSLATORS: raise ValueError( diff --git a/libs/langchain/langchain/retrievers/self_query/supabase.py b/libs/langchain/langchain/retrievers/self_query/supabase.py new file mode 100644 index 0000000000..267e228fcd --- /dev/null +++ b/libs/langchain/langchain/retrievers/self_query/supabase.py @@ -0,0 +1,97 @@ +from typing import Any, Dict, Tuple + +from langchain.chains.query_constructor.ir import ( + Comparator, + Comparison, + Operation, + Operator, + StructuredQuery, + Visitor, +) + + +class SupabaseVectorTranslator(Visitor): + """Translate Langchain filters to Supabase PostgREST filters.""" + + allowed_operators = [Operator.AND, 
Operator.OR] + """Subset of allowed logical operators.""" + + allowed_comparators = [ + Comparator.EQ, + Comparator.NE, + Comparator.GT, + Comparator.GTE, + Comparator.LT, + Comparator.LTE, + Comparator.LIKE, + ] + """Subset of allowed logical comparators.""" + + metadata_column = "metadata" + + def _map_comparator(self, comparator: Comparator) -> str: + """ + Maps Langchain comparator to PostgREST comparator: + + https://postgrest.org/en/stable/references/api/tables_views.html#operators + """ + postgrest_comparator = { + Comparator.EQ: "eq", + Comparator.NE: "neq", + Comparator.GT: "gt", + Comparator.GTE: "gte", + Comparator.LT: "lt", + Comparator.LTE: "lte", + Comparator.LIKE: "like", + }.get(comparator) + + if postgrest_comparator is None: + raise Exception( + f"Comparator '{comparator}' is not currently " + "supported in Supabase Vector" + ) + + return postgrest_comparator + + def _get_json_operator(self, value: Any) -> str: + if isinstance(value, str): + return "->>" + else: + return "->" + + def visit_operation(self, operation: Operation) -> str: + args = [arg.accept(self) for arg in operation.arguments] + return f"{operation.operator.value}({','.join(args)})" + + def visit_comparison(self, comparison: Comparison) -> str: + if isinstance(comparison.value, list): + return self.visit_operation( + Operation( + operator=Operator.AND, + arguments=( + Comparison( + comparator=comparison.comparator, + attribute=comparison.attribute, + value=value, + ) + for value in comparison.value + ), + ) + ) + + return ".".join( + [ + f"{self.metadata_column}{self._get_json_operator(comparison.value)}{comparison.attribute}", + f"{self._map_comparator(comparison.comparator)}", + f"{comparison.value}", + ] + ) + + def visit_structured_query( + self, structured_query: StructuredQuery + ) -> Tuple[str, Dict[str, str]]: + if structured_query.filter is None: + kwargs = {} + else: + kwargs = {"postgrest_filter": structured_query.filter.accept(self)} + return structured_query.query, kwargs diff --git a/libs/langchain/langchain/vectorstores/supabase.py b/libs/langchain/langchain/vectorstores/supabase.py index 4214e085f6..71b7c2cc8b 100644 --- a/libs/langchain/langchain/vectorstores/supabase.py +++ b/libs/langchain/langchain/vectorstores/supabase.py @@ -199,18 +199,31 @@ class SupabaseVectorStore(VectorStore): ) def match_args( - self, query: List[float], k: int, filter: Optional[Dict[str, Any]] + self, query: List[float], filter: Optional[Dict[str, Any]] ) -> Dict[str, Any]: - ret = dict(query_embedding=query, match_count=k) + ret: Dict[str, Any] = dict(query_embedding=query) if filter: ret["filter"] = filter return ret def similarity_search_by_vector_with_relevance_scores( - self, query: List[float], k: int, filter: Optional[Dict[str, Any]] = None + self, + query: List[float], + k: int, + filter: Optional[Dict[str, Any]] = None, + postgrest_filter: Optional[str] = None, ) -> List[Tuple[Document, float]]: - match_documents_params = self.match_args(query, k, filter) - res = self._client.rpc(self.query_name, match_documents_params).execute() + match_documents_params = self.match_args(query, filter) + query_builder = self._client.rpc(self.query_name, match_documents_params) + + if postgrest_filter: + query_builder.params = query_builder.params.set( + "and", f"({postgrest_filter})" + ) + + query_builder.params = query_builder.params.set("limit", k) + + res = query_builder.execute() match_result = [ ( @@ -227,10 +240,23 @@ class SupabaseVectorStore(VectorStore): return match_result def 
similarity_search_by_vector_returning_embeddings( - self, query: List[float], k: int, filter: Optional[Dict[str, Any]] = None + self, + query: List[float], + k: int, + filter: Optional[Dict[str, Any]] = None, + postgrest_filter: Optional[str] = None, ) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]: - match_documents_params = self.match_args(query, k, filter) - res = self._client.rpc(self.query_name, match_documents_params).execute() + match_documents_params = self.match_args(query, filter) + query_builder = self._client.rpc(self.query_name, match_documents_params) + + if postgrest_filter: + query_builder.params = query_builder.params.set( + "and", f"({postgrest_filter})" + ) + + query_builder.params = query_builder.params.set("limit", k) + + res = query_builder.execute() match_result = [ ( diff --git a/libs/langchain/tests/unit_tests/retrievers/self_query/test_supabase.py b/libs/langchain/tests/unit_tests/retrievers/self_query/test_supabase.py new file mode 100644 index 0000000000..de9b04fabf --- /dev/null +++ b/libs/langchain/tests/unit_tests/retrievers/self_query/test_supabase.py @@ -0,0 +1,85 @@ +from typing import Dict, Tuple + +from langchain.chains.query_constructor.ir import ( + Comparator, + Comparison, + Operation, + Operator, + StructuredQuery, +) +from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator + +DEFAULT_TRANSLATOR = SupabaseVectorTranslator() + + +def test_visit_comparison() -> None: + comp = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"]) + expected = "and(metadata->>foo.lt.1,metadata->>foo.lt.2)" + actual = DEFAULT_TRANSLATOR.visit_comparison(comp) + assert expected == actual + + +def test_visit_operation() -> None: + op = Operation( + operator=Operator.AND, + arguments=[ + Comparison(comparator=Comparator.LT, attribute="foo", value=2), + Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"), + Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]), + ], + ) + expected = ( + "and(" + "metadata->foo.lt.2," + "metadata->>bar.eq.baz," + "and(metadata->>abc.lt.1,metadata->>abc.lt.2)" + ")" + ) + actual = DEFAULT_TRANSLATOR.visit_operation(op) + assert expected == actual + + +def test_visit_structured_query() -> None: + query = "What is the capital of France?" + structured_query = StructuredQuery( + query=query, + filter=None, + ) + expected: Tuple[str, Dict] = (query, {}) + actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) + assert expected == actual + + comp = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"]) + expected = ( + query, + {"postgrest_filter": "and(metadata->>foo.lt.1,metadata->>foo.lt.2)"}, + ) + structured_query = StructuredQuery( + query=query, + filter=comp, + ) + actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) + assert expected == actual + + op = Operation( + operator=Operator.AND, + arguments=[ + Comparison(comparator=Comparator.LT, attribute="foo", value=2), + Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"), + Comparison(comparator=Comparator.LT, attribute="abc", value=["1", "2"]), + ], + ) + structured_query = StructuredQuery( + query=query, + filter=op, + ) + expected = ( + query, + { + "postgrest_filter": ( + "and(metadata->foo.lt.2,metadata->>bar.eq.baz,and(metadata->>abc.lt.1,metadata->>abc.lt.2))" + ) + }, + ) + actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) + assert expected == actual
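
To see how the pieces in this PR fit together, here is a minimal, illustrative sketch (not part of the diff) of the flow: `SupabaseVectorTranslator.visit_structured_query` turns a structured query's filter into a PostgREST filter string, and `SupabaseVectorStore.similarity_search_by_vector_with_relevance_scores` applies that string to the `match_documents` RPC call. It assumes the `documents` table and `match_documents` function from the notebook above, `SUPABASE_URL`, `SUPABASE_SERVICE_KEY`, and `OPENAI_API_KEY` in the environment, and the keyword names shown for the `SupabaseVectorStore` constructor; treat it as a sketch under those assumptions, not a canonical usage.

```python
# Illustrative sketch only -- assumes the `documents` table and `match_documents`
# function created in the notebook above, plus SUPABASE_URL, SUPABASE_SERVICE_KEY
# and OPENAI_API_KEY set in the environment.
import os

from supabase.client import create_client
from langchain.chains.query_constructor.ir import Comparator, Comparison, StructuredQuery
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator
from langchain.vectorstores import SupabaseVectorStore

supabase = create_client(os.environ["SUPABASE_URL"], os.environ["SUPABASE_SERVICE_KEY"])
embeddings = OpenAIEmbeddings()

# Constructor keyword names assumed here; the notebook uses the equivalent
# SupabaseVectorStore.from_documents(...) helper instead.
vectorstore = SupabaseVectorStore(
    client=supabase,
    embedding=embeddings,
    table_name="documents",
    query_name="match_documents",
)

# 1. The translator converts a structured query's filter into a PostgREST
#    filter string (see the unit tests above for more examples).
structured = StructuredQuery(
    query="dinosaurs",
    filter=Comparison(comparator=Comparator.GT, attribute="rating", value=8.5),
)
query, search_kwargs = SupabaseVectorTranslator().visit_structured_query(structured)
# search_kwargs == {"postgrest_filter": "metadata->rating.gt.8.5"}

# 2. The vector store appends that string as an `and=(...)` query parameter on
#    the match_documents RPC call, alongside the `limit` parameter derived from k.
docs_and_scores = vectorstore.similarity_search_by_vector_with_relevance_scores(
    embeddings.embed_query(query),
    k=4,
    postgrest_filter=search_kwargs.get("postgrest_filter"),
)
```

This is essentially what `SelfQueryRetriever` does for you once `_get_builtin_translator` resolves `SupabaseVectorStore` to `SupabaseVectorTranslator`, so end users normally only interact with the notebook-level `SelfQueryRetriever.from_llm(...)` API.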