mirror of
https://github.com/hwchase17/langchain
synced 2024-11-06 03:20:49 +00:00
supabase vectorstore - first cut (#3100)
First cut of a supabase vectorstore loosely patterned on the langchainjs equivalent. Doesn't support async operations which is a limitation of the supabase python client. --------- Co-authored-by: Daniel Chalef <daniel.chalef@private.org>
This commit is contained in:
parent
9a0356d276
commit
27cdf8d675
401
docs/modules/indexes/vectorstores/examples/supabase.ipynb
Normal file
401
docs/modules/indexes/vectorstores/examples/supabase.ipynb
Normal file
@ -0,0 +1,401 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "683953b3",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"# SupabaseVectorStore\n",
|
||||||
|
"\n",
|
||||||
|
"This notebook shows how to use Supabase and `pgvector` as your VectorStore.\n",
|
||||||
|
"\n",
|
||||||
|
"To run this notebook, please ensure:\n",
|
||||||
|
"\n",
|
||||||
|
"- the `pgvector` extension is enabled\n",
|
||||||
|
"- you have installed the `supabase-py` package\n",
|
||||||
|
"- that you have created a `match_documents` function in your database\n",
|
||||||
|
"- that you have a `documents` table in your `public` schema similar to the one below.\n",
|
||||||
|
"\n",
|
||||||
|
"The following function determines cosine similarity, but you can adjust to your needs.\n",
|
||||||
|
"\n",
|
||||||
|
"```sql\n",
|
||||||
|
" -- Enable the pgvector extension to work with embedding vectors\n",
|
||||||
|
" create extension vector;\n",
|
||||||
|
"\n",
|
||||||
|
" -- Create a table to store your documents\n",
|
||||||
|
" create table documents (\n",
|
||||||
|
" id bigserial primary key,\n",
|
||||||
|
" content text, -- corresponds to Document.pageContent\n",
|
||||||
|
" metadata jsonb, -- corresponds to Document.metadata\n",
|
||||||
|
" embedding vector(1536) -- 1536 works for OpenAI embeddings, change if needed\n",
|
||||||
|
" );\n",
|
||||||
|
"\n",
|
||||||
|
" CREATE FUNCTION match_documents(query_embedding vector(1536), match_count int)\n",
|
||||||
|
" RETURNS TABLE(\n",
|
||||||
|
" id bigint,\n",
|
||||||
|
" content text,\n",
|
||||||
|
" metadata jsonb,\n",
|
||||||
|
" -- we return matched vectors to allow to execute maximal marginal relevance searches\n",
|
||||||
|
" embedding vector(1536),\n",
|
||||||
|
" similarity float)\n",
|
||||||
|
" LANGUAGE plpgsql\n",
|
||||||
|
" AS $$\n",
|
||||||
|
" # variable_conflict use_column\n",
|
||||||
|
" BEGIN\n",
|
||||||
|
" RETURN query\n",
|
||||||
|
" SELECT\n",
|
||||||
|
" id,\n",
|
||||||
|
" content,\n",
|
||||||
|
" metadata,\n",
|
||||||
|
" embedding,\n",
|
||||||
|
"    1 -(documents.embedding <=> query_embedding) AS similarity\n",
|
||||||
|
" FROM\n",
|
||||||
|
"    documents\n",
|
||||||
|
" ORDER BY\n",
|
||||||
|
"    documents.embedding <=> query_embedding\n",
|
||||||
|
" LIMIT match_count;\n",
|
||||||
|
" END;\n",
|
||||||
|
" $$;\n",
|
||||||
|
"```\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"id": "6bd4498b",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# with pip\n",
|
||||||
|
"# !pip install supabase\n",
|
||||||
|
"\n",
|
||||||
|
"# with conda\n",
|
||||||
|
"# !conda install -c conda-forge supabase"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"id": "90afc6df",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"True"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"# If you're storing your Supabase and OpenAI API keys in a .env file, you can load them with dotenv\n",
|
||||||
|
"from dotenv import load_dotenv\n",
|
||||||
|
"\n",
|
||||||
|
"load_dotenv()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"id": "5ce44f7c",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import os\n",
|
||||||
|
"from supabase.client import Client, create_client\n",
|
||||||
|
"\n",
|
||||||
|
"supabase_url = os.environ.get(\"SUPABASE_URL\")\n",
|
||||||
|
"supabase_key = os.environ.get(\"SUPABASE_SERVICE_KEY\")\n",
|
||||||
|
"supabase: Client = create_client(supabase_url, supabase_key)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"id": "aac9563e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"2023-04-19 20:12:28,593:INFO - NumExpr defaulting to 8 threads.\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
|
||||||
|
"from langchain.text_splitter import CharacterTextSplitter\n",
|
||||||
|
"from langchain.vectorstores.supabase import SupabaseVectorStore\n",
|
||||||
|
"from langchain.document_loaders import TextLoader"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"id": "a3c3999a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"from langchain.document_loaders import TextLoader\n",
|
||||||
|
"\n",
|
||||||
|
"loader = TextLoader(\"../../../state_of_the_union.txt\")\n",
|
||||||
|
"documents = loader.load()\n",
|
||||||
|
"text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
|
||||||
|
"docs = text_splitter.split_documents(documents)\n",
|
||||||
|
"\n",
|
||||||
|
"embeddings = OpenAIEmbeddings()"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"id": "efec97f8",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# We're using the default `documents` table here. You can modify this by passing in a `table_name` argument to the `from_documents` method.\n",
|
||||||
|
"vector_store = SupabaseVectorStore.from_documents(\n",
|
||||||
|
" docs, embeddings, client=supabase\n",
|
||||||
|
")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"id": "5eabdb75",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
|
||||||
|
"matched_docs = vector_store.similarity_search(query)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"id": "4b172de8",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||||
|
"\n",
|
||||||
|
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||||
|
"\n",
|
||||||
|
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||||
|
"\n",
|
||||||
|
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"print(matched_docs[0].page_content)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "18152965",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Similarity search with score\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"id": "72aaa9c8",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"matched_docs = vector_store.similarity_search_with_relevance_scores(query)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"id": "d88e958e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/plain": [
|
||||||
|
"(Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'}),\n",
|
||||||
|
" 0.802509746274066)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 10,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"matched_docs[0]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"attachments": {},
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"id": "794a7552",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"## Retriever options\n",
|
||||||
|
"\n",
|
||||||
|
"This section goes over different options for how to use SupabaseVectorStore as a retriever.\n",
|
||||||
|
"\n",
|
||||||
|
"### Maximal Marginal Relevance Searches\n",
|
||||||
|
"\n",
|
||||||
|
"In addition to using similarity search in the retriever object, you can also use `mmr`.\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 11,
|
||||||
|
"id": "96ff911a",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"retriever = vector_store.as_retriever(search_type=\"mmr\")"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 12,
|
||||||
|
"id": "f00be6d0",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"matched_docs = retriever.get_relevant_documents(query)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 13,
|
||||||
|
"id": "a559c3f1",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"\n",
|
||||||
|
"## Document 0\n",
|
||||||
|
"\n",
|
||||||
|
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
|
||||||
|
"\n",
|
||||||
|
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
|
||||||
|
"\n",
|
||||||
|
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
|
||||||
|
"\n",
|
||||||
|
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n",
|
||||||
|
"\n",
|
||||||
|
"## Document 1\n",
|
||||||
|
"\n",
|
||||||
|
"One was stationed at bases and breathing in toxic smoke from “burn pits” that incinerated wastes of war—medical and hazard material, jet fuel, and more. \n",
|
||||||
|
"\n",
|
||||||
|
"When they came home, many of the world’s fittest and best trained warriors were never the same. \n",
|
||||||
|
"\n",
|
||||||
|
"Headaches. Numbness. Dizziness. \n",
|
||||||
|
"\n",
|
||||||
|
"A cancer that would put them in a flag-draped coffin. \n",
|
||||||
|
"\n",
|
||||||
|
"I know. \n",
|
||||||
|
"\n",
|
||||||
|
"One of those soldiers was my son Major Beau Biden. \n",
|
||||||
|
"\n",
|
||||||
|
"We don’t know for sure if a burn pit was the cause of his brain cancer, or the diseases of so many of our troops. \n",
|
||||||
|
"\n",
|
||||||
|
"But I’m committed to finding out everything we can. \n",
|
||||||
|
"\n",
|
||||||
|
"Committed to military families like Danielle Robinson from Ohio. \n",
|
||||||
|
"\n",
|
||||||
|
"The widow of Sergeant First Class Heath Robinson. \n",
|
||||||
|
"\n",
|
||||||
|
"He was born a soldier. Army National Guard. Combat medic in Kosovo and Iraq. \n",
|
||||||
|
"\n",
|
||||||
|
"Stationed near Baghdad, just yards from burn pits the size of football fields. \n",
|
||||||
|
"\n",
|
||||||
|
"Heath’s widow Danielle is here with us tonight. They loved going to Ohio State football games. He loved building Legos with their daughter.\n",
|
||||||
|
"\n",
|
||||||
|
"## Document 2\n",
|
||||||
|
"\n",
|
||||||
|
"And I’m taking robust action to make sure the pain of our sanctions is targeted at Russia’s economy. And I will use every tool at our disposal to protect American businesses and consumers. \n",
|
||||||
|
"\n",
|
||||||
|
"Tonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n",
|
||||||
|
"\n",
|
||||||
|
"America will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n",
|
||||||
|
"\n",
|
||||||
|
"These steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n",
|
||||||
|
"\n",
|
||||||
|
"But I want you to know that we are going to be okay. \n",
|
||||||
|
"\n",
|
||||||
|
"When the history of this era is written Putin’s war on Ukraine will have left Russia weaker and the rest of the world stronger. \n",
|
||||||
|
"\n",
|
||||||
|
"While it shouldn’t have taken something so terrible for people around the world to see what’s at stake now everyone sees it clearly.\n",
|
||||||
|
"\n",
|
||||||
|
"## Document 3\n",
|
||||||
|
"\n",
|
||||||
|
"We can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n",
|
||||||
|
"\n",
|
||||||
|
"I recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n",
|
||||||
|
"\n",
|
||||||
|
"They were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n",
|
||||||
|
"\n",
|
||||||
|
"Officer Mora was 27 years old. \n",
|
||||||
|
"\n",
|
||||||
|
"Officer Rivera was 22. \n",
|
||||||
|
"\n",
|
||||||
|
"Both Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n",
|
||||||
|
"\n",
|
||||||
|
"I spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves. \n",
|
||||||
|
"\n",
|
||||||
|
"I’ve worked on these issues a long time. \n",
|
||||||
|
"\n",
|
||||||
|
"I know what works: Investing in crime preventionand community police officers who’ll walk the beat, who’ll know the neighborhood, and who can restore trust and safety.\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"for i, d in enumerate(matched_docs):\n",
|
||||||
|
" print(f\"\\n## Document {i}\\n\")\n",
|
||||||
|
" print(d.page_content)"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": null,
|
||||||
|
"id": "79b1198e",
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": "Python 3 (ipykernel)",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.11.3"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 5
|
||||||
|
}
|
322
langchain/vectorstores/supabase.py
Normal file
322
langchain/vectorstores/supabase.py
Normal file
@ -0,0 +1,322 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from itertools import repeat
|
||||||
|
from typing import (
|
||||||
|
TYPE_CHECKING,
|
||||||
|
Any,
|
||||||
|
Iterable,
|
||||||
|
List,
|
||||||
|
Optional,
|
||||||
|
Tuple,
|
||||||
|
Type,
|
||||||
|
Union,
|
||||||
|
)
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from langchain.docstore.document import Document
|
||||||
|
from langchain.embeddings.base import Embeddings
|
||||||
|
from langchain.vectorstores.base import VectorStore
|
||||||
|
from langchain.vectorstores.utils import maximal_marginal_relevance
|
||||||
|
|
||||||
|
if TYPE_CHECKING:
|
||||||
|
import supabase
|
||||||
|
|
||||||
|
|
||||||
|
class SupabaseVectorStore(VectorStore):
    """VectorStore for a Supabase postgres database. Assumes you have the `pgvector`
    extension installed and a `match_documents` (or similar) function. For more details:
    https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase

    You can implement your own `match_documents` function in order to limit the search
    space to a subset of documents based on your own authorization or business logic.

    Note that the Supabase Python client does not yet support async operations.

    If you'd like to use `max_marginal_relevance_search`, please review the instructions
    below on modifying the `match_documents` function to return matched embeddings.
    """

    _client: supabase.client.Client
    # This is the embedding function. Don't confuse with the embedding vectors.
    # We should perhaps rename the underlying Embedding base class to EmbeddingFunction
    # or something
    _embedding: Embeddings
    table_name: str
    query_name: str

    def __init__(
        self,
        client: supabase.client.Client,
        embedding: Embeddings,
        table_name: str,
        query_name: Union[str, None] = None,
    ) -> None:
        """Initialize with supabase client.

        Args:
            client: An initialized Supabase client.
            embedding: Embedding function used to vectorize texts and queries.
            table_name: Postgres table holding the documents; falls back to
                ``"documents"`` when falsy.
            query_name: Name of the similarity-search RPC function; falls back
                to ``"match_documents"`` when falsy.

        Raises:
            ValueError: If the ``supabase`` package is not installed.
        """
        try:
            import supabase  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import supabase python package. "
                "Please install it with `pip install supabase`."
            )

        self._client = client
        self._embedding: Embeddings = embedding
        self.table_name = table_name or "documents"
        self.query_name = query_name or "match_documents"

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict[Any, Any]]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Embed ``texts`` and insert them into the Supabase table.

        Returns:
            The ids of the inserted rows, as strings.
        """
        # Materialize once: ``texts`` may be a one-shot iterator, and it is
        # consumed twice below (document construction and embedding). The
        # original code iterated the argument twice, which silently embedded
        # nothing when given a generator.
        text_list = list(texts)
        docs = self._texts_to_documents(text_list, metadatas)

        vectors = self._embedding.embed_documents(text_list)
        return self.add_vectors(vectors, docs)

    @classmethod
    def from_texts(
        cls: Type["SupabaseVectorStore"],
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        client: Optional[supabase.client.Client] = None,
        table_name: Optional[str] = "documents",
        query_name: Union[str, None] = "match_documents",
        **kwargs: Any,
    ) -> "SupabaseVectorStore":
        """Return VectorStore initialized from texts and embeddings.

        Raises:
            ValueError: If ``client`` or ``table_name`` is missing.
        """
        if not client:
            raise ValueError("Supabase client is required.")

        if not table_name:
            raise ValueError("Supabase document table_name is required.")

        embeddings = embedding.embed_documents(texts)
        docs = cls._texts_to_documents(texts, metadatas)
        _ids = cls._add_vectors(client, table_name, embeddings, docs)

        return cls(
            client=client,
            embedding=embedding,
            table_name=table_name,
            query_name=query_name,
        )

    def add_vectors(
        self, vectors: List[List[float]], documents: List[Document]
    ) -> List[str]:
        """Insert precomputed embedding vectors together with their documents."""
        return self._add_vectors(self._client, self.table_name, vectors, documents)

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return the ``k`` documents most similar to ``query``."""
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector(vectors[0], k)

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return the ``k`` documents most similar to the given embedding."""
        result = self.similarity_search_by_vector_with_relevance_scores(embedding, k)

        documents = [doc for doc, _ in result]

        return documents

    def similarity_search_with_relevance_scores(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Return ``(document, similarity)`` pairs for ``query``."""
        vectors = self._embedding.embed_documents([query])
        return self.similarity_search_by_vector_with_relevance_scores(vectors[0], k)

    def similarity_search_by_vector_with_relevance_scores(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float]]:
        """Call the ``query_name`` RPC and return ``(document, similarity)`` pairs.

        Rows without content are skipped; missing similarity defaults to 0.0.
        """
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    def similarity_search_by_vector_returning_embeddings(
        self, query: List[float], k: int
    ) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
        """Like the relevance-score search, but also return each match's embedding.

        Requires the RPC function to return the ``embedding`` column — see the
        ``max_marginal_relevance_search`` docstring for an example function.
        """
        match_documents_params = dict(query_embedding=query, match_count=k)
        res = self._client.rpc(self.query_name, match_documents_params).execute()

        match_result = [
            (
                Document(
                    metadata=search.get("metadata", {}),  # type: ignore
                    page_content=search.get("content", ""),
                ),
                search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!),
                # e.g. "[0.1,0.2,...]". Parse it back into a float32 array.
                # (np.fromstring with ``sep`` is deprecated, so parse explicitly;
                # an empty/absent embedding yields an empty array.)
                np.fromiter(
                    (
                        float(part)
                        for part in search.get("embedding", "").strip("[]").split(",")
                        if part.strip()
                    ),
                    dtype=np.float32,
                ),
            )
            for search in res.data
            if search.get("content")
        ]

        return match_result

    @staticmethod
    def _texts_to_documents(
        texts: Iterable[str],
        metadatas: Optional[Iterable[dict[Any, Any]]] = None,
    ) -> List[Document]:
        """Return list of Documents from list of texts and metadatas."""
        if metadatas is None:
            # Pair every text with a fresh-looking empty metadata dict.
            metadatas = repeat({})

        docs = [
            Document(page_content=text, metadata=metadata)
            for text, metadata in zip(texts, metadatas)
        ]

        return docs

    @staticmethod
    def _add_vectors(
        client: supabase.client.Client,
        table_name: str,
        vectors: List[List[float]],
        documents: List[Document],
    ) -> List[str]:
        """Add vectors to Supabase table in chunks.

        Raises:
            Exception: If an insert chunk adds no rows.
        """

        rows: List[dict[str, Any]] = [
            {
                "content": documents[idx].page_content,
                "embedding": embedding,
                "metadata": documents[idx].metadata,  # type: ignore
            }
            for idx, embedding in enumerate(vectors)
        ]

        # According to the SupabaseVectorStore JS implementation, the best chunk size
        # is 500
        chunk_size = 500
        id_list: List[str] = []
        for i in range(0, len(rows), chunk_size):
            chunk = rows[i : i + chunk_size]

            result = client.from_(table_name).insert(chunk).execute()  # type: ignore

            if len(result.data) == 0:
                raise Exception("Error inserting: No rows added")

            # VectorStore.add_vectors returns ids as strings
            ids = [str(row.get("id")) for row in result.data if row.get("id")]

            id_list.extend(ids)

        return id_list

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        result = self.similarity_search_by_vector_returning_embeddings(
            embedding, fetch_k
        )

        matched_documents = [doc_tuple[0] for doc_tuple in result]
        matched_embeddings = [doc_tuple[2] for doc_tuple in result]

        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32), matched_embeddings, k=k
        )

        filtered_documents = [matched_documents[i] for i in mmr_selected]

        return filtered_documents

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.

        Returns:
            List of Documents selected by maximal marginal relevance.

        `max_marginal_relevance_search` requires that `query_name` returns matched
        embeddings alongside the match documents. The following function
        demonstrates how to do this:
        ```sql
        CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                                   match_count int)
            RETURNS TABLE(
                id bigint,
                content text,
                metadata jsonb,
                embedding vector(1536),
                similarity float)
            LANGUAGE plpgsql
            AS $$
            # variable_conflict use_column
        BEGIN
            RETURN query
            SELECT
                id,
                content,
                metadata,
                embedding,
                1 -(documents.embedding <=> query_embedding) AS similarity
            FROM
                documents
            ORDER BY
                documents.embedding <=> query_embedding
            LIMIT match_count;
        END;
        $$;```
        """
        embedding = self._embedding.embed_documents([query])
        docs = self.max_marginal_relevance_search_by_vector(embedding[0], k, fetch_k)
        return docs
|
Loading…
Reference in New Issue
Block a user