community[minor]: infinity embedding local option (#17671)
**Drop-in replacement for sentence-transformers inference.** https://github.com/langchain-ai/langchain/discussions/17670 TL;DR from the discussion above: around a 4x-22x speedup over using SentenceTransformers / HuggingFace embeddings. For more info: https://github.com/michaelfeil/infinity (pure-Python dependency) --------- Co-authored-by: Erick Friis <erick@langchain.dev>
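For orientation, a minimal sketch of the new local usage pattern this PR adds (adapted from the docstring in the diff below; the model id and device are example values):

```python
import asyncio

from langchain_community.embeddings import InfinityEmbeddingsLocal

embeddings = InfinityEmbeddingsLocal(
    model="BAAI/bge-small-en-v1.5",  # example model id from the docstring below
    device="cpu",
)


async def main() -> None:
    # The batching engine must be started/stopped via `async with`.
    async with embeddings:
        vectors = await embeddings.aembed_documents(["text1", "text2"])
        print(len(vectors), len(vectors[0]))


asyncio.run(main())
```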
This commit is contained in: parent 581095b9b5, commit 242981b8f0
@@ -24,14 +24,127 @@
"metadata": {},
"outputs": [],
"source": [
-"from langchain_community.embeddings import InfinityEmbeddings"
+"from langchain_community.embeddings import InfinityEmbeddings, InfinityEmbeddingsLocal"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Optional: Make sure to start the Infinity instance\n",
"# Option 1: Use infinity from Python"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Optional: install infinity\n",
"\n",
"To install infinity, use the following command. For further details, check out the [Docs on Github](https://github.com/michaelfeil/infinity).\n",
"Install the torch and onnx dependencies.\n",
"\n",
"```bash\n",
"pip install infinity_emb[torch,optimum]\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"documents = [\n",
"    \"Baguette is a dish.\",\n",
"    \"Paris is the capital of France.\",\n",
"    \"numpy is a lib for linear algebra\",\n",
"    \"You escaped what I've escaped - You'd be in Paris getting fucked up too\",\n",
"]\n",
"query = \"Where is Paris?\""
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/michael/langchain/libs/langchain/.venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
"  from .autonotebook import tqdm as notebook_tqdm\n",
"The BetterTransformer implementation does not support padding during training, as the fused kernels do not support attention masks. Beware that passing padded batched data during training may result in unexpected outputs. Please refer to https://huggingface.co/docs/optimum/bettertransformer/overview for more details.\n",
"/home/michael/langchain/libs/langchain/.venv/lib/python3.10/site-packages/optimum/bettertransformer/models/encoder_models.py:301: UserWarning: The PyTorch API of nested tensors is in prototype stage and will change in the near future. (Triggered internally at ../aten/src/ATen/NestedTensorImpl.cpp:177.)\n",
"  hidden_states = torch._nested_tensor_from_mask(hidden_states, ~attention_mask)\n"
]
}
],
"source": [
"embeddings = InfinityEmbeddingsLocal(\n",
"    model=\"sentence-transformers/all-MiniLM-L6-v2\",\n",
"    # revision\n",
"    revision=None,\n",
"    # best to keep at 32\n",
"    batch_size=32,\n",
"    # for AMD/Nvidia GPUs via torch\n",
"    device=\"cuda\",\n",
"    # warm up model before execution\n",
")\n",
"\n",
"\n",
"async def embed():\n",
"    # TODO: This function is just to showcase that your call can run async.\n",
"\n",
"    # important: use the engine inside an `async with` statement to start/stop the batching engine.\n",
"    async with embeddings:\n",
"        # avoid closing and starting the engine often;\n",
"        # rather, keep it running.\n",
"        # you may call `await embeddings.__aenter__()` and `await embeddings.__aexit__()`\n",
"        # if you are sure when to manually start/stop execution in a more granular way\n",
"\n",
"        documents_embedded = await embeddings.aembed_documents(documents)\n",
"        query_result = await embeddings.aembed_query(query)\n",
"        print(\"embeddings created successfully\")\n",
"    return documents_embedded, query_result"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# run the async code however you would like\n",
"# if you are in a jupyter notebook, you can use the following\n",
"documents_embedded, query_result = await embed()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# (demo) compute similarity\n",
"import numpy as np\n",
"\n",
"scores = np.array(documents_embedded) @ np.array(query_result).T\n",
"dict(zip(documents, scores))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Option 2: Run the server, and connect via the API"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Optional: Make sure to start the Infinity instance\n",
"\n",
"To install infinity, use the following command. For further details, check out the [Docs on Github](https://github.com/michaelfeil/infinity).\n",
"```bash\n",
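For Option 2, the client side connects to a running server over HTTP. A minimal sketch using the existing `InfinityEmbeddings` class; the `infinity_api_url` parameter name and port 7797 are taken to match the notebook output further below and should be treated as assumptions:

```python
from langchain_community.embeddings import InfinityEmbeddings

# Connect to an already-running Infinity server (Option 2).
# Port 7797 matches the URL shown in the notebook output below;
# `infinity_api_url` is assumed to be the connection parameter of
# the existing InfinityEmbeddings class.
embeddings = InfinityEmbeddings(
    model="BAAI/bge-small-en-v1.5",
    infinity_api_url="http://localhost:7797",
)

query_vector = embeddings.embed_query("Where is Paris?")
print(len(query_vector))
```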
@@ -40,25 +153,11 @@
]
},
{
-"cell_type": "code",
-"execution_count": 6,
+"cell_type": "markdown",
"metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"Requirement already satisfied: infinity_emb[cli] in /home/michi/langchain/.venv/lib/python3.10/site-packages (0.0.8)\n",
-"\u001b[33mWARNING: infinity-emb 0.0.8 does not provide the extra 'cli'\u001b[0m\u001b[33m\n",
-"\u001b[0mRequirement already satisfied: numpy>=1.20.0 in /home/michi/langchain/.venv/lib/python3.10/site-packages (from infinity_emb[cli]) (1.24.4)\n",
-"\u001b[33mWARNING: There was an error checking the latest version of pip.\u001b[0m\u001b[33m\n",
-"\u001b[0m"
-]
-}
-],
"source": [
"# Install the infinity package\n",
-"%pip install --upgrade --quiet infinity_emb[cli,torch]"
+"%pip install --upgrade --quiet infinity_emb[all]"
]
},
{
@@ -90,7 +189,7 @@
},
{
"cell_type": "code",
-"execution_count": 2,
+"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -105,14 +204,14 @@
},
{
"cell_type": "code",
-"execution_count": 4,
+"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
-"embeddings created successful\n"
+"Make sure the infinity instance is running. Verify by clicking on http://localhost:7797/docs Exception: HTTPConnectionPool(host='localhost', port=7797): Max retries exceeded with url: /v1/embeddings (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f91c35dbd30>: Failed to establish a new connection: [Errno 111] Connection refused')). \n"
]
}
],
@@ -136,7 +235,7 @@
},
{
"cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
"metadata": {},
"outputs": [
{
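The stdout captured above records a connection-refused error against http://localhost:7797. A quick health check along these lines (a sketch; the `/docs` route and port are taken from that output) can verify the server is reachable before embedding:

```python
import requests

# Check that the Infinity server is reachable before creating embeddings.
# The port (7797) matches the one shown in the notebook output above.
try:
    response = requests.get("http://localhost:7797/docs", timeout=5)
    response.raise_for_status()
    print("Infinity server is up.")
except requests.exceptions.ConnectionError:
    print("Infinity server is not running - start it before embedding.")
```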
@@ -51,6 +51,7 @@ from langchain_community.embeddings.huggingface import (
)
from langchain_community.embeddings.huggingface_hub import HuggingFaceHubEmbeddings
from langchain_community.embeddings.infinity import InfinityEmbeddings
+from langchain_community.embeddings.infinity_local import InfinityEmbeddingsLocal
from langchain_community.embeddings.javelin_ai_gateway import JavelinAIGatewayEmbeddings
from langchain_community.embeddings.jina import JinaEmbeddings
from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
@@ -105,6 +106,7 @@ __all__ = [
"HuggingFaceEmbeddings",
"HuggingFaceInferenceAPIEmbeddings",
"InfinityEmbeddings",
+"InfinityEmbeddingsLocal",
"GradientEmbeddings",
"JinaEmbeddings",
"LlamaCppEmbeddings",
libs/community/langchain_community/embeddings/infinity_local.py (new file, 156 lines)
@@ -0,0 +1,156 @@
"""written under MIT Licence, Michael Feil 2023."""

import asyncio
from logging import getLogger
from typing import Any, Dict, List, Optional

from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator

__all__ = ["InfinityEmbeddingsLocal"]

logger = getLogger(__name__)


class InfinityEmbeddingsLocal(BaseModel, Embeddings):
    """Optimized Embedding models https://github.com/michaelfeil/infinity
    This class deploys a local Infinity instance to embed text.
    The class requires async usage.

    Infinity is a class to interact with Embedding Models on https://github.com/michaelfeil/infinity

    Example:
        .. code-block:: python

            from langchain_community.embeddings import InfinityEmbeddingsLocal
            async with InfinityEmbeddingsLocal(
                model="BAAI/bge-small-en-v1.5",
                revision=None,
                device="cpu",
            ) as embedder:
                embeddings = await embedder.aembed_documents(["text1", "text2"])
    """

    model: str
    "Underlying model id from huggingface, e.g. BAAI/bge-small-en-v1.5"

    revision: Optional[str] = None
    "Model version, the commit hash from huggingface"

    batch_size: int = 32
    "Internal batch size for inference, e.g. 32"

    device: str = "auto"
    "Device to use for inference, e.g. 'cpu', 'cuda', or 'mps'"

    backend: str = "torch"
    "Backend for inference, e.g. 'torch' (recommended for ROCm/Nvidia)"
    " or 'optimum' for onnx/tensorrt"

    model_warmup: bool = True
    "Warm up the model with the max batch size."

    engine: Any = None  #: :meta private:
    """Infinity's AsyncEmbeddingEngine."""

    # LLM call kwargs
    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(allow_reuse=True)
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        try:
            from infinity_emb import AsyncEmbeddingEngine  # type: ignore
        except ImportError:
            raise ImportError(
                "Please install the "
                "`pip install 'infinity_emb[optimum,torch]>=0.0.24'` "
                "package to use the InfinityEmbeddingsLocal."
            )
        logger.debug(f"Using InfinityEmbeddingsLocal with kwargs {values}")

        values["engine"] = AsyncEmbeddingEngine(
            model_name_or_path=values["model"],
            device=values["device"],
            revision=values["revision"],
            model_warmup=values["model_warmup"],
            batch_size=values["batch_size"],
            engine=values["backend"],
        )
        return values

    async def __aenter__(self) -> None:
        """Start the background worker.
        Recommended usage is with the `async with` statement.

        async with InfinityEmbeddingsLocal(
            model="BAAI/bge-small-en-v1.5",
            revision=None,
            device="cpu",
        ) as embedder:
            embeddings = await embedder.aembed_documents(["text1", "text2"])
        """
        await self.engine.__aenter__()

    async def __aexit__(self, *args: Any) -> None:
        """Stop the background worker;
        required to free references to the pytorch model."""
        await self.engine.__aexit__(*args)

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """Async call out to Infinity's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        if not self.engine.running:
            logger.warning(
                "Starting Infinity engine on the fly. This is not recommended. "
                "Please start the engine before using it."
            )
            async with self:
                # spawning threadpool for multithreaded encode, tokenization
                embeddings, _ = await self.engine.embed(texts)
                # stopping threadpool on exit
                logger.warning("Stopped infinity engine after usage.")
        else:
            embeddings, _ = await self.engine.embed(texts)
        return embeddings

    async def aembed_query(self, text: str) -> List[float]:
        """Async call out to Infinity's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embeddings = await self.aembed_documents([text])
        return embeddings[0]

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        This method is async only.
        """
        logger.warning(
            "This method is async only. "
            "Please use the async version `await aembed_documents`."
        )
        return asyncio.run(self.aembed_documents(texts))

    def embed_query(self, text: str) -> List[float]:
        """This method is async only."""
        logger.warning(
            "This method is async only."
            " Please use the async version `await aembed_query`."
        )
        return asyncio.run(self.aembed_query(text))
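A note on the sync wrappers at the end of the file: `asyncio.run` cannot be called while an event loop is already running (e.g. inside Jupyter), so the synchronous methods only work from plain scripts. A minimal sketch of such usage, assuming the class as defined above:

```python
import asyncio

from langchain_community.embeddings import InfinityEmbeddingsLocal


def embed_sync(texts: list[str]) -> list[list[float]]:
    """Run the async-only embedding API from plain synchronous code."""
    embeddings = InfinityEmbeddingsLocal(model="BAAI/bge-small-en-v1.5", device="cpu")

    async def _run() -> list[list[float]]:
        # Start and stop the batching engine once around the whole call.
        async with embeddings:
            return await embeddings.aembed_documents(texts)

    # asyncio.run creates a fresh event loop; do not call this inside Jupyter,
    # where a loop is already running - use `await` directly there instead.
    return asyncio.run(_run())
```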
@@ -12,6 +12,7 @@ EXPECTED_ALL = [
"HuggingFaceEmbeddings",
"HuggingFaceInferenceAPIEmbeddings",
"InfinityEmbeddings",
+"InfinityEmbeddingsLocal",
"GradientEmbeddings",
"JinaEmbeddings",
"LlamaCppEmbeddings",
@@ -0,0 +1,43 @@
import numpy as np
import pytest

from langchain_community.embeddings.infinity_local import InfinityEmbeddingsLocal

try:
    import torch  # noqa
    import infinity_emb  # noqa

    IMPORTED_TORCH = True
except ImportError:
    IMPORTED_TORCH = False


@pytest.mark.skipif(not IMPORTED_TORCH, reason="torch not installed")
@pytest.mark.asyncio
async def test_local_infinity_embeddings() -> None:
    embedder = InfinityEmbeddingsLocal(
        model="TaylorAI/bge-micro-v2",
        device="cpu",
        backend="torch",
        revision=None,
        batch_size=2,
        model_warmup=False,
    )

    async with embedder:
        embeddings = await embedder.aembed_documents(["text1", "text2", "text1"])
        assert len(embeddings) == 3
        # model has 384 dim output
        assert len(embeddings[0]) == 384
        assert len(embeddings[1]) == 384
        assert len(embeddings[2]) == 384
        # assert all different embeddings
        assert (np.array(embeddings[0]) - np.array(embeddings[1]) != 0).all()
        # assert identical embeddings, up to floating point error
        np.testing.assert_array_equal(embeddings[0], embeddings[2])


if __name__ == "__main__":
    import asyncio

    asyncio.run(test_local_infinity_embeddings())