community[minor]: Add support for OVHcloud AI Endpoints Embedding (#22667)

**Description:** Add support for [OVHcloud AI
Endpoints](https://endpoints.ai.cloud.ovh.net/) Embedding models.

Inspired by:
https://gist.github.com/gmasse/e1f99339e161f4830df6be5d0095349a
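
A minimal usage sketch of what this PR enables; the model name and the default `kepler` region are taken from the unit tests in this diff, and the environment variable name from the class docstring:

```python
import os

from langchain_community.embeddings import OVHCloudEmbeddings

# The token is optional; without it, requests fall back to rate-limited access.
embeddings = OVHCloudEmbeddings(
    model_name="multilingual-e5-base",  # model used in the unit tests below
    region="kepler",  # default region
    access_token=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
)

vector = embeddings.embed_query("What is an AI endpoint?")
print(len(vector))
```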

Signed-off-by: Joffref <mariusjoffre@gmail.com>

@@ -176,6 +176,9 @@ if TYPE_CHECKING:
    from langchain_community.embeddings.oracleai import (
        OracleEmbeddings,
    )
    from langchain_community.embeddings.ovhcloud import (
        OVHCloudEmbeddings,
    )
    from langchain_community.embeddings.premai import (
        PremAIEmbeddings,
    )
@@ -283,6 +286,7 @@ __all__ = [
    "OpenVINOBgeEmbeddings",
    "OpenVINOEmbeddings",
    "OracleEmbeddings",
    "OVHCloudEmbeddings",
    "PremAIEmbeddings",
    "QianfanEmbeddingsEndpoint",
    "QuantizedBgeEmbeddings",
@@ -365,6 +369,7 @@ _module_lookup = {
    "QuantizedBgeEmbeddings": "langchain_community.embeddings.itrex",
    "QuantizedBiEncoderEmbeddings": "langchain_community.embeddings.optimum_intel",
    "OracleEmbeddings": "langchain_community.embeddings.oracleai",
    "OVHCloudEmbeddings": "langchain_community.embeddings.ovhcloud",
    "SagemakerEndpointEmbeddings": "langchain_community.embeddings.sagemaker_endpoint",
    "SambaStudioEmbeddings": "langchain_community.embeddings.sambanova",
    "SelfHostedEmbeddings": "langchain_community.embeddings.self_hosted",

@@ -0,0 +1,101 @@
import logging
import time
from typing import Any, List, Optional

import requests

from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Extra

logger = logging.getLogger(__name__)


class OVHCloudEmbeddings(BaseModel, Embeddings):
    """OVHcloud AI Endpoints embedding models.

    Usage:
        OVH_AI_ENDPOINTS_ACCESS_TOKEN="your-token" python3 langchain_embedding.py

    NB: Make sure you are using a valid token. Otherwise, document indexing
    will be slow because requests are rate-limited.
    """

    access_token: Optional[str] = None
    """OVHcloud AI Endpoints access token."""

    model_name: str = ""
    """OVHcloud AI Endpoints model name used for embeddings generation."""

    region: str = "kepler"
    """OVHcloud AI Endpoints region."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)
        if self.access_token is None:
            logger.warning(
                "No access token provided; indexing will be slow due to rate limiting."
            )
        if self.model_name == "":
            raise ValueError("Model name is required for OVHCloud embeddings.")
        if self.region == "":
            raise ValueError("Region is required for OVHCloud embeddings.")

    def _generate_embedding(self, text: str) -> List[float]:
        """Generate embeddings from OVHcloud AI Endpoints.

        Args:
            text (str): The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        headers = {
            "content-type": "text/plain",
            "Authorization": f"Bearer {self.access_token}",
        }

        session = requests.session()
        while True:
            response = session.post(
                f"https://{self.model_name}.endpoints.{self.region}.ai.cloud.ovh.net/api/text2vec",
                headers=headers,
                data=text,
            )
            if response.status_code != 200:
                if response.status_code == 429:
                    # Rate limit exceeded: wait for the advertised reset, then retry.
                    reset_time = int(response.headers.get("RateLimit-Reset", 0))
                    logger.info("Rate limit exceeded. Waiting %d seconds.", reset_time)
                    if reset_time > 0:
                        time.sleep(reset_time)
                        continue
                    else:
                        # Rate limit reset time has passed; retry immediately.
                        continue
                # Any other non-200 status code is a hard failure.
                raise ValueError(
                    f"Request failed with status code: {response.status_code}, {response.text}"
                )
            return response.json()

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents.

        Args:
            texts (List[str]): The list of texts to embed.

        Returns:
            List[List[float]]: List of embeddings, one for each input text.
        """
        return [self._generate_embedding(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query text.

        Args:
            text (str): The text to embed.

        Returns:
            List[float]: Embeddings for the text.
        """
        return self._generate_embedding(text)
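
For reference, a short end-to-end sketch of driving the class above; the environment variable name comes from the class docstring and the model name from the unit tests below:

```python
import os

from langchain_community.embeddings.ovhcloud import OVHCloudEmbeddings

embeddings = OVHCloudEmbeddings(
    model_name="multilingual-e5-base",
    access_token=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
)

# embed_documents calls _generate_embedding once per text, so the 429 retry
# loop above transparently absorbs rate limiting during indexing.
doc_vectors = embeddings.embed_documents(["Hello", "World"])
query_vector = embeddings.embed_query("Hello")
assert len(doc_vectors) == 2
```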

@@ -60,6 +60,7 @@ EXPECTED_ALL = [
    "JavelinAIGatewayEmbeddings",
    "OllamaEmbeddings",
    "OracleEmbeddings",
    "OVHCloudEmbeddings",
    "QianfanEmbeddingsEndpoint",
    "JohnSnowLabsEmbeddings",
    "VoyageEmbeddings",

@@ -0,0 +1,32 @@
import pytest

from langchain_community.embeddings.ovhcloud import OVHCloudEmbeddings


def test_ovhcloud_correct_instantiation() -> None:
    llm = OVHCloudEmbeddings(model_name="multilingual-e5-base")
    assert isinstance(llm, OVHCloudEmbeddings)


def test_ovhcloud_empty_model_name_should_raise_error() -> None:
    with pytest.raises(ValueError):
        OVHCloudEmbeddings(model_name="")


def test_ovhcloud_empty_region_should_raise_error() -> None:
    with pytest.raises(ValueError):
        OVHCloudEmbeddings(model_name="multilingual-e5-base", region="")


def test_ovhcloud_empty_access_token_should_not_raise_error() -> None:
    llm = OVHCloudEmbeddings(
        model_name="multilingual-e5-base", region="kepler", access_token=""
    )
    assert isinstance(llm, OVHCloudEmbeddings)


def test_ovhcloud_embed_documents() -> None:
    llm = OVHCloudEmbeddings(model_name="multilingual-e5-base")
    docs = ["Hello", "World"]
    output = llm.embed_documents(docs)
    assert len(output) == len(docs)
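
A companion test for `embed_query` in the same style could look like the sketch below; it is not part of this diff and, like `test_ovhcloud_embed_documents`, it would hit the live endpoint:

```python
def test_ovhcloud_embed_query() -> None:
    # Hypothetical companion test mirroring test_ovhcloud_embed_documents.
    llm = OVHCloudEmbeddings(model_name="multilingual-e5-base")
    output = llm.embed_query("Hello")
    assert len(output) > 0
```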