mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
25fbe356b4
This PR upgrades community to a recent version of mypy. It inserts type: ignore on all existing failures.
40 lines
1.3 KiB
Python
40 lines
1.3 KiB
Python
"""Test Baidu Qianfan Embedding Endpoint."""
|
|
from langchain_community.embeddings.baidu_qianfan_endpoint import (
|
|
QianfanEmbeddingsEndpoint,
|
|
)
|
|
|
|
|
|
def test_embedding_multiple_documents() -> None:
    """Embedding a batch of documents yields one 384-dim vector per input."""
    texts = ["foo", "bar"]
    embedder = QianfanEmbeddingsEndpoint()  # type: ignore[call-arg]
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    for vector in vectors:
        assert len(vector) == 384
|
|
|
|
|
|
def test_embedding_query() -> None:
    """Embedding a single query string yields one 384-dim vector."""
    embedder = QianfanEmbeddingsEndpoint()  # type: ignore[call-arg]
    vector = embedder.embed_query("foo")
    assert len(vector) == 384
|
|
|
|
|
|
def test_model() -> None:
    """Passing an explicit model name still embeds every input document."""
    inputs = ["hi", "qianfan"]
    embedder = QianfanEmbeddingsEndpoint(model="Embedding-V1")  # type: ignore[call-arg]
    assert len(embedder.embed_documents(inputs)) == 2
|
|
|
|
|
|
def test_rate_limit() -> None:
    """init_kwargs propagates query_per_second down to the sync rate limiter.

    Also checks that embedding still works with the limiter configured.
    """
    embedder = QianfanEmbeddingsEndpoint(  # type: ignore[call-arg]
        model="Embedding-V1", init_kwargs={"query_per_second": 2}
    )
    # Reach through the client internals to confirm the limiter was configured.
    limiter = embedder.client._client._rate_limiter._sync_limiter
    assert limiter._query_per_second == 2
    vectors = embedder.embed_documents(["foo", "bar"])
    assert len(vectors) == 2
    for vector in vectors:
        assert len(vector) == 384
|