mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
4d7f6fa968
Description: Added support for batching when using AI21 Embeddings model Twitter handle: https://github.com/AI21Labs --------- Co-authored-by: Asaf Gardin <asafg@ai21.com> Co-authored-by: Erick Friis <erick@langchain.dev>
38 lines
1.2 KiB
Python
38 lines
1.2 KiB
Python
"""Test AI21 embeddings."""
|
|
|
|
from langchain_ai21.embeddings import AI21Embeddings
|
|
|
|
|
|
def test_langchain_ai21_embedding_documents() -> None:
    """Test AI21 embeddings."""
    # A single-document batch must come back as exactly one non-empty vector.
    embedder = AI21Embeddings()
    vectors = embedder.embed_documents(["foo bar"])
    assert len(vectors) == 1
    assert len(vectors[0]) > 0
|
|
|
|
|
|
def test_langchain_ai21_embedding_query() -> None:
    """Test AI21 embeddings."""
    # Embedding one query string should yield a non-empty vector.
    embedder = AI21Embeddings()
    vector = embedder.embed_query("foo bar")
    assert len(vector) > 0
|
|
|
|
|
|
def test_langchain_ai21_embedding_documents__with_explicit_chunk_size() -> None:
    """Test AI21 embeddings with an explicit ``batch_size`` argument.

    ``batch_size=1`` forces each document into its own request batch;
    the result must still contain one vector per input document.
    """
    documents = ["foo", "bar"]
    embedding = AI21Embeddings()
    output = embedding.embed_documents(documents, batch_size=1)
    # One embedding per input, regardless of how the inputs were batched.
    assert len(output) == 2
    assert len(output[0]) > 0
|
|
|
|
|
|
def test_langchain_ai21_embedding_query__with_explicit_chunk_size() -> None:
    """Test AI21 embeddings with an explicit ``batch_size`` argument.

    ``embed_query`` takes a single string; passing ``batch_size`` must not
    change the fact that a non-empty vector is returned.
    """
    # Singular name for a single query string, matching the naming in
    # test_langchain_ai21_embedding_query.
    query = "foo bar"
    embedding = AI21Embeddings()
    output = embedding.embed_query(query, batch_size=1)
    assert len(output) > 0
|