ibm[patch]: add async tests, add tokenize support (#18898)

- **Description:** add async tests, add tokenize support
- **Dependencies:**
[ibm-watsonx-ai](https://pypi.org/project/ibm-watsonx-ai/),
  - **Tag maintainer:** 

Please make sure your PR is passing linting and testing before
submitting. Run `make format`, `make lint` and `make test` to check this
locally.
Please make sure the integration tests are passing locally.

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
pull/19094/head
Mateusz Szewczyk 4 months ago committed by GitHub
parent 7253b816cc
commit b15d150d22
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -419,3 +419,10 @@ class WatsonxLLM(BaseLLM):
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
yield chunk
def get_num_tokens(self, text: str) -> int:
    """Return the number of tokens in *text*.

    Delegates to the watsonx.ai tokenize endpoint; only the count is
    requested (``return_tokens=False``), not the token strings themselves.
    """
    tokenized = self.watsonx_model.tokenize(text, return_tokens=False)
    return tokenized["result"]["token_count"]
def get_token_ids(self, text: str) -> List[int]:
    """Unsupported: the watsonx.ai API does not expose token ids.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError("API does not support returning token ids.")

@ -245,13 +245,13 @@ ibm-cos-sdk-core = "2.13.4"
[[package]]
name = "ibm-watson-machine-learning"
version = "1.0.349"
version = "1.0.352"
description = "IBM Watson Machine Learning API Client"
optional = false
python-versions = ">=3.10"
files = [
{file = "ibm_watson_machine_learning-1.0.349-py3-none-any.whl", hash = "sha256:b5bc4cdec2a9cda1c9fa6681558f721a7a3058937257ae3040618c7183fe2f55"},
{file = "ibm_watson_machine_learning-1.0.349.tar.gz", hash = "sha256:46dd3d67bee39c3e84b047e651bafa06dfb0cb973354c4e2c582928340b51a17"},
{file = "ibm_watson_machine_learning-1.0.352-py3-none-any.whl", hash = "sha256:be233468bdd8e11ee57975310febefd013bcae35b7d153053e064f0be6a00242"},
{file = "ibm_watson_machine_learning-1.0.352.tar.gz", hash = "sha256:937b339c76d4ea143439f98ea60cfabacf802102e39873f361ece82174a791d3"},
]
[package.dependencies]
@ -274,13 +274,13 @@ fl-rt23-1-py3-10 = ["GPUtil", "cloudpickle (==1.3.0)", "cryptography (==39.0.1)"
[[package]]
name = "ibm-watsonx-ai"
version = "0.2.0"
version = "0.2.2"
description = "IBM watsonx.ai API Client"
optional = false
python-versions = ">=3.10"
files = [
{file = "ibm_watsonx_ai-0.2.0-py3-none-any.whl", hash = "sha256:75234916b2cd9accedb401a3c11925845efa08361cff4978589f58e39dad48f6"},
{file = "ibm_watsonx_ai-0.2.0.tar.gz", hash = "sha256:9af9e402c6f0c74e30ac11c0ae1b80e55e21fb2b2d7c546fc506ab1b9be4851e"},
{file = "ibm_watsonx_ai-0.2.2-py3-none-any.whl", hash = "sha256:e2fff3ed3d35be037548a96f6fea211ab1b9cef6a7c1c66c2f5479aafa868e9e"},
{file = "ibm_watsonx_ai-0.2.2.tar.gz", hash = "sha256:00e0d1b46742a6a1b08b2923a8134033f17c2800d347ea06548bc2b649eab78f"},
]
[package.dependencies]
@ -306,22 +306,22 @@ files = [
[[package]]
name = "importlib-metadata"
version = "7.0.1"
version = "7.0.2"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
{file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"},
{file = "importlib_metadata-7.0.1.tar.gz", hash = "sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"},
{file = "importlib_metadata-7.0.2-py3-none-any.whl", hash = "sha256:f4bc4c0c070c490abf4ce96d715f68e95923320370efb66143df00199bb6c100"},
{file = "importlib_metadata-7.0.2.tar.gz", hash = "sha256:198f568f3230878cb1b44fbd7975f87906c22336dba2e4a7f05278c281fbd792"},
]
[package.dependencies]
zipp = ">=0.5"
[package.extras]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "iniconfig"
@ -372,7 +372,7 @@ files = [
[[package]]
name = "langchain-core"
version = "0.1.28"
version = "0.1.31"
description = "Building applications with LLMs through composability"
optional = false
python-versions = ">=3.8.1,<4.0"
@ -398,13 +398,13 @@ url = "../../core"
[[package]]
name = "langsmith"
version = "0.1.12"
version = "0.1.24"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langsmith-0.1.12-py3-none-any.whl", hash = "sha256:4f3d03c365c4d9eb4ed151055e2830ea73235d8c8be0841a63334f9d4fcf8b2b"},
{file = "langsmith-0.1.12.tar.gz", hash = "sha256:cf8d371f92f1035fd98a9692edd8af9c3a60947db4a77864a9c07dec96d3b039"},
{file = "langsmith-0.1.24-py3-none-any.whl", hash = "sha256:898ef5265bca8fc912f7fbf207e1d69cacd86055faecf6811bd42641e6319840"},
{file = "langsmith-0.1.24.tar.gz", hash = "sha256:432b829e763f5077df411bc59bb35449813f18174d2ebc8bbbb38427071d5e7d"},
]
[package.dependencies]
@ -666,13 +666,13 @@ testing = ["pytest", "pytest-benchmark"]
[[package]]
name = "pydantic"
version = "2.6.3"
version = "2.6.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.8"
files = [
{file = "pydantic-2.6.3-py3-none-any.whl", hash = "sha256:72c6034df47f46ccdf81869fddb81aade68056003900a8724a4f160700016a2a"},
{file = "pydantic-2.6.3.tar.gz", hash = "sha256:e07805c4c7f5c6826e33a1d4c9d47950d7eaf34868e2690f8594d2e30241f11f"},
{file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"},
{file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"},
]
[package.dependencies]
@ -896,7 +896,6 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
@ -1055,13 +1054,13 @@ files = [
[[package]]
name = "types-requests"
version = "2.31.0.20240218"
version = "2.31.0.20240311"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.31.0.20240218.tar.gz", hash = "sha256:f1721dba8385958f504a5386240b92de4734e047a08a40751c1654d1ac3349c5"},
{file = "types_requests-2.31.0.20240218-py3-none-any.whl", hash = "sha256:a82807ec6ddce8f00fe0e949da6d6bc1fbf1715420218a9640d695f70a9e5a9b"},
{file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"},
{file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"},
]
[package.dependencies]
@ -1137,20 +1136,20 @@ watchmedo = ["PyYAML (>=3.10)"]
[[package]]
name = "zipp"
version = "3.17.0"
version = "3.18.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
files = [
{file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"},
{file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"},
{file = "zipp-3.18.0-py3-none-any.whl", hash = "sha256:c1bb803ed69d2cce2373152797064f7e79bc43f0a3748eb494096a867e0ebf79"},
{file = "zipp-3.18.0.tar.gz", hash = "sha256:df8d042b02765029a09b157efd8e820451045890acc30f8e37dd2f94a060221f"},
]
[package.extras]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4.0"
content-hash = "96cc11c3e73681170c57bf7198ad3a1091620b2272e82dde34bcce4df8721d62"
content-hash = "84020398bee800f849046e3b88b8501508e2065c120b52823b876d4ed03ad76b"

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain-ibm"
version = "0.1.1"
version = "0.1.2"
description = "An integration package connecting IBM watsonx.ai and LangChain"
authors = ["IBM"]
readme = "README.md"
@ -12,7 +12,7 @@ license = "MIT"
[tool.poetry.dependencies]
python = ">=3.10,<4.0"
langchain-core = "^0.1.27"
langchain-core = "^0.1.29"
ibm-watsonx-ai = "^0.2.0"
[tool.poetry.group.test]

@ -17,11 +17,12 @@ from langchain_ibm import WatsonxLLM
WX_APIKEY = os.environ.get("WATSONX_APIKEY", "")
WX_PROJECT_ID = os.environ.get("WATSONX_PROJECT_ID", "")
MODEL_ID = "google/flan-ul2"
def test_watsonxllm_invoke() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
model_id=MODEL_ID,
url="https://us-south.ml.cloud.ibm.com",
project_id=WX_PROJECT_ID,
)
@ -39,7 +40,7 @@ def test_watsonxllm_invoke_with_params() -> None:
}
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
model_id=MODEL_ID,
url="https://us-south.ml.cloud.ibm.com",
project_id=WX_PROJECT_ID,
params=parameters,
@ -52,7 +53,7 @@ def test_watsonxllm_invoke_with_params() -> None:
def test_watsonxllm_generate() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
model_id=MODEL_ID,
url="https://us-south.ml.cloud.ibm.com",
project_id=WX_PROJECT_ID,
)
@ -66,7 +67,7 @@ def test_watsonxllm_generate() -> None:
def test_watsonxllm_generate_with_multiple_prompts() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
model_id=MODEL_ID,
url="https://us-south.ml.cloud.ibm.com",
project_id=WX_PROJECT_ID,
)
@ -82,7 +83,7 @@ def test_watsonxllm_generate_with_multiple_prompts() -> None:
def test_watsonxllm_generate_stream() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
model_id=MODEL_ID,
url="https://us-south.ml.cloud.ibm.com",
project_id=WX_PROJECT_ID,
)
@ -96,7 +97,7 @@ def test_watsonxllm_generate_stream() -> None:
def test_watsonxllm_stream() -> None:
watsonxllm = WatsonxLLM(
model_id="google/flan-ul2",
model_id=MODEL_ID,
url="https://us-south.ml.cloud.ibm.com",
project_id=WX_PROJECT_ID,
)
@ -119,7 +120,7 @@ def test_watsonxllm_stream() -> None:
def test_watsonxllm_invoke_from_wx_model() -> None:
model = Model(
model_id="google/flan-ul2",
model_id=MODEL_ID,
credentials={
"apikey": WX_APIKEY,
"url": "https://us-south.ml.cloud.ibm.com",
@ -135,7 +136,7 @@ def test_watsonxllm_invoke_from_wx_model() -> None:
def test_watsonxllm_invoke_from_wx_model_inference() -> None:
model = ModelInference(
model_id="google/flan-ul2",
model_id=MODEL_ID,
credentials={
"apikey": WX_APIKEY,
"url": "https://us-south.ml.cloud.ibm.com",
@ -159,7 +160,7 @@ def test_watsonxllm_invoke_from_wx_model_inference_with_params() -> None:
GenTextParamsMetaNames.TOP_P: 1,
}
model = ModelInference(
model_id="google/flan-ul2",
model_id=MODEL_ID,
credentials={
"apikey": WX_APIKEY,
"url": "https://us-south.ml.cloud.ibm.com",
@ -197,3 +198,36 @@ def test_watsonxllm_invoke_from_wx_model_inference_with_params_as_enum() -> None
print(f"\nResponse: {response}")
assert isinstance(response, str)
assert len(response) > 0
async def test_watsonx_ainvoke() -> None:
    """Smoke-test the async invoke path against the live watsonx.ai service."""
    llm = WatsonxLLM(
        model_id=MODEL_ID,
        url="https://us-south.ml.cloud.ibm.com",
        project_id=WX_PROJECT_ID,
    )
    result = await llm.ainvoke("What color sunflower is?")
    assert isinstance(result, str)
async def test_watsonx_agenerate() -> None:
    """Smoke-test async batch generation and the returned token-usage stats."""
    llm = WatsonxLLM(
        model_id=MODEL_ID,
        url="https://us-south.ml.cloud.ibm.com",
        project_id=WX_PROJECT_ID,
    )
    result = await llm.agenerate(
        ["What color sunflower is?", "What color turtle is?"]
    )
    assert len(result.generations) > 0
    # llm_output is Optional on LLMResult, hence the ignore.
    assert result.llm_output["token_usage"]["generated_token_count"] != 0  # type: ignore
def test_get_num_tokens() -> None:
    """Token counting via the live tokenize endpoint returns a positive count."""
    llm = WatsonxLLM(
        model_id=MODEL_ID,
        url="https://us-south.ml.cloud.ibm.com",
        project_id=WX_PROJECT_ID,
    )
    token_count = llm.get_num_tokens("What color sunflower is?")
    assert token_count > 0

Loading…
Cancel
Save