mirror of
https://github.com/hwchase17/langchain
synced 2024-11-10 01:10:59 +00:00
fd1061e7bf
- **Description**: - **add support for more data types**: by default `IpexLLM` will load the model in int4 format. This PR adds more data type support such as `sym_int5`, `sym_int8`, etc. Data formats like NF3, NF4, FP4 and FP8 are only supported on GPU and will be added in a future PR. - Fix a small issue in saving/loading, update api docs - **Dependencies**: `ipex-llm` library - **Document**: In `docs/docs/integrations/llms/ipex_llm.ipynb`, added instructions for saving/loading a low-bit model. - **Tests**: added new test cases to `libs/community/tests/integration_tests/llms/test_ipex_llm.py`, added config params. - **Contribution maintainer**: @shane-huang
46 lines
1.3 KiB
Python
46 lines
1.3 KiB
Python
"""Test BigdlLLM"""
|
|
import os
|
|
|
|
import pytest
|
|
from langchain_core.outputs import LLMResult
|
|
|
|
from langchain_community.llms.bigdl_llm import BigdlLLM
|
|
|
|
# Comma-separated model ids come from the environment so CI can opt in to
# these heavyweight integration tests; an unset variable skips them all.
model_ids_to_test = os.getenv("TEST_BIGDLLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
    not model_ids_to_test,
    reason="TEST_BIGDLLLM_MODEL_IDS environment variable not set.",
)
# Split into individual ids, dropping blank entries so an unset or empty
# variable does not parametrize tests with an empty-string model id
# (previously "".split(",") produced [""]).
model_ids_to_test = [
    model_id.strip()
    for model_id in model_ids_to_test.split(",")
    if model_id.strip()
]  # type: ignore
|
|
|
|
|
|
@skip_if_no_model_ids
@pytest.mark.parametrize(
    "model_id",
    model_ids_to_test,
)
def test_call(model_id: str) -> None:
    """invoke() on a BigdlLLM built via from_model_id should return a str."""
    model_kwargs = {"temperature": 0, "max_length": 16, "trust_remote_code": True}
    llm = BigdlLLM.from_model_id(
        model_id=model_id,
        model_kwargs=model_kwargs,
    )
    response = llm.invoke("Hello!")
    assert isinstance(response, str)
|
|
|
|
|
|
@skip_if_no_model_ids
@pytest.mark.parametrize(
    "model_id",
    model_ids_to_test,
)
def test_generate(model_id: str) -> None:
    """generate() on a BigdlLLM should return an LLMResult with generations."""
    model_kwargs = {"temperature": 0, "max_length": 16, "trust_remote_code": True}
    llm = BigdlLLM.from_model_id(
        model_id=model_id,
        model_kwargs=model_kwargs,
    )
    result = llm.generate(["Hello!"])
    assert isinstance(result, LLMResult)
    assert isinstance(result.generations, list)
|