mirror of https://github.com/hwchase17/langchain (synced 2024-11-10 01:10:59 +00:00)
Commit fd1061e7bf
- **Description**:
  - **Add support for more data types**: by default, `IpexLLM` loads the model in int4 format. This PR adds support for more data types such as `sym_int5`, `sym_int8`, etc. Data formats like NF3, NF4, FP4 and FP8 are only supported on GPU and will be added in a future PR.
  - Fix a small issue in saving/loading and update the API docs.
- **Dependencies**: the `ipex-llm` library.
- **Document**: added instructions for saving/loading the low-bit model to `docs/docs/integrations/llms/ipex_llm.ipynb`.
- **Tests**: added new test cases to `libs/community/tests/integration_tests/llms/test_ipex_llm.py` and added config params.
- **Contribution maintainer**: @shane-huang
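As a rough usage sketch of the feature described above (not code from this PR), loading a model in one of the newly supported low-bit formats and then saving and reloading it might look like the following. The model id and save path are placeholders, and the keyword arguments simply mirror those used in the integration tests below.

```python
from langchain_community.llms import IpexLLM

# Placeholder Hugging Face model id and save directory; substitute your own.
model_id = "lmsys/vicuna-7b-v1.5"
saved_lowbit_path = "/tmp/saved_model"

# Load the model in an explicit low-bit format instead of the default int4.
llm = IpexLLM.from_model_id(
    model_id=model_id,
    load_in_low_bit="sym_int8",
    model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)

# Persist the converted low-bit weights, then reload them later without
# repeating the conversion; the tokenizer is still taken from the original id.
llm.model.save_low_bit(saved_lowbit_path)
llm_lowbit = IpexLLM.from_model_id_low_bit(
    model_id=saved_lowbit_path,
    tokenizer_id=model_id,
    model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)

print(llm_lowbit.invoke("Hello!"))
```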
libs/community/tests/integration_tests/llms/test_ipex_llm.py (89 lines, 2.4 KiB, Python):
"""Test IPEX LLM"""
|
|
import os
|
|
from typing import Any
|
|
|
|
import pytest
|
|
from langchain_core.outputs import LLMResult
|
|
|
|
from langchain_community.llms import IpexLLM
|
|
|
|
model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
|
|
skip_if_no_model_ids = pytest.mark.skipif(
|
|
not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_IDS environment variable not set."
|
|
)
|
|
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore
|
|
|
|
|
|
def load_model(model_id: str) -> Any:
|
|
llm = IpexLLM.from_model_id(
|
|
model_id=model_id,
|
|
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
|
|
)
|
|
return llm
|
|
|
|
|
|
def load_model_more_types(model_id: str, load_in_low_bit: str) -> Any:
|
|
llm = IpexLLM.from_model_id(
|
|
model_id=model_id,
|
|
load_in_low_bit=load_in_low_bit,
|
|
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
|
|
)
|
|
return llm
|
|
|
|
|
|
@skip_if_no_model_ids
|
|
@pytest.mark.parametrize(
|
|
"model_id",
|
|
model_ids_to_test,
|
|
)
|
|
def test_call(model_id: str) -> None:
|
|
"""Test valid call."""
|
|
llm = load_model(model_id)
|
|
output = llm.invoke("Hello!")
|
|
assert isinstance(output, str)
|
|
|
|
|
|
@skip_if_no_model_ids
|
|
@pytest.mark.parametrize(
|
|
"model_id",
|
|
model_ids_to_test,
|
|
)
|
|
def test_asym_int4(model_id: str) -> None:
|
|
"""Test asym int4 data type."""
|
|
llm = load_model_more_types(model_id=model_id, load_in_low_bit="asym_int4")
|
|
output = llm.invoke("Hello!")
|
|
assert isinstance(output, str)
|
|
|
|
|
|
@skip_if_no_model_ids
|
|
@pytest.mark.parametrize(
|
|
"model_id",
|
|
model_ids_to_test,
|
|
)
|
|
def test_generate(model_id: str) -> None:
|
|
"""Test valid generate."""
|
|
llm = load_model(model_id)
|
|
output = llm.generate(["Hello!"])
|
|
assert isinstance(output, LLMResult)
|
|
assert isinstance(output.generations, list)
|
|
|
|
|
|
@skip_if_no_model_ids
|
|
@pytest.mark.parametrize(
|
|
"model_id",
|
|
model_ids_to_test,
|
|
)
|
|
def test_save_load_lowbit(model_id: str) -> None:
|
|
"""Test save and load lowbit model."""
|
|
saved_lowbit_path = "/tmp/saved_model"
|
|
llm = load_model(model_id)
|
|
llm.model.save_low_bit(saved_lowbit_path)
|
|
del llm
|
|
loaded_llm = IpexLLM.from_model_id_low_bit(
|
|
model_id=saved_lowbit_path,
|
|
tokenizer_id=model_id,
|
|
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
|
|
)
|
|
output = loaded_llm.invoke("Hello!")
|
|
assert isinstance(output, str)
|
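To exercise these integration tests locally, a minimal sketch (assuming a single locally available model, and that the test module has not yet been imported in the current process) is to set the environment variable the file checks and then hand the test path to pytest:

```python
import os

import pytest

# Hypothetical model id; the tests above skip themselves when this variable is unset.
os.environ["TEST_IPEXLLM_MODEL_IDS"] = "lmsys/vicuna-7b-v1.5"

pytest.main(["libs/community/tests/integration_tests/llms/test_ipex_llm.py", "-v"])
```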