Mirror of https://github.com/hwchase17/langchain, synced 2024-11-06 03:20:49 +00:00

Commit ae471a7dcb
- **Description**: [`bigdl-llm`](https://github.com/intel-analytics/BigDL) is a library for running LLMs on Intel XPUs (from laptop to GPU to cloud) with very low latency, using INT4/FP4/INT8/FP8 quantization for any PyTorch model. This PR adds the bigdl-llm integration to langchain.
- **Issue**: NA
- **Dependencies**: `bigdl-llm` library
- **Contribution maintainer**: @shane-huang

Examples added:
- docs/docs/integrations/llms/bigdl.ipynb
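For orientation (a minimal sketch, not part of the PR diff), this is how the new integration is typically invoked from langchain, mirroring the calls exercised by the test file below; it assumes `bigdl-llm` is installed and the lmsys/vicuna-7b-v1.5 weights can be fetched from Hugging Face:

```python
# Minimal usage sketch (assumes `bigdl-llm` is installed and the model
# weights are downloadable); mirrors the API exercised by the tests below.
from langchain_community.llms.bigdl import BigdlLLM

llm = BigdlLLM.from_model_id(
    model_id="lmsys/vicuna-7b-v1.5",
    model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
print(llm("Hello!"))  # calling the LLM returns the completion as a string
```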
26 lines
778 B
Python
"""Test BigDL LLM"""
|
|
from langchain_core.outputs import LLMResult
|
|
|
|
from langchain_community.llms.bigdl import BigdlLLM
|
|
|
|
|
|
def test_call() -> None:
|
|
"""Test valid call to baichuan."""
|
|
llm = BigdlLLM.from_model_id(
|
|
model_id="lmsys/vicuna-7b-v1.5",
|
|
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
|
|
)
|
|
output = llm("Hello!")
|
|
assert isinstance(output, str)
|
|
|
|
|
|
def test_generate() -> None:
|
|
"""Test valid call to baichuan."""
|
|
llm = BigdlLLM.from_model_id(
|
|
model_id="lmsys/vicuna-7b-v1.5",
|
|
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
|
|
)
|
|
output = llm.generate(["Hello!"])
|
|
assert isinstance(output, LLMResult)
|
|
assert isinstance(output.generations, list)
|
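For reference (a sketch, not part of the test file): the `LLMResult` returned by `generate()` nests its output, with one list per input prompt and each inner list holding `Generation` objects that carry the generated text. Under the same model setup as the tests above, the first completion can be inspected like this:

```python
from langchain_community.llms.bigdl import BigdlLLM

llm = BigdlLLM.from_model_id(
    model_id="lmsys/vicuna-7b-v1.5",
    model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)

# generations is a list with one entry per input prompt; each entry is a
# list of Generation objects, each carrying the generated text.
result = llm.generate(["Hello!"])
print(result.generations[0][0].text)
```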