langchain/libs/community/tests/integration_tests/chat_models/text_mlx.py
Prince Canuma 1f9f4d8742
community[minor]: Add support for MLX models (chat & llm) (#18152)
**Description:** This PR adds support for MLX models, both chat (i.e.,
instruct) and llm (i.e., pretrained) types.
**Dependencies:** mlx, mlx_lm, transformers
**Twitter handle:** @Prince_Canuma

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: Bagatur <22008038+baskaryan@users.noreply.github.com>
2024-04-09 14:17:07 +00:00

"""Test MLX Chat Model."""
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_community.chat_models.mlx import ChatMLX
from langchain_community.llms.mlx_pipeline import MLXPipeline
def test_default_call() -> None:
"""Test default model call."""
llm = MLXPipeline.from_model_id(
model_id="mlx-community/quantized-gemma-2b-it",
pipeline_kwargs={"max_new_tokens": 10},
)
chat = ChatMLX(llm=llm)
response = chat.invoke(input=[HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
llm = MLXPipeline.from_model_id(
model_id="mlx-community/quantized-gemma-2b-it",
pipeline_kwargs={"max_new_tokens": 10},
)
chat = ChatMLX(llm=llm)
response = chat.invoke(
input=[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="How are you doing?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
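

The PR also adds the llm (i.e., pretrained) path via MLXPipeline itself, which the chat tests above do not exercise. A minimal sketch of that usage, reusing the model id and pipeline_kwargs from the tests; the test name and the prompt string are illustrative, not part of the original file:

def test_llm_pipeline_call() -> None:
    """Sketch: use MLXPipeline directly as a text-completion LLM."""
    llm = MLXPipeline.from_model_id(
        model_id="mlx-community/quantized-gemma-2b-it",
        pipeline_kwargs={"max_new_tokens": 10},
    )
    # Hypothetical prompt; any short string serves as a smoke test.
    output = llm.invoke("Hello, my name is")
    assert isinstance(output, str)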