langchain/libs/partners/together/tests/integration_tests/test_chat_models.py
Hassan El Mghari d6ef5fe86a
together: add chat models, use openai base (#21337)
**Description:** Adds chat completions, our most popular API, to the Together AI
package, while staying backwards compatible with the old completions API so
folks can continue to use it. Also moves the embeddings API to the OpenAI
library to standardize it further.
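
A minimal usage sketch of the new chat interface (the model name below is illustrative, and a `TOGETHER_API_KEY` environment variable is assumed):

```python
from langchain_core.messages import HumanMessage
from langchain_together import ChatTogether

# Illustrative model name; any Together AI chat model id works here.
chat = ChatTogether(model="meta-llama/Llama-3-8b-chat-hf", max_tokens=64)
print(chat.invoke([HumanMessage(content="Hello")]).content)
```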

**Twitter handle:** @nutlope

- [x] **Add tests and docs**
- [x] **Lint and test**: Run `make format`, `make lint` and `make test` from
the root of the package(s) you've modified. See contribution guidelines for
more: https://python.langchain.com/docs/contributing/

If no one reviews your PR within a few days, please @-mention one of
baskaryan, efriis, eyurtsev, hwchase17.

---------

Co-authored-by: Erick Friis <erick@langchain.dev>
2024-05-06 17:47:06 -07:00

import pytest
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_together import ChatTogether
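
# NOTE: These are live integration tests. They assume a valid TOGETHER_API_KEY
# is set in the environment and make real calls to the Together AI API.
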
def test_chat_together_model() -> None:
    """Test ChatTogether wrapper handles model_name."""
    chat = ChatTogether(model="foo")
    assert chat.model_name == "foo"
    chat = ChatTogether(model_name="bar")
    assert chat.model_name == "bar"


def test_chat_together_system_message() -> None:
    """Test ChatTogether wrapper with system message."""
    chat = ChatTogether(max_tokens=10)
    system_message = SystemMessage(content="You are to chat with the user.")
    human_message = HumanMessage(content="Hello")
    response = chat.invoke([system_message, human_message])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)


def test_chat_together_llm_output_contains_model_name() -> None:
    """Test llm_output contains model_name."""
    chat = ChatTogether(max_tokens=10)
    message = HumanMessage(content="Hello")
    llm_result = chat.generate([[message]])
    assert llm_result.llm_output is not None
    assert llm_result.llm_output["model_name"] == chat.model_name


def test_chat_together_streaming_llm_output_contains_model_name() -> None:
    """Test llm_output contains model_name when streaming."""
    chat = ChatTogether(max_tokens=10, streaming=True)
    message = HumanMessage(content="Hello")
    llm_result = chat.generate([[message]])
    assert llm_result.llm_output is not None
    assert llm_result.llm_output["model_name"] == chat.model_name


def test_chat_together_invalid_streaming_params() -> None:
    """Test that invalid streaming params raise a ValueError."""
    with pytest.raises(ValueError):
        ChatTogether(
            max_tokens=10,
            streaming=True,
            temperature=0,
            n=5,
        )


def test_chat_together_extra_kwargs() -> None:
    """Test extra kwargs to chat together."""
    # Check that foo is saved in extra_kwargs.
    llm = ChatTogether(foo=3, max_tokens=10)
    assert llm.max_tokens == 10
    assert llm.model_kwargs == {"foo": 3}

    # Test that if extra_kwargs are provided, they are added to it.
    llm = ChatTogether(foo=3, model_kwargs={"bar": 2})
    assert llm.model_kwargs == {"foo": 3, "bar": 2}

    # Test that if the same kwarg is provided twice it errors.
    with pytest.raises(ValueError):
        ChatTogether(foo=3, model_kwargs={"foo": 2})

    # Test that an explicit param specified in model_kwargs errors.
    with pytest.raises(ValueError):
        ChatTogether(model_kwargs={"temperature": 0.2})

    # Test that "model" cannot be specified in model_kwargs.
    with pytest.raises(ValueError):
        ChatTogether(model_kwargs={"model": "meta-llama/Llama-3-8b-chat-hf"})


def test_stream() -> None:
    """Test streaming tokens from Together AI."""
    llm = ChatTogether()

    for token in llm.stream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


async def test_astream() -> None:
    """Test streaming tokens from Together AI."""
    llm = ChatTogether()

    async for token in llm.astream("I'm Pickle Rick"):
        assert isinstance(token.content, str)


async def test_abatch() -> None:
    """Test abatch tokens from ChatTogether."""
    llm = ChatTogether()

    result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


async def test_abatch_tags() -> None:
    """Test batch tokens from ChatTogether."""
    llm = ChatTogether()

    result = await llm.abatch(
        ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
    )
    for token in result:
        assert isinstance(token.content, str)


def test_batch() -> None:
    """Test batch tokens from ChatTogether."""
    llm = ChatTogether()

    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for token in result:
        assert isinstance(token.content, str)


async def test_ainvoke() -> None:
    """Test ainvoke tokens from ChatTogether."""
    llm = ChatTogether()

    result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)


def test_invoke() -> None:
    """Test invoke tokens from ChatTogether."""
    llm = ChatTogether()

    result = llm.invoke("I'm Pickle Rick", config={"tags": ["foo"]})
    assert isinstance(result.content, str)