@@ -14,6 +14,7 @@ from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
 from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


+@pytest.mark.scheduled
 def test_chat_openai() -> None:
     """Test ChatOpenAI wrapper."""
     chat = ChatOpenAI(max_tokens=10)
@@ -41,6 +42,7 @@ def test_chat_openai_system_message() -> None:
     assert isinstance(response.content, str)


+@pytest.mark.scheduled
 def test_chat_openai_generate() -> None:
     """Test ChatOpenAI wrapper with generate."""
     chat = ChatOpenAI(max_tokens=10, n=2)
@@ -56,6 +58,7 @@ def test_chat_openai_generate() -> None:
             assert generation.text == generation.message.content


+@pytest.mark.scheduled
 def test_chat_openai_multiple_completions() -> None:
     """Test ChatOpenAI wrapper with multiple completions."""
     chat = ChatOpenAI(max_tokens=10, n=5)
@@ -68,6 +71,7 @@ def test_chat_openai_multiple_completions() -> None:
         assert isinstance(generation.message.content, str)


+@pytest.mark.scheduled
 def test_chat_openai_streaming() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
     callback_handler = FakeCallbackHandler()
@@ -114,6 +118,7 @@ def test_chat_openai_invalid_streaming_params() -> None:
         )


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_async_chat_openai() -> None:
     """Test async generation."""
@@ -130,6 +135,7 @@ async def test_async_chat_openai() -> None:
             assert generation.text == generation.message.content


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_async_chat_openai_streaming() -> None:
     """Test that streaming correctly invokes on_llm_new_token callback."""
@@ -179,6 +185,7 @@ def test_chat_openai_extra_kwargs() -> None:
         ChatOpenAI(model_kwargs={"model": "text-davinci-003"})


+@pytest.mark.scheduled
 def test_openai_streaming() -> None:
     """Test streaming tokens from OpenAI."""
     llm = ChatOpenAI(max_tokens=10)
@@ -187,6 +194,7 @@ def test_openai_streaming() -> None:
         assert isinstance(token.content, str)


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_openai_astream() -> None:
     """Test streaming tokens from OpenAI."""
@@ -196,6 +204,7 @@ async def test_openai_astream() -> None:
         assert isinstance(token.content, str)


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_openai_abatch() -> None:
     """Test streaming tokens from ChatOpenAI."""
@@ -206,6 +215,7 @@ async def test_openai_abatch() -> None:
         assert isinstance(token.content, str)


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_openai_abatch_tags() -> None:
     """Test batch tokens from ChatOpenAI."""
@@ -218,6 +228,7 @@ async def test_openai_abatch_tags() -> None:
         assert isinstance(token.content, str)


+@pytest.mark.scheduled
 def test_openai_batch() -> None:
     """Test batch tokens from ChatOpenAI."""
     llm = ChatOpenAI(max_tokens=10)
@@ -227,6 +238,7 @@ def test_openai_batch() -> None:
         assert isinstance(token.content, str)


+@pytest.mark.scheduled
 @pytest.mark.asyncio
 async def test_openai_ainvoke() -> None:
     """Test invoke tokens from ChatOpenAI."""
@@ -236,6 +248,7 @@ async def test_openai_ainvoke() -> None:
     assert isinstance(result.content, str)


+@pytest.mark.scheduled
 def test_openai_invoke() -> None:
     """Test invoke tokens from ChatOpenAI."""
     llm = ChatOpenAI(max_tokens=10)