community[patch]: Fix chat openai unit test (#17124)

- **Description:**
The test named `test_openai_apredict` wasn't actually exercising the
`apredict` method of `ChatOpenAI`: it patched the synchronous `client` and
called `llm.predict`, so the async code path was never hit. This change makes
the mocked `create` a coroutine, patches `async_client`, and awaits
`llm.apredict`. The unit tests also pass `openai_api_key="foo"` explicitly
instead of setting the `OPENAI_API_KEY` environment variable. A self-contained
sketch of the corrected test follows below.
  - **Twitter handle:**
  https://twitter.com/OAlmofadas
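
For reference, a minimal sketch of the corrected async test, reassembled from the diff below into one runnable block. It assumes the `mock_completion` pytest fixture defined elsewhere in the test module, which returns a canned completion whose message content is `"Bar Baz"`.

```python
from typing import Any
from unittest.mock import MagicMock, patch

import pytest

from langchain_community.chat_models.openai import ChatOpenAI


@pytest.mark.requires("openai")
async def test_openai_apredict(mock_completion: dict) -> None:
    # Explicit key so the test doesn't depend on the OPENAI_API_KEY env var.
    llm = ChatOpenAI(openai_api_key="foo")
    mock_client = MagicMock()
    completed = False

    async def mock_create(*args: Any, **kwargs: Any) -> Any:
        # Coroutine mock: proves the async path was actually awaited.
        nonlocal completed
        completed = True
        return mock_completion

    mock_client.create = mock_create
    # Patch the async client (not the sync `client`) so apredict hits the mock.
    with patch.object(llm, "async_client", mock_client):
        res = await llm.apredict("bar")
        assert res == "Bar Baz"
        assert completed
```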
Luiz Ferreira · committed via GitHub · commit 34d2daffb3 · parent f92738a6f6

@@ -17,9 +17,9 @@ from langchain_community.chat_models.openai import ChatOpenAI
 @pytest.mark.requires("openai")
 def test_openai_model_param() -> None:
-    llm = ChatOpenAI(model="foo")
+    llm = ChatOpenAI(model="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
-    llm = ChatOpenAI(model_name="foo")
+    llm = ChatOpenAI(model_name="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
@@ -81,7 +81,7 @@ def mock_completion() -> dict:
 @pytest.mark.requires("openai")
 def test_openai_predict(mock_completion: dict) -> None:
-    llm = ChatOpenAI()
+    llm = ChatOpenAI(openai_api_key="foo")
     mock_client = MagicMock()
     completed = False
@@ -103,11 +103,11 @@ def test_openai_predict(mock_completion: dict) -> None:
 @pytest.mark.requires("openai")
 async def test_openai_apredict(mock_completion: dict) -> None:
-    llm = ChatOpenAI()
+    llm = ChatOpenAI(openai_api_key="foo")
     mock_client = MagicMock()
     completed = False

-    def mock_create(*args: Any, **kwargs: Any) -> Any:
+    async def mock_create(*args: Any, **kwargs: Any) -> Any:
         nonlocal completed
         completed = True
         return mock_completion
@@ -115,9 +115,9 @@ async def test_openai_apredict(mock_completion: dict) -> None:
     mock_client.create = mock_create
     with patch.object(
         llm,
-        "client",
+        "async_client",
         mock_client,
     ):
-        res = llm.predict("bar")
+        res = await llm.apredict("bar")
         assert res == "Bar Baz"
         assert completed

@@ -1,11 +1,7 @@
-import os
-
 import pytest

 from langchain_community.embeddings.openai import OpenAIEmbeddings

-os.environ["OPENAI_API_KEY"] = "foo"
-

 @pytest.mark.requires("openai")
 def test_openai_invalid_model_kwargs() -> None:
@@ -16,5 +12,5 @@ def test_openai_invalid_model_kwargs() -> None:
 @pytest.mark.requires("openai")
 def test_openai_incorrect_field() -> None:
     with pytest.warns(match="not default parameter"):
-        llm = OpenAIEmbeddings(foo="bar")
+        llm = OpenAIEmbeddings(foo="bar", openai_api_key="foo")
     assert llm.model_kwargs == {"foo": "bar"}

@@ -1,12 +1,8 @@
-import os
-
 import pytest

 from langchain_community.llms.openai import OpenAI
 from langchain_community.utils.openai import is_openai_v1

-os.environ["OPENAI_API_KEY"] = "foo"
-

 def _openai_v1_installed() -> bool:
     try:
@@ -17,15 +13,15 @@ def _openai_v1_installed() -> bool:
 @pytest.mark.requires("openai")
 def test_openai_model_param() -> None:
-    llm = OpenAI(model="foo")
+    llm = OpenAI(model="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
-    llm = OpenAI(model_name="foo")
+    llm = OpenAI(model_name="foo", openai_api_key="foo")
     assert llm.model_name == "foo"


 @pytest.mark.requires("openai")
 def test_openai_model_kwargs() -> None:
-    llm = OpenAI(model_kwargs={"foo": "bar"})
+    llm = OpenAI(model_kwargs={"foo": "bar"}, openai_api_key="foo")
     assert llm.model_kwargs == {"foo": "bar"}
@@ -42,7 +38,7 @@ def test_openai_invalid_model_kwargs() -> None:
 @pytest.mark.requires("openai")
 def test_openai_incorrect_field() -> None:
     with pytest.warns(match="not default parameter"):
-        llm = OpenAI(foo="bar")
+        llm = OpenAI(foo="bar", openai_api_key="foo")
     assert llm.model_kwargs == {"foo": "bar"}
