From 34d2daffb3169f2d8f83d0c02d6b8159c138f733 Mon Sep 17 00:00:00 2001
From: Luiz Ferreira
Date: Thu, 8 Feb 2024 00:08:26 -0300
Subject: [PATCH] community[patch]: Fix chat openai unit test (#17124)

- **Description:** The test named `test_openai_apredict` isn't actually
  testing the `apredict` method of `ChatOpenAI`: it patches the sync `client`
  and calls `predict`. The test now uses an `async` mock for `create`, patches
  `async_client`, and awaits `apredict`; dummy `openai_api_key="foo"` arguments
  replace the module-level `os.environ` assignments.
- **Twitter handle:** https://twitter.com/OAlmofadas
---
 .../tests/unit_tests/chat_models/test_openai.py    | 14 +++++++-------
 .../tests/unit_tests/embeddings/test_openai.py     |  6 +-----
 .../community/tests/unit_tests/llms/test_openai.py | 12 ++++--------
 3 files changed, 12 insertions(+), 20 deletions(-)

diff --git a/libs/community/tests/unit_tests/chat_models/test_openai.py b/libs/community/tests/unit_tests/chat_models/test_openai.py
index 22fde4f335..ad7033e4ea 100644
--- a/libs/community/tests/unit_tests/chat_models/test_openai.py
+++ b/libs/community/tests/unit_tests/chat_models/test_openai.py
@@ -17,9 +17,9 @@ from langchain_community.chat_models.openai import ChatOpenAI
 
 @pytest.mark.requires("openai")
 def test_openai_model_param() -> None:
-    llm = ChatOpenAI(model="foo")
+    llm = ChatOpenAI(model="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
-    llm = ChatOpenAI(model_name="foo")
+    llm = ChatOpenAI(model_name="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
 
 
@@ -81,7 +81,7 @@ def mock_completion() -> dict:
 
 @pytest.mark.requires("openai")
 def test_openai_predict(mock_completion: dict) -> None:
-    llm = ChatOpenAI()
+    llm = ChatOpenAI(openai_api_key="foo")
     mock_client = MagicMock()
     completed = False
 
@@ -103,11 +103,11 @@ def test_openai_predict(mock_completion: dict) -> None:
 
 @pytest.mark.requires("openai")
 async def test_openai_apredict(mock_completion: dict) -> None:
-    llm = ChatOpenAI()
+    llm = ChatOpenAI(openai_api_key="foo")
     mock_client = MagicMock()
     completed = False
 
-    def mock_create(*args: Any, **kwargs: Any) -> Any:
+    async def mock_create(*args: Any, **kwargs: Any) -> Any:
         nonlocal completed
         completed = True
         return mock_completion
@@ -115,9 +115,9 @@ async def test_openai_apredict(mock_completion: dict) -> None:
     mock_client.create = mock_create
     with patch.object(
         llm,
-        "client",
+        "async_client",
         mock_client,
     ):
-        res = llm.predict("bar")
+        res = await llm.apredict("bar")
         assert res == "Bar Baz"
     assert completed
diff --git a/libs/community/tests/unit_tests/embeddings/test_openai.py b/libs/community/tests/unit_tests/embeddings/test_openai.py
index e9c3979138..c9499722ff 100644
--- a/libs/community/tests/unit_tests/embeddings/test_openai.py
+++ b/libs/community/tests/unit_tests/embeddings/test_openai.py
@@ -1,11 +1,7 @@
-import os
-
 import pytest
 
 from langchain_community.embeddings.openai import OpenAIEmbeddings
 
-os.environ["OPENAI_API_KEY"] = "foo"
-
 
 @pytest.mark.requires("openai")
 def test_openai_invalid_model_kwargs() -> None:
@@ -16,5 +12,5 @@ def test_openai_invalid_model_kwargs() -> None:
 @pytest.mark.requires("openai")
 def test_openai_incorrect_field() -> None:
     with pytest.warns(match="not default parameter"):
-        llm = OpenAIEmbeddings(foo="bar")
+        llm = OpenAIEmbeddings(foo="bar", openai_api_key="foo")
     assert llm.model_kwargs == {"foo": "bar"}
diff --git a/libs/community/tests/unit_tests/llms/test_openai.py b/libs/community/tests/unit_tests/llms/test_openai.py
index 3b1c4d8dfe..73f6fad283 100644
--- a/libs/community/tests/unit_tests/llms/test_openai.py
+++ b/libs/community/tests/unit_tests/llms/test_openai.py
@@ -1,12 +1,8 @@
-import os
-
 import pytest
 
 from langchain_community.llms.openai import OpenAI
 from langchain_community.utils.openai import is_openai_v1
 
-os.environ["OPENAI_API_KEY"] = "foo"
-
 
 def _openai_v1_installed() -> bool:
     try:
@@ -17,15 +13,15 @@ def _openai_v1_installed() -> bool:
 
 @pytest.mark.requires("openai")
 def test_openai_model_param() -> None:
-    llm = OpenAI(model="foo")
+    llm = OpenAI(model="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
-    llm = OpenAI(model_name="foo")
+    llm = OpenAI(model_name="foo", openai_api_key="foo")
     assert llm.model_name == "foo"
 
 
 @pytest.mark.requires("openai")
 def test_openai_model_kwargs() -> None:
-    llm = OpenAI(model_kwargs={"foo": "bar"})
+    llm = OpenAI(model_kwargs={"foo": "bar"}, openai_api_key="foo")
     assert llm.model_kwargs == {"foo": "bar"}
 
 
@@ -42,7 +38,7 @@ def test_openai_invalid_model_kwargs() -> None:
 @pytest.mark.requires("openai")
 def test_openai_incorrect_field() -> None:
     with pytest.warns(match="not default parameter"):
-        llm = OpenAI(foo="bar")
+        llm = OpenAI(foo="bar", openai_api_key="foo")
     assert llm.model_kwargs == {"foo": "bar"}
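Below is a minimal, standalone sketch of the async-mocking pattern the patch applies, for readers who want to try it outside the LangChain test suite. `FakeChatModel` is a hypothetical stand-in for `ChatOpenAI`, not the real class; it only assumes that the async path awaits `async_client.create(...)`, which is why the mock must be an `async def` patched onto `async_client` and the test must `await apredict`.

```python
# Minimal standalone sketch of the async-mocking pattern used in this patch.
# FakeChatModel is a hypothetical stand-in for ChatOpenAI (illustration only).
import asyncio
from typing import Any
from unittest.mock import MagicMock, patch


class FakeChatModel:
    client: Any = None
    async_client: Any = None

    def predict(self, text: str) -> str:
        # Sync path: uses `client`.
        return self.client.create(prompt=text)["content"]

    async def apredict(self, text: str) -> str:
        # Async path: awaits `async_client.create(...)`.
        response = await self.async_client.create(prompt=text)
        return response["content"]


def test_apredict_uses_async_client() -> None:
    llm = FakeChatModel()
    mock_client = MagicMock()
    completed = False

    # The mock must be `async def`: `apredict` awaits its result, so a plain
    # function returning a dict would raise "object is not awaitable".
    async def mock_create(*args: Any, **kwargs: Any) -> Any:
        nonlocal completed
        completed = True
        return {"content": "Bar Baz"}

    mock_client.create = mock_create
    # Patch `async_client`; patching `client` would leave the async path untested.
    with patch.object(llm, "async_client", mock_client):
        res = asyncio.run(llm.apredict("bar"))
    assert res == "Bar Baz"
    assert completed


if __name__ == "__main__":
    test_apredict_uses_async_client()
    print("ok")
```

In the original unit test the sync `client` was patched and `predict` was called, so the test passed without ever exercising `apredict`; the pattern above is what the patch switches to.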