langchain/libs/community/tests/integration_tests/llms/test_promptlayer_openai.py

"""Test PromptLayer OpenAI API wrapper."""
from pathlib import Path
from typing import Generator

import pytest

from langchain_community.llms.loading import load_llm
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
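
# NOTE: these are live integration tests. They assume the ``OPENAI_API_KEY``
# and ``PROMPTLAYER_API_KEY`` environment variables hold valid keys, since
# PromptLayerOpenAI wraps the OpenAI completions API and logs each request
# to PromptLayer.
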
def test_promptlayer_openai_call() -> None:
    """Test a valid call to PromptLayer OpenAI."""
    llm = PromptLayerOpenAI(max_tokens=10)  # type: ignore[call-arg]
    output = llm.invoke("Say foo:")
    assert isinstance(output, str)

def test_promptlayer_openai_extra_kwargs() -> None:
    """Test extra kwargs to PromptLayer OpenAI."""
    # Unrecognized kwargs such as ``foo`` are collected into ``model_kwargs``.
    llm = PromptLayerOpenAI(foo=3, max_tokens=10)  # type: ignore[call-arg]
    assert llm.max_tokens == 10
    assert llm.model_kwargs == {"foo": 3}
    # If ``model_kwargs`` is provided explicitly, extra kwargs are merged into it.
    llm = PromptLayerOpenAI(foo=3, model_kwargs={"bar": 2})  # type: ignore[call-arg]
    assert llm.model_kwargs == {"foo": 3, "bar": 2}
    # Supplying the same key both ways is an error.
    with pytest.raises(ValueError):
        PromptLayerOpenAI(foo=3, model_kwargs={"foo": 2})  # type: ignore[call-arg]

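# The two tests below exercise ``stop`` handling: a stop sequence can be set
# either at construction time or per ``invoke`` call, but not both at once.
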
def test_promptlayer_openai_stop_valid() -> None:
    """Test PromptLayer OpenAI stop logic on a valid configuration."""
    query = "write an ordered list of five items"
    first_llm = PromptLayerOpenAI(stop="3", temperature=0)  # type: ignore[call-arg]
    first_output = first_llm.invoke(query)
    second_llm = PromptLayerOpenAI(temperature=0)  # type: ignore[call-arg]
    second_output = second_llm.invoke(query, stop=["3"])
    # Both runs stop at the token "3", so the two outputs should be identical.
    assert first_output == second_output

def test_promptlayer_openai_stop_error() -> None:
    """Test PromptLayer OpenAI stop logic on a bad configuration."""
    llm = PromptLayerOpenAI(stop="3", temperature=0)  # type: ignore[call-arg]
    # Setting ``stop`` both at construction time and per call raises a ValueError.
    with pytest.raises(ValueError):
        llm.invoke("write an ordered list of five items", stop=["\n"])

def test_saving_loading_llm(tmp_path: Path) -> None:
    """Test saving/loading a PromptLayer OpenAI LLM."""
    llm = PromptLayerOpenAI(max_tokens=10)  # type: ignore[call-arg]
    llm.save(file_path=tmp_path / "openai.yaml")
    loaded_llm = load_llm(tmp_path / "openai.yaml")
    assert loaded_llm == llm

def test_promptlayer_openai_streaming() -> None:
    """Test streaming tokens from PromptLayer OpenAI."""
    llm = PromptLayerOpenAI(max_tokens=10)  # type: ignore[call-arg]
    generator = llm.stream("I'm Pickle Rick")
    assert isinstance(generator, Generator)
    # Here ``stream`` yields raw completion chunks, so each token's text lives
    # under ``choices[0]["text"]``.
    for token in generator:
        assert isinstance(token["choices"][0]["text"], str)
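
# To run just this module (a typical pytest invocation; adjust the path to
# your checkout):
#   pytest libs/community/tests/integration_tests/llms/test_promptlayer_openai.py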