Mirror of https://github.com/hwchase17/langchain, synced 2024-11-06 03:20:49 +00:00
5f839beab9
This is technically a breaking change because it switches the default model from `text-davinci-003` to `gpt-3.5-turbo-instruct`, but OpenAI is shutting down those endpoints on 1/4 anyway, so switching out the default feels less disruptive.
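To make the effect of that default switch concrete, a minimal sketch follows. It is illustrative only and not part of the test file below; the `model_name` attribute and `openai_api_key` parameter are taken from the usage in the tests, and the exact default value is assumed from the commit message.

# Hedged illustration: constructing the wrapper without an explicit model should
# now resolve to "gpt-3.5-turbo-instruct" rather than the retired "text-davinci-003".
from langchain_community.llms.openai import OpenAI

llm = OpenAI(openai_api_key="foo")
assert llm.model_name == "gpt-3.5-turbo-instruct"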
57 lines · 1.4 KiB · Python
import os

import pytest

from langchain_community.llms.openai import OpenAI
from langchain_community.utils.openai import is_openai_v1

# Dummy key so the wrapper can be constructed without real credentials.
os.environ["OPENAI_API_KEY"] = "foo"


def _openai_v1_installed() -> bool:
    # True if the installed openai SDK is v1+; False if openai is missing
    # or the version check itself fails.
    try:
        return is_openai_v1()
    except Exception:
        return False


@pytest.mark.requires("openai")
def test_openai_model_param() -> None:
    # Both `model` and the legacy `model_name` alias populate `model_name`.
    llm = OpenAI(model="foo")
    assert llm.model_name == "foo"
    llm = OpenAI(model_name="foo")
    assert llm.model_name == "foo"


@pytest.mark.requires("openai")
def test_openai_model_kwargs() -> None:
    llm = OpenAI(model_kwargs={"foo": "bar"})
    assert llm.model_kwargs == {"foo": "bar"}


@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
    # Declared fields may not be passed through `model_kwargs`.
    with pytest.raises(ValueError):
        OpenAI(model_kwargs={"model_name": "foo"})


@pytest.mark.requires("openai")
def test_openai_incorrect_field() -> None:
    # Unknown constructor kwargs trigger a warning and are routed into `model_kwargs`.
    with pytest.warns(match="not default parameter"):
        llm = OpenAI(foo="bar")
    assert llm.model_kwargs == {"foo": "bar"}


@pytest.fixture
def mock_completion() -> dict:
    # Canned completions-style response for tests that stub out the client.
    return {
        "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
        "object": "text_completion",
        "created": 1689989000,
        "model": "gpt-3.5-turbo-instruct",
        "choices": [
            {"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
        ],
        "usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
    }
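The `mock_completion` fixture is not consumed by any test shown above. A minimal sketch of how it could be wired in follows, continuing the file above (so `pytest` and `OpenAI` are already imported). The client patching, the `invoke` call, and the assumption that the wrapper issues completions via `client.create(...)` are inferences about the wrapper's internals, not taken from this file.

from typing import Any
from unittest.mock import MagicMock


@pytest.mark.requires("openai")
def test_openai_invoke_with_mock(mock_completion: dict) -> None:
    # Sketch: stub the underlying client so no network call is made, then
    # check that the canned completion text round-trips through invoke().
    llm = OpenAI()
    mock_client = MagicMock()
    called = False

    def mock_create(*args: Any, **kwargs: Any) -> Any:
        nonlocal called
        called = True
        return mock_completion

    mock_client.create = mock_create
    # Assumption: the wrapper exposes its OpenAI client as `llm.client`.
    llm.client = mock_client
    assert llm.invoke("bar") == "Bar Baz"
    assert called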