mirror of
https://github.com/hwchase17/langchain
synced 2024-11-06 03:20:49 +00:00
ebc75c5ca7
To-do: - [x] copy over integration tests - [x] update docs with new instructions in #15513 - [x] add Linear ticket to bump core -> community, community -> langchain, and core -> openai deps - [ ] (optional): add `pip install langchain-openai` command to each notebook using it - [x] update docstrings to not need `openai` install - [x] add serialization - [x] deprecate old models Contributor steps: - [x] add secret names to manual integrations workflow in .github/workflows/_integration_test.yml - [x] add secrets to release workflow (for pre-release testing) in .github/workflows/_release.yml Maintainer steps (contributors should not do these): - [x] set up PyPI and Test PyPI projects - [x] add credential secrets to GitHub Actions - [ ] add package to conda-forge Functional changes to existing classes: - now relies on openai client v1 (1.6.1) via a concrete dependency in the langchain-openai package Codebase organization: - some function-calling utilities moved to `langchain_core.utils.function_calling` so they can be used in both community and langchain-openai
61 lines
1.6 KiB
Python
61 lines
1.6 KiB
Python
import os
|
|
|
|
import pytest
|
|
|
|
from langchain_community.llms.openai import OpenAI
|
|
from langchain_community.utils.openai import is_openai_v1
|
|
|
|
# Dummy credential so ``OpenAI(...)`` can be constructed in the tests below
# without a real key in the environment — presumably the client validates
# that some key is set; no network calls are made in this module.
os.environ["OPENAI_API_KEY"] = "foo"
|
|
|
|
|
|
def _openai_v1_installed() -> bool:
    """Return True if the installed ``openai`` package is v1 or newer.

    Any failure in ``is_openai_v1`` (most plausibly an import error when
    ``openai`` is not installed at all) is treated as "not v1", so this is
    safe to use as a skip condition.
    """
    try:
        return is_openai_v1()
    except Exception:
        # Deliberately broad: absence or breakage of openai both mean False.
        return False
|
|
|
|
|
|
@pytest.mark.requires("openai")
def test_openai_model_param() -> None:
    """Both ``model`` and its ``model_name`` alias populate ``model_name``."""
    for param in ("model", "model_name"):
        llm = OpenAI(**{param: "foo"})
        assert llm.model_name == "foo"
|
|
|
|
|
|
@pytest.mark.requires("openai")
def test_openai_model_kwargs() -> None:
    """Extra kwargs passed at construction are stored verbatim."""
    extra = {"foo": "bar"}
    llm = OpenAI(model_kwargs=extra)
    assert llm.model_kwargs == extra
|
|
|
|
|
|
@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
    """Reserved fields may not be smuggled in through ``model_kwargs``."""
    # Neither ``model_name`` nor ``model`` is accepted via model_kwargs.
    reserved_kwargs = (
        {"model_name": "foo"},
        {"model": "gpt-3.5-turbo-instruct"},
    )
    for kwargs in reserved_kwargs:
        with pytest.raises(ValueError):
            OpenAI(model_kwargs=kwargs)
|
|
|
|
|
|
@pytest.mark.requires("openai")
def test_openai_incorrect_field() -> None:
    """An unknown constructor field warns and lands in ``model_kwargs``."""
    with pytest.warns(match="not default parameter"):
        llm = OpenAI(foo="bar")
    expected_kwargs = {"foo": "bar"}
    assert llm.model_kwargs == expected_kwargs
|
|
|
|
|
|
@pytest.fixture
def mock_completion() -> dict:
    """Canned OpenAI text-completion payload for mocking API responses."""
    single_choice = {
        "text": "Bar Baz",
        "index": 0,
        "logprobs": None,
        "finish_reason": "length",
    }
    token_usage = {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3}
    return {
        "id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
        "object": "text_completion",
        "created": 1689989000,
        "model": "gpt-3.5-turbo-instruct",
        "choices": [single_choice],
        "usage": token_usage,
    }
|