Allow the regular openai class to be used for ChatGPT models (#1393)

Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
searx-doc
Nuno Campos 1 year ago committed by GitHub
parent 8947797250
commit 499e76b199
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

1
.gitignore vendored

@ -106,6 +106,7 @@ celerybeat.pid
# Environments
.env
.envrc
.venv
.venvs
env/

@ -161,6 +161,12 @@ class BaseOpenAI(BaseLLM, BaseModel):
streaming: bool = False
"""Whether to stream the results or not."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]:  # type: ignore
    """Initialize the OpenAI object.

    Dispatches construction: chat-completion model names are routed to
    the dedicated ``OpenAIChat`` wrapper; everything else falls through
    to the normal ``BaseOpenAI`` allocation.
    """
    model_name = data.get("model_name", "")
    if model_name.startswith("gpt-3.5-turbo"):
        # Chat models use a different API endpoint, handled by OpenAIChat.
        return OpenAIChat(**data)
    return super().__new__(cls)
class Config:
"""Configuration for this pydantic object."""

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.99"
version = "0.0.100"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"

@ -144,6 +144,13 @@ async def test_openai_async_streaming_callback() -> None:
assert isinstance(result, LLMResult)
def test_openai_chat_wrong_class() -> None:
    """Test OpenAIChat with wrong class still works."""
    # OpenAI(...) should transparently return an OpenAIChat for chat models,
    # so calling it must still produce a plain string completion.
    chat_llm = OpenAI(model_name="gpt-3.5-turbo")
    response = chat_llm("Say foo:")
    assert isinstance(response, str)
def test_openai_chat() -> None:
"""Test OpenAIChat."""
llm = OpenAIChat(max_tokens=10)

Loading…
Cancel
Save