chatgpt wrapper (#1367)

Harrison Chase 1 year ago committed by GitHub
parent 4d4b43cf5a
commit 4b5e850361

@@ -76,19 +76,11 @@
"\n",
"llm_chain.run(question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4797d719",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -102,7 +94,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
"version": "3.9.1"
},
"vscode": {
"interpreter": {

@@ -0,0 +1,174 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e49f1e0d",
"metadata": {},
"source": [
"# OpenAIChat\n",
"\n",
"OpenAI also has a [chat model](https://platform.openai.com/docs/guides/chat) you can use. The interface is very similar to the normal OpenAI model."
]
},
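{
"cell_type": "markdown",
"id": "5f3a9c1e",
"metadata": {},
"source": [
"The wrapper reads the `OPENAI_API_KEY` environment variable (see the `validate_environment` check in the diff below). A minimal setup sketch, not part of the original notebook; the key value is a hypothetical placeholder:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b4d2e0f",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Hypothetical placeholder -- set your real key here or in the shell environment.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"...\""
]
},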
{
"cell_type": "code",
"execution_count": 1,
"id": "522686de",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAIChat\n",
"from langchain import PromptTemplate, LLMChain"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "62e0dbc3",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAIChat(temperature=0)"
]
},
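{
"cell_type": "markdown",
"id": "7c5e3f1a",
"metadata": {},
"source": [
"Because `OpenAIChat` implements the standard LLM interface, it can also be called directly on a string, without a chain. A quick sketch, assuming the usual `__call__` on the base LLM class; the prompt text is illustrative:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8d6f4a2b",
"metadata": {},
"outputs": [],
"source": [
"# Direct single-prompt call, bypassing any chain.\n",
"llm(\"Say hello in one short sentence.\")"
]
},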
{
"cell_type": "code",
"execution_count": 9,
"id": "fbb043e6",
"metadata": {},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "3f945b76",
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "25260808",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n\\nJustin Bieber was born on March 1, 1994. \\n\\nThe Super Bowl is played in February of each year. \\n\\nTherefore, the Super Bowl that was played in the year Justin Bieber was born was Super Bowl XXVIII, which was played on January 30, 1994. \\n\\nThe Dallas Cowboys won Super Bowl XXVIII by defeating the Buffalo Bills with a score of 30-13.'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"cell_type": "markdown",
"id": "75a05b79",
"metadata": {},
"source": [
"## Prefix Messages\n",
"\n",
"OpenAI Chat also supports the idea of [prefix messages](https://platform.openai.com/docs/guides/chat/chat-vs-completions), eg messages that would appear before the user input. These can be used as system messages to give more context/purpose the LLM."
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "c27a1501",
"metadata": {},
"outputs": [],
"source": [
"prefix_messages = [{\"role\": \"system\", \"content\": \"You are a helpful assistant that is very good at problem solving who thinks step by step.\"}]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "e46a914e",
"metadata": {},
"outputs": [],
"source": [
"llm = OpenAIChat(temperature=0, prefix_messages=prefix_messages)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "d683d9f2",
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "6f5b8e78",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Step 1: Justin Bieber was born on March 1, 1994.\\nStep 2: The Super Bowl is played in February of each year.\\nStep 3: Therefore, the Super Bowl that was played in the year Justin Bieber was born was Super Bowl XXVIII, which was played on January 30, 1994.\\nStep 4: The Dallas Cowboys won Super Bowl XXVIII by defeating the Buffalo Bills with a score of 30-13.\\nStep 5: Therefore, the Dallas Cowboys were the NFL team that won the Super Bowl in the year Justin Bieber was born.'"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
},
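{
"cell_type": "markdown",
"id": "9e7a5b3c",
"metadata": {},
"source": [
"Under the hood, the wrapper simply prepends `prefix_messages` to the user turn before calling the chat endpoint. A sketch of the payload it builds, mirroring the wrapper's `_call` in the diff below (shown here for illustration only):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0f8b6c4d",
"metadata": {},
"outputs": [],
"source": [
"# The full message list sent to the API: system prefix + user turn.\n",
"prompt = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
"prefix_messages + [{\"role\": \"user\", \"content\": prompt}]"
]
},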
{
"cell_type": "code",
"execution_count": null,
"id": "44c0330d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -16,7 +16,7 @@ from langchain.llms.huggingface_hub import HuggingFaceHub
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.modal import Modal
from langchain.llms.nlpcloud import NLPCloud
from langchain.llms.openai import AzureOpenAI, OpenAI
from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat
from langchain.llms.petals import Petals
from langchain.llms.promptlayer_openai import PromptLayerOpenAI
from langchain.llms.self_hosted import SelfHostedPipeline
@@ -36,6 +36,7 @@ __all__ = [
"Modal",
"NLPCloud",
"OpenAI",
"OpenAIChat",
"Petals",
"HuggingFaceEndpoint",
"HuggingFaceHub",

@@ -23,7 +23,7 @@ from tenacity import (
wait_exponential,
)
from langchain.llms.base import BaseLLM
from langchain.llms.base import LLM, BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
@@ -513,3 +513,130 @@ class AzureOpenAI(BaseOpenAI):
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"engine": self.deployment_name}, **super()._invocation_params}
class OpenAIChat(LLM, BaseModel):
"""Wrapper around OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.ChatCompletion.create call can be
passed in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAIChat
openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
"""
client: Any #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
import openai
openai.api_key = openai_api_key
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
def _create_retry_decorator(self) -> Callable[[Any], Any]:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(self, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
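"""Build the chat message list (prefix_messages plus the user prompt) and call the endpoint."""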
messages = self.prefix_messages + [{"role": "user", "content": prompt}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
response = self.completion_with_retry(messages=messages, **params)
return response["choices"][0]["message"]["content"]
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai-chat"

@@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.97"
version = "0.0.98"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
