From 4b5e850361bd4b47ac739748232d696deac79eaf Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Wed, 1 Mar 2023 11:47:01 -0800
Subject: [PATCH] chatgpt wrapper (#1367)

---
 docs/modules/llms/integrations/openai.ipynb   |  12 +-
 .../llms/integrations/openaichat.ipynb        | 174 ++++++++++++++++++
 langchain/llms/__init__.py                    |   3 +-
 langchain/llms/openai.py                      | 129 ++++++++++++-
 pyproject.toml                                |   2 +-
 5 files changed, 307 insertions(+), 13 deletions(-)
 create mode 100644 docs/modules/llms/integrations/openaichat.ipynb

diff --git a/docs/modules/llms/integrations/openai.ipynb b/docs/modules/llms/integrations/openai.ipynb
index f8133615..be1986ec 100644
--- a/docs/modules/llms/integrations/openai.ipynb
+++ b/docs/modules/llms/integrations/openai.ipynb
@@ -76,19 +76,11 @@
     "\n",
     "llm_chain.run(question)"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "4797d719",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3.9.12 ('palm')",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -102,7 +94,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.12"
+   "version": "3.9.1"
   },
   "vscode": {
    "interpreter": {
diff --git a/docs/modules/llms/integrations/openaichat.ipynb b/docs/modules/llms/integrations/openaichat.ipynb
new file mode 100644
index 00000000..784ce068
--- /dev/null
+++ b/docs/modules/llms/integrations/openaichat.ipynb
@@ -0,0 +1,174 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "e49f1e0d",
+   "metadata": {},
+   "source": [
+    "# OpenAIChat\n",
+    "\n",
+    "OpenAI also has a [chat model](https://platform.openai.com/docs/guides/chat) you can use. The interface is very similar to the normal OpenAI model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "522686de",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.llms import OpenAIChat\n",
+    "from langchain import PromptTemplate, LLMChain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "62e0dbc3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm = OpenAIChat(temperature=0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "fbb043e6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "template = \"\"\"Question: {question}\n",
+    "\n",
+    "Answer: Let's think step by step.\"\"\"\n",
+    "\n",
+    "prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "3f945b76",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "25260808",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'\\n\\nJustin Bieber was born on March 1, 1994. \\n\\nThe Super Bowl is played in February of each year. \\n\\nTherefore, the Super Bowl that was played in the year Justin Bieber was born was Super Bowl XXVIII, which was played on January 30, 1994. \\n\\nThe Dallas Cowboys won Super Bowl XXVIII by defeating the Buffalo Bills with a score of 30-13.'"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
+    "\n",
+    "llm_chain.run(question)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "75a05b79",
+   "metadata": {},
+   "source": [
+    "## Prefix Messages\n",
+    "\n",
+    "OpenAI Chat also supports the idea of [prefix messages](https://platform.openai.com/docs/guides/chat/chat-vs-completions), i.e. messages that appear before the user input. These can be used as system messages to give the LLM more context or purpose."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "id": "c27a1501",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "prefix_messages = [{\"role\": \"system\", \"content\": \"You are a helpful assistant who is very good at problem solving and thinks step by step.\"}]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "id": "e46a914e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm = OpenAIChat(temperature=0, prefix_messages=prefix_messages)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "id": "d683d9f2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm_chain = LLMChain(prompt=prompt, llm=llm)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "id": "6f5b8e78",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'Step 1: Justin Bieber was born on March 1, 1994.\\nStep 2: The Super Bowl is played in February of each year.\\nStep 3: Therefore, the Super Bowl that was played in the year Justin Bieber was born was Super Bowl XXVIII, which was played on January 30, 1994.\\nStep 4: The Dallas Cowboys won Super Bowl XXVIII by defeating the Buffalo Bills with a score of 30-13.\\nStep 5: Therefore, the Dallas Cowboys were the NFL team that won the Super Bowl in the year Justin Bieber was born.'"
+      ]
+     },
+     "execution_count": 22,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "question = \"What NFL team won the Super Bowl in the year Justin Bieber was born?\"\n",
+    "\n",
+    "llm_chain.run(question)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "44c0330d",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/langchain/llms/__init__.py b/langchain/llms/__init__.py
index 60482dab..a10d8c65 100644
--- a/langchain/llms/__init__.py
+++ b/langchain/llms/__init__.py
@@ -16,7 +16,7 @@ from langchain.llms.huggingface_hub import HuggingFaceHub
 from langchain.llms.huggingface_pipeline import HuggingFacePipeline
 from langchain.llms.modal import Modal
 from langchain.llms.nlpcloud import NLPCloud
-from langchain.llms.openai import AzureOpenAI, OpenAI
+from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat
 from langchain.llms.petals import Petals
 from langchain.llms.promptlayer_openai import PromptLayerOpenAI
 from langchain.llms.self_hosted import SelfHostedPipeline
@@ -36,6 +36,7 @@ __all__ = [
     "Modal",
     "NLPCloud",
"OpenAI", + "OpenAIChat", "Petals", "HuggingFaceEndpoint", "HuggingFaceHub", diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index 728d5087..3fa181de 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -23,7 +23,7 @@ from tenacity import ( wait_exponential, ) -from langchain.llms.base import BaseLLM +from langchain.llms.base import LLM, BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env @@ -513,3 +513,130 @@ class AzureOpenAI(BaseOpenAI): @property def _invocation_params(self) -> Dict[str, Any]: return {**{"engine": self.deployment_name}, **super()._invocation_params} + + +class OpenAIChat(LLM, BaseModel): + """Wrapper around OpenAI Chat large language models. + + To use, you should have the ``openai`` python package installed, and the + environment variable ``OPENAI_API_KEY`` set with your API key. + + Any parameters that are valid to be passed to the openai.create call can be passed + in, even if not explicitly saved on this class. + + Example: + .. code-block:: python + + from langchain.llms import OpenAI + openai = OpenAI(model_name="gpt-3.5-turbo") + """ + + client: Any #: :meta private: + model_name: str = "gpt-3.5-turbo" + """Model name to use.""" + model_kwargs: Dict[str, Any] = Field(default_factory=dict) + """Holds any model parameters valid for `create` call not explicitly specified.""" + openai_api_key: Optional[str] = None + max_retries: int = 6 + """Maximum number of retries to make when generating.""" + prefix_messages: List = Field(default_factory=list) + + class Config: + """Configuration for this pydantic object.""" + + extra = Extra.ignore + + @root_validator(pre=True) + def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: + """Build extra kwargs from additional params that were passed in.""" + all_required_field_names = {field.alias for field in cls.__fields__.values()} + + extra = values.get("model_kwargs", {}) + for field_name in list(values): + if field_name not in all_required_field_names: + if field_name in extra: + raise ValueError(f"Found {field_name} supplied twice.") + extra[field_name] = values.pop(field_name) + values["model_kwargs"] = extra + return values + + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + openai_api_key = get_from_dict_or_env( + values, "openai_api_key", "OPENAI_API_KEY" + ) + try: + import openai + + openai.api_key = openai_api_key + except ImportError: + raise ValueError( + "Could not import openai python package. " + "Please it install it with `pip install openai`." + ) + try: + values["client"] = openai.ChatCompletion + except AttributeError: + raise ValueError( + "`openai` has no `ChatCompletion` attribute, this is likely " + "due to an old version of the openai package. Try upgrading it " + "with `pip install --upgrade openai`." 
+ ) + return values + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + return self.model_kwargs + + def _create_retry_decorator(self) -> Callable[[Any], Any]: + import openai + + min_seconds = 4 + max_seconds = 10 + # Wait 2^x * 1 second between each retry starting with + # 4 seconds, then up to 10 seconds, then 10 seconds afterwards + return retry( + reraise=True, + stop=stop_after_attempt(self.max_retries), + wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), + retry=( + retry_if_exception_type(openai.error.Timeout) + | retry_if_exception_type(openai.error.APIError) + | retry_if_exception_type(openai.error.APIConnectionError) + | retry_if_exception_type(openai.error.RateLimitError) + | retry_if_exception_type(openai.error.ServiceUnavailableError) + ), + before_sleep=before_sleep_log(logger, logging.WARNING), + ) + + def completion_with_retry(self, **kwargs: Any) -> Any: + """Use tenacity to retry the completion call.""" + retry_decorator = self._create_retry_decorator() + + @retry_decorator + def _completion_with_retry(**kwargs: Any) -> Any: + return self.client.create(**kwargs) + + return _completion_with_retry(**kwargs) + + def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: + messages = self.prefix_messages + [{"role": "user", "content": prompt}] + params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} + if stop is not None: + if "stop" in params: + raise ValueError("`stop` found in both the input and default params.") + params["stop"] = stop + response = self.completion_with_retry(messages=messages, **params) + return response["choices"][0]["message"]["content"] + + @property + def _identifying_params(self) -> Mapping[str, Any]: + """Get the identifying parameters.""" + return {**{"model_name": self.model_name}, **self._default_params} + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "openai-chat" diff --git a/pyproject.toml b/pyproject.toml index aef37722..c5e98ebc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain" -version = "0.0.97" +version = "0.0.98" description = "Building applications with LLMs through composability" authors = [] license = "MIT"
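For reference, below is a minimal usage sketch (not part of the patch) of the `OpenAIChat` wrapper added above. It assumes langchain 0.0.98 with the `openai` package installed and `OPENAI_API_KEY` set in the environment; the `max_tokens` argument is included only to illustrate how the `build_extra` root validator routes keyword arguments that are not declared fields into `model_kwargs`.

.. code-block:: python

    from langchain.llms import OpenAIChat

    # `temperature` and `max_tokens` are not declared fields on OpenAIChat;
    # build_extra collects them into model_kwargs, and they are forwarded
    # to openai.ChatCompletion.create on every call.
    llm = OpenAIChat(
        temperature=0,
        max_tokens=256,
        # prefix_messages are prepended before the user prompt,
        # e.g. as a system message.
        prefix_messages=[
            {"role": "system", "content": "You are a helpful assistant."}
        ],
    )

    # _call wraps the prompt as a single user message, retries on transient
    # OpenAI errors via tenacity, and returns the first choice's text.
    print(llm("What NFL team won the Super Bowl in 1994?"))

As with the completion-style `OpenAI` wrapper, the instance can also be dropped into an `LLMChain`, as the new openaichat.ipynb notebook demonstrates.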