Harrison/prompt layer (#1547)

Co-authored-by: Jonathan Pedoeem <jonathanped@gmail.com>
Co-authored-by: AbuBakar <abubakarsohail123@gmail.com>
Harrison Chase committed 1 year ago
parent c844d1fd46
commit 3ee32a01ea

@@ -29,3 +29,5 @@ This LLM is identical to the [OpenAI LLM](./openai), except that
- all your requests will be logged to your PromptLayer account
- you can add `pl_tags` when instantiating to tag your requests on PromptLayer (see the sketch below)
PromptLayer also provides native wrappers for [`PromptLayerChatOpenAI`](../modules/chat/examples/promptlayer_chat_openai.ipynb) and `PromptLayerOpenAIChat`
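As a quick, hedged sketch of what tagging looks like in practice (the key values and tag names below are placeholders, not part of this commit):

```python
import os

from langchain.llms import PromptLayerOpenAI

# Placeholder keys; set these to your real values.
os.environ["OPENAI_API_KEY"] = "sk-..."
os.environ["PROMPTLAYER_API_KEY"] = "pl-..."

# Every request made through this LLM is logged to your PromptLayer
# account and tagged with the (illustrative) tags passed at instantiation.
llm = PromptLayerOpenAI(pl_tags=["langchain", "docs-example"])
print(llm("Tell me a joke"))
```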

@@ -0,0 +1,154 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "959300d4",
"metadata": {},
"source": [
"# PromptLayer ChatOpenAI\n",
"\n",
"This example showcases how to connect to [PromptLayer](https://www.promptlayer.com) to start recording your ChatOpenAI requests."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "6a45943e",
"metadata": {},
"source": [
"## Install PromptLayer\n",
"The `promptlayer` package is required to use PromptLayer with OpenAI. Install `promptlayer` using pip."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dbe09bd8",
"metadata": {
"vscode": {
"languageId": "powershell"
}
},
"outputs": [],
"source": [
"pip install promptlayer"
]
},
{
"cell_type": "markdown",
"id": "536c1dfa",
"metadata": {},
"source": [
"## Imports"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "c16da3b5",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from langchain.chat_models import PromptLayerChatOpenAI\n",
"from langchain.schema import HumanMessage"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "8564ce7d",
"metadata": {},
"source": [
"## Set the Environment API Key\n",
"You can create a PromptLayer API Key at [wwww.promptlayer.com](https://ww.promptlayer.com) by clicking the settings cog in the navbar.\n",
"\n",
"Set it as an environment variable called `PROMPTLAYER_API_KEY`."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "46ba25dc",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"PROMPTLAYER_API_KEY\"] = \"**********\""
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "bf0294de",
"metadata": {},
"source": [
"## Use the PromptLayerOpenAI LLM like normal\n",
"*You can optionally pass in `pl_tags` to track your requests with PromptLayer's tagging feature.*"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3acf0069",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='to take a nap in a cozy spot. I search around for a suitable place and finally settle on a soft cushion on the window sill. I curl up into a ball and close my eyes, relishing the warmth of the sun on my fur. As I drift off to sleep, I can hear the birds chirping outside and feel the gentle breeze blowing through the window. This is the life of a contented cat.', additional_kwargs={})"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat = PromptLayerChatOpenAI(pl_tags=[\"langchain\"])\n",
"chat([HumanMessage(content=\"I am a cat and I want\")])"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a2d76826",
"metadata": {},
"source": [
"**The above request should now appear on your [PromptLayer dashboard](https://ww.promptlayer.com).**"
]
},
{
"cell_type": "markdown",
"id": "05e9e2fe",
"metadata": {},
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
},
"vscode": {
"interpreter": {
"hash": "c4fe2cd85a8d9e8baaec5340ce66faff1c77581a9f43e6c45e85e09b6fced008"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -1,3 +1,4 @@
from langchain.chat_models.openai import ChatOpenAI
from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
__all__ = ["ChatOpenAI"]
__all__ = ["ChatOpenAI", "PromptLayerChatOpenAI"]

@@ -0,0 +1,84 @@
"""PromptLayer wrapper."""
import datetime
from typing import List, Optional
from pydantic import BaseModel
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, ChatResult
class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
"""Wrapper around OpenAI Chat large language models and PromptLayer.
To use, you should have the ``openai`` and ``promptlayer`` python
package installed, and the environment variable ``OPENAI_API_KEY``
and ``PROMPTLAYER_API_KEY`` set with your openAI API key and
promptlayer key respectively.
All parameters that can be passed to the OpenAI LLM can also
be passed here. The PromptLayerChatOpenAI LLM adds an extra
``pl_tags`` parameter that can be used to tag the request.
Example:
.. code-block:: python
from langchain.chat_models import PromptLayerChatOpenAI
openai = PromptLayerChatOpenAI(model_name="gpt-3.5-turbo")
"""
pl_tags: Optional[List[str]]
def _generate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
"""Call ChatOpenAI generate and then call PromptLayer API to log the request."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = super()._generate(messages, stop)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
promptlayer_api_request(
"langchain.PromptLayerChatOpenAI",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
)
return generated_responses
async def _agenerate(
self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult:
"""Call ChatOpenAI agenerate and then call PromptLayer to log."""
from promptlayer.utils import get_api_key, promptlayer_api_request
request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop)
request_end_time = datetime.datetime.now().timestamp()
message_dicts, params = super()._create_message_dicts(messages, stop)
for i, generation in enumerate(generated_responses.generations):
response_dict, params = super()._create_message_dicts(
[generation.message], stop
)
promptlayer_api_request(
"langchain.PromptLayerChatOpenAI.async",
"langchain",
message_dicts,
params,
self.pl_tags,
response_dict,
request_start_time,
request_end_time,
get_api_key(),
)
return generated_responses
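A minimal usage sketch of the wrapper above, assuming `OPENAI_API_KEY` and `PROMPTLAYER_API_KEY` are already set in the environment (the tag is illustrative):

```python
from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

# Drop-in replacement for ChatOpenAI; logging to PromptLayer happens
# as a side effect inside _generate/_agenerate.
chat = PromptLayerChatOpenAI(pl_tags=["langchain"])
response = chat([HumanMessage(content="Say hello")])
print(response.content)
```

Note that `promptlayer` is imported inside `_generate` and `_agenerate` rather than at module level, so the extra dependency is only required when the wrapper is actually used.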

@@ -18,7 +18,7 @@ from langchain.llms.modal import Modal
from langchain.llms.nlpcloud import NLPCloud
from langchain.llms.openai import AzureOpenAI, OpenAI, OpenAIChat
from langchain.llms.petals import Petals
from langchain.llms.promptlayer_openai import PromptLayerOpenAI
from langchain.llms.promptlayer_openai import PromptLayerOpenAI, PromptLayerOpenAIChat
from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
from langchain.llms.stochasticai import StochasticAI
@@ -46,6 +46,7 @@ __all__ = [
    "SelfHostedPipeline",
    "SelfHostedHuggingFaceLLM",
    "PromptLayerOpenAI",
    "PromptLayerOpenAIChat",
    "StochasticAI",
    "Writer",
]

@@ -4,7 +4,7 @@ from typing import List, Optional
from pydantic import BaseModel
from langchain.llms import OpenAI
from langchain.llms import OpenAI, OpenAIChat
from langchain.schema import LLMResult
@@ -23,8 +23,8 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            openai = OpenAI(model_name="text-davinci-003")
            from langchain.llms import PromptLayerOpenAI
            openai = PromptLayerOpenAI(model_name="text-davinci-003")
    """

    pl_tags: Optional[List[str]]
@@ -40,14 +40,17 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            resp = generated_responses.generations[i]
            resp = {
                "text": generated_responses.generations[i][0].text,
                "llm_output": generated_responses.llm_output,
            }
            promptlayer_api_request(
                "langchain.PromptLayerOpenAI",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp[0].text,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
@@ -64,13 +67,90 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            resp = generated_responses.generations[i]
            resp = {
                "text": generated_responses.generations[i][0].text,
                "llm_output": generated_responses.llm_output,
            }
            promptlayer_api_request(
                "langchain.PromptLayerOpenAI.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
            )
        return generated_responses


class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
    """Wrapper around OpenAI large language models and PromptLayer.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAIChat LLM can also
    be passed here. The PromptLayerOpenAIChat LLM adds an extra
    ``pl_tags`` parameter that can be used to tag the request.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAIChat
            openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
    """

    pl_tags: Optional[List[str]]

    def _generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call OpenAIChat generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            resp = {
                "text": generated_responses.generations[i][0].text,
                "llm_output": generated_responses.llm_output,
            }
            promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
            )
        return generated_responses

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call OpenAIChat agenerate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            # Mirror the sync path: log the generation text and llm_output
            # rather than the raw Generation object.
            resp = {
                "text": generated_responses.generations[i][0].text,
                "llm_output": generated_responses.llm_output,
            }
            promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
            )
        return generated_responses
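A hedged sketch of the completion-style chat wrapper in use, mirroring the docstring example above (same environment-variable assumptions; the prompt strings are arbitrary):

```python
import asyncio

from langchain.llms import PromptLayerOpenAIChat

openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo", pl_tags=["langchain"])
print(openaichat("Say foo:"))  # logged as "langchain.PromptLayerOpenAIChat"


async def main() -> None:
    # The async path logs under "langchain.PromptLayerOpenAIChat.async".
    result = await openaichat.agenerate(["Say bar:"])
    print(result.generations[0][0].text)


asyncio.run(main())
```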

@@ -0,0 +1,130 @@
"""Test PromptLayerChatOpenAI wrapper."""
import pytest

from langchain.callbacks.base import CallbackManager
from langchain.chat_models.promptlayer_openai import PromptLayerChatOpenAI
from langchain.schema import (
    BaseMessage,
    ChatGeneration,
    ChatResult,
    HumanMessage,
    LLMResult,
    SystemMessage,
)
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler


def test_promptlayer_chat_openai() -> None:
    """Test PromptLayerChatOpenAI wrapper."""
    chat = PromptLayerChatOpenAI(max_tokens=10)
    message = HumanMessage(content="Hello")
    response = chat([message])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)


def test_promptlayer_chat_openai_system_message() -> None:
    """Test PromptLayerChatOpenAI wrapper with system message."""
    chat = PromptLayerChatOpenAI(max_tokens=10)
    system_message = SystemMessage(content="You are to chat with the user.")
    human_message = HumanMessage(content="Hello")
    response = chat([system_message, human_message])
    assert isinstance(response, BaseMessage)
    assert isinstance(response.content, str)


def test_promptlayer_chat_openai_generate() -> None:
    """Test PromptLayerChatOpenAI wrapper with generate."""
    chat = PromptLayerChatOpenAI(max_tokens=10, n=2)
    message = HumanMessage(content="Hello")
    response = chat.generate([[message], [message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 2
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content


def test_promptlayer_chat_openai_multiple_completions() -> None:
    """Test PromptLayerChatOpenAI wrapper with multiple completions."""
    chat = PromptLayerChatOpenAI(max_tokens=10, n=5)
    message = HumanMessage(content="Hello")
    response = chat._generate([message])
    assert isinstance(response, ChatResult)
    assert len(response.generations) == 5
    for generation in response.generations:
        assert isinstance(generation.message, BaseMessage)
        assert isinstance(generation.message.content, str)


def test_promptlayer_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = PromptLayerChatOpenAI(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = chat([message])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, BaseMessage)


def test_promptlayer_chat_openai_invalid_streaming_params() -> None:
    """Test that combining streaming with n > 1 raises an error."""
    with pytest.raises(ValueError):
        PromptLayerChatOpenAI(
            max_tokens=10,
            streaming=True,
            temperature=0,
            n=5,
        )


@pytest.mark.asyncio
async def test_async_promptlayer_chat_openai() -> None:
    """Test async generation."""
    chat = PromptLayerChatOpenAI(max_tokens=10, n=2)
    message = HumanMessage(content="Hello")
    response = await chat.agenerate([[message], [message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 2
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content


@pytest.mark.asyncio
async def test_async_promptlayer_chat_openai_streaming() -> None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    callback_handler = FakeCallbackHandler()
    callback_manager = CallbackManager([callback_handler])
    chat = PromptLayerChatOpenAI(
        max_tokens=10,
        streaming=True,
        temperature=0,
        callback_manager=callback_manager,
        verbose=True,
    )
    message = HumanMessage(content="Hello")
    response = await chat.agenerate([[message], [message]])
    assert callback_handler.llm_streams > 0
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 2
    for generations in response.generations:
        assert len(generations) == 1
        for generation in generations:
            assert isinstance(generation, ChatGeneration)
            assert isinstance(generation.text, str)
            assert generation.text == generation.message.content

@@ -0,0 +1,41 @@
"""Test PromptLayer OpenAIChat API wrapper."""
from pathlib import Path

import pytest

from langchain.llms.loading import load_llm
from langchain.llms.promptlayer_openai import PromptLayerOpenAIChat


def test_promptlayer_openai_chat_call() -> None:
    """Test valid call to PromptLayer OpenAIChat."""
    llm = PromptLayerOpenAIChat(max_tokens=10)
    output = llm("Say foo:")
    assert isinstance(output, str)


def test_promptlayer_openai_chat_stop_valid() -> None:
    """Test PromptLayer OpenAIChat stop logic on valid configuration."""
    query = "write an ordered list of five items"
    first_llm = PromptLayerOpenAIChat(stop="3", temperature=0)
    first_output = first_llm(query)
    second_llm = PromptLayerOpenAIChat(temperature=0)
    second_output = second_llm(query, stop=["3"])
    # Both runs stop at "3", so the outputs should match.
    assert first_output == second_output


def test_promptlayer_openai_chat_stop_error() -> None:
    """Test PromptLayer OpenAIChat stop logic on bad configuration."""
    llm = PromptLayerOpenAIChat(stop="3", temperature=0)
    with pytest.raises(ValueError):
        llm("write an ordered list of five items", stop=["\n"])


def test_saving_loading_llm(tmp_path: Path) -> None:
    """Test saving/loading a PromptLayer OpenAIChat LLM."""
    llm = PromptLayerOpenAIChat(max_tokens=10)
    llm.save(file_path=tmp_path / "openai.yaml")
    loaded_llm = load_llm(tmp_path / "openai.yaml")
    assert loaded_llm == llm