"""PromptLayer wrapper."""

import datetime
from typing import List, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms import OpenAI, OpenAIChat
from langchain.schema import LLMResult


class PromptLayerOpenAI(OpenAI):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAI LLM can also
    be passed here. The PromptLayerOpenAI LLM adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAI
            openai = PromptLayerOpenAI(model_name="text-davinci-003")
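
            # Illustrative sketch of the PromptLayer-specific options;
            # the tag values below are placeholders, not required names.
            tagged = PromptLayerOpenAI(
                model_name="text-davinci-003",
                pl_tags=["langchain", "my-app"],
                return_pl_id=True,
            )
            result = tagged.generate(["Tell me a joke"])
            # With return_pl_id=True, each Generation carries the request id.
            pl_request_id = result.generations[0][0].generation_info["pl_request_id"]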
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        # Log each prompt and its first generation to PromptLayer.
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAI",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> LLMResult:
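        """Call OpenAI agenerate and then call PromptLayer API to log the request."""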
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAI.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses


class PromptLayerOpenAIChat(OpenAIChat):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAIChat LLM can also
    be passed here. The PromptLayerOpenAIChat adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAIChat
            openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
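
            # Illustrative sketch of the PromptLayer-specific options;
            # the tag values below are placeholders, not required names.
            tagged_chat = PromptLayerOpenAIChat(
                model_name="gpt-3.5-turbo",
                pl_tags=["langchain", "my-app"],
                return_pl_id=True,
            )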
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        # Log each prompt and its first generation to PromptLayer.
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> LLMResult:
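        """Call OpenAI agenerate and then call PromptLayer API to log the request."""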
        from promptlayer.utils import get_api_key, promptlayer_api_request_async

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop, run_manager)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = await promptlayer_api_request_async(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses