mirror of https://github.com/hwchase17/langchain (synced 2024-11-06 03:20:49 +00:00)
Update PromptLayerOpenAI LLM to include support for ASYNC API (#1066)
This PR updates `PromptLayerOpenAI` to support requests made through the [Async API](https://langchain.readthedocs.io/en/latest/modules/llms/async_llm.html). It also updates the Async API documentation to note that `PromptLayerOpenAI` supports it as well. `PromptLayerOpenAI` now redefines `_agenerate` in a similar way to how it redefines `_generate`.
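For reference, a minimal usage sketch (not part of this change; the prompt text, `temperature`, and `pl_tags` values are illustrative, and OpenAI and PromptLayer API keys are assumed to be configured in the environment) showing the new async path being awaited directly:

```python
import asyncio

from langchain.llms import PromptLayerOpenAI


async def main() -> None:
    # agenerate accepts a list of prompts and returns an LLMResult,
    # mirroring the synchronous generate call.
    llm = PromptLayerOpenAI(temperature=0.9, pl_tags=["async-demo"])
    result = await llm.agenerate(["Tell me a joke."])
    print(result.generations[0][0].text)


asyncio.run(main())
```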
This commit is contained in:
parent 98186ef180
commit 05ad399abe
@@ -1,6 +1,7 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "f6574496-b360-4ffa-9523-7fd34a590164",
    "metadata": {},
@@ -9,7 +10,7 @@
    "\n",
    "LangChain provides async support for LLMs by leveraging the [asyncio](https://docs.python.org/3/library/asyncio.html) library.\n",
    "\n",
-   "Async support is particularly useful for calling multiple LLMs concurrently, as these calls are network-bound. Currently, only `OpenAI` is supported, but async support for other LLMs is on the roadmap.\n",
+   "Async support is particularly useful for calling multiple LLMs concurrently, as these calls are network-bound. Currently, only `OpenAI` and `PromptLayerOpenAI` is supported, but async support for other LLMs is on the roadmap.\n",
    "\n",
    "You can use the `agenerate` method to call an OpenAI LLM asynchronously."
   ]
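As the updated notebook text notes, async support is most useful for overlapping several network-bound calls. A sketch of that pattern with `asyncio.gather` (the prompt and call count are illustrative; API keys are assumed to be configured in the environment):

```python
import asyncio

from langchain.llms import PromptLayerOpenAI


async def async_generate(llm: PromptLayerOpenAI) -> str:
    # Each coroutine issues its own request; gathering them lets the
    # network-bound calls run concurrently instead of back to back.
    resp = await llm.agenerate(["Hello, how are you?"])
    return resp.generations[0][0].text


async def generate_concurrently() -> None:
    llm = PromptLayerOpenAI(temperature=0.9)
    outputs = await asyncio.gather(*[async_generate(llm) for _ in range(10)])
    for text in outputs:
        print(text)


asyncio.run(generate_concurrently())
```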
@@ -53,3 +53,27 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
                 get_api_key(),
             )
         return generated_responses
+
+    async def _agenerate(
+        self, prompts: List[str], stop: Optional[List[str]] = None
+    ) -> LLMResult:
+        from promptlayer.utils import get_api_key, promptlayer_api_request
+
+        request_start_time = datetime.datetime.now().timestamp()
+        generated_responses = await super()._agenerate(prompts, stop)
+        request_end_time = datetime.datetime.now().timestamp()
+        for i in range(len(prompts)):
+            prompt = prompts[i]
+            resp = generated_responses.generations[i]
+            promptlayer_api_request(
+                "langchain.PromptLayerOpenAI.async",
+                "langchain",
+                [prompt],
+                self._identifying_params,
+                self.pl_tags,
+                resp[0].text,
+                request_start_time,
+                request_end_time,
+                get_api_key(),
+            )
+        return generated_responses
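For clarity on the logging loop above: `generated_responses.generations[i]` holds the generations produced for `prompts[i]`, and `resp[0].text` is the text of the first completion, which is what gets sent to PromptLayer. A small stand-in sketch of that shape (assuming `Generation` and `LLMResult` from `langchain.schema`; the prompts and texts are made up):

```python
from langchain.schema import Generation, LLMResult

# Hand-built stand-in for what super()._agenerate(prompts, stop) returns:
# one inner list of Generation objects per input prompt.
prompts = ["Tell me a joke.", "Tell me a fact."]
generated_responses = LLMResult(
    generations=[
        [Generation(text="Why did the chicken cross the road? ...")],
        [Generation(text="Honey never spoils.")],
    ]
)

for i, prompt in enumerate(prompts):
    resp = generated_responses.generations[i]
    print(prompt, "->", resp[0].text)
```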