Updating PromptLayer request in PromptLayer Models to be async in agenerate (#2058)

Currently in agenerate, the PromptLayer request is blocking and should
be made async. In this PR we update all of that so it works as
intended.
searx
Jonathan Pedoeem 1 year ago committed by GitHub
parent 024efb09f8
commit 725b668aef
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -72,7 +72,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
self, messages: List[BaseMessage], stop: Optional[List[str]] = None self, messages: List[BaseMessage], stop: Optional[List[str]] = None
) -> ChatResult: ) -> ChatResult:
"""Call ChatOpenAI agenerate and then call PromptLayer to log.""" """Call ChatOpenAI agenerate and then call PromptLayer to log."""
from promptlayer.utils import get_api_key, promptlayer_api_request from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp() request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(messages, stop) generated_responses = await super()._agenerate(messages, stop)
@ -82,7 +82,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
response_dict, params = super()._create_message_dicts( response_dict, params = super()._create_message_dicts(
[generation.message], stop [generation.message], stop
) )
pl_request_id = promptlayer_api_request( pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerChatOpenAI.async", "langchain.PromptLayerChatOpenAI.async",
"langchain", "langchain",
message_dicts, message_dicts,

@ -73,7 +73,7 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
async def _agenerate( async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult: ) -> LLMResult:
from promptlayer.utils import get_api_key, promptlayer_api_request from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp() request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(prompts, stop) generated_responses = await super()._agenerate(prompts, stop)
@ -85,7 +85,7 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
"text": generation.text, "text": generation.text,
"llm_output": generated_responses.llm_output, "llm_output": generated_responses.llm_output,
} }
pl_request_id = promptlayer_api_request( pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerOpenAI.async", "langchain.PromptLayerOpenAI.async",
"langchain", "langchain",
[prompt], [prompt],
@ -171,7 +171,7 @@ class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
async def _agenerate( async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult: ) -> LLMResult:
from promptlayer.utils import get_api_key, promptlayer_api_request from promptlayer.utils import get_api_key, promptlayer_api_request_async
request_start_time = datetime.datetime.now().timestamp() request_start_time = datetime.datetime.now().timestamp()
generated_responses = await super()._agenerate(prompts, stop) generated_responses = await super()._agenerate(prompts, stop)
@ -183,7 +183,7 @@ class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
"text": generation.text, "text": generation.text,
"llm_output": generated_responses.llm_output, "llm_output": generated_responses.llm_output,
} }
pl_request_id = promptlayer_api_request( pl_request_id = await promptlayer_api_request_async(
"langchain.PromptLayerOpenAIChat.async", "langchain.PromptLayerOpenAIChat.async",
"langchain", "langchain",
[prompt], [prompt],

Loading…
Cancel
Save