From 725b668aef714a19f8c3463d6a0db52c8cadb3bd Mon Sep 17 00:00:00 2001
From: Jonathan Pedoeem
Date: Mon, 27 Mar 2023 18:24:53 -0400
Subject: [PATCH] Updating PromptLayer request in PromptLayer Models to be
 async in agenerate (#2058)

Currently, the PromptLayer request in agenerate is blocking when it
should be made async. This PR updates those calls to await
promptlayer_api_request_async so that logging no longer blocks the
event loop. (A minimal sketch of the blocking-vs-async pattern follows
the diff.)
---
 langchain/chat_models/promptlayer_openai.py | 4 ++--
 langchain/llms/promptlayer_openai.py        | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/langchain/chat_models/promptlayer_openai.py b/langchain/chat_models/promptlayer_openai.py
index a42acbe241..faf3726934 100644
--- a/langchain/chat_models/promptlayer_openai.py
+++ b/langchain/chat_models/promptlayer_openai.py
@@ -72,7 +72,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
         self, messages: List[BaseMessage], stop: Optional[List[str]] = None
     ) -> ChatResult:
         """Call ChatOpenAI agenerate and then call PromptLayer to log."""
-        from promptlayer.utils import get_api_key, promptlayer_api_request
+        from promptlayer.utils import get_api_key, promptlayer_api_request_async
 
         request_start_time = datetime.datetime.now().timestamp()
         generated_responses = await super()._agenerate(messages, stop)
@@ -82,7 +82,7 @@ class PromptLayerChatOpenAI(ChatOpenAI, BaseModel):
             response_dict, params = super()._create_message_dicts(
                 [generation.message], stop
             )
-            pl_request_id = promptlayer_api_request(
+            pl_request_id = await promptlayer_api_request_async(
                 "langchain.PromptLayerChatOpenAI.async",
                 "langchain",
                 message_dicts,
diff --git a/langchain/llms/promptlayer_openai.py b/langchain/llms/promptlayer_openai.py
index 1cfb317700..8a3ae4c155 100644
--- a/langchain/llms/promptlayer_openai.py
+++ b/langchain/llms/promptlayer_openai.py
@@ -73,7 +73,7 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
     async def _agenerate(
         self, prompts: List[str], stop: Optional[List[str]] = None
     ) -> LLMResult:
-        from promptlayer.utils import get_api_key, promptlayer_api_request
+        from promptlayer.utils import get_api_key, promptlayer_api_request_async
 
         request_start_time = datetime.datetime.now().timestamp()
         generated_responses = await super()._agenerate(prompts, stop)
@@ -85,7 +85,7 @@ class PromptLayerOpenAI(OpenAI, BaseModel):
                 "text": generation.text,
                 "llm_output": generated_responses.llm_output,
             }
-            pl_request_id = promptlayer_api_request(
+            pl_request_id = await promptlayer_api_request_async(
                 "langchain.PromptLayerOpenAI.async",
                 "langchain",
                 [prompt],
@@ -171,7 +171,7 @@ class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
     async def _agenerate(
         self, prompts: List[str], stop: Optional[List[str]] = None
     ) -> LLMResult:
-        from promptlayer.utils import get_api_key, promptlayer_api_request
+        from promptlayer.utils import get_api_key, promptlayer_api_request_async
 
         request_start_time = datetime.datetime.now().timestamp()
         generated_responses = await super()._agenerate(prompts, stop)
@@ -183,7 +183,7 @@ class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
                 "text": generation.text,
                 "llm_output": generated_responses.llm_output,
             }
-            pl_request_id = promptlayer_api_request(
+            pl_request_id = await promptlayer_api_request_async(
                 "langchain.PromptLayerOpenAIChat.async",
                 "langchain",
                 [prompt],
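
The pattern this patch fixes, in isolation: calling a synchronous function
inside an "async def" blocks the whole event loop for the duration of the
call, serializing every coroutine scheduled on it, whereas awaiting an async
variant yields control while the request is in flight. Below is a minimal,
self-contained sketch of that before/after behavior. The blocking_log and
async_log helpers are hypothetical stand-ins that simulate the PromptLayer
HTTP request with sleeps; they are not the real promptlayer.utils API.

    import asyncio
    import time

    def blocking_log(name: str) -> int:
        """Stand-in for a synchronous request (like the old promptlayer_api_request)."""
        time.sleep(0.1)  # blocks the event loop: nothing else can run meanwhile
        return 1

    async def async_log(name: str) -> int:
        """Stand-in for a non-blocking request (like promptlayer_api_request_async)."""
        await asyncio.sleep(0.1)  # yields to the event loop while "in flight"
        return 1

    async def agenerate_before() -> None:
        # BEFORE: synchronous call inside a coroutine; the loop stalls for 0.1s.
        blocking_log("langchain.PromptLayerOpenAI.async")

    async def agenerate_after() -> None:
        # AFTER: awaited async call; other coroutines run during the 0.1s wait.
        await async_log("langchain.PromptLayerOpenAI.async")

    async def main() -> None:
        # Ten concurrent logging calls: the blocking version runs them one
        # after another (~1.0s total); the async version overlaps them (~0.1s).
        t0 = time.perf_counter()
        await asyncio.gather(*(agenerate_before() for _ in range(10)))
        print(f"blocking: {time.perf_counter() - t0:.2f}s")

        t0 = time.perf_counter()
        await asyncio.gather(*(agenerate_after() for _ in range(10)))
        print(f"async:    {time.perf_counter() - t0:.2f}s")

    asyncio.run(main())

Expected output is roughly "blocking: 1.00s" followed by "async: 0.10s",
which is why the un-awaited promptlayer_api_request call defeated the purpose
of _agenerate being a coroutine.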