fix: overwrite stream for ChatOpenAI in runtime (#8288)

- Description: adds an optional `stream` parameter to `ChatOpenAI._generate` and
  `ChatOpenAI._agenerate` so that a single call can override the instance-level
  `streaming` attribute at runtime; passing `stream=None` (or omitting it)
  preserves the existing behavior. `PromptLayerChatOpenAI` gains the same
  parameter and forwards it, together with `**kwargs` (previously dropped on the
  async path), to its superclass calls.
- Dependencies: none
- Tag maintainer: @hwchase17, @baskaryan
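
A minimal usage sketch of the override, assuming the langchain API as of this commit; the message content and flag values are illustrative, and `_generate` is shown directly because it is the method this PR changes:

```python
# Sketch only: assumes the langchain version from this commit.
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

chat = ChatOpenAI(streaming=False)  # instance-level default

# The per-call flag wins over chat.streaming, so this call is served
# through the chunked _stream() path and the chunks are aggregated
# into a single ChatResult.
result = chat._generate([HumanMessage(content="hi")], stream=True)

# Omitting stream (i.e. stream=None) falls back to chat.streaming,
# matching the pre-PR behavior.
result = chat._generate([HumanMessage(content="hi")])
```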

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
Co-authored-by: Nuno Campos <nuno@boringbits.io>
Vic Cao, 2023-08-07 17:18:30 +08:00 (committed by GitHub)
commit c9da300e4d, parent 5a9765b1b5
2 changed files with 12 additions and 4 deletions


@@ -381,9 +381,10 @@ class ChatOpenAI(BaseChatModel):
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stream: Optional[bool] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        if self.streaming:
+        if stream if stream is not None else self.streaming:
             generation: Optional[ChatGenerationChunk] = None
             for chunk in self._stream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
@@ -454,9 +455,10 @@ class ChatOpenAI(BaseChatModel):
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stream: Optional[bool] = None,
         **kwargs: Any,
     ) -> ChatResult:
-        if self.streaming:
+        if stream if stream is not None else self.streaming:
             generation: Optional[ChatGenerationChunk] = None
             async for chunk in self._astream(
                 messages=messages, stop=stop, run_manager=run_manager, **kwargs
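
The nested conditional `stream if stream is not None else self.streaming` reads as a three-way resolution. Spelled out as a standalone helper (a hypothetical name, for illustration only; the PR inlines the expression):

```python
from typing import Optional

def _resolve_streaming(stream: Optional[bool], default: bool) -> bool:
    # An explicit per-call value wins; None defers to the instance attribute.
    return stream if stream is not None else default

assert _resolve_streaming(None, True) is True    # no override -> instance default
assert _resolve_streaming(False, True) is False  # explicit False disables streaming
assert _resolve_streaming(True, False) is True   # explicit True forces streaming
```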


@@ -43,13 +43,16 @@ class PromptLayerChatOpenAI(ChatOpenAI):
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
+        stream: Optional[bool] = None,
         **kwargs: Any
     ) -> ChatResult:
         """Call ChatOpenAI generate and then call PromptLayer API to log the request."""
         from promptlayer.utils import get_api_key, promptlayer_api_request

         request_start_time = datetime.datetime.now().timestamp()
-        generated_responses = super()._generate(messages, stop, run_manager, **kwargs)
+        generated_responses = super()._generate(
+            messages, stop, run_manager, stream=stream, **kwargs
+        )
         request_end_time = datetime.datetime.now().timestamp()
         message_dicts, params = super()._create_message_dicts(messages, stop)
         for i, generation in enumerate(generated_responses.generations):
@@ -82,13 +85,16 @@ class PromptLayerChatOpenAI(ChatOpenAI):
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        stream: Optional[bool] = None,
         **kwargs: Any
     ) -> ChatResult:
         """Call ChatOpenAI agenerate and then call PromptLayer to log."""
         from promptlayer.utils import get_api_key, promptlayer_api_request_async

         request_start_time = datetime.datetime.now().timestamp()
-        generated_responses = await super()._agenerate(messages, stop, run_manager)
+        generated_responses = await super()._agenerate(
+            messages, stop, run_manager, stream=stream, **kwargs
+        )
         request_end_time = datetime.datetime.now().timestamp()
         message_dicts, params = super()._create_message_dicts(messages, stop)
         for i, generation in enumerate(generated_responses.generations):
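
Note that the old async call dropped `**kwargs` entirely; forwarding `stream=stream, **kwargs` fixes that as a side effect. A hedged usage sketch for the wrapper (assumes the `promptlayer` package and API key are configured; tag and message values are illustrative):

```python
# Sketch only: assumes langchain and promptlayer from this era, with
# PROMPTLAYER_API_KEY and OPENAI_API_KEY set in the environment.
from langchain.chat_models import PromptLayerChatOpenAI
from langchain.schema import HumanMessage

chat = PromptLayerChatOpenAI(pl_tags=["example"], streaming=True)

# Despite streaming=True on the instance, this call runs non-streaming;
# the PromptLayer log records the request/response pair either way.
result = chat._generate([HumanMessage(content="hello")], stream=False)
```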