core: BaseChatModel modify chat message before passing to run_manager (#19939)

Thank you for contributing to LangChain!

- [ ] **PR title**: "package: description"
  - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes.
  - Example: "community: add foobar LLM"


- [ ] **PR message**: ***Delete this entire checklist*** and replace with:
  - **Description:** a description of the change
  - **Issue:** the issue # it fixes, if applicable
  - **Dependencies:** any dependencies required for this change
  - **Twitter handle:** if your PR gets announced, and you'd like a mention, we'll gladly shout you out!


- [ ] **Add tests and docs**: If you're adding a new integration, please include
  1. a test for the integration, preferably unit tests that do not rely on network access, and
  2. an example notebook showing its use, which lives in the `docs/docs/integrations` directory.


- [ ] **Lint and test**: Run `make format`, `make lint` and `make test` from the root of the package(s) you've modified. See the contribution guidelines for more: https://python.langchain.com/docs/contributing/

Additional guidelines:
- Make sure optional dependencies are imported within a function.
- Please do not add dependencies to pyproject.toml files (even optional ones) unless they are required for unit tests.
- Most PRs should not touch more than one package.
- Changes should be backwards compatible.
- If you are adding something to community, do not re-import it in langchain.

If no one reviews your PR within a few days, please @-mention one of baskaryan, efriis, eyurtsev, hwchase17.
Nuno Campos authored 2024-04-02 16:40:27 -07:00, committed by GitHub
commit f4568fe0c6
parent 73ebe78249

@@ -221,12 +221,12 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
             generation: Optional[ChatGenerationChunk] = None
             try:
                 for chunk in self._stream(messages, stop=stop, **kwargs):
-                    run_manager.on_llm_new_token(
-                        cast(str, chunk.message.content), chunk=chunk
-                    )
                     if chunk.message.id is None:
                         chunk.message.id = f"run-{run_manager.run_id}"
                     chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
+                    run_manager.on_llm_new_token(
+                        cast(str, chunk.message.content), chunk=chunk
+                    )
                     yield chunk.message
                     if generation is None:
                         generation = chunk
@@ -293,12 +293,12 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
                     stop=stop,
                     **kwargs,
                 ):
-                    await run_manager.on_llm_new_token(
-                        cast(str, chunk.message.content), chunk=chunk
-                    )
                     if chunk.message.id is None:
                         chunk.message.id = f"run-{run_manager.run_id}"
                     chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
+                    await run_manager.on_llm_new_token(
+                        cast(str, chunk.message.content), chunk=chunk
+                    )
                     yield chunk.message
                     if generation is None:
                         generation = chunk
@@ -610,13 +610,13 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         ):
             chunks: List[ChatGenerationChunk] = []
             for chunk in self._stream(messages, stop=stop, **kwargs):
+                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
                 if run_manager:
                     if chunk.message.id is None:
                         chunk.message.id = f"run-{run_manager.run_id}"
                     run_manager.on_llm_new_token(
                         cast(str, chunk.message.content), chunk=chunk
                     )
-                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
                 chunks.append(chunk)
             result = generate_from_stream(iter(chunks))
         else:
@@ -691,13 +691,13 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
         ):
             chunks: List[ChatGenerationChunk] = []
             async for chunk in self._astream(messages, stop=stop, **kwargs):
+                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
                 if run_manager:
                     if chunk.message.id is None:
                         chunk.message.id = f"run-{run_manager.run_id}"
                     await run_manager.on_llm_new_token(
                         cast(str, chunk.message.content), chunk=chunk
                     )
-                chunk.message.response_metadata = _gen_info_and_msg_metadata(chunk)
                 chunks.append(chunk)
             result = generate_from_stream(iter(chunks))
         else:
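
Across all four streaming paths touched above (`stream`, `astream`, and the two cached-generate streaming branches), the chunk's message now has its `id` and `response_metadata` set before `run_manager.on_llm_new_token(...)` is invoked, so callback handlers observe the finalized chunk rather than one that is patched up afterwards. Below is a minimal sketch of a handler that relies on that ordering; it is not part of this PR, the `ChunkInspector` name is made up, and `model` stands for any chat model with a native `_stream` implementation.

```python
from typing import Any, Optional, Union
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk


class ChunkInspector(BaseCallbackHandler):
    """Print the metadata each streamed chunk carries when the callback fires."""

    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> None:
        # With this change, chat models set `message.id` and
        # `message.response_metadata` before emitting the token, so both are
        # already populated here instead of only on the final message.
        if isinstance(chunk, ChatGenerationChunk):
            print(chunk.message.id, chunk.message.response_metadata, repr(token))


# Hypothetical usage -- `model` is any chat model that implements `_stream`:
# for message_chunk in model.stream("hello", config={"callbacks": [ChunkInspector()]}):
#     pass
```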