add invocation params as extra params in llm callbacks (#4506)

# Add invocation params as extra params in LLM callbacks

This PR passes the LLM's invocation parameters to the `on_llm_start` callback as an
extra `invocation_params` keyword argument, in both the sync and async generation
paths of `BaseLLM`. The params dict is `self.dict()` with the call's `stop` sequences
added, i.e. the same dict already used for the cache lookup, so its construction
(together with the `get_prompts` call) moves to the top of each method and is reused
for both the callback and the cache.
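
For downstream consumers, the new argument arrives in `on_llm_start` as a keyword
argument. A minimal sketch of a handler that records it, assuming the base handler's
`on_llm_start` accepts extra keyword arguments (the `InvocationParamsLogger` class is
illustrative, not part of this PR):

```python
from typing import Any, Dict, List

from langchain.callbacks.base import BaseCallbackHandler


class InvocationParamsLogger(BaseCallbackHandler):
    """Hypothetical handler that records the params each LLM call was made with."""

    def __init__(self) -> None:
        self.seen_params: List[Dict[str, Any]] = []

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # After this change, `invocation_params` shows up in the extra kwargs.
        self.seen_params.append(kwargs.get("invocation_params", {}))
```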

## Who can review?

Community members can review the PR once tests pass. Tag maintainers/contributors who
might be interested: for tracing / callbacks and async, that is @agola11.
Ankush Gola, 2023-05-11 15:33:52 -07:00, committed via GitHub
commit 59853fc876 (parent 1c0ec26e40)

langchain/llms/base.py

@@ -149,6 +149,14 @@ class BaseLLM(BaseLanguageModel, ABC):
                 "Argument 'prompts' is expected to be of type List[str], received"
                 f" argument of type {type(prompts)}."
             )
+        params = self.dict()
+        params["stop"] = stop
+        (
+            existing_prompts,
+            llm_string,
+            missing_prompt_idxs,
+            missing_prompts,
+        ) = get_prompts(params, prompts)
         disregard_cache = self.cache is not None and not self.cache
         callback_manager = CallbackManager.configure(
             callbacks, self.callbacks, self.verbose
@@ -163,7 +171,7 @@ class BaseLLM(BaseLanguageModel, ABC):
                     "Asked to cache, but no cache found at `langchain.cache`."
                 )
             run_manager = callback_manager.on_llm_start(
-                {"name": self.__class__.__name__}, prompts
+                {"name": self.__class__.__name__}, prompts, invocation_params=params
             )
             try:
                 output = (
@@ -176,17 +184,11 @@ class BaseLLM(BaseLanguageModel, ABC):
                 raise e
             run_manager.on_llm_end(output)
             return output
-        params = self.dict()
-        params["stop"] = stop
-        (
-            existing_prompts,
-            llm_string,
-            missing_prompt_idxs,
-            missing_prompts,
-        ) = get_prompts(params, prompts)
         if len(missing_prompts) > 0:
             run_manager = callback_manager.on_llm_start(
-                {"name": self.__class__.__name__}, missing_prompts
+                {"name": self.__class__.__name__},
+                missing_prompts,
+                invocation_params=params,
             )
             try:
                 new_results = (
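
The `invocation_params` dict passed above is just `self.dict()` with the call's `stop`
sequences added, i.e. the same dict used to build the cache key. As a rough illustration
only (the concrete keys depend on the model class and are an assumption here, not taken
from this diff), for an OpenAI-style LLM it would look something like:

```python
# Hypothetical contents of `params` for an OpenAI-style LLM; the exact keys come
# from the model's identifying params plus the serializer's `_type` field.
params = {
    "model_name": "text-davinci-003",
    "temperature": 0.7,
    "max_tokens": 256,
    "_type": "openai",
    "stop": ["\n\n"],  # the `stop` argument passed into generate()
}
```
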
@@ -213,6 +215,14 @@ class BaseLLM(BaseLanguageModel, ABC):
         callbacks: Callbacks = None,
     ) -> LLMResult:
         """Run the LLM on the given prompt and input."""
+        params = self.dict()
+        params["stop"] = stop
+        (
+            existing_prompts,
+            llm_string,
+            missing_prompt_idxs,
+            missing_prompts,
+        ) = get_prompts(params, prompts)
         disregard_cache = self.cache is not None and not self.cache
         callback_manager = AsyncCallbackManager.configure(
             callbacks, self.callbacks, self.verbose
@@ -227,7 +237,7 @@ class BaseLLM(BaseLanguageModel, ABC):
                     "Asked to cache, but no cache found at `langchain.cache`."
                 )
             run_manager = await callback_manager.on_llm_start(
-                {"name": self.__class__.__name__}, prompts
+                {"name": self.__class__.__name__}, prompts, invocation_params=params
             )
             try:
                 output = (
@@ -240,18 +250,11 @@ class BaseLLM(BaseLanguageModel, ABC):
                 raise e
             await run_manager.on_llm_end(output, verbose=self.verbose)
             return output
-        params = self.dict()
-        params["stop"] = stop
-        (
-            existing_prompts,
-            llm_string,
-            missing_prompt_idxs,
-            missing_prompts,
-        ) = get_prompts(params, prompts)
         if len(missing_prompts) > 0:
             run_manager = await callback_manager.on_llm_start(
                 {"name": self.__class__.__name__},
                 missing_prompts,
+                invocation_params=params,
             )
             try:
                 new_results = (
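
End to end, a handler like the `InvocationParamsLogger` sketched above receives the
params on every `on_llm_start`. A hedged usage example (the model class, its settings,
and the required API key are placeholders, not part of this diff):

```python
from langchain.llms import OpenAI  # any BaseLLM subclass; needs OPENAI_API_KEY set

handler = InvocationParamsLogger()
llm = OpenAI(temperature=0)

# The callback now also carries the invocation params alongside the prompts.
llm.generate(["Tell me a joke"], stop=["\n"], callbacks=[handler])
print(handler.seen_params[0].get("stop"))  # -> ["\n"]
```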