add invocation params as extra params in llm callbacks (#4506)
# Your PR Title (What it does)

<!-- Thank you for contributing to LangChain! Your PR will appear in our next release under the title you set. Please make sure it highlights your valuable contribution.

Replace this with a description of the change, the issue it fixes (if applicable), and relevant context. List any dependencies required for this change.

After you're done, someone will review your PR. They may suggest improvements. If no one reviews your PR within a few days, feel free to @-mention the same people again, as notifications can get lost. -->

<!-- Remove if not applicable -->

Fixes # (issue)

## Before submitting

<!-- If you're adding a new integration, include an integration test and an example notebook showing its use! -->

## Who can review?

Community members can review the PR once tests pass. Tag maintainers/contributors who might be interested:

<!-- For a quicker response, figure out the right person to tag with @

@hwchase17 - project lead

Tracing / Callbacks
- @agola11

Async
- @agola11

DataLoader Abstractions
- @eyurtsev

LLM/Chat Wrappers
- @hwchase17
- @agola11

Tools / Toolkits
- @vowelparrot
-->
commit 59853fc876 (parent 1c0ec26e40)
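This PR threads the LLM's serialized parameters (its identifying params plus the `stop` sequences for the call) into the `on_llm_start` callback as a new `invocation_params` keyword argument, in both `generate` and `agenerate`. Below is a minimal sketch of a handler that picks up the new kwarg; the handler class name is invented for illustration, and it assumes an OpenAI API key is configured in the environment:

```python
from typing import Any, Dict, List

from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms import OpenAI


class InvocationParamsLogger(BaseCallbackHandler):
    """Illustrative handler: print the params each LLM call was made with."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        # With this PR, the serialized model params (including "stop")
        # are forwarded here as the "invocation_params" keyword argument.
        print("invocation_params:", kwargs.get("invocation_params"))


llm = OpenAI(temperature=0, callbacks=[InvocationParamsLogger()])
llm("Say hello.", stop=["\n"])
```

Handlers that ignore the new argument keep working unchanged, since `on_llm_start` already accepts `**kwargs`.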
@@ -149,6 +149,14 @@ class BaseLLM(BaseLanguageModel, ABC):
                 "Argument 'prompts' is expected to be of type List[str], received"
                 f" argument of type {type(prompts)}."
             )
+        params = self.dict()
+        params["stop"] = stop
+        (
+            existing_prompts,
+            llm_string,
+            missing_prompt_idxs,
+            missing_prompts,
+        ) = get_prompts(params, prompts)
         disregard_cache = self.cache is not None and not self.cache
         callback_manager = CallbackManager.configure(
             callbacks, self.callbacks, self.verbose
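This hunk moves the `params` / `get_prompts` computation to the top of `generate` (the later duplicate is deleted in a following hunk), so the same `params` dict is available when `on_llm_start` fires. Roughly, `params` is the model's identifying parameters plus the per-call `stop` list; the snippet below only illustrates its shape (keys and values vary by model class, and it assumes an OpenAI API key is set):

```python
from langchain.llms import OpenAI

llm = OpenAI(temperature=0, max_tokens=16)

params = llm.dict()      # the model's identifying params plus its "_type"
params["stop"] = ["\n"]  # plus the stop sequences for this particular call
print(params)
# Illustrative output only, e.g.:
# {'model_name': 'text-davinci-003', 'temperature': 0.0, 'max_tokens': 16, ...,
#  '_type': 'openai', 'stop': ['\n']}
```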
@@ -163,7 +171,7 @@ class BaseLLM(BaseLanguageModel, ABC):
                     "Asked to cache, but no cache found at `langchain.cache`."
                 )
             run_manager = callback_manager.on_llm_start(
-                {"name": self.__class__.__name__}, prompts
+                {"name": self.__class__.__name__}, prompts, invocation_params=params
             )
             try:
                 output = (
@@ -176,17 +184,11 @@ class BaseLLM(BaseLanguageModel, ABC):
                 raise e
             run_manager.on_llm_end(output)
             return output
-        params = self.dict()
-        params["stop"] = stop
-        (
-            existing_prompts,
-            llm_string,
-            missing_prompt_idxs,
-            missing_prompts,
-        ) = get_prompts(params, prompts)
         if len(missing_prompts) > 0:
             run_manager = callback_manager.on_llm_start(
-                {"name": self.__class__.__name__}, missing_prompts
+                {"name": self.__class__.__name__},
+                missing_prompts,
+                invocation_params=params,
             )
             try:
                 new_results = (
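On the cached path, `on_llm_start` only fires for prompts that miss the cache, and with this change it also receives the same `invocation_params`. A self-contained sketch of that behavior using the in-memory cache (handler name invented for illustration; an OpenAI API key is assumed):

```python
from typing import Any, Dict, List

import langchain
from langchain.cache import InMemoryCache
from langchain.callbacks.base import BaseCallbackHandler
from langchain.llms import OpenAI


class MissingPromptsLogger(BaseCallbackHandler):
    """Illustrative handler: show which prompts missed the cache and with what params."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        print("prompts sent to the model:", prompts)
        print("invocation_params:", kwargs.get("invocation_params"))


langchain.llm_cache = InMemoryCache()
llm = OpenAI(temperature=0, callbacks=[MissingPromptsLogger()])

llm("Tell me a joke.")  # cache miss: the handler fires with this prompt and the params
llm("Tell me a joke.")  # cache hit: no missing prompts, so on_llm_start is not called
```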
@@ -213,6 +215,14 @@ class BaseLLM(BaseLanguageModel, ABC):
         callbacks: Callbacks = None,
     ) -> LLMResult:
         """Run the LLM on the given prompt and input."""
+        params = self.dict()
+        params["stop"] = stop
+        (
+            existing_prompts,
+            llm_string,
+            missing_prompt_idxs,
+            missing_prompts,
+        ) = get_prompts(params, prompts)
         disregard_cache = self.cache is not None and not self.cache
         callback_manager = AsyncCallbackManager.configure(
             callbacks, self.callbacks, self.verbose
@@ -227,7 +237,7 @@ class BaseLLM(BaseLanguageModel, ABC):
                     "Asked to cache, but no cache found at `langchain.cache`."
                 )
             run_manager = await callback_manager.on_llm_start(
-                {"name": self.__class__.__name__}, prompts
+                {"name": self.__class__.__name__}, prompts, invocation_params=params
             )
             try:
                 output = (
@@ -240,18 +250,11 @@ class BaseLLM(BaseLanguageModel, ABC):
                 raise e
             await run_manager.on_llm_end(output, verbose=self.verbose)
             return output
-        params = self.dict()
-        params["stop"] = stop
-        (
-            existing_prompts,
-            llm_string,
-            missing_prompt_idxs,
-            missing_prompts,
-        ) = get_prompts(params, prompts)
         if len(missing_prompts) > 0:
             run_manager = await callback_manager.on_llm_start(
                 {"name": self.__class__.__name__},
                 missing_prompts,
+                invocation_params=params,
             )
             try:
                 new_results = (
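The async hunks mirror the sync ones: `agenerate` now passes the same `invocation_params` through `AsyncCallbackManager.on_llm_start`. A hedged sketch of an async handler consuming it (class and function names invented for illustration; an OpenAI API key is assumed):

```python
import asyncio
from typing import Any, Dict, List

from langchain.callbacks.base import AsyncCallbackHandler
from langchain.llms import OpenAI


class AsyncInvocationParamsLogger(AsyncCallbackHandler):
    """Illustrative async handler: record the invocation params of each call."""

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        print("invocation_params:", kwargs.get("invocation_params"))


async def main() -> None:
    llm = OpenAI(temperature=0)
    await llm.agenerate(
        ["Say hello."], stop=["\n"], callbacks=[AsyncInvocationParamsLogger()]
    )


asyncio.run(main())
```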