core: remove batch size from llm start callbacks

pull/20863/head
Erick Friis
parent 9efab3ed66
commit 043c998708
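
In brief: this commit deletes the `batch_size` keyword from every `on_chat_model_start` / `on_llm_start` callback invocation in `BaseChatModel` and `BaseLLM`, covering both the streaming paths (where it was hardcoded to `1`) and the batched `generate`/`agenerate` paths (where it was `len(messages)`, `len(prompts)`, or `len(missing_prompts)`).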

@@ -222,7 +222,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
options=options,
name=config.get("run_name"),
run_id=config.pop("run_id", None),
-batch_size=1,
)
generation: Optional[ChatGenerationChunk] = None
try:
@@ -289,7 +288,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
options=options,
name=config.get("run_name"),
run_id=config.pop("run_id", None),
-batch_size=1,
)
generation: Optional[ChatGenerationChunk] = None
try:
@@ -402,7 +400,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
options=options,
name=run_name,
run_id=run_id,
-batch_size=len(messages),
)
results = []
for i, m in enumerate(messages):
@@ -489,7 +486,6 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
invocation_params=params,
options=options,
name=run_name,
-batch_size=len(messages),
run_id=run_id,
)

@@ -435,7 +435,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
options=options,
name=config.get("run_name"),
run_id=config.pop("run_id", None),
-batch_size=1,
)
generation: Optional[GenerationChunk] = None
try:
@@ -496,7 +495,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
options=options,
name=config.get("run_name"),
run_id=config.pop("run_id", None),
-batch_size=1,
)
generation: Optional[GenerationChunk] = None
try:
@@ -793,7 +791,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
invocation_params=params,
options=options,
name=run_name,
-batch_size=len(prompts),
run_id=run_id_,
)[0]
for callback_manager, prompt, run_name, run_id_ in zip(
@@ -812,7 +809,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
invocation_params=params,
options=options,
name=run_name_list[idx],
-batch_size=len(missing_prompts),
)[0]
for idx in missing_prompt_idxs
]
@@ -1006,7 +1002,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
invocation_params=params,
options=options,
name=run_name,
-batch_size=len(prompts),
run_id=run_id_,
)
for callback_manager, prompt, run_name, run_id_ in zip(
@@ -1032,7 +1027,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
invocation_params=params,
options=options,
name=run_name_list[idx],
-batch_size=len(missing_prompts),
)
for idx in missing_prompt_idxs
]
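
A handler that relied on the removed keyword can compute the count itself from the arguments it still receives. The sketch below is illustrative and not part of this commit: `BatchCountingHandler` is a hypothetical name, and the fallback logic is an assumption. Note that in the batched `generate` path each prompt is dispatched in its own `on_llm_start` call, so `len(prompts)` reflects that call's payload rather than the original batch size.

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class BatchCountingHandler(BaseCallbackHandler):
    """Hypothetical handler that previously read ``batch_size`` from kwargs."""

    def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        # Older langchain-core versions forwarded batch_size as a kwarg; after
        # this commit it is absent, so fall back to counting the prompts
        # actually passed to this call.
        batch_size = kwargs.get("batch_size", len(prompts))
        print(f"llm start: {batch_size} prompt(s)")
```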
