|
|
|
@ -1,5 +1,6 @@
|
|
|
|
|
from __future__ import annotations
|
|
|
|
|
|
|
|
|
|
import asyncio
|
|
|
|
|
from abc import ABC, abstractmethod
|
|
|
|
|
from typing import Any, Dict, Generic, List, Optional, TypeVar, Union
|
|
|
|
|
|
|
|
|
@ -27,6 +28,20 @@ class BaseLLMOutputParser(Serializable, Generic[T], ABC):
|
|
|
|
|
Structured output.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
async def aparse_result(self, result: List[Generation]) -> T:
    """Asynchronously parse candidate model Generations into a specific format.

    Delegates to the synchronous ``parse_result`` on the running event
    loop's default thread-pool executor, so subclasses only need to
    override the synchronous variant.

    Args:
        result: A list of Generations to be parsed. The Generations are assumed
            to be different candidate outputs for a single model input.

    Returns:
        Structured output.
    """
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, self.parse_result, result)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class BaseGenerationOutputParser(
|
|
|
|
|
BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]
|
|
|
|
@ -51,6 +66,26 @@ class BaseGenerationOutputParser(
|
|
|
|
|
run_type="parser",
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
async def ainvoke(
    self, input: str | BaseMessage, config: RunnableConfig | None = None
) -> T:
    """Asynchronously parse a single model output.

    A ``BaseMessage`` input is wrapped in a ``ChatGeneration``; a plain
    string is wrapped in a ``Generation``. The wrapped value is then
    handed to ``aparse_result`` under the run-management machinery with
    ``run_type="parser"``.
    """
    if isinstance(input, BaseMessage):
        def to_parsed(inner_input):
            return self.aparse_result([ChatGeneration(message=inner_input)])
    else:
        def to_parsed(inner_input):
            return self.aparse_result([Generation(text=inner_input)])
    return await self._acall_with_config(
        to_parsed, input, config, run_type="parser"
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class BaseOutputParser(BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]):
|
|
|
|
|
"""Base class to parse the output of an LLM call.
|
|
|
|
@ -99,6 +134,26 @@ class BaseOutputParser(BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]
|
|
|
|
|
run_type="parser",
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
async def ainvoke(
    self, input: str | BaseMessage, config: RunnableConfig | None = None
) -> T:
    """Asynchronously parse a single model output into structured form.

    Chat-message inputs are boxed as a ``ChatGeneration`` and string
    inputs as a ``Generation`` before being passed to ``aparse_result``
    through the run-management layer with ``run_type="parser"``.
    """
    if isinstance(input, BaseMessage):
        wrapper = lambda inner_input: self.aparse_result(
            [ChatGeneration(message=inner_input)]
        )
    else:
        wrapper = lambda inner_input: self.aparse_result(
            [Generation(text=inner_input)]
        )
    return await self._acall_with_config(
        wrapper, input, config, run_type="parser"
    )
|
|
|
|
|
|
|
|
|
|
def parse_result(self, result: List[Generation]) -> T:
|
|
|
|
|
"""Parse a list of candidate model Generations into a specific format.
|
|
|
|
|
|
|
|
|
@ -125,6 +180,32 @@ class BaseOutputParser(BaseLLMOutputParser, Runnable[Union[str, BaseMessage], T]
|
|
|
|
|
Structured output.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
async def aparse_result(self, result: List[Generation]) -> T:
    """Asynchronously parse candidate model Generations into a specific format.

    The return value is parsed from only the first Generation in the
    result, which is assumed to be the highest-likelihood Generation.

    Args:
        result: A list of Generations to be parsed. The Generations are assumed
            to be different candidate outputs for a single model input.

    Returns:
        Structured output.
    """
    best = result[0]
    return await self.aparse(best.text)
|
|
|
|
|
|
|
|
|
|
async def aparse(self, text: str) -> T:
    """Asynchronously parse a single string model output into some structure.

    Runs the synchronous ``parse`` on the running loop's default
    thread-pool executor so the event loop is not blocked.

    Args:
        text: String output of a language model.

    Returns:
        Structured output.
    """
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, self.parse, text)
|
|
|
|
|
|
|
|
|
|
# TODO: rename 'completion' -> 'text'.
|
|
|
|
|
def parse_with_prompt(self, completion: str, prompt: PromptValue) -> Any:
|
|
|
|
|
"""Parse the output of an LLM call with the input prompt for context.
|
|
|
|
|