From 6be67279fba808f9b67ebaa18fc62140e3672471 Mon Sep 17 00:00:00 2001
From: Tim Asp <707699+timothyasp@users.noreply.github.com>
Date: Wed, 29 Mar 2023 22:12:50 -0700
Subject: [PATCH] Add apredict_and_parse to LLM (#2164)

`predict_and_parse` exists, and it's a nice abstraction to allow for
applying output parsers to LLM generations. And async is very useful.

As an aside, the difference between `call/acall`, `predict/apredict` and
`generate/agenerate` isn't entirely clear to me other than they all call
into the LLM in slightly different ways. Is there some documentation or
a good way to think about these differences?

One thought: output parsers should just work magically for all those LLM
calls. If the `output_parser` arg is set on the prompt, the LLM has
access, so it seems like extra work on the user's end to have to call
`output_parser.parse`

If this sounds reasonable, happy to throw something together. @hwchase17
---
 langchain/chains/llm.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/langchain/chains/llm.py b/langchain/chains/llm.py
index 46b3cfb7ce..62cc7a9112 100644
--- a/langchain/chains/llm.py
+++ b/langchain/chains/llm.py
@@ -174,6 +174,16 @@ class LLMChain(Chain, BaseModel):
         else:
             return result
 
+    async def apredict_and_parse(
+        self, **kwargs: Any
+    ) -> Union[str, List[str], Dict[str, str]]:
+        """Call apredict and then parse the results."""
+        result = await self.apredict(**kwargs)
+        if self.prompt.output_parser is not None:
+            return self.prompt.output_parser.parse(result)
+        else:
+            return result
+
     def apply_and_parse(
         self, input_list: List[Dict[str, Any]]
     ) -> Sequence[Union[str, List[str], Dict[str, str]]]: