forked from Archives/langchain
Output parsing variation allowance (#5178)
# Output parsing variation allowance for self-ask with search

This change makes the self-ask-with-search output format easier for Llama models to follow, as they tend to return 'Followup:' instead of 'Follow up:' despite an otherwise valid remaining output.

Co-authored-by: Dev 2049 <dev.dev2049@gmail.com>
This commit is contained in:
parent
c173bf1c62
commit
d8eed6018f
@ -1,24 +1,21 @@
|
||||
from typing import Union
|
||||
from typing import Sequence, Union
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
from langchain.schema import AgentAction, AgentFinish, OutputParserException
|
||||
|
||||
|
||||
class SelfAskOutputParser(AgentOutputParser):
    """Parse self-ask-with-search agent output into an action or a finish.

    Tolerates minor phrasing variation on the follow-up marker: both
    "Follow up:" and "Followup:" are accepted, since some models (e.g.
    Llama) emit the latter while the rest of the output is valid.
    """

    # Prefixes on the last output line that signal a follow-up question.
    followups: Sequence[str] = ("Follow up:", "Followup:")
    # Prefix on the last output line that signals the final answer.
    finish_string: str = "So the final answer is: "

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Map raw model output to an ``AgentAction`` (run a follow-up
        search) or an ``AgentFinish`` (final answer).

        Args:
            text: The full text emitted by the model; only the last line
                is inspected for the follow-up / finish markers.

        Raises:
            OutputParserException: If the last line contains neither a
                follow-up prefix nor the finish string.
        """
        last_line = text.split("\n")[-1]
        # Generator (not a list) so any() can short-circuit on first match.
        if not any(follow in last_line for follow in self.followups):
            if self.finish_string not in last_line:
                raise OutputParserException(f"Could not parse output: {text}")
            return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)

        # The follow-up query is everything after the final colon.
        after_colon = text.split(":")[-1].strip()
        return AgentAction("Intermediate Answer", after_colon, text)
@property
|
||||
|
Loading…
Reference in New Issue
Block a user