forked from Archives/langchain
use output parsers in agents (#2987)
parent
8b9e02da9d
commit
e12e00df12
@ -0,0 +1,22 @@
|
||||
import json
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
|
||||
# Marker string the LLM emits once it has produced its final answer;
# its presence switches parsing from "tool action" to "finish" mode.
FINAL_ANSWER_ACTION = "Final Answer:"
||||
class ChatOutputParser(AgentOutputParser):
    """Parse chat-model agent output into an ``AgentAction`` or ``AgentFinish``."""

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Extract either the final answer or a single tool invocation.

        Raises:
            ValueError: if *text* contains neither the final-answer marker
                nor a parseable ```-fenced JSON action blob.
        """
        # The final-answer marker means the agent is done; everything after
        # the marker is the answer itself.
        if FINAL_ANSWER_ACTION in text:
            answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
            return AgentFinish({"output": answer}, text)
        try:
            # Expect exactly one ```-fenced segment holding the JSON blob.
            _, blob, _ = text.split("```")
            parsed = json.loads(blob.strip())
            return AgentAction(parsed["action"], parsed["action_input"], text)

        except Exception:
            raise ValueError(f"Could not parse LLM output: {text}")
|
@ -1,53 +0,0 @@
|
||||
from typing import Any, List, Optional, Sequence
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser, LLMSingleActionAgent
|
||||
from langchain.agents.chat_v2.prompt import (
|
||||
FORMAT_INSTRUCTIONS,
|
||||
PREFIX,
|
||||
SUFFIX,
|
||||
ChatOutputParser,
|
||||
create_prompt,
|
||||
)
|
||||
from langchain.callbacks.base import BaseCallbackManager
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.schema import BaseLanguageModel
|
||||
from langchain.tools import BaseTool
|
||||
|
||||
|
||||
class ChatAgentV2(LLMSingleActionAgent):
    """Chat agent (v2) built on the single-action agent abstraction."""

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: Optional[BaseCallbackManager] = None,
        prefix: str = PREFIX,
        suffix: str = SUFFIX,
        format_instructions: str = FORMAT_INSTRUCTIONS,
        input_variables: Optional[List[str]] = None,
        output_parser: Optional[AgentOutputParser] = None,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> LLMSingleActionAgent:
        """Construct an agent from an LLM and tools."""
        # `or` (not an `is None` check) deliberately replaces falsy values
        # such as an empty stop list with the defaults.
        chosen_stop = stop or ["Observation:"]
        parser = output_parser or ChatOutputParser()
        prompt = create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        )
        chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        return cls(llm_chain=chain, output_parser=parser, stop=chosen_stop, **kwargs)

    @property
    def _agent_type(self) -> str:
        # Serialization of this agent is not supported.
        raise ValueError
|
@ -1,84 +0,0 @@
|
||||
# flake8: noqa
|
||||
import json
|
||||
from langchain.prompts.chat import (
|
||||
HumanMessagePromptTemplate,
|
||||
SystemMessagePromptTemplate,
|
||||
)
|
||||
from langchain.agents.schema import AgentScratchPadChatPromptTemplate
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
from langchain.tools.base import BaseTool
|
||||
from typing import Sequence, Optional, List, Union
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
|
||||
# System-prompt preamble introducing the tool list that follows it.
PREFIX = """Answer the following questions as best you can. You have access to the following tools:"""
# Instructions teaching the model the JSON-blob action format; `{tool_names}`
# is filled in by `create_prompt`, and the quadruple braces survive two
# rounds of `str.format` to emit literal `{{`/`}}` in the final prompt.
FORMAT_INSTRUCTIONS = """The way you use the tools is by specifying a json blob.
Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here).

The only values that should be in the "action" field are: {tool_names}

The $JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. Here is an example of a valid $JSON_BLOB:

```
{{{{
"action": $TOOL_NAME,
"action_input": $INPUT
}}}}
```

ALWAYS use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action:
```
$JSON_BLOB
```
Observation: the result of the action
... (this Thought/Action/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
# Closing reminder; `Final Answer` must match the parser's marker exactly.
SUFFIX = """Begin! Reminder to always use the exact characters `Final Answer` when responding."""
||||
|
||||
|
||||
def create_prompt(
    tools: Sequence[BaseTool],
    prefix: str = PREFIX,
    suffix: str = SUFFIX,
    format_instructions: str = FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]] = None,
) -> BasePromptTemplate:
    """Build the chat prompt template that advertises *tools* to the model.

    The system message is `prefix`, the tool descriptions, the (tool-name
    substituted) format instructions, and `suffix`, joined by blank lines.
    """
    descriptions = "\n".join(f"{tool.name}: {tool.description}" for tool in tools)
    names = ", ".join(tool.name for tool in tools)
    instructions = format_instructions.format(tool_names=names)
    system_template = "\n\n".join([prefix, descriptions, instructions, suffix])
    if input_variables is None:
        input_variables = ["input", "intermediate_steps"]
    return AgentScratchPadChatPromptTemplate(
        input_variables=input_variables,
        messages=[
            SystemMessagePromptTemplate.from_template(system_template),
            HumanMessagePromptTemplate.from_template("{input}\n\n{agent_scratchpad}"),
        ],
    )
|
||||
|
||||
|
||||
class ChatOutputParser(AgentOutputParser):
    """Output parser for the v2 chat agent prompt format."""

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Turn raw LLM text into an ``AgentFinish`` or a single ``AgentAction``.

        Raises:
            ValueError: if no final answer is present and the ```-fenced
                JSON action blob cannot be parsed.
        """
        if "Final Answer:" in text:
            # Return values is generally always a dictionary with a single
            # `output` key; it is not recommended to try anything else.
            final = text.split("Final Answer:")[-1].strip()
            return AgentFinish(return_values={"output": final}, log=text)
        try:
            # Expect exactly one ```-fenced segment containing the blob.
            _, blob, _ = text.split("```")
            payload = json.loads(blob.strip())
            return AgentAction(
                tool=payload["action"],
                tool_input=payload["action_input"],
                log=text,
            )

        except Exception:
            raise ValueError(f"Could not parse LLM output: {text}")
|
@ -0,0 +1,22 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
|
||||
|
||||
class ConvoOutputParser(AgentOutputParser):
    """Parse conversational-agent output ("Action:"/"Action Input:" lines)."""

    # Prefix the model uses when replying directly to the user.
    ai_prefix: str = "AI"

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Return an AgentFinish for direct replies, else the parsed action.

        Raises:
            ValueError: if neither form can be recognized in *text*.
        """
        marker = f"{self.ai_prefix}:"
        if marker in text:
            return AgentFinish({"output": text.split(marker)[-1].strip()}, text)
        match = re.search(r"Action: (.*?)[\n]*Action Input: (.*)", text)
        if match is None:
            raise ValueError(f"Could not parse LLM output: `{text}`")
        tool = match.group(1)
        tool_input = match.group(2)
        # Trim surrounding spaces and quotes the model often adds.
        return AgentAction(tool.strip(), tool_input.strip(" ").strip('"'), text)
|
@ -0,0 +1,33 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents import AgentOutputParser
|
||||
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
|
||||
|
||||
class ConvoOutputParser(AgentOutputParser):
    """Parse conversational-chat agent output: a JSON blob, optionally
    wrapped in a markdown code fence."""

    def get_format_instructions(self) -> str:
        """Return the prompt snippet describing the expected JSON format."""
        return FORMAT_INSTRUCTIONS

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Strip any markdown fencing, parse the JSON, and dispatch.

        Returns an ``AgentFinish`` when the blob's action is
        "Final Answer", otherwise an ``AgentAction``.

        Raises:
            json.JSONDecodeError: if the cleaned text is not valid JSON.
            KeyError: if the JSON lacks `action`/`action_input` keys.
        """
        cleaned_output = text.strip()
        # Drop everything up to and including an opening ```json fence.
        # maxsplit=1 prevents the "too many values to unpack" ValueError
        # the old 2-tuple unpack hit when the marker occurred more than once.
        if "```json" in cleaned_output:
            _, cleaned_output = cleaned_output.split("```json", 1)
        # Keep only the content before the closing ``` fence.
        if "```" in cleaned_output:
            cleaned_output, _ = cleaned_output.split("```", 1)
        # Defensive fallbacks for partially fenced output.
        if cleaned_output.startswith("```json"):
            cleaned_output = cleaned_output[len("```json") :]
        if cleaned_output.startswith("```"):
            cleaned_output = cleaned_output[len("```") :]
        if cleaned_output.endswith("```"):
            cleaned_output = cleaned_output[: -len("```")]
        cleaned_output = cleaned_output.strip()
        response = json.loads(cleaned_output)
        action, action_input = response["action"], response["action_input"]
        if action == "Final Answer":
            return AgentFinish({"output": action_input}, text)
        else:
            return AgentAction(action, action_input, text)
|
@ -0,0 +1,23 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
|
||||
# Marker string the LLM emits once it has produced its final answer;
# its presence switches parsing from "tool action" to "finish" mode.
FINAL_ANSWER_ACTION = "Final Answer:"
||||
class MRKLOutputParser(AgentOutputParser):
    """Parse MRKL-style agent output ("Action:"/"Action Input:" lines)."""

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Return the final answer if present, else the parsed tool action.

        Raises:
            ValueError: if neither form can be recognized in *text*.
        """
        if FINAL_ANSWER_ACTION in text:
            final = text.split(FINAL_ANSWER_ACTION)[-1].strip()
            return AgentFinish({"output": final}, text)
        # \s matches against tab/newline/whitespace; \d* tolerates numbered
        # steps like "Action 2:".
        pattern = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
        found = re.search(pattern, text, re.DOTALL)
        if found is None:
            raise ValueError(f"Could not parse LLM output: `{text}`")
        tool = found.group(1).strip()
        # Trim surrounding spaces and quotes the model often adds.
        tool_input = found.group(2).strip(" ").strip('"')
        return AgentAction(tool, tool_input, text)
|
@ -0,0 +1,24 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
|
||||
|
||||
class ReActOutputParser(AgentOutputParser):
    """Parse ReAct-style output whose last line is ``Action: tool[input]``."""

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Parse the final ``Action:`` line into a finish or an action.

        Raises:
            ValueError: if the last line lacks the action prefix or the
                ``tool[input]`` directive cannot be parsed from it.
        """
        action_prefix = "Action: "
        last_line = text.strip().split("\n")[-1]
        if not last_line.startswith(action_prefix):
            raise ValueError(f"Could not parse LLM Output: {text}")

        action_str = last_line[len(action_prefix) :]
        # Parse out the action and the directive, e.g. `Search[query]`.
        found = re.search(r"(.*?)\[(.*?)\]", action_str)
        if found is None:
            raise ValueError(f"Could not parse action directive: {action_str}")
        tool, directive = found.group(1), found.group(2)
        if tool == "Finish":
            return AgentFinish({"output": directive}, text)
        else:
            return AgentAction(tool, directive, text)
|
@ -0,0 +1,22 @@
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents.agent import AgentOutputParser
|
||||
from langchain.schema import AgentAction, AgentFinish
|
||||
|
||||
|
||||
class SelfAskOutputParser(AgentOutputParser):
    """Parse self-ask-with-search output ("Follow up:" / final answer)."""

    def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
        """Return a finish for a final answer, else an Intermediate Answer action.

        Raises:
            ValueError: if the last line contains neither a follow-up
                question nor the final-answer phrase.
        """
        followup = "Follow up:"
        last_line = text.split("\n")[-1]

        if followup not in last_line:
            finish_string = "So the final answer is: "
            if finish_string not in last_line:
                raise ValueError(f"Could not parse output: {text}")
            return AgentFinish({"output": last_line[len(finish_string) :]}, text)

        # The follow-up question is whatever comes after the last colon.
        after_colon = text.split(":")[-1]

        # Drop a single leading space. startswith avoids the IndexError the
        # old `" " == after_colon[0]` check raised when the text ended in a
        # colon (empty after_colon); behavior is otherwise unchanged.
        if after_colon.startswith(" "):
            after_colon = after_colon[1:]
        return AgentAction("Intermediate Answer", after_colon, text)
|
Loading…
Reference in New Issue