mirror of https://github.com/corca-ai/EVAL
feat: agent manager manage memory by key
parent
7242023eaa
commit
28ed048240
@ -0,0 +1,126 @@
|
||||
from typing import Any, List, Optional, Sequence, Tuple
|
||||
|
||||
from langchain.agents.agent import Agent
|
||||
from langchain.callbacks.base import BaseCallbackManager
|
||||
from langchain.chains import LLMChain
|
||||
from langchain.output_parsers.base import BaseOutputParser
|
||||
from langchain.prompts.base import BasePromptTemplate
|
||||
from langchain.prompts.chat import (
|
||||
ChatPromptTemplate,
|
||||
HumanMessagePromptTemplate,
|
||||
MessagesPlaceholder,
|
||||
SystemMessagePromptTemplate,
|
||||
)
|
||||
from langchain.schema import (
|
||||
AgentAction,
|
||||
AIMessage,
|
||||
BaseLanguageModel,
|
||||
BaseMessage,
|
||||
HumanMessage,
|
||||
)
|
||||
from langchain.tools.base import BaseTool
|
||||
|
||||
from prompts.input import EVAL_TOOL_RESPONSE
|
||||
|
||||
|
||||
class ConversationalChatAgent(Agent):
    """An agent designed to hold a conversation in addition to using tools."""

    # Parser that turns the raw LLM completion into a mapping with
    # "action" and "action_input" keys (see _extract_tool_and_input).
    output_parser: BaseOutputParser

    @property
    def _agent_type(self) -> str:
        # This agent is not registered for langchain serialization.
        raise NotImplementedError

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Observation: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the llm call with."""
        return "Thought: "

    @classmethod
    def create_prompt(
        cls,
        tools: Sequence[BaseTool],
        system_message: str,
        human_message: str,
        output_parser: BaseOutputParser,
        input_variables: Optional[List[str]] = None,
    ) -> BasePromptTemplate:
        """Build the chat prompt from the system/human templates and tools.

        Args:
            tools: Tools whose names/descriptions are injected into the prompt.
            system_message: Template for the leading system message.
            human_message: Template containing a ``{format_instructions}`` slot.
            output_parser: Supplies the format instructions (which themselves
                contain ``{tool_names}`` and ``{tools}`` slots).
            input_variables: Prompt input variables; defaults to
                ``["input", "chat_history", "agent_scratchpad"]``.

        Returns:
            A ChatPromptTemplate with chat history and scratchpad placeholders.
        """
        tool_strings = "\n".join(
            f"> {tool.name}: {tool.description}" for tool in tools
        )
        tool_names = ", ".join(tool.name for tool in tools)
        # Two-stage formatting: first fill {format_instructions} in the human
        # template, then fill the {tool_names}/{tools} slots that the format
        # instructions themselves carry.
        format_instructions = human_message.format(
            format_instructions=output_parser.get_format_instructions()
        )
        final_prompt = format_instructions.format(
            tool_names=tool_names, tools=tool_strings
        )
        if input_variables is None:
            input_variables = ["input", "chat_history", "agent_scratchpad"]
        messages = [
            SystemMessagePromptTemplate.from_template(system_message),
            MessagesPlaceholder(variable_name="chat_history"),
            HumanMessagePromptTemplate.from_template(final_prompt),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
        return ChatPromptTemplate(input_variables=input_variables, messages=messages)

    def _extract_tool_and_input(self, llm_output: str) -> Optional[Tuple[str, str]]:
        """Parse the LLM output into a ``(tool_name, tool_input)`` pair.

        Raises:
            ValueError: If the output cannot be parsed. The underlying parser
                error is chained as the cause so it is not lost.
        """
        try:
            response = self.output_parser.parse(llm_output)
            return response["action"], response["action_input"]
        except Exception as e:
            # Chain the original error (PEP 3134) instead of swallowing it.
            raise ValueError(f"Could not parse LLM output: {llm_output}") from e

    def _construct_scratchpad(
        self, intermediate_steps: List[Tuple[AgentAction, str]]
    ) -> List[BaseMessage]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts: List[BaseMessage] = []
        for action, observation in intermediate_steps:
            # Replay the agent's own reasoning as an AI message, then feed the
            # tool's observation back as a templated human message.
            thoughts.append(AIMessage(content=action.log))
            human_message = HumanMessage(
                content=EVAL_TOOL_RESPONSE.format(observation=observation)
            )
            thoughts.append(human_message)
        return thoughts

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        system_message: str,
        human_message: str,
        output_parser: BaseOutputParser,
        callback_manager: Optional[BaseCallbackManager] = None,
        input_variables: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools."""
        cls._validate_tools(tools)
        prompt = cls.create_prompt(
            tools,
            system_message=system_message,
            human_message=human_message,
            input_variables=input_variables,
            output_parser=output_parser,
        )
        llm_chain = LLMChain(
            llm=llm,
            prompt=prompt,
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=output_parser,
            **kwargs,
        )
|
@ -0,0 +1,47 @@
|
||||
from typing import Dict
|
||||
|
||||
from langchain.agents.agent import Agent, AgentExecutor
|
||||
from langchain.chains.conversation.memory import ConversationBufferMemory
|
||||
from langchain.memory.chat_memory import BaseChatMemory
|
||||
|
||||
from tools.base import BaseToolSet
|
||||
|
||||
from .builder import AgentBuilder
|
||||
|
||||
|
||||
class AgentManager:
    """Manages one AgentExecutor per conversation key.

    Each executor gets its own ConversationBufferMemory, so conversations
    identified by different keys do not share chat history; the underlying
    agent and tools are shared across all executors.
    """

    def __init__(self, agent: Agent, tools: list[BaseToolSet]):
        self.agent: Agent = agent
        self.tools: list[BaseToolSet] = tools
        # One executor per key, created lazily by get_or_create_executor.
        self.executors: Dict[str, AgentExecutor] = {}

    def create_memory(self) -> BaseChatMemory:
        """Create a fresh, isolated conversation memory for one executor."""
        return ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    def create_executor(self) -> AgentExecutor:
        """Build a new executor wired to the shared agent/tools with fresh memory."""
        memory: BaseChatMemory = self.create_memory()
        return AgentExecutor.from_agent_and_tools(
            agent=self.agent,
            tools=self.tools,
            memory=memory,
        )

    def remove_executor(self, key: str) -> None:
        """Drop the executor (and its memory) for *key*; no-op if absent."""
        # pop with a default avoids the check-then-delete double lookup.
        self.executors.pop(key, None)

    def get_or_create_executor(self, key: str) -> AgentExecutor:
        """Return the executor for *key*, creating and caching it on first use."""
        if key not in self.executors:
            self.executors[key] = self.create_executor()
        return self.executors[key]

    @staticmethod
    def create(toolsets: list[BaseToolSet]) -> "AgentManager":
        """Assemble an AgentManager from *toolsets* via the AgentBuilder steps."""
        builder = AgentBuilder()
        builder.build_llm()
        builder.build_parser()
        builder.build_tools(toolsets)
        agent = builder.get_agent()
        tools = builder.get_tools()

        return AgentManager(agent, tools)
|
Loading…
Reference in New Issue