From 7cf46b3fee5b33363f3c63ad7863ddebe2bddab5 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Tue, 14 Mar 2023 09:42:24 -0700 Subject: [PATCH] Harrison/convo agent (#1642) --- .../examples/chat_conversation_agent.ipynb | 309 ++++++++++++++++++ langchain/agents/agent.py | 4 +- langchain/agents/chat/base.py | 2 + .../agents/conversational_chat/__init__.py | 1 + langchain/agents/conversational_chat/base.py | 151 +++++++++ .../agents/conversational_chat/prompt.py | 57 ++++ langchain/agents/loading.py | 2 + 7 files changed, 524 insertions(+), 2 deletions(-) create mode 100644 docs/modules/agents/examples/chat_conversation_agent.ipynb create mode 100644 langchain/agents/conversational_chat/__init__.py create mode 100644 langchain/agents/conversational_chat/base.py create mode 100644 langchain/agents/conversational_chat/prompt.py diff --git a/docs/modules/agents/examples/chat_conversation_agent.ipynb b/docs/modules/agents/examples/chat_conversation_agent.ipynb new file mode 100644 index 0000000000..061a5a6019 --- /dev/null +++ b/docs/modules/agents/examples/chat_conversation_agent.ipynb @@ -0,0 +1,309 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "4658d71a", + "metadata": {}, + "source": [ + "# Conversation Agent (for Chat Models)\n", + "\n", + "This notebook walks through using an agent optimized for conversation, using ChatModels. Other agents are often optimized for using tools to figure out the best response, which is not ideal in a conversational setting where you may want the agent to be able to chat with the user as well.\n", + "\n", + "This is accomplished with a specific type of agent (`chat-conversational-react-description`) which expects to be used with a memory component." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "f4f5d1a8", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "os.environ[\"LANGCHAIN_HANDLER\"] = \"langchain\"" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "f65308ab", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.agents import Tool\n", + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.utilities import SerpAPIWrapper\n", + "from langchain.agents import initialize_agent" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "5fb14d6d", + "metadata": {}, + "outputs": [], + "source": [ + "search = SerpAPIWrapper()\n", + "tools = [\n", + " Tool(\n", + " name = \"Current Search\",\n", + " func=search.run,\n", + " description=\"useful for when you need to answer questions about current events or the current state of the world. the input to this should be a single search term.\"\n", + " ),\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "dddc34c4", + "metadata": {}, + "outputs": [], + "source": [ + "memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "cafe9bc1", + "metadata": {}, + "outputs": [], + "source": [ + "llm=ChatOpenAI(temperature=0)\n", + "agent_chain = initialize_agent(tools, llm, agent=\"chat-conversational-react-description\", verbose=True, memory=memory)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "dc70b454", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Final Answer\",\n", + " \"action_input\": \"Hello Bob! 
How can I assist you today?\"\n", + "}\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "'Hello Bob! How can I assist you today?'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_chain.run(input=\"hi, i am bob\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "3dcf7953", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Final Answer\",\n", + " \"action_input\": \"Your name is Bob.\"\n", + "}\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "'Your name is Bob.'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_chain.run(input=\"what's my name?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "aa05f566", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Current Search\",\n", + " \"action_input\": \"Thai food dinner recipes\"\n", + "}\u001b[0m\n", + "Observation: \u001b[36;1m\u001b[1;3m59 easy Thai recipes for any night of the week · Marion Grasby's Thai spicy chilli and basil fried rice · Thai curry noodle soup · Marion Grasby's ...\u001b[0m\n", + "Thought:\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Final Answer\",\n", + " \"action_input\": \"Here are some Thai food dinner recipes you can make this week: Thai spicy chilli and basil fried rice, Thai curry noodle soup, and many more. 
You can find 59 easy Thai recipes for any night of the week on Marion Grasby's website.\"\n", + "}\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "\"Here are some Thai food dinner recipes you can make this week: Thai spicy chilli and basil fried rice, Thai curry noodle soup, and many more. You can find 59 easy Thai recipes for any night of the week on Marion Grasby's website.\"" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_chain.run(\"what are some good dinners to make this week, if i like thai food?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "c5d8b7ea", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m```json\n", + "{\n", + " \"action\": \"Current Search\",\n", + " \"action_input\": \"who won the world cup in 1978\"\n", + "}\n", + "```\u001b[0m\n", + "Observation: \u001b[36;1m\u001b[1;3mThe Argentina national football team represents Argentina in men's international football and is administered by the Argentine Football Association, the governing body for football in Argentina. Nicknamed La Albiceleste, they are the reigning world champions, having won the most recent World Cup in 2022.\u001b[0m\n", + "Thought:\u001b[32;1m\u001b[1;3m```json\n", + "{\n", + " \"action\": \"Final Answer\",\n", + " \"action_input\": \"The last letter in your name is 'b'. The Argentina national football team won the World Cup in 1978.\"\n", + "}\n", + "```\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "\"The last letter in your name is 'b'. 
The Argentina national football team won the World Cup in 1978.\"" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_chain.run(input=\"tell me the last letter in my name, and also tell me who won the world cup in 1978?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f608889b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Current Search\",\n", + " \"action_input\": \"weather in pomfret\"\n", + "}\u001b[0m\n", + "Observation: \u001b[36;1m\u001b[1;3mMostly cloudy with gusty winds developing during the afternoon. A few flurries or snow showers possible. High near 40F. Winds NNW at 20 to 30 mph.\u001b[0m\n", + "Thought:\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Final Answer\",\n", + " \"action_input\": \"The weather in Pomfret is mostly cloudy with gusty winds developing during the afternoon. A few flurries or snow showers are possible. High near 40F. Winds NNW at 20 to 30 mph.\"\n", + "}\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "'The weather in Pomfret is mostly cloudy with gusty winds developing during the afternoon. A few flurries or snow showers are possible. High near 40F. 
Winds NNW at 20 to 30 mph.'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent_chain.run(input=\"whats the weather like in pomfret?\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0084efd6", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/langchain/agents/agent.py b/langchain/agents/agent.py index e3eeb98cf7..cf9096016a 100644 --- a/langchain/agents/agent.py +++ b/langchain/agents/agent.py @@ -19,7 +19,7 @@ from langchain.llms.base import BaseLLM from langchain.prompts.base import BasePromptTemplate from langchain.prompts.few_shot import FewShotPromptTemplate from langchain.prompts.prompt import PromptTemplate -from langchain.schema import AgentAction, AgentFinish +from langchain.schema import AgentAction, AgentFinish, BaseMessage from langchain.tools.base import BaseTool logger = logging.getLogger() @@ -54,7 +54,7 @@ class Agent(BaseModel): def _construct_scratchpad( self, intermediate_steps: List[Tuple[AgentAction, str]] - ) -> str: + ) -> Union[str, List[BaseMessage]]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts = "" for action, observation in intermediate_steps: diff --git a/langchain/agents/chat/base.py b/langchain/agents/chat/base.py index 971d833240..1f843d839c 100644 --- a/langchain/agents/chat/base.py +++ b/langchain/agents/chat/base.py @@ -32,6 +32,8 @@ class ChatAgent(Agent): self, intermediate_steps: List[Tuple[AgentAction, str]] ) -> str: agent_scratchpad = 
super()._construct_scratchpad(intermediate_steps) + if not isinstance(agent_scratchpad, str): + raise ValueError("agent_scratchpad should be of type string.") if agent_scratchpad: return ( f"This was your previous work " diff --git a/langchain/agents/conversational_chat/__init__.py b/langchain/agents/conversational_chat/__init__.py new file mode 100644 index 0000000000..94290c9cb9 --- /dev/null +++ b/langchain/agents/conversational_chat/__init__.py @@ -0,0 +1 @@ +"""An agent designed to hold a conversation in addition to using tools.""" diff --git a/langchain/agents/conversational_chat/base.py b/langchain/agents/conversational_chat/base.py new file mode 100644 index 0000000000..509fe870b9 --- /dev/null +++ b/langchain/agents/conversational_chat/base.py @@ -0,0 +1,151 @@ +"""An agent designed to hold a conversation in addition to using tools.""" +from __future__ import annotations + +import json +from typing import Any, List, Optional, Sequence, Tuple + +from langchain.agents.agent import Agent +from langchain.agents.conversational_chat.prompt import ( + FORMAT_INSTRUCTIONS, + PREFIX, + SUFFIX, + TEMPLATE_TOOL_RESPONSE, +) +from langchain.callbacks.base import BaseCallbackManager +from langchain.chains import LLMChain +from langchain.output_parsers.base import BaseOutputParser +from langchain.prompts.base import BasePromptTemplate +from langchain.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + MessagesPlaceholder, + SystemMessagePromptTemplate, +) +from langchain.schema import ( + AgentAction, + AIMessage, + BaseLanguageModel, + BaseMessage, + HumanMessage, +) +from langchain.tools.base import BaseTool + + +class AgentOutputParser(BaseOutputParser): + def get_format_instructions(self) -> str: + return FORMAT_INSTRUCTIONS + + def parse(self, text: str) -> Any: + cleaned_output = text.strip() + if "```json" in cleaned_output: + _, cleaned_output = cleaned_output.split("```json") + if cleaned_output.startswith("```json"): + cleaned_output = 
cleaned_output[len("```json") :] + if cleaned_output.startswith("```"): + cleaned_output = cleaned_output[len("```") :] + if cleaned_output.endswith("```"): + cleaned_output = cleaned_output[: -len("```")] + cleaned_output = cleaned_output.strip() + response = json.loads(cleaned_output) + return {"action": response["action"], "action_input": response["action_input"]} + + +class ConversationalChatAgent(Agent): + """An agent designed to hold a conversation in addition to using tools.""" + + output_parser: BaseOutputParser + + @property + def observation_prefix(self) -> str: + """Prefix to append the observation with.""" + return "Observation: " + + @property + def llm_prefix(self) -> str: + """Prefix to append the llm call with.""" + return "Thought:" + + @classmethod + def create_prompt( + cls, + tools: Sequence[BaseTool], + system_message: str = PREFIX, + human_message: str = SUFFIX, + input_variables: Optional[List[str]] = None, + output_parser: Optional[BaseOutputParser] = None, + ) -> BasePromptTemplate: + tool_strings = "\n".join( + [f"> {tool.name}: {tool.description}" for tool in tools] + ) + tool_names = ", ".join([tool.name for tool in tools]) + _output_parser = output_parser or AgentOutputParser() + format_instructions = human_message.format( + format_instructions=_output_parser.get_format_instructions() + ) + final_prompt = format_instructions.format( + tool_names=tool_names, tools=tool_strings + ) + if input_variables is None: + input_variables = ["input", "chat_history", "agent_scratchpad"] + messages = [ + SystemMessagePromptTemplate.from_template(system_message), + MessagesPlaceholder(variable_name="chat_history"), + HumanMessagePromptTemplate.from_template(final_prompt), + MessagesPlaceholder(variable_name="agent_scratchpad"), + ] + return ChatPromptTemplate(input_variables=input_variables, messages=messages) + + def _extract_tool_and_input(self, llm_output: str) -> Optional[Tuple[str, str]]: + try: + response = self.output_parser.parse(llm_output) + 
return response["action"], response["action_input"] + except Exception: + raise ValueError(f"Could not parse LLM output: {llm_output}") + + def _construct_scratchpad( + self, intermediate_steps: List[Tuple[AgentAction, str]] + ) -> List[BaseMessage]: + """Construct the scratchpad that lets the agent continue its thought process.""" + thoughts: List[BaseMessage] = [] + for action, observation in intermediate_steps: + thoughts.append(AIMessage(content=action.log)) + human_message = HumanMessage( + content=TEMPLATE_TOOL_RESPONSE.format(observation=observation) + ) + thoughts.append(human_message) + return thoughts + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + system_message: str = PREFIX, + human_message: str = SUFFIX, + input_variables: Optional[List[str]] = None, + output_parser: Optional[BaseOutputParser] = None, + **kwargs: Any, + ) -> Agent: + """Construct an agent from an LLM and tools.""" + cls._validate_tools(tools) + _output_parser = output_parser or AgentOutputParser() + prompt = cls.create_prompt( + tools, + system_message=system_message, + human_message=human_message, + input_variables=input_variables, + output_parser=_output_parser, + ) + llm_chain = LLMChain( + llm=llm, + prompt=prompt, + callback_manager=callback_manager, + ) + tool_names = [tool.name for tool in tools] + return cls( + llm_chain=llm_chain, + allowed_tools=tool_names, + output_parser=_output_parser, + **kwargs, + ) diff --git a/langchain/agents/conversational_chat/prompt.py b/langchain/agents/conversational_chat/prompt.py new file mode 100644 index 0000000000..8c4737bbcf --- /dev/null +++ b/langchain/agents/conversational_chat/prompt.py @@ -0,0 +1,57 @@ +# flake8: noqa +PREFIX = """Assistant is a large language model trained by OpenAI. 
+
+Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
+
+Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
+
+Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist."""
+
+FORMAT_INSTRUCTIONS = """RESPONSE FORMAT INSTRUCTIONS
+----------------------------
+
+When responding to me, please output a response in one of two formats:
+
+**Option 1:**
+Use this if you want the human to use a tool.
+Markdown code snippet formatted in the following schema:
+
+```json
+{{{{
+    "action": string \\ The action to take. Must be one of {tool_names}
+    "action_input": string \\ The input to the action
+}}}}
+```
+
+**Option #2:**
+Use this if you want to respond directly to the human. 
Markdown code snippet formatted in the following schema:
+
+```json
+{{{{
+    "action": "Final Answer",
+    "action_input": string \\ You should put what you want to return to the user here
+}}}}
+```"""
+
+SUFFIX = """TOOLS
+------
+Assistant can ask the user to use tools to look up information that may be helpful in answering the user's original question. The tools the human can use are:
+
+{{tools}}
+
+{format_instructions}
+
+USER'S INPUT
+--------------------
+Here is the user's input (remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else):
+
+{{{{input}}}}"""
+
+TEMPLATE_TOOL_RESPONSE = """TOOL RESPONSE: 
+---------------------
+{observation}
+
+USER'S INPUT
+--------------------
+
+Okay, so what is the response to my original question? If using information from tools, you must say it explicitly - I have forgotten all TOOL RESPONSES! Remember to respond with a markdown code snippet of a json blob with a single action, and NOTHING else."""
diff --git a/langchain/agents/loading.py b/langchain/agents/loading.py
index 7a6bd987de..930ce925f4 100644
--- a/langchain/agents/loading.py
+++ b/langchain/agents/loading.py
@@ -8,6 +8,7 @@ import yaml
 from langchain.agents.agent import Agent
 from langchain.agents.chat.base import ChatAgent
 from langchain.agents.conversational.base import ConversationalAgent
+from langchain.agents.conversational_chat.base import ConversationalChatAgent
 from langchain.agents.mrkl.base import ZeroShotAgent
 from langchain.agents.react.base import ReActDocstoreAgent
 from langchain.agents.self_ask_with_search.base import SelfAskWithSearchAgent
@@ -22,6 +23,7 @@ AGENT_TO_CLASS = {
     "self-ask-with-search": SelfAskWithSearchAgent,
     "conversational-react-description": ConversationalAgent,
     "chat-zero-shot-react-description": ChatAgent,
+    "chat-conversational-react-description": ConversationalChatAgent,
 }
 URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"