From f329196cf40a5b1c6979d6b8cf43649f34a1217d Mon Sep 17 00:00:00 2001 From: Zander Chase <130414180+vowelparrot@users.noreply.github.com> Date: Tue, 18 Apr 2023 21:41:03 -0700 Subject: [PATCH] Agents 4 18 (#3122) Creating an experimental agents folder, containing BabyAGI, AutoGPT, and later, other examples --------- Co-authored-by: Rahul Behal Co-authored-by: Harrison Chase --- docs/use_cases/agent_simulations.md | 15 + .../camel_role_playing.ipynb | 693 +++++++++ .../agent_simulations/characters.ipynb | 1261 +++++++++++++++++ docs/use_cases/autonomous_agents.md | 20 + .../use_cases/autonomous_agents/autogpt.ipynb | 454 ++++++ .../autonomous_agents/baby_agi.ipynb | 257 ++++ .../baby_agi_with_agent.ipynb | 387 +++++ docs/use_cases/personal_assistants.md | 4 - langchain/agents/tools.py | 10 + langchain/experimental/__init__.py | 3 + .../autonomous_agents/__init__.py | 3 + .../autonomous_agents/autogpt/__init__.py | 0 .../autonomous_agents/autogpt/agent.py | 135 ++ .../autonomous_agents/autogpt/memory.py | 30 + .../autogpt/output_parser.py | 50 + .../autonomous_agents/autogpt/prompt.py | 75 + .../autogpt/prompt_generator.py | 186 +++ .../autonomous_agents/baby_agi/__init__.py | 17 + .../autonomous_agents/baby_agi/baby_agi.py | 181 +++ .../baby_agi/task_creation.py | 30 + .../baby_agi/task_execution.py | 21 + .../baby_agi/task_prioritization.py | 24 + langchain/tools/base.py | 25 +- langchain/tools/file_management/read.py | 2 +- langchain/tools/file_management/write.py | 2 +- 25 files changed, 3867 insertions(+), 18 deletions(-) create mode 100644 docs/use_cases/agent_simulations.md create mode 100644 docs/use_cases/agent_simulations/camel_role_playing.ipynb create mode 100644 docs/use_cases/agent_simulations/characters.ipynb create mode 100644 docs/use_cases/autonomous_agents.md create mode 100644 docs/use_cases/autonomous_agents/autogpt.ipynb create mode 100644 docs/use_cases/autonomous_agents/baby_agi.ipynb create mode 100644 docs/use_cases/autonomous_agents/baby_agi_with_agent.ipynb create mode 100644 langchain/experimental/__init__.py create mode 100644 langchain/experimental/autonomous_agents/__init__.py create mode 100644 langchain/experimental/autonomous_agents/autogpt/__init__.py create mode 100644 langchain/experimental/autonomous_agents/autogpt/agent.py create mode 100644 langchain/experimental/autonomous_agents/autogpt/memory.py create mode 100644 langchain/experimental/autonomous_agents/autogpt/output_parser.py create mode 100644 langchain/experimental/autonomous_agents/autogpt/prompt.py create mode 100644 langchain/experimental/autonomous_agents/autogpt/prompt_generator.py create mode 100644 langchain/experimental/autonomous_agents/baby_agi/__init__.py create mode 100644 langchain/experimental/autonomous_agents/baby_agi/baby_agi.py create mode 100644 langchain/experimental/autonomous_agents/baby_agi/task_creation.py create mode 100644 langchain/experimental/autonomous_agents/baby_agi/task_execution.py create mode 100644 langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py diff --git a/docs/use_cases/agent_simulations.md b/docs/use_cases/agent_simulations.md new file mode 100644 index 00000000..bcbcc3aa --- /dev/null +++ b/docs/use_cases/agent_simulations.md @@ -0,0 +1,15 @@ +# Agent Simulations + +Agent simulations involve one or more agents interacting with each other. 
+Agent simulations generally involve two main components: + +- Long Term Memory +- Simulation Environment + +Specific implementations of agent simulations (or parts of agent simulations) include: + +## CAMEL +- [CAMEL](agent_simulations/camel_role_playing.ipynb): an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with each other. + +## Generative Agents +- [Generative Agents](agent_simulations/characters.ipynb): This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park et al. diff --git a/docs/use_cases/agent_simulations/camel_role_playing.ipynb b/docs/use_cases/agent_simulations/camel_role_playing.ipynb new file mode 100644 index 00000000..ef5a0fa3 --- /dev/null +++ b/docs/use_cases/agent_simulations/camel_role_playing.ipynb @@ -0,0 +1,693 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CAMEL Role-Playing Autonomous Cooperative Agents\n", + "\n", + "This is a LangChain implementation of the paper \"CAMEL: Communicative Agents for “Mind” Exploration of Large Scale Language Model Society\".\n", + "\n", + "Overview:\n", + "\n", + "The rapid advancement of conversational and chat-based language models has led to remarkable progress in complex task-solving. However, their success heavily relies on human input to guide the conversation, which can be challenging and time-consuming. This paper explores the potential of building scalable techniques to facilitate autonomous cooperation among communicative agents and provide insight into their \"cognitive\" processes. To address the challenges of achieving autonomous cooperation, we propose a novel communicative agent framework named role-playing. Our approach involves using inception prompting to guide chat agents toward task completion while maintaining consistency with human intentions. We showcase how role-playing can be used to generate conversational data for studying the behaviors and capabilities of chat agents, providing a valuable resource for investigating conversational language models. 
Our contributions include introducing a novel communicative agent framework, offering a scalable approach for studying the cooperative behaviors and capabilities of multi-agent systems, and open-sourcing our library to support research on communicative agents and beyond.\n", + "\n", + "The original implementation: https://github.com/lightaime/camel\n", + "\n", + "Project website: https://www.camel-ai.org/\n", + "\n", + "arXiv paper: https://arxiv.org/abs/2303.17760\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Import LangChain-related modules " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.prompts.chat import (\n", + " SystemMessagePromptTemplate,\n", + " HumanMessagePromptTemplate,\n", + ")\n", + "from langchain.schema import (\n", + " AIMessage,\n", + " HumanMessage,\n", + " SystemMessage,\n", + " BaseMessage,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define a CAMEL agent helper class" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "class CAMELAgent:\n", + "\n", + " def __init__(\n", + " self,\n", + " system_message: SystemMessage,\n", + " model: ChatOpenAI,\n", + " ) -> None:\n", + " self.system_message = system_message\n", + " self.model = model\n", + " self.init_messages()\n", + "\n", + " def reset(self) -> None:\n", + " self.init_messages()\n", + " return self.stored_messages\n", + "\n", + " def init_messages(self) -> None:\n", + " self.stored_messages = [self.system_message]\n", + "\n", + " def update_messages(self, message: BaseMessage) -> List[BaseMessage]:\n", + " self.stored_messages.append(message)\n", + " return self.stored_messages\n", + "\n", + " def step(\n", + " self,\n", + " input_message: HumanMessage,\n", + " ) -> AIMessage:\n", + " messages = self.update_messages(input_message)\n", + "\n", + " output_message = self.model(messages)\n", + " self.update_messages(output_message)\n", + "\n", + " return output_message\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set up the OpenAI API key, roles, and task for role-playing" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", + "\n", + "assistant_role_name = \"Python Programmer\"\n", + "user_role_name = \"Stock Trader\"\n", + "task = \"Develop a trading bot for the stock market\"\n", + "word_limit = 50 # word limit for task brainstorming" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a task specifier agent for brainstorming and get the specified task" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Specified task: Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n" + ] + } + ], + "source": [ + "task_specifier_sys_msg = SystemMessage(content=\"You can make a task more specific.\")\n", + "task_specifier_prompt = (\n", + "\"\"\"Here is a task that {assistant_role_name} will 
help {user_role_name} to complete: {task}.\n", + "Please make it more specific. Be creative and imaginative.\n", + "Please reply with the specified task in {word_limit} words or less. Do not add anything else.\"\"\"\n", + ")\n", + "task_specifier_template = HumanMessagePromptTemplate.from_template(template=task_specifier_prompt)\n", + "task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))\n", + "task_specifier_msg = task_specifier_template.format_messages(assistant_role_name=assistant_role_name,\n", + " user_role_name=user_role_name,\n", + " task=task, word_limit=word_limit)[0]\n", + "specified_task_msg = task_specify_agent.step(task_specifier_msg)\n", + "print(f\"Specified task: {specified_task_msg.content}\")\n", + "specified_task = specified_task_msg.content" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create inception prompts for AI assistant and AI user for role-playing" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "assistant_inception_prompt = (\n", + "\"\"\"Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!\n", + "We share a common interest in collaborating to successfully complete a task.\n", + "You must help me to complete the task.\n", + "Here is the task: {task}. Never forget our task!\n", + "I must instruct you based on your expertise and my needs to complete the task.\n", + "\n", + "I must give you one instruction at a time.\n", + "You must write a specific solution that appropriately completes the requested instruction.\n", + "You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.\n", + "Do not add anything else other than your solution to my instruction.\n", + "You are never supposed to ask me any questions you only answer questions.\n", + "You are never supposed to reply with a flake solution. Explain your solutions.\n", + "Your solution must be declarative sentences and simple present tense.\n", + "Unless I say the task is completed, you should always start with:\n", + "\n", + "Solution: <YOUR_SOLUTION>\n", + "\n", + "<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.\n", + "Always end with: Next request.\"\"\"\n", + ")\n", + "\n", + "user_inception_prompt = (\n", + "\"\"\"Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.\n", + "We share a common interest in collaborating to successfully complete a task.\n", + "I must help you to complete the task.\n", + "Here is the task: {task}. Never forget our task!\n", + "You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:\n", + "\n", + "1. Instruct with a necessary input:\n", + "Instruction: <YOUR_INSTRUCTION>\n", + "Input: <YOUR_INPUT>\n", + "\n", + "2. Instruct without any input:\n", + "Instruction: <YOUR_INSTRUCTION>\n", + "Input: None\n", + "\n", + "The \"Instruction\" describes a task or question. 
The paired \"Input\" provides further context or information for the requested \"Instruction\".\n", + "\n", + "You must give me one instruction at a time.\n", + "I must write a response that appropriately completes the requested instruction.\n", + "I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.\n", + "You should instruct me not ask me questions.\n", + "Now you must start to instruct me using the two ways described above.\n", + "Do not add anything else other than your instruction and the optional corresponding input!\n", + "Keep giving me instructions and necessary inputs until you think the task is completed.\n", + "When the task is completed, you must only reply with a single word .\n", + "Never say unless my responses have solved your task.\"\"\"\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a helper helper to get system messages for AI assistant and AI user from role names and the task" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):\n", + " \n", + " assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt)\n", + " assistant_sys_msg = assistant_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0]\n", + " \n", + " user_sys_template = SystemMessagePromptTemplate.from_template(template=user_inception_prompt)\n", + " user_sys_msg = user_sys_template.format_messages(assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task)[0]\n", + " \n", + " return assistant_sys_msg, user_sys_msg" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create AI assistant agent and AI user agent from obtained system messages" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, user_role_name, specified_task)\n", + "assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))\n", + "user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))\n", + "\n", + "# Reset agents\n", + "assistant_agent.reset()\n", + "user_agent.reset()\n", + "\n", + "# Initialize chats \n", + "assistant_msg = HumanMessage(\n", + " content=(f\"{user_sys_msg.content}. \"\n", + " \"Now start to give me introductions one by one. \"\n", + " \"Only reply with Instruction and Input.\"))\n", + "\n", + "user_msg = HumanMessage(content=f\"{assistant_sys_msg.content}\")\n", + "user_msg = assistant_agent.step(user_msg)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Start role-playing session to solve the task!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original task prompt:\n", + "Develop a trading bot for the stock market\n", + "\n", + "Specified task prompt:\n", + "Develop a Python-based swing trading bot that scans market trends, monitors stocks, and generates trading signals to help a stock trader to place optimal buy and sell orders with defined stop losses and profit targets.\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Install the necessary Python libraries for data analysis and trading.\n", + "Input: None\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can install the necessary Python libraries using pip, a package installer for Python. We can install pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following command to install these libraries:\n", + "\n", + "```\n", + "pip install pandas numpy matplotlib ta-lib\n", + "```\n", + "\n", + "Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Import the necessary libraries in the Python script.\n", + "Input: None\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can import the necessary libraries in the Python script using the import statement. We need to import pandas, numpy, matplotlib, and ta-lib for data analysis and trading. We can use the following code to import these libraries:\n", + "\n", + "```\n", + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import talib as ta\n", + "```\n", + "\n", + "Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Load historical stock data into a pandas DataFrame.\n", + "Input: The path to the CSV file containing the historical stock data.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can load historical stock data into a pandas DataFrame using the `read_csv()` function from pandas. We need to pass the path to the CSV file containing the historical stock data as an argument to this function. We can use the following code to load the historical stock data:\n", + "\n", + "```\n", + "df = pd.read_csv('path/to/csv/file.csv')\n", + "```\n", + "\n", + "This will load the historical stock data into a pandas DataFrame called `df`. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can preprocess the historical stock data by setting the date column as the index and sorting the DataFrame in ascending order by date using the `set_index()` and `sort_index()` functions from pandas. We can use the following code to preprocess the historical stock data:\n", + "\n", + "```\n", + "df = df.set_index('date')\n", + "df = df.sort_index(ascending=True)\n", + "```\n", + "\n", + "This will set the date column as the index and sort the DataFrame in ascending order by date. 
Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib.\n", + "Input: The period for the short-term moving average and the period for the long-term moving average.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can calculate the short-term and long-term moving averages for the stock data using the `ta.SMA()` function from ta-lib. We need to pass the period for the short-term moving average and the period for the long-term moving average as arguments to this function. We can use the following code to calculate the short-term and long-term moving averages:\n", + "\n", + "```\n", + "short_ma = ta.SMA(df['close'], timeperiod=short_period)\n", + "long_ma = ta.SMA(df['close'], timeperiod=long_period)\n", + "```\n", + "\n", + "This will calculate the short-term and long-term moving averages for the stock data and store them in the `short_ma` and `long_ma` variables, respectively. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages using the `concat()` function from pandas. We need to pass the historical stock data, the short-term moving average, and the long-term moving average as arguments to this function. We can use the following code to create the new DataFrame:\n", + "\n", + "```\n", + "new_df = pd.concat([df, short_ma, long_ma], axis=1)\n", + "new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n", + "```\n", + "\n", + "This will create a new DataFrame called `new_df` that combines the historical stock data with the short-term and long-term moving averages. The columns of the new DataFrame are named 'open', 'high', 'low', 'close', 'volume', 'short_ma', and 'long_ma'. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. We can use the following code to create the new column:\n", + "\n", + "```\n", + "new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n", + "```\n", + "\n", + "This will create a new column called 'signal' in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages. If the short-term moving average is greater than the long-term moving average, the signal is 1 (buy), otherwise the signal is -1 (sell). 
Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target.\n", + "Input: The stop loss and profit target as percentages.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. We need to pass the stop loss and profit target as percentages as arguments to this function. We can use the following code to create the new column:\n", + "\n", + "```\n", + "stop_loss = stop_loss_percent / 100\n", + "profit_target = profit_target_percent / 100\n", + "\n", + "new_df['pnl'] = 0.0\n", + "buy_price = 0.0\n", + "for i in range(1, len(new_df)):\n", + " if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n", + " buy_price = new_df['close'][i]\n", + " elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n", + " sell_price = new_df['close'][i]\n", + " if sell_price <= buy_price * (1 - stop_loss):\n", + " new_df['pnl'][i] = -stop_loss\n", + " elif sell_price >= buy_price * (1 + profit_target):\n", + " new_df['pnl'][i] = profit_target\n", + " else:\n", + " new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n", + "```\n", + "\n", + "This will create a new column called 'pnl' in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target. The stop loss and profit target are calculated based on the stop_loss_percent and profit_target_percent variables, respectively. The buy and sell prices are stored in the buy_price and sell_price variables, respectively. If the sell price is less than or equal to the stop loss, the profit or loss is set to -stop_loss. If the sell price is greater than or equal to the profit target, the profit or loss is set to profit_target. Otherwise, the profit or loss is calculated as (sell_price - buy_price) / buy_price. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Calculate the total profit or loss for all trades.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can calculate the total profit or loss for all trades by summing the values in the 'pnl' column of the DataFrame. We can use the following code to calculate the total profit or loss:\n", + "\n", + "```\n", + "total_pnl = new_df['pnl'].sum()\n", + "```\n", + "\n", + "This will calculate the total profit or loss for all trades and store it in the total_pnl variable. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Visualize the stock data, short-term moving average, and long-term moving average using a line chart.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can visualize the stock data, short-term moving average, and long-term moving average using a line chart using the `plot()` function from pandas. 
We can use the following code to visualize the data:\n", + "\n", + "```\n", + "plt.figure(figsize=(12,6))\n", + "plt.plot(new_df.index, new_df['close'], label='Close')\n", + "plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n", + "plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n", + "plt.xlabel('Date')\n", + "plt.ylabel('Price')\n", + "plt.title('Stock Data with Moving Averages')\n", + "plt.legend()\n", + "plt.show()\n", + "```\n", + "\n", + "This will create a line chart that visualizes the stock data, short-term moving average, and long-term moving average. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each line. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Visualize the buy and sell signals using a scatter plot.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can visualize the buy and sell signals using a scatter plot using the `scatter()` function from matplotlib. We can use the following code to visualize the signals:\n", + "\n", + "```\n", + "buy_signals = new_df[new_df['signal'] == 1]\n", + "sell_signals = new_df[new_df['signal'] == -1]\n", + "\n", + "plt.figure(figsize=(12,6))\n", + "plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n", + "plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n", + "plt.plot(new_df.index, new_df['close'], label='Close')\n", + "plt.xlabel('Date')\n", + "plt.ylabel('Price')\n", + "plt.title('Buy and Sell Signals')\n", + "plt.legend()\n", + "plt.show()\n", + "```\n", + "\n", + "This will create a scatter plot that visualizes the buy and sell signals. The x-axis represents the date and the y-axis represents the price. The chart also includes a legend that labels each point. Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Print the total profit or loss for all trades.\n", + "Input: None.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: We can print the total profit or loss for all trades using the `print()` function. We can use the following code to print the total profit or loss:\n", + "\n", + "```\n", + "print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n", + "```\n", + "\n", + "This will print the total profit or loss for all trades in percentage format with two decimal places. 
Next request.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "Instruction: Finalize the code by putting all the code snippets together in a single Python script.\n", + "Input: The necessary inputs for each code snippet.\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Solution: Here is the complete Python script that puts all the code snippets together:\n", + "\n", + "```\n", + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import talib as ta\n", + "\n", + "# Load historical stock data into a pandas DataFrame\n", + "df = pd.read_csv('path/to/csv/file.csv')\n", + "\n", + "# Preprocess the historical stock data\n", + "df = df.set_index('date')\n", + "df = df.sort_index(ascending=True)\n", + "\n", + "# Calculate the short-term and long-term moving averages\n", + "short_period = 10\n", + "long_period = 30\n", + "short_ma = ta.SMA(df['close'], timeperiod=short_period)\n", + "long_ma = ta.SMA(df['close'], timeperiod=long_period)\n", + "\n", + "# Create a new DataFrame that combines the historical stock data with the short-term and long-term moving averages\n", + "new_df = pd.concat([df, short_ma, long_ma], axis=1)\n", + "new_df.columns = ['open', 'high', 'low', 'close', 'volume', 'short_ma', 'long_ma']\n", + "\n", + "# Create a new column in the DataFrame that indicates when to buy or sell the stock based on the crossover of the short-term and long-term moving averages\n", + "new_df['signal'] = np.where(new_df['short_ma'] > new_df['long_ma'], 1, -1)\n", + "\n", + "# Create a new column in the DataFrame that indicates the profit or loss for each trade based on the buy and sell signals and the defined stop loss and profit target\n", + "stop_loss_percent = 5\n", + "profit_target_percent = 10\n", + "stop_loss = stop_loss_percent / 100\n", + "profit_target = profit_target_percent / 100\n", + "new_df['pnl'] = 0.0\n", + "buy_price = 0.0\n", + "for i in range(1, len(new_df)):\n", + " if new_df['signal'][i] == 1 and new_df['signal'][i-1] == -1:\n", + " buy_price = new_df['close'][i]\n", + " elif new_df['signal'][i] == -1 and new_df['signal'][i-1] == 1:\n", + " sell_price = new_df['close'][i]\n", + " if sell_price <= buy_price * (1 - stop_loss):\n", + " new_df['pnl'][i] = -stop_loss\n", + " elif sell_price >= buy_price * (1 + profit_target):\n", + " new_df['pnl'][i] = profit_target\n", + " else:\n", + " new_df['pnl'][i] = (sell_price - buy_price) / buy_price\n", + "\n", + "# Calculate the total profit or loss for all trades\n", + "total_pnl = new_df['pnl'].sum()\n", + "\n", + "# Visualize the stock data, short-term moving average, and long-term moving average using a line chart\n", + "plt.figure(figsize=(12,6))\n", + "plt.plot(new_df.index, new_df['close'], label='Close')\n", + "plt.plot(new_df.index, new_df['short_ma'], label='Short MA')\n", + "plt.plot(new_df.index, new_df['long_ma'], label='Long MA')\n", + "plt.xlabel('Date')\n", + "plt.ylabel('Price')\n", + "plt.title('Stock Data with Moving Averages')\n", + "plt.legend()\n", + "plt.show()\n", + "\n", + "# Visualize the buy and sell signals using a scatter plot\n", + "buy_signals = new_df[new_df['signal'] == 1]\n", + "sell_signals = new_df[new_df['signal'] == -1]\n", + "plt.figure(figsize=(12,6))\n", + "plt.scatter(buy_signals.index, buy_signals['close'], label='Buy', marker='^', color='green')\n", + "plt.scatter(sell_signals.index, sell_signals['close'], label='Sell', marker='v', color='red')\n", + "plt.plot(new_df.index, new_df['close'], label='Close')\n", + 
"plt.xlabel('Date')\n", + "plt.ylabel('Price')\n", + "plt.title('Buy and Sell Signals')\n", + "plt.legend()\n", + "plt.show()\n", + "\n", + "# Print the total profit or loss for all trades\n", + "print('Total Profit/Loss: {:.2%}'.format(total_pnl))\n", + "```\n", + "\n", + "You need to replace the path/to/csv/file.csv with the actual path to the CSV file containing the historical stock data. You can also adjust the short_period, long_period, stop_loss_percent, and profit_target_percent variables to suit your needs.\n", + "\n", + "\n", + "AI User (Stock Trader):\n", + "\n", + "\n", + "\n", + "\n", + "AI Assistant (Python Programmer):\n", + "\n", + "Great! Let me know if you need any further assistance.\n", + "\n", + "\n" + ] + } + ], + "source": [ + "print(f\"Original task prompt:\\n{task}\\n\")\n", + "print(f\"Specified task prompt:\\n{specified_task}\\n\")\n", + "\n", + "chat_turn_limit, n = 30, 0\n", + "while n < chat_turn_limit:\n", + " n += 1\n", + " user_ai_msg = user_agent.step(assistant_msg)\n", + " user_msg = HumanMessage(content=user_ai_msg.content)\n", + " print(f\"AI User ({user_role_name}):\\n\\n{user_msg.content}\\n\\n\")\n", + " \n", + " assistant_ai_msg = assistant_agent.step(user_msg)\n", + " assistant_msg = HumanMessage(content=assistant_ai_msg.content)\n", + " print(f\"AI Assistant ({assistant_role_name}):\\n\\n{assistant_msg.content}\\n\\n\")\n", + " if \"\" in user_msg.content:\n", + " break" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "camel", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/use_cases/agent_simulations/characters.ipynb b/docs/use_cases/agent_simulations/characters.ipynb new file mode 100644 index 00000000..8de85cdf --- /dev/null +++ b/docs/use_cases/agent_simulations/characters.ipynb @@ -0,0 +1,1261 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "e9732067-71c7-46f7-ad09-381b3bf21a27", + "metadata": {}, + "source": [ + "# Generative Agents in LangChain\n", + "\n", + "This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al.\n", + "\n", + "In it, we leverage a time-weighted Memory object backed by a LangChain Retriever." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "53f81c37-db45-4fdc-843c-aa8fd2a9e99d", + "metadata": {}, + "outputs": [], + "source": [ + "# Use termcolor to make it easy to colorize the outputs.\n", + "!pip install termcolor > /dev/null" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "8851c370-b395-4b80-a79d-486a38ffc244", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import re\n", + "from datetime import datetime, timedelta\n", + "from typing import List, Optional, Tuple\n", + "from termcolor import colored\n", + "\n", + "from pydantic import BaseModel, Field\n", + "\n", + "from langchain import LLMChain\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.docstore import InMemoryDocstore\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.prompts import PromptTemplate\n", + "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", + "from langchain.schema import BaseLanguageModel, Document\n", + "from langchain.vectorstores import FAISS\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "81824e76", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "USER_NAME = \"Person A\" # The name you want to use when interviewing the agent.\n", + "LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want." + ] + }, + { + "cell_type": "markdown", + "id": "c3da1649-d88f-4973-b655-7042975cde7e", + "metadata": {}, + "source": [ + "### Generative Agent Memory Components\n", + "\n", + "This tutorial highlights the memory of generative agents and its impact on their behavior. This memory differs from standard LangChain chat memory in two ways:\n", + "\n", + "1. **Memory Formation**\n", + "\n", + " Generative Agents have extended memories, stored in a single stream:\n", + " 1. Observations - from dialogues or interactions with the virtual world, about self or others\n", + " 2. Reflections - resurfaced and summarized core memories\n", + "\n", + "2. **Memory Recall**\n", + "\n", + " Memories are retrieved using a weighted sum of salience, recency, and importance.\n", + "\n", + "Review the definition below, focusing on `add_memory` and `summarize_related_memories` methods." 
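+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To build intuition for that weighted recall, here is a minimal sketch of such a score. This is an illustration only, not the internals of `TimeWeightedVectorStoreRetriever`; the `decay_rate` value and the unit scaling of each term are assumptions for the example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datetime import datetime, timedelta\n", + "\n", + "def combined_recall_score(\n", + "    relevance: float,   # semantic similarity of the memory to the query, in [0, 1]\n", + "    importance: float,  # scored poignancy of the memory, in [0, 1]\n", + "    last_accessed: datetime,\n", + "    now: datetime,\n", + "    decay_rate: float = 0.01,  # assumed hourly decay; tune to taste\n", + ") -> float:\n", + "    \"\"\"Illustrative sketch: sum relevance, importance, and a decaying recency term.\"\"\"\n", + "    hours_passed = (now - last_accessed).total_seconds() / 3600\n", + "    recency = (1.0 - decay_rate) ** hours_passed  # ~1 when fresh, decays toward 0\n", + "    return relevance + importance + recency\n", + "\n", + "# A memory touched an hour ago outranks an equally relevant one from last week.\n", + "now = datetime.now()\n", + "print(combined_recall_score(0.5, 0.1, now - timedelta(hours=1), now))\n", + "print(combined_recall_score(0.5, 0.1, now - timedelta(days=7), now))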
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "043e5203-6a41-431c-9efa-3e1743d7d25a", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "class GenerativeAgent(BaseModel):\n", + " \"\"\"A character with memory and innate characteristics.\"\"\"\n", + " \n", + " name: str\n", + " age: int\n", + " traits: str\n", + " \"\"\"The traits of the character you wish not to change.\"\"\"\n", + " status: str\n", + " \"\"\"Current activities of the character.\"\"\"\n", + " llm: BaseLanguageModel\n", + " memory_retriever: TimeWeightedVectorStoreRetriever\n", + " \"\"\"The retriever to fetch related memories.\"\"\"\n", + " verbose: bool = False\n", + " \n", + " reflection_threshold: Optional[float] = None\n", + " \"\"\"When the total 'importance' of memories exceeds the above threshold, stop to reflect.\"\"\"\n", + " \n", + " current_plan: List[str] = []\n", + " \"\"\"The current plan of the agent.\"\"\"\n", + " \n", + " summary: str = \"\" #: :meta private:\n", + " summary_refresh_seconds: int = 3600 #: :meta private:\n", + " last_refreshed: datetime = Field(default_factory=datetime.now) #: :meta private:\n", + " daily_summaries: List[str] #: :meta private:\n", + " memory_importance: float = 0.0 #: :meta private:\n", + " max_tokens_limit: int = 1200 #: :meta private:\n", + " \n", + " class Config:\n", + " \"\"\"Configuration for this pydantic object.\"\"\"\n", + "\n", + " arbitrary_types_allowed = True\n", + "\n", + " @staticmethod\n", + " def _parse_list(text: str) -> List[str]:\n", + " \"\"\"Parse a newline-separated string into a list of strings.\"\"\"\n", + " lines = re.split(r'\\n', text.strip())\n", + " return [re.sub(r'^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]\n", + "\n", + "\n", + " def _compute_agent_summary(self):\n", + " \"\"\"Summarize the agent's core characteristics from related memories.\"\"\"\n", + " prompt = PromptTemplate.from_template(\n", + " \"How would you summarize {name}'s core characteristics given the\"\n", + " +\" following statements:\\n\"\n", + " +\"{related_memories}\"\n", + " + \"Do not embellish.\"\n", + " +\"\\n\\nSummary: \"\n", + " )\n", + " # The agent seeks to think about their core characteristics.\n", + " relevant_memories = self.fetch_memories(f\"{self.name}'s core characteristics\")\n", + " relevant_memories_str = \"\\n\".join([f\"{mem.page_content}\" for mem in relevant_memories])\n", + " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " return chain.run(name=self.name, related_memories=relevant_memories_str).strip()\n", + " \n", + " def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:\n", + " \"\"\"Return the 3 most salient high-level questions about recent observations.\"\"\"\n", + " prompt = PromptTemplate.from_template(\n", + " \"{observations}\\n\\n\"\n", + " + \"Given only the information above, what are the 3 most salient\"\n", + " + \" high-level questions we can answer about the subjects in the statements?\"\n", + " + \" Provide each question on a new line.\\n\\n\"\n", + " )\n", + " reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " observations = self.memory_retriever.memory_stream[-last_k:]\n", + " observation_str = \"\\n\".join([o.page_content for o in observations])\n", + " result = reflection_chain.run(observations=observation_str)\n", + " return self._parse_list(result)\n", + " \n", + " def _get_insights_on_topic(self, topic: str) -> List[str]:\n", + " \"\"\"Generate 'insights' on a topic of reflection, based on pertinent memories.\"\"\"\n", + " prompt = 
PromptTemplate.from_template(\n", + " \"Statements about {topic}\\n\"\n", + " +\"{related_statements}\\n\\n\"\n", + " + \"What 5 high-level insights can you infer from the above statements?\"\n", + " + \" (example format: insight (because of 1, 5, 3))\"\n", + " )\n", + " related_memories = self.fetch_memories(topic)\n", + " related_statements = \"\\n\".join([f\"{i+1}. {memory.page_content}\" \n", + " for i, memory in \n", + " enumerate(related_memories)])\n", + " reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " result = reflection_chain.run(topic=topic, related_statements=related_statements)\n", + " # TODO: Parse the connections between memories and insights\n", + " return self._parse_list(result)\n", + " \n", + " def pause_to_reflect(self) -> List[str]:\n", + " \"\"\"Reflect on recent observations and generate 'insights'.\"\"\"\n", + " print(colored(f\"Character {self.name} is reflecting\", \"blue\"))\n", + " new_insights = []\n", + " topics = self._get_topics_of_reflection()\n", + " for topic in topics:\n", + " insights = self._get_insights_on_topic(topic)\n", + " for insight in insights:\n", + " self.add_memory(insight)\n", + " new_insights.extend(insights)\n", + " return new_insights\n", + " \n", + " def _score_memory_importance(self, memory_content: str, weight: float = 0.15) -> float:\n", + " \"\"\"Score the absolute importance of the given memory.\"\"\"\n", + " # A weight of 0.15 makes this less important than it\n", + " # would be otherwise, relative to salience and time\n", + " prompt = PromptTemplate.from_template(\n", + " \"On the scale of 1 to 10, where 1 is purely mundane\"\n", + " +\" (e.g., brushing teeth, making bed) and 10 is\"\n", + " + \" extremely poignant (e.g., a break up, college\"\n", + " + \" acceptance), rate the likely poignancy of the\"\n", + " + \" following piece of memory. 
Respond with a single integer.\"\n", + " + \"\\nMemory: {memory_content}\"\n", + " + \"\\nRating: \"\n", + " )\n", + " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " score = chain.run(memory_content=memory_content).strip()\n", + " match = re.search(r\"^\\D*(\\d+)\", score)\n", + " if match:\n", + " # Use the captured rating (not the raw string's first character).\n", + " return (float(match.group(1)) / 10) * weight\n", + " else:\n", + " return 0.0\n", + "\n", + "\n", + " def add_memory(self, memory_content: str) -> List[str]:\n", + " \"\"\"Add an observation or memory to the agent's memory.\"\"\"\n", + " importance_score = self._score_memory_importance(memory_content)\n", + " self.memory_importance += importance_score\n", + " document = Document(page_content=memory_content, metadata={\"importance\": importance_score})\n", + " result = self.memory_retriever.add_documents([document])\n", + "\n", + " # After an agent has processed a certain amount of memories (as measured by\n", + " # aggregate importance), it is time to reflect on recent events to add\n", + " # more synthesized memories to the agent's memory stream.\n", + " if (self.reflection_threshold is not None \n", + " and self.memory_importance > self.reflection_threshold\n", + " and self.status != \"Reflecting\"):\n", + " old_status = self.status\n", + " self.status = \"Reflecting\"\n", + " self.pause_to_reflect()\n", + " # Hack to clear the importance from reflection\n", + " self.memory_importance = 0.0\n", + " self.status = old_status\n", + " return result\n", + " \n", + " def fetch_memories(self, observation: str) -> List[Document]:\n", + " \"\"\"Fetch related memories.\"\"\"\n", + " return self.memory_retriever.get_relevant_documents(observation)\n", + " \n", + " \n", + " def get_summary(self, force_refresh: bool = False) -> str:\n", + " \"\"\"Return a descriptive summary of the agent.\"\"\"\n", + " current_time = datetime.now()\n", + " since_refresh = (current_time - self.last_refreshed).seconds\n", + " if not self.summary or since_refresh >= self.summary_refresh_seconds or force_refresh:\n", + " self.summary = self._compute_agent_summary()\n", + " self.last_refreshed = current_time\n", + " return (\n", + " f\"Name: {self.name} (age: {self.age})\"\n", + " +f\"\\nInnate traits: {self.traits}\"\n", + " +f\"\\n{self.summary}\"\n", + " )\n", + " \n", + " def get_full_header(self, force_refresh: bool = False) -> str:\n", + " \"\"\"Return a full header of the agent's status, summary, and current time.\"\"\"\n", + " summary = self.get_summary(force_refresh=force_refresh)\n", + " current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n", + " return f\"{summary}\\nIt is {current_time_str}.\\n{self.name}'s status: {self.status}\"\n", + "\n", + " \n", + " \n", + " def _get_entity_from_observation(self, observation: str) -> str:\n", + " prompt = PromptTemplate.from_template(\n", + " \"What is the observed entity in the following observation? {observation}\"\n", + " +\"\\nEntity=\"\n", + " )\n", + " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " return chain.run(observation=observation).strip()\n", + "\n", + " def _get_entity_action(self, observation: str, entity_name: str) -> str:\n", + " prompt = PromptTemplate.from_template(\n", + " \"What is the {entity} doing in the following observation? 
{observation}\"\n", + " +\"\\nThe {entity} is\"\n", + " )\n", + " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " return chain.run(entity=entity_name, observation=observation).strip()\n", + " \n", + " def _format_memories_to_summarize(self, relevant_memories: List[Document]) -> str:\n", + " content_strs = set()\n", + " content = []\n", + " for mem in relevant_memories:\n", + " if mem.page_content in content_strs:\n", + " continue\n", + " content_strs.add(mem.page_content)\n", + " created_time = mem.metadata[\"created_at\"].strftime(\"%B %d, %Y, %I:%M %p\")\n", + " content.append(f\"- {created_time}: {mem.page_content.strip()}\")\n", + " return \"\\n\".join([f\"{mem}\" for mem in content])\n", + " \n", + " def summarize_related_memories(self, observation: str) -> str:\n", + " \"\"\"Summarize memories that are most relevant to an observation.\"\"\"\n", + " entity_name = self._get_entity_from_observation(observation)\n", + " entity_action = self._get_entity_action(observation, entity_name)\n", + " q1 = f\"What is the relationship between {self.name} and {entity_name}\"\n", + " relevant_memories = self.fetch_memories(q1) # Fetch memories related to the agent's relationship with the entity\n", + " q2 = f\"{entity_name} is {entity_action}\"\n", + " relevant_memories += self.fetch_memories(q2) # Fetch things related to the entity-action pair\n", + " context_str = self._format_memories_to_summarize(relevant_memories)\n", + " prompt = PromptTemplate.from_template(\n", + " \"{q1}?\\nContext from memory:\\n{context_str}\\nRelevant context: \"\n", + " )\n", + " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", + " return chain.run(q1=q1, context_str=context_str.strip()).strip()\n", + " \n", + " def _get_memories_until_limit(self, consumed_tokens: int) -> str:\n", + " \"\"\"Reduce the number of tokens in the documents.\"\"\"\n", + " result = []\n", + " for doc in self.memory_retriever.memory_stream[::-1]:\n", + " if consumed_tokens >= self.max_tokens_limit:\n", + " break\n", + " consumed_tokens += self.llm.get_num_tokens(doc.page_content)\n", + " if consumed_tokens < self.max_tokens_limit:\n", + " result.append(doc.page_content) \n", + " return \"; \".join(result[::-1])\n", + " \n", + " def _generate_reaction(\n", + " self,\n", + " observation: str,\n", + " suffix: str\n", + " ) -> str:\n", + " \"\"\"React to a given observation.\"\"\"\n", + " prompt = PromptTemplate.from_template(\n", + " \"{agent_summary_description}\"\n", + " +\"\\nIt is {current_time}.\"\n", + " +\"\\n{agent_name}'s status: {agent_status}\"\n", + " + \"\\nSummary of relevant context from {agent_name}'s memory:\"\n", + " +\"\\n{relevant_memories}\"\n", + " +\"\\nMost recent observations: {recent_observations}\"\n", + " + \"\\nObservation: {observation}\"\n", + " + \"\\n\\n\" + suffix\n", + " )\n", + " agent_summary_description = self.get_summary()\n", + " relevant_memories_str = self.summarize_related_memories(observation)\n", + " current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n", + " kwargs = dict(agent_summary_description=agent_summary_description,\n", + " current_time=current_time_str,\n", + " relevant_memories=relevant_memories_str,\n", + " agent_name=self.name,\n", + " observation=observation,\n", + " agent_status=self.status)\n", + " consumed_tokens = self.llm.get_num_tokens(prompt.format(recent_observations=\"\", **kwargs))\n", + " kwargs[\"recent_observations\"] = self._get_memories_until_limit(consumed_tokens)\n", + " action_prediction_chain = 
LLMChain(llm=self.llm, prompt=prompt)\n", + " result = action_prediction_chain.run(**kwargs)\n", + " return result.strip()\n", + " \n", + " def generate_reaction(self, observation: str) -> Tuple[bool, str]:\n", + " \"\"\"React to a given observation.\"\"\"\n", + " call_to_action_template = (\n", + " \"Should {agent_name} react to the observation, and if so,\"\n", + " +\" what would be an appropriate reaction? Respond in one line.\"\n", + " +' If the action is to engage in dialogue, write:\\nSAY: \"what to say\"'\n", + " +\"\\notherwise, write:\\nREACT: {agent_name}'s reaction (if anything).\"\n", + " + \"\\nEither do nothing, react, or say something but not both.\\n\\n\"\n", + " )\n", + " full_result = self._generate_reaction(observation, call_to_action_template)\n", + " result = full_result.strip().split('\\n')[0]\n", + " self.add_memory(f\"{self.name} observed {observation} and reacted by {result}\")\n", + " if \"REACT:\" in result:\n", + " reaction = result.split(\"REACT:\")[-1].strip()\n", + " return False, f\"{self.name} {reaction}\"\n", + " if \"SAY:\" in result:\n", + " said_value = result.split(\"SAY:\")[-1].strip()\n", + " return True, f\"{self.name} said {said_value}\"\n", + " else:\n", + " return False, result\n", + "\n", + " def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:\n", + " \"\"\"React to a given observation.\"\"\"\n", + " call_to_action_template = (\n", + " 'What would {agent_name} say? To end the conversation, write: GOODBYE: \"what to say\". Otherwise to continue the conversation, write: SAY: \"what to say next\"\\n\\n'\n", + " )\n", + " full_result = self._generate_reaction(observation, call_to_action_template)\n", + " result = full_result.strip().split('\\n')[0]\n", + " if \"GOODBYE:\" in result:\n", + " farewell = result.split(\"GOODBYE:\")[-1].strip()\n", + " self.add_memory(f\"{self.name} observed {observation} and said {farewell}\")\n", + " return False, f\"{self.name} said {farewell}\"\n", + " if \"SAY:\" in result:\n", + " response_text = result.split(\"SAY:\")[-1].strip()\n", + " self.add_memory(f\"{self.name} observed {observation} and said {response_text}\")\n", + " return True, f\"{self.name} said {response_text}\"\n", + " else:\n", + " return False, result" + ] + }, + { + "cell_type": "markdown", + "id": "361bd49e", + "metadata": { + "jp-MarkdownHeadingCollapsed": true, + "tags": [] + }, + "source": [ + "## Memory Lifecycle\n", + "\n", + "Summarizing the above key methods: `add_memory` and `summarize_related_memories`.\n", + "\n", + "When an agent makes an observation, it stores the memory:\n", + " \n", + "1. Language model scores the memory's importance (1 for mundane, 10 for poignant)\n", + "2. Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a `last_accessed_time`.\n", + "\n", + "When an agent responds to an observation:\n", + "\n", + "1. Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.\n", + "2. Summarizes the retrieved information\n", + "3. Updates the `last_accessed_time` for the used documents.\n" + ] + }, + { + "cell_type": "markdown", + "id": "2fa3ca02", + "metadata": {}, + "source": [ + "## Create a Generative Character\n", + "\n", + "\n", + "\n", + "Now that we've walked through the definition, we will create two characters named \"Tommie\" and \"Eve\"." 
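+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before wiring up the retriever, a quick aside on the score conversion used in the next cell (an illustrative check only, assuming unit-norm embeddings): for normalized vectors, a Euclidean distance of 0 means identical and sqrt(2) means orthogonal, so `1 - distance / sqrt(2)` maps that range onto a [0, 1] similarity." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "\n", + "# Two unit-norm vectors: identical -> distance 0; orthogonal -> distance sqrt(2).\n", + "a = np.array([1.0, 0.0])\n", + "b = np.array([0.0, 1.0])\n", + "for u, v in [(a, a), (a, b)]:\n", + "    dist = np.linalg.norm(u - v)\n", + "    print(dist, 1.0 - dist / np.sqrt(2))  # prints 0.0 1.0, then ~1.414 ~0.0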
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "ee9c1a1d-c311-4f1c-8131-75fccd9025b1", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import math\n", + "import faiss\n", + "\n", + "def relevance_score_fn(score: float) -> float:\n", + " \"\"\"Return a similarity score on a scale [0, 1].\"\"\"\n", + " # This will differ depending on a few things:\n", + " # - the distance / similarity metric used by the VectorStore\n", + " # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)\n", + " # This function converts the euclidean norm of normalized embeddings\n", + " # (0 is most similar, sqrt(2) most dissimilar)\n", + " # to a similarity function (0 to 1)\n", + " return 1.0 - score / math.sqrt(2)\n", + "\n", + "def create_new_memory_retriever():\n", + " \"\"\"Create a new vector store retriever unique to the agent.\"\"\"\n", + " # Define your embedding model\n", + " embeddings_model = OpenAIEmbeddings()\n", + " # Initialize the vectorstore as empty\n", + " embedding_size = 1536\n", + " index = faiss.IndexFlatL2(embedding_size)\n", + " vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=relevance_score_fn)\n", + " return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=[\"importance\"], k=15) " + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "7884f9dd-c597-4c27-8c77-1402c71bc2f8", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "tommie = GenerativeAgent(name=\"Tommie\", \n", + " age=25,\n", + " traits=\"anxious, likes design\", # You can add more persistent traits here \n", + " status=\"looking for a job\", # When connected to a virtual world, we can have the characters update their status\n", + " memory_retriever=create_new_memory_retriever(),\n", + " llm=LLM,\n", + " daily_summaries = [\n", + " \"Drove across state to move to a new town but doesn't have a job yet.\"\n", + " ],\n", + " reflection_threshold = 8, # we will give this a relatively low number to show how reflection works\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c524d529", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Name: Tommie (age: 25)\n", + "Innate traits: anxious, likes design\n", + "Unfortunately, there are no statements provided to summarize Tommie's core characteristics.\n" + ] + } + ], + "source": [ + "# The current \"Summary\" of a character can't be made because the agent hasn't made\n", + "# any observations yet.\n", + "print(tommie.get_summary())" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "4be60979-d56e-4abf-a636-b34ffa8b7fba", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# We can give the character memories directly\n", + "tommie_memories = [\n", + " \"Tommie remembers his dog, Bruno, from when he was a kid\",\n", + " \"Tommie feels tired from driving so far\",\n", + " \"Tommie sees the new home\",\n", + " \"The new neighbors have a cat\",\n", + " \"The road is noisy at night\",\n", + " \"Tommie is hungry\",\n", + " \"Tommie tries to get some rest.\",\n", + "]\n", + "for memory in tommie_memories:\n", + " tommie.add_memory(memory)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "6992b48b-697f-4973-9560-142ef85357d7", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Name: Tommie (age: 25)\n", + "Innate 
traits: anxious, likes design\n", + "Tommie is observant, nostalgic, tired, and hungry.\n" + ] + } + ], + "source": [ + "# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.\n", + "# We will see how this summary updates after more observations to create a more rich description.\n", + "print(tommie.get_summary(force_refresh=True))" + ] + }, + { + "cell_type": "markdown", + "id": "40d39a32-838c-4a03-8b27-a52c76c402e7", + "metadata": { + "tags": [] + }, + "source": [ + "## Pre-Interview with Character\n", + "\n", + "Before sending our character on their way, let's ask them a few questions." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "eaf125d8-f54c-4c5f-b6af-32789b1f7d3a", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def interview_agent(agent: GenerativeAgent, message: str) -> str:\n", + " \"\"\"Help the notebook user interact with the agent.\"\"\"\n", + " new_message = f\"{USER_NAME} says {message}\"\n", + " return agent.generate_dialogue_response(new_message)[1]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "54024d41-6e83-4914-91e5-73140e2dd9c8", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"I really enjoy design, especially interior design. I find it calming and rewarding to create a space that is both functional and aesthetically pleasing. Unfortunately, I haven\\'t been able to find a job in that field yet.\"'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"What do you like to do?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "71e2e8cc-921e-4816-82f1-66962b2c1055", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"Well, I\\'m actually on the hunt for a job right now. I\\'m hoping to find something in the design field, but I\\'m open to exploring other options as well. How about you, what are your plans for the day?\"'" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"What are you looking forward to doing today?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "a2521ffc-7050-4ac3-9a18-4cccfc798c31", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"Honestly, I\\'m feeling pretty anxious about finding a job. It\\'s been a bit of a struggle and I\\'m not sure what my next step should be. But I\\'m trying to stay positive and keep pushing forward.\"'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"What are you most worried about today?\")" + ] + }, + { + "cell_type": "markdown", + "id": "e509c468-f7cd-4d72-9f3a-f4aba28b1eea", + "metadata": {}, + "source": [ + "## Step through the day's observations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "154dee3d-bfe0-4828-b963-ed7e885799b3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Let's have Tommie start going through a day in the life.\n", + "observations = [\n", + " \"Tommie wakes up to the sound of a noisy construction site outside his window.\",\n", + " \"Tommie gets out of bed and heads to the kitchen to make himself some coffee.\",\n", + " \"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\",\n", + " \"Tommie finally finds the filters and makes himself a cup of coffee.\",\n", + " \"The coffee tastes bitter, and Tommie regrets not buying a better brand.\",\n", + " \"Tommie checks his email and sees that he has no job offers yet.\",\n", + " \"Tommie spends some time updating his resume and cover letter.\",\n", + " \"Tommie heads out to explore the city and look for job openings.\",\n", + " \"Tommie sees a sign for a job fair and decides to attend.\",\n", + " \"The line to get in is long, and Tommie has to wait for an hour.\",\n", + " \"Tommie meets several potential employers at the job fair but doesn't receive any offers.\",\n", + " \"Tommie leaves the job fair feeling disappointed.\",\n", + " \"Tommie stops by a local diner to grab some lunch.\",\n", + " \"The service is slow, and Tommie has to wait for 30 minutes to get his food.\",\n", + " \"Tommie overhears a conversation at the next table about a job opening.\",\n", + " \"Tommie asks the diners about the job opening and gets some information about the company.\",\n", + " \"Tommie decides to apply for the job and sends his resume and cover letter.\",\n", + " \"Tommie continues his search for job openings and drops off his resume at several local businesses.\",\n", + " \"Tommie takes a break from his job search to go for a walk in a nearby park.\",\n", + " \"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.\",\n", + " \"Tommie sees a group of people playing frisbee and decides to join in.\",\n", + " \"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\",\n", + " \"Tommie goes back to his apartment to rest for a bit.\",\n", + " \"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\",\n", + " \"Tommie starts to feel frustrated with his job search.\",\n", + " \"Tommie calls his best friend to vent about his struggles.\",\n", + " \"Tommie's friend offers some words of encouragement and tells him to keep trying.\",\n", + " \"Tommie feels slightly better after talking to his friend.\",\n", + "]\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "238be49c-edb3-4e26-a2b6-98777ba8de86", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie Tommie groans and covers their head with a pillow, trying to block out the noise.\n", + "\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie Tommie starts making coffee, feeling grateful for the little bit of energy it will give him.\n", + "\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie Tommie sighs in frustration and continues to search for the coffee filters.\n", + "\u001b[32mTommie finally finds the filters and makes 
himself a cup of coffee.\u001b[0m Tommie Tommie takes a sip of the coffee and feels a little more awake.\n", + "\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie Tommie grimaces at the taste of the coffee and decides to make a mental note to buy a better brand next time.\n", + "\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie Tommie feels disappointed and discouraged, but tries to stay positive and continue the job search.\n", + "\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie Tommie feels determined to keep working on his job search.\n", + "\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie Tommie feels hopeful but also anxious as he heads out to explore the city and look for job openings.\n", + "\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie said \"That job fair could be a great opportunity to meet potential employers.\"\n", + "\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie Tommie feels frustrated and restless while waiting in line.\n", + "\u001b[32mTommie meets several potential employers at the job fair but doesn't receive any offers.\u001b[0m Tommie Tommie feels disappointed but remains determined to keep searching for job openings.\n", + "\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie Tommie feels discouraged but remains determined to keep searching for job openings.\n", + "\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie Tommie feels relieved to take a break from job searching and enjoy a meal.\n", + "\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie Tommie feels impatient and frustrated while waiting for his food.\n", + "\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear about the job opening. Could you tell me more about it?\"\n", + "\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Could you tell me more about it?\"\n", + "\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie said \"Thank you for the information, I'll definitely apply for the job and keep my fingers crossed.\"\n", + "\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie Tommie feels hopeful but also anxious as he continues his search for job openings and drops off his resume at several local businesses.\n", + "\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie Tommie takes a deep breath and enjoys the fresh air in the park.\n", + "\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie Tommie smiles and enjoys the momentary distraction from his job search.\n", + "****************************************\n", + "\u001b[34mAfter 20 observations, Tommie's summary is:\n", + "Name: Tommie (age: 25)\n", + "Innate traits: anxious, likes design\n", + "Tommie is a determined individual who is actively searching for job opportunities. He feels both hopeful and anxious about his search and remains positive despite facing disappointments. 
He takes breaks to rest and enjoy the little things in life, like going for a walk or grabbing a meal. Tommie is also open to asking for help and seeking information about potential job openings. He is grateful for the little things that give him energy and tries to stay positive even when faced with discouragement. Overall, Tommie's core characteristics include determination, positivity, and a willingness to seek help and take breaks when needed.\u001b[0m\n", + "****************************************\n", + "\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Tommie said \"Mind if I join in on the game?\"\n", + "\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie Tommie winces in pain and puts his hand to his nose to check for any bleeding.\n", + "\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie Tommie takes a deep breath and sits down to rest for a bit.\n", + "\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie Tommie sighs and grabs a broom to clean up the mess.\n", + "\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie Tommie takes a deep breath and reminds himself to stay positive and keep searching for job opportunities.\n", + "\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I vent to you for a bit about my job search struggles?\"\n", + "\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you for the encouragement, it means a lot. I'll keep trying.\"\n", + "\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie said \"Thank you for your support, it really means a lot to me.\"\n" + ] + } + ], + "source": [ + "# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve\n", + "for i, observation in enumerate(observations):\n", + " _, reaction = tommie.generate_reaction(observation)\n", + " print(colored(observation, \"green\"), reaction)\n", + " if ((i+1) % 20) == 0:\n", + " print('*'*40)\n", + " print(colored(f\"After {i+1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\", \"blue\"))\n", + " print('*'*40)" + ] + }, + { + "cell_type": "markdown", + "id": "dd62a275-7290-43ca-aa0f-504f3a706d09", + "metadata": {}, + "source": [ + "## Interview after the day" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "6336ab5d-3074-4831-951f-c9e2cba5dfb5", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"It\\'s been a bit of a rollercoaster, to be honest. I went to a job fair and met some potential employers, but didn\\'t get any offers. But then I overheard about a job opening at a diner and applied for it. I also took a break to go for a walk in the park and played frisbee with some people, which was a nice distraction. 
Overall, it\\'s been a bit frustrating, but I\\'m trying to stay positive and keep searching for job opportunities.\"'" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"Tell me about how your day has been going\")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "809ac906-69b7-4326-99ec-af638d32bb20", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie would say: \"I rely on coffee to give me a little boost, but I regret not buying a better brand lately. The taste has been pretty bitter. But overall, it\\'s not a huge factor in my life.\" '" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"How do you feel about coffee?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "f733a431-19ea-421a-9101-ae2593a8c626", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"Oh, I actually don\\'t have a childhood dog, but I do love animals. Have you had any pets?\"'" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"Tell me about your childhood dog!\")" + ] + }, + { + "cell_type": "markdown", + "id": "c9261428-778a-4c0b-b725-bc9e91b71391", + "metadata": {}, + "source": [ + "## Adding Multiple Characters\n", + "\n", + "Let's add a second character to have a conversation with Tommie. Feel free to configure different traits." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "ec8bbe18-a021-419c-bf1f-23d34732cd99", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "eve = GenerativeAgent(name=\"Eve\", \n", + " age=34, \n", + " traits=\"curious, helpful\", # You can add more persistent traits here \n", + " status=\"N/A\", # When connected to a virtual world, we can have the characters update their status\n", + " memory_retriever=create_new_memory_retriever(),\n", + " llm=LLM,\n", + " daily_summaries = [\n", + " (\"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.\")\n", + " ],\n", + " reflection_threshold = 5,\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "1e2745f5-e0da-4abd-98b4-830802ce6698", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "yesterday = (datetime.now() - timedelta(days=1)).strftime(\"%A %B %d\")\n", + "eve_memories = [\n", + " \"Eve overhears her colleague say something about a new client being hard to work with\",\n", + " \"Eve wakes up and hears the alarm\",\n", + " \"Eve eats a bowl of porridge\",\n", + " \"Eve helps a coworker on a task\",\n", + " \"Eve plays tennis with her friend Xu before going to work\",\n", + " \"Eve overhears her colleague say something about Tommie being hard to work with\",\n", + "]\n", + "for memory in eve_memories:\n", + " eve.add_memory(memory)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "de4726e3-4bb1-47da-8fd9-f317a036fe0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Name: Eve (age: 34)\n", + "Innate traits: curious, helpful\n", + "Eve is helpful, active, eats breakfast, is attentive to her surroundings, and works with colleagues.\n" + ] + } + ], + "source": [ + "print(eve.get_summary())" + ] + }, + {
"cell_type": "markdown", + "id": "837524e9-7f7e-4e9f-b610-f454062f5915", + "metadata": {}, + "source": [ + "## Pre-conversation interviews\n", + "\n", + "\n", + "Let's \"Interview\" Eve before she speaks with Tommie." + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "6cda916d-800c-47bc-a7f9-6a2f19187472", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Eve said \"I\\'m feeling curious about what\\'s on the agenda for today. Anything special we should be aware of?\"'" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(eve, \"How are you feeling about today?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "448ae644-0a66-4eb2-a03a-319f36948b37", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Eve said \"I overheard someone say Tommie is hard to work with. Is there something I can help with?\"'" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(eve, \"What do you know about Tommie?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "493fc5b8-8730-4ef8-9820-0f1769ce1691", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Eve said \"Oh, I didn\\'t realize Tommie was looking for a new job. Is there anything I can do to help? Maybe I could introduce him to some people in my network or help him with his resume.\"'" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(eve, \"Tommie is looking to find a job. What are are some things you'd like to ask him?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "4b46452a-6c54-4db2-9d87-18597f70fec8", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Eve said \"Sure, I can definitely help keep the conversation going and ask him plenty of questions. Is there anything specific you would like me to ask him about his skills or experience? I want to make sure the conversation is productive.\"'" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(eve, \"You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.\")" + ] + }, + { + "cell_type": "markdown", + "id": "dd780655-1d73-4fcb-a78d-79fd46a20636", + "metadata": {}, + "source": [ + "## Dialogue between Generative Agents\n", + "\n", + "Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "042ea271-4bf1-4247-9082-239a6fea43b8", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:\n", + " \"\"\"Runs a conversation between agents.\"\"\"\n", + " _, observation = agents[1].generate_reaction(initial_observation)\n", + " print(observation)\n", + " turns = 0\n", + " while True:\n", + " break_dialogue = False\n", + " for agent in agents:\n", + " stay_in_dialogue, observation = agent.generate_dialogue_response(observation)\n", + " print(observation)\n", + " # observation = f\"{agent.name} said {reaction}\"\n", + " if not stay_in_dialogue:\n", + " break_dialogue = True \n", + " if break_dialogue:\n", + " break\n", + " turns += 1\n" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "d5462b14-218e-4d85-b035-df57ea8e0f80", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Eve said \"Of course, Tommie! I'm happy to help in any way I can. What specifically would you like advice on?\"\n", + "Tommie said \"Thank you, Eve. I was actually wondering if you knew of any job openings in the design field? That's my area of interest and expertise.\"\n", + "Eve said \"Sure, Tommie! I can definitely keep an eye out for any job openings in the design field and let you know if I come across anything. Have you updated your resume and portfolio recently? I could take a look and provide some feedback if you'd like.\"\n", + "Tommie said \"Thank you so much, Eve! That would be incredibly helpful. And yes, I have updated my resume and portfolio recently, but I would love any feedback you can provide. I really appreciate your support and willingness to help.\"\n", + "Eve said \"Great, Tommie! I'll take a look at your resume and portfolio and provide you with some feedback. Do you have any specific companies or types of design jobs that you're interested in? I might be able to provide more targeted advice and resources based on that information.\"\n", + "Tommie said \"Thank you, Eve! I'm primarily interested in graphic design and UI/UX design positions, but I'm open to other opportunities as well. I appreciate any advice or resources you can provide. Thanks again for your help.\"\n", + "Eve said \"Great, Tommie! Those are both really interesting fields. I'll definitely keep an eye out for any job openings or resources related to graphic design and UI/UX design. In the meantime, I can take a look at your resume and portfolio and provide you with some feedback. Would you like me to email you my feedback or would you prefer to discuss it in person?\"\n", + "Tommie said \"Thank you, Eve! I would really appreciate discussing your feedback in person. When would be a good time for us to meet?\"\n", + "Eve said \"Sure, Tommie! How about we schedule a time to meet next week? I have some availability on Tuesday or Thursday afternoon. What works best for you?\"\n", + "Tommie said \"Thank you, Eve! Tuesday afternoon works better for me. How about 2 PM?\"\n", + "Eve said \"Great, Tommie! Tuesday at 2 PM works for me as well. Let's plan to meet then. In the meantime, if you have any questions or if there's anything else I can help with, please don't hesitate to reach out. Good luck with your job search!\"\n", + "Tommie said \"Thank you so much, Eve! I really appreciate your support and willingness to help. I'll see you next Tuesday at 2 PM. 
Have a great day!\"\n", + "Eve said \"You're welcome, Tommie! Looking forward to meeting with you on Tuesday. Have a great day and good luck with your job search!\"\n" + ] + } + ], + "source": [ + "agents = [tommie, eve]\n", + "run_conversation(agents, \"Tommie said: Hi, Eve. Thanks for agreeing to share your story with me and give me advice. I have a bunch of questions.\")" + ] + }, + { + "cell_type": "markdown", + "id": "1b28fe80-03dc-4399-961d-6e9ee1980216", + "metadata": { + "tags": [] + }, + "source": [ + "## Let's interview our agents after their conversation\n", + "\n", + "Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memoreis." + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "c4d252f3-fcc1-474c-846e-a7605a6b4ce7", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Name: Tommie (age: 25)\n", + "Innate traits: anxious, likes design\n", + "Tommie is a determined person who is actively searching for job opportunities. He feels both hopeful and anxious about his job search, and remains persistent despite facing disappointment and discouragement. He seeks support from friends and takes breaks to recharge. He tries to stay positive and continues to work on improving his resume and cover letter. He also values the importance of self-care and takes breaks to rest and enjoy nature.\n" + ] + } + ], + "source": [ + "# We can see a current \"Summary\" of a character based on their own perception of self\n", + "# has changed\n", + "print(tommie.get_summary(force_refresh=True))" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "c04db9a4", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Name: Eve (age: 34)\n", + "Innate traits: curious, helpful\n", + "Eve is a helpful and proactive coworker who values relationships and communication. She is attentive to her colleagues' needs and willing to offer support and assistance. She is also curious and interested in learning more about her work and the people around her. Overall, Eve demonstrates a strong sense of empathy and collaboration in her interactions with others.\n" + ] + } + ], + "source": [ + "print(eve.get_summary(force_refresh=True))" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "71762558-8fb6-44d7-8483-f5b47fb2a862", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"It was really helpful! Eve offered to provide feedback on my resume and portfolio, and she\\'s going to keep an eye out for job openings in the design field. We\\'re planning to meet next Tuesday to discuss her feedback. Thanks for asking!\"'" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"How was your conversation with Eve?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "085af3d8-ac21-41ea-8f8b-055c56976a67", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Eve said \"It was really productive! Tommie is interested in graphic design and UI/UX design positions, so I\\'m going to keep an eye out for any job openings or resources related to those fields. I\\'m also going to provide him with some feedback on his resume and portfolio. We\\'re scheduled to meet next Tuesday at 2 PM to discuss everything in person. 
Is there anything else you would like me to ask him or anything else I can do to help?\".'" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(eve, \"How was your conversation with Tommie?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "5b439f3c-7849-4432-a697-2bcc85b89dae", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Eve said \"I feel like I covered everything I wanted to with Tommie, but thank you for asking! If there\\'s anything else that comes up or if you have any further questions, please let me know.\"'" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(eve, \"What do you wish you would have said to Tommie?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "526e8863-8b32-4216-8e61-2dfe82e3fb47", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Tommie said \"Oh, I actually forgot to buy coffee filters yesterday, so I couldn\\'t make coffee this morning. But I\\'m planning to grab some later today. Thanks for asking!\"'" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "interview_agent(tommie, \"What happened with your coffee this morning?\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a17ff5bc-5ad9-4184-8f80-33643e06c589", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/use_cases/autonomous_agents.md b/docs/use_cases/autonomous_agents.md new file mode 100644 index 00000000..b4c9d672 --- /dev/null +++ b/docs/use_cases/autonomous_agents.md @@ -0,0 +1,20 @@ +# Autonomous Agents + + +Autonomous Agents are agents designed to be long-running. +You give them one or more long-term goals, and they independently execute towards those goals. +The applications combine tool usage and long-term memory. + +At the moment, Autonomous Agents are fairly experimental and based on other open-source projects. +By implementing these open-source projects in LangChain primitives, we get the benefits of LangChain - +easy switching and experimenting with multiple LLMs, usage of different vectorstores as memory, +and usage of LangChain's collection of tools. + +## Baby AGI ([Original Repo](https://github.com/yoheinakajima/babyagi)) + +- [Baby AGI](autonomous_agents/baby_agi.ipynb): a notebook implementing BabyAGI as LLM Chains +- [Baby AGI with Tools](autonomous_agents/baby_agi_with_agent.ipynb): building off the above notebook, this example substitutes in an agent with tools as the execution chain, allowing it to actually take actions.
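To make that swap concrete, here is a minimal sketch, assuming the constructor signatures used in the notebooks below, of pointing BabyAGI at a local FAISS vectorstore and an OpenAI LLM; either component could be exchanged for another LangChain implementation at the marked lines:

```python
# Sketch: wiring BabyAGI to swappable components (FAISS + OpenAI here,
# mirroring the notebooks below; any LangChain LLM/vectorstore works).
import faiss
from langchain import OpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import BabyAGI
from langchain.vectorstores import FAISS

embeddings_model = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)  # OpenAI embeddings are 1536-dimensional
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})

baby_agi = BabyAGI.from_llm(
    llm=OpenAI(temperature=0),  # swap in any LangChain LLM here
    vectorstore=vectorstore,    # ...and any vectorstore here
    max_iterations=3,
)
```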
+ + +## AutoGPT ([Original Repo](https://github.com/Significant-Gravitas/Auto-GPT)) +- [AutoGPT](autonomous_agents/autogpt.ipynb): a notebook implementing AutoGPT in LangChain primitives diff --git a/docs/use_cases/autonomous_agents/autogpt.ipynb b/docs/use_cases/autonomous_agents/autogpt.ipynb new file mode 100644 index 00000000..9dab8e69 --- /dev/null +++ b/docs/use_cases/autonomous_agents/autogpt.ipynb @@ -0,0 +1,454 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "14f8b67b", + "metadata": {}, + "source": [ + "# AutoGPT\n", + "\n", + "Implementation of https://github.com/Significant-Gravitas/Auto-GPT but with LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)" + ] + }, + { + "cell_type": "markdown", + "id": "192496a7", + "metadata": {}, + "source": [ + "## Set up tools\n", + "\n", + "We'll set up an AutoGPT agent with a search tool, a write-file tool, and a read-file tool" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7c2c9b54", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.utilities import SerpAPIWrapper\n", + "from langchain.agents import Tool\n", + "from langchain.tools.file_management.write import WriteFileTool\n", + "from langchain.tools.file_management.read import ReadFileTool\n", + "\n", + "search = SerpAPIWrapper()\n", + "tools = [\n", + " Tool(\n", + " name=\"search\",\n", + " func=search.run,\n", + " description=\"useful for when you need to answer questions about current events. You should ask targeted questions\"\n", + " ),\n", + " WriteFileTool(),\n", + " ReadFileTool(),\n", + "]" + ] + }, + { + "cell_type": "markdown", + "id": "8e39ee28", + "metadata": {}, + "source": [ + "## Set up memory\n", + "\n", + "The memory here is used for the agent's intermediate steps" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "72bc204d", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.vectorstores import FAISS\n", + "from langchain.docstore import InMemoryDocstore\n", + "from langchain.embeddings import OpenAIEmbeddings" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "1df7b724", + "metadata": {}, + "outputs": [], + "source": [ + "# Define your embedding model\n", + "embeddings_model = OpenAIEmbeddings()\n", + "# Initialize the vectorstore as empty\n", + "import faiss\n", + "\n", + "embedding_size = 1536\n", + "index = faiss.IndexFlatL2(embedding_size)\n", + "vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})" + ] + }, + { + "cell_type": "markdown", + "id": "e40fd657", + "metadata": {}, + "source": [ + "## Set up the model and AutoGPT\n", + "\n", + "Initialize everything!
We will use the ChatOpenAI model" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "3393bc23", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT\n", + "from langchain.chat_models import ChatOpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "709c08c2", + "metadata": {}, + "outputs": [], + "source": [ + "agent = AutoGPT.from_llm_and_tools(\n", + " ai_name=\"Tom\",\n", + " ai_role=\"Assistant\",\n", + " tools=tools,\n", + " llm=ChatOpenAI(temperature=0),\n", + " memory=vectorstore.as_retriever()\n", + ")\n", + "# Set verbose to be true\n", + "agent.chain.verbose = True" + ] + }, + { + "cell_type": "markdown", + "id": "fc9b51ba", + "metadata": {}, + "source": [ + "## Run an example\n", + "\n", + "Here we will make it write a weather report for SF" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c032b182", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mSystem: You are Tom, Assistant\n", + "Your decisions must always be made independently \n", + " without seeking user assistance. Play to your strengths \n", + " as an LLM and pursue simple strategies with no legal complications. \n", + " If you have completed all your tasks, \n", + " make sure to use the \"finish\" command.\n", + "\n", + "GOALS:\n", + "\n", + "1. write a weather report for SF today\n", + "\n", + "\n", + "Constraints:\n", + "1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n", + "2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n", + "3. No user assistance\n", + "4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n", + "\n", + "Commands:\n", + "1. search: useful for when you need to answer questions about current events. You should ask targeted questions, args json schema: {\"query\": {\"title\": \"Query\", \"type\": \"string\"}}\n", + "2. write_file: Write file to disk, args json schema: {\"file_path\": {\"title\": \"File Path\", \"description\": \"name of file\", \"type\": \"string\"}, \"text\": {\"title\": \"Text\", \"description\": \"text to write to file\", \"type\": \"string\"}}\n", + "3. read_file: Read file from disk, args json schema: {\"file_path\": {\"title\": \"File Path\", \"description\": \"name of file\", \"type\": \"string\"}}\n", + "4. finish: use this to signal that you have finished all your objectives, args: \"response\": \"final response to let people know you have finished your objectives\"\n", + "\n", + "Resources:\n", + "1. Internet access for searches and information gathering.\n", + "2. Long Term memory management.\n", + "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", + "4. File output.\n", + "\n", + "Performance Evaluation:\n", + "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", + "2. Constructively self-criticize your big-picture behavior constantly.\n", + "3. Reflect on past decisions and strategies to refine your approach.\n", + "4. Every command has a cost, so be smart and efficient.
Aim to complete tasks in the least number of steps.\n", + "\n", + "You should only respond in JSON format as described below \n", + "Response Format: \n", + "{\n", + " \"thoughts\": {\n", + " \"text\": \"thought\",\n", + " \"reasoning\": \"reasoning\",\n", + " \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n", + " \"criticism\": \"constructive self-criticism\",\n", + " \"speak\": \"thoughts summary to say to user\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"command name\",\n", + " \"args\": {\n", + " \"arg name\": \"value\"\n", + " }\n", + " }\n", + "} \n", + "Ensure the response can be parsed by Python json.loads\n", + "System: The current time and date is Tue Apr 18 21:31:28 2023\n", + "System: This reminds you of these events from your past:\n", + "[]\n", + "\n", + "\n", + "Human: Determine which next command to use, and respond using the format specified above:\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "{\n", + " \"thoughts\": {\n", + " \"text\": \"I will start by writing a weather report for San Francisco today. I will use the 'search' command to find the current weather conditions.\",\n", + " \"reasoning\": \"I need to gather information about the current weather conditions in San Francisco to write an accurate weather report.\",\n", + " \"plan\": \"- Use the 'search' command to find the current weather conditions in San Francisco\\n- Write a weather report based on the information gathered\",\n", + " \"criticism\": \"I need to make sure that the information I gather is accurate and up-to-date.\",\n", + " \"speak\": \"I will use the 'search' command to find the current weather conditions in San Francisco.\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"search\",\n", + " \"args\": {\n", + " \"query\": \"what is the current weather in san francisco\"\n", + " }\n", + " }\n", + "}\n", + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mSystem: You are Tom, Assistant\n", + "Your decisions must always be made independently \n", + " without seeking user assistance. Play to your strengths \n", + " as an LLM and pursue simple strategies with no legal complications. \n", + " If you have completed all your tasks, \n", + " make sure to use the \"finish\" command.\n", + "\n", + "GOALS:\n", + "\n", + "1. write a weather report for SF today\n", + "\n", + "\n", + "Constraints:\n", + "1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n", + "2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n", + "3. No user assistance\n", + "4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n", + "\n", + "Commands:\n", + "1. search: useful for when you need to answer questions about current events. You should ask targeted questions, args json schema: {\"query\": {\"title\": \"Query\", \"type\": \"string\"}}\n", + "2. write_file: Write file to disk, args json schema: {\"file_path\": {\"title\": \"File Path\", \"description\": \"name of file\", \"type\": \"string\"}, \"text\": {\"title\": \"Text\", \"description\": \"text to write to file\", \"type\": \"string\"}}\n", + "3. read_file: Read file from disk, args json schema: {\"file_path\": {\"title\": \"File Path\", \"description\": \"name of file\", \"type\": \"string\"}}\n", + "4. 
finish: use this to signal that you have finished all your objectives, args: \"response\": \"final response to let people know you have finished your objectives\"\n", + "\n", + "Resources:\n", + "1. Internet access for searches and information gathering.\n", + "2. Long Term memory management.\n", + "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", + "4. File output.\n", + "\n", + "Performance Evaluation:\n", + "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", + "2. Constructively self-criticize your big-picture behavior constantly.\n", + "3. Reflect on past decisions and strategies to refine your approach.\n", + "4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n", + "\n", + "You should only respond in JSON format as described below \n", + "Response Format: \n", + "{\n", + " \"thoughts\": {\n", + " \"text\": \"thought\",\n", + " \"reasoning\": \"reasoning\",\n", + " \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n", + " \"criticism\": \"constructive self-criticism\",\n", + " \"speak\": \"thoughts summary to say to user\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"command name\",\n", + " \"args\": {\n", + " \"arg name\": \"value\"\n", + " }\n", + " }\n", + "} \n", + "Ensure the response can be parsed by Python json.loads\n", + "System: The current time and date is Tue Apr 18 21:31:39 2023\n", + "System: This reminds you of these events from your past:\n", + "['Assistant Reply: {\\n \"thoughts\": {\\n \"text\": \"I will start by writing a weather report for San Francisco today. I will use the \\'search\\' command to find the current weather conditions.\",\\n \"reasoning\": \"I need to gather information about the current weather conditions in San Francisco to write an accurate weather report.\",\\n \"plan\": \"- Use the \\'search\\' command to find the current weather conditions in San Francisco\\\\n- Write a weather report based on the information gathered\",\\n \"criticism\": \"I need to make sure that the information I gather is accurate and up-to-date.\",\\n \"speak\": \"I will use the \\'search\\' command to find the current weather conditions in San Francisco.\"\\n },\\n \"command\": {\\n \"name\": \"search\",\\n \"args\": {\\n \"query\": \"what is the current weather in san francisco\"\\n }\\n }\\n} \\nResult: Command search returned: Current Weather ; 54°F · Sunny ; RealFeel® 66°. Pleasant. RealFeel Guide. Pleasant. 63° to 81°. Most consider this temperature range ideal. LEARN MORE. RealFeel ... ']\n", + "\n", + "\n", + "Human: Determine which next command to use, and respond using the format specified above:\n", + "AI: {\n", + " \"thoughts\": {\n", + " \"text\": \"I will start by writing a weather report for San Francisco today. 
I will use the 'search' command to find the current weather conditions.\",\n", + " \"reasoning\": \"I need to gather information about the current weather conditions in San Francisco to write an accurate weather report.\",\n", + " \"plan\": \"- Use the 'search' command to find the current weather conditions in San Francisco\\n- Write a weather report based on the information gathered\",\n", + " \"criticism\": \"I need to make sure that the information I gather is accurate and up-to-date.\",\n", + " \"speak\": \"I will use the 'search' command to find the current weather conditions in San Francisco.\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"search\",\n", + " \"args\": {\n", + " \"query\": \"what is the current weather in san francisco\"\n", + " }\n", + " }\n", + "}\n", + "System: Command search returned: Current Weather ; 54°F · Sunny ; RealFeel® 66°. Pleasant. RealFeel Guide. Pleasant. 63° to 81°. Most consider this temperature range ideal. LEARN MORE. RealFeel ...\n", + "Human: Determine which next command to use, and respond using the format specified above:\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "{\n", + " \"thoughts\": {\n", + " \"text\": \"I have found that the current weather in San Francisco is sunny with a temperature of 54°F. I will now write a weather report for San Francisco today using the 'write_file' command.\",\n", + " \"reasoning\": \"I need to write a weather report for San Francisco today based on the information I gathered from the 'search' command.\",\n", + " \"plan\": \"- Use the 'write_file' command to write a weather report for San Francisco today based on the information gathered\",\n", + " \"criticism\": \"I need to make sure that the weather report is accurate and informative.\",\n", + " \"speak\": \"I will use the 'write_file' command to write a weather report for San Francisco today.\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"write_file\",\n", + " \"args\": {\n", + " \"file_path\": \"weather_report.txt\",\n", + " \"text\": \"Weather Report for San Francisco Today:\\n\\nThe current weather in San Francisco is sunny with a temperature of 54°F. It is expected to remain sunny throughout the day with a high of 62°F and a low of 50°F. There is no chance of precipitation today. It is recommended to wear light clothing and sunscreen if spending time outdoors.\\n\\nStay safe and enjoy the beautiful weather!\"\n", + " }\n", + " }\n", + "}\n", + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mSystem: You are Tom, Assistant\n", + "Your decisions must always be made independently \n", + " without seeking user assistance. Play to your strengths \n", + " as an LLM and pursue simple strategies with no legal complications. \n", + " If you have completed all your tasks, \n", + " make sure to use the \"finish\" command.\n", + "\n", + "GOALS:\n", + "\n", + "1. write a weather report for SF today\n", + "\n", + "\n", + "Constraints:\n", + "1. ~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.\n", + "2. If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.\n", + "3. No user assistance\n", + "4. Exclusively use the commands listed in double quotes e.g. \"command name\"\n", + "\n", + "Commands:\n", + "1. 
search: useful for when you need to answer questions about current events. You should ask targeted questions, args json schema: {\"query\": {\"title\": \"Query\", \"type\": \"string\"}}\n", + "2. write_file: Write file to disk, args json schema: {\"file_path\": {\"title\": \"File Path\", \"description\": \"name of file\", \"type\": \"string\"}, \"text\": {\"title\": \"Text\", \"description\": \"text to write to file\", \"type\": \"string\"}}\n", + "3. read_file: Read file from disk, args json schema: {\"file_path\": {\"title\": \"File Path\", \"description\": \"name of file\", \"type\": \"string\"}}\n", + "4. finish: use this to signal that you have finished all your objectives, args: \"response\": \"final response to let people know you have finished your objectives\"\n", + "\n", + "Resources:\n", + "1. Internet access for searches and information gathering.\n", + "2. Long Term memory management.\n", + "3. GPT-3.5 powered Agents for delegation of simple tasks.\n", + "4. File output.\n", + "\n", + "Performance Evaluation:\n", + "1. Continuously review and analyze your actions to ensure you are performing to the best of your abilities.\n", + "2. Constructively self-criticize your big-picture behavior constantly.\n", + "3. Reflect on past decisions and strategies to refine your approach.\n", + "4. Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.\n", + "\n", + "You should only respond in JSON format as described below \n", + "Response Format: \n", + "{\n", + " \"thoughts\": {\n", + " \"text\": \"thought\",\n", + " \"reasoning\": \"reasoning\",\n", + " \"plan\": \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n", + " \"criticism\": \"constructive self-criticism\",\n", + " \"speak\": \"thoughts summary to say to user\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"command name\",\n", + " \"args\": {\n", + " \"arg name\": \"value\"\n", + " }\n", + " }\n", + "} \n", + "Ensure the response can be parsed by Python json.loads\n", + "System: The current time and date is Tue Apr 18 21:31:55 2023\n", + "System: This reminds you of these events from your past:\n", + "['Assistant Reply: {\\n \"thoughts\": {\\n \"text\": \"I have found that the current weather in San Francisco is sunny with a temperature of 54°F. I will now write a weather report for San Francisco today using the \\'write_file\\' command.\",\\n \"reasoning\": \"I need to write a weather report for San Francisco today based on the information I gathered from the \\'search\\' command.\",\\n \"plan\": \"- Use the \\'write_file\\' command to write a weather report for San Francisco today based on the information gathered\",\\n \"criticism\": \"I need to make sure that the weather report is accurate and informative.\",\\n \"speak\": \"I will use the \\'write_file\\' command to write a weather report for San Francisco today.\"\\n },\\n \"command\": {\\n \"name\": \"write_file\",\\n \"args\": {\\n \"file_path\": \"weather_report.txt\",\\n \"text\": \"Weather Report for San Francisco Today:\\\\n\\\\nThe current weather in San Francisco is sunny with a temperature of 54°F. It is expected to remain sunny throughout the day with a high of 62°F and a low of 50°F. There is no chance of precipitation today. It is recommended to wear light clothing and sunscreen if spending time outdoors.\\\\n\\\\nStay safe and enjoy the beautiful weather!\"\\n }\\n }\\n} \\nResult: Command write_file returned: File written to successfully. 
', 'Assistant Reply: {\\n \"thoughts\": {\\n \"text\": \"I will start by writing a weather report for San Francisco today. I will use the \\'search\\' command to find the current weather conditions.\",\\n \"reasoning\": \"I need to gather information about the current weather conditions in San Francisco to write an accurate weather report.\",\\n \"plan\": \"- Use the \\'search\\' command to find the current weather conditions in San Francisco\\\\n- Write a weather report based on the information gathered\",\\n \"criticism\": \"I need to make sure that the information I gather is accurate and up-to-date.\",\\n \"speak\": \"I will use the \\'search\\' command to find the current weather conditions in San Francisco.\"\\n },\\n \"command\": {\\n \"name\": \"search\",\\n \"args\": {\\n \"query\": \"what is the current weather in san francisco\"\\n }\\n }\\n} \\nResult: Command search returned: Current Weather ; 54°F · Sunny ; RealFeel® 66°. Pleasant. RealFeel Guide. Pleasant. 63° to 81°. Most consider this temperature range ideal. LEARN MORE. RealFeel ... ']\n", + "\n", + "\n", + "System: Command write_file returned: File written to successfully.\n", + "Human: Determine which next command to use, and respond using the format specified above:\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "{\n", + " \"thoughts\": {\n", + " \"text\": \"I have completed my task of writing a weather report for San Francisco today. I will now use the \\'finish\\' command to signal that I have finished all my objectives.\",\n", + " \"reasoning\": \"I have completed all my objectives and there are no further tasks to be completed.\",\n", + " \"plan\": \"- Use the \\'finish\\' command to signal that I have completed all my objectives.\",\n", + " \"criticism\": \"I need to make sure that I have completed all my objectives before using the \\'finish\\' command.\",\n", + " \"speak\": \"I have completed my task of writing a weather report for San Francisco today. I will now use the \\'finish\\' command to signal that I have finished all my objectives.\"\n", + " },\n", + " \"command\": {\n", + " \"name\": \"finish\",\n", + " \"args\": {\n", + " \"response\": \"I have completed all my objectives.\"\n", + " }\n", + " }\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "'I have completed all my objectives.'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent.run([\"write a weather report for SF today\"])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/use_cases/autonomous_agents/baby_agi.ipynb b/docs/use_cases/autonomous_agents/baby_agi.ipynb new file mode 100644 index 00000000..5e4bff5f --- /dev/null +++ b/docs/use_cases/autonomous_agents/baby_agi.ipynb @@ -0,0 +1,257 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "517a9fd4", + "metadata": {}, + "source": [ + "# BabyAGI User Guide\n", + "\n", + "This notebook demonstrates how to implement [BabyAGI](https://github.com/yoheinakajima/babyagi/tree/main) by [Yohei Nakajima](https://twitter.com/yoheinakajima). 
BabyAGI is an AI agent that can generate and pretend to execute tasks based on a given objective.\n", + "\n", + "This guide will help you understand the components to create your own recursive agents.\n", + "\n", + "Although BabyAGI uses specific vectorstores/model providers (Pinecone, OpenAI), one of the benefits of implementing it with LangChain is that you can easily swap those out for different options. In this implementation we use a FAISS vectorstore (because it runs locally and is free)." + ] + }, + { + "cell_type": "markdown", + "id": "556af556", + "metadata": {}, + "source": [ + "## Install and Import Required Modules" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "c8a354b6", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from collections import deque\n", + "from typing import Dict, List, Optional, Any\n", + "\n", + "from langchain import LLMChain, OpenAI, PromptTemplate\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.llms import BaseLLM\n", + "from langchain.vectorstores.base import VectorStore\n", + "from pydantic import BaseModel, Field\n", + "from langchain.chains.base import Chain\n", + "from langchain.experimental import BabyAGI" + ] + }, + { + "cell_type": "markdown", + "id": "09f70772", + "metadata": {}, + "source": [ + "## Connect to the Vector Store\n", + "\n", + "Depending on what vectorstore you use, this step may look different." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "794045d4", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.vectorstores import FAISS\n", + "from langchain.docstore import InMemoryDocstore" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "6e0305eb", + "metadata": {}, + "outputs": [], + "source": [ + "# Define your embedding model\n", + "embeddings_model = OpenAIEmbeddings()\n", + "# Initialize the vectorstore as empty\n", + "import faiss\n", + "\n", + "embedding_size = 1536\n", + "index = faiss.IndexFlatL2(embedding_size)\n", + "vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})" + ] + }, + { + "cell_type": "markdown", + "id": "05ba762e", + "metadata": {}, + "source": [ + "### Run the BabyAGI\n", + "\n", + "Now it's time to create the BabyAGI controller and watch it try to accomplish your objective." 
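Before running it, a conceptual sketch of the control flow may help; this is not the actual implementation, but it follows the structure described in the companion [BabyAGI with Tools](baby_agi_with_agent.ipynb) notebook: three LLM chains cooperate over a task queue, executing the top task, creating follow-up tasks from the result, and re-prioritizing what remains:

```python
# Conceptual sketch only -- not the actual BabyAGI implementation.
# Three chains (execution, task creation, prioritization) cooperate
# over a deque of tasks until max_iterations is reached.
from collections import deque
from typing import Callable, Iterable, Optional

def babyagi_loop(
    objective: str,
    execute: Callable[[str, str], str],
    create_tasks: Callable[[str, str, str], Iterable[str]],
    prioritize: Callable[[str, Iterable[str]], Iterable[str]],
    max_iterations: Optional[int] = 3,
) -> None:
    tasks = deque(["Make a todo list"])  # the seed task seen in the output below
    iteration = 0
    while tasks and (max_iterations is None or iteration < max_iterations):
        task = tasks.popleft()
        result = execute(objective, task)                    # execution chain
        tasks.extend(create_tasks(objective, task, result))  # task-creation chain
        tasks = deque(prioritize(objective, tasks))          # prioritization chain
        iteration += 1
```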
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3d220b69", + "metadata": {}, + "outputs": [], + "source": [ + "OBJECTIVE = \"Write a weather report for SF today\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8a8e5543", + "metadata": {}, + "outputs": [], + "source": [ + "llm = OpenAI(temperature=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "3d69899b", + "metadata": {}, + "outputs": [], + "source": [ + "# Logging of LLMChains\n", + "verbose = False\n", + "# If None, will keep on going forever\n", + "max_iterations: Optional[int] = 3\n", + "baby_agi = BabyAGI.from_llm(\n", + " llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f7957b51", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[95m\u001b[1m\n", + "*****TASK LIST*****\n", + "\u001b[0m\u001b[0m\n", + "1: Make a todo list\n", + "\u001b[92m\u001b[1m\n", + "*****NEXT TASK*****\n", + "\u001b[0m\u001b[0m\n", + "1: Make a todo list\n", + "\u001b[93m\u001b[1m\n", + "*****TASK RESULT*****\n", + "\u001b[0m\u001b[0m\n", + "\n", + "\n", + "1. Check the weather forecast for San Francisco today\n", + "2. Make note of the temperature, humidity, wind speed, and other relevant weather conditions\n", + "3. Write a weather report summarizing the forecast\n", + "4. Check for any weather alerts or warnings\n", + "5. Share the report with the relevant stakeholders\n", + "\u001b[95m\u001b[1m\n", + "*****TASK LIST*****\n", + "\u001b[0m\u001b[0m\n", + "2: Check the current temperature in San Francisco\n", + "3: Check the current humidity in San Francisco\n", + "4: Check the current wind speed in San Francisco\n", + "5: Check for any weather alerts or warnings in San Francisco\n", + "6: Check the forecast for the next 24 hours in San Francisco\n", + "7: Check the forecast for the next 48 hours in San Francisco\n", + "8: Check the forecast for the next 72 hours in San Francisco\n", + "9: Check the forecast for the next week in San Francisco\n", + "10: Check the forecast for the next month in San Francisco\n", + "11: Check the forecast for the next 3 months in San Francisco\n", + "1: Write a weather report for SF today\n", + "\u001b[92m\u001b[1m\n", + "*****NEXT TASK*****\n", + "\u001b[0m\u001b[0m\n", + "2: Check the current temperature in San Francisco\n", + "\u001b[93m\u001b[1m\n", + "*****TASK RESULT*****\n", + "\u001b[0m\u001b[0m\n", + "\n", + "\n", + "I will check the current temperature in San Francisco. 
I will use an online weather service to get the most up-to-date information.\n", + "\u001b[95m\u001b[1m\n", + "*****TASK LIST*****\n", + "\u001b[0m\u001b[0m\n", + "3: Check the current UV index in San Francisco.\n", + "4: Check the current air quality in San Francisco.\n", + "5: Check the current precipitation levels in San Francisco.\n", + "6: Check the current cloud cover in San Francisco.\n", + "7: Check the current barometric pressure in San Francisco.\n", + "8: Check the current dew point in San Francisco.\n", + "9: Check the current wind direction in San Francisco.\n", + "10: Check the current humidity levels in San Francisco.\n", + "1: Check the current temperature in San Francisco to the average temperature for this time of year.\n", + "2: Check the current visibility in San Francisco.\n", + "11: Write a weather report for SF today.\n", + "\u001b[92m\u001b[1m\n", + "*****NEXT TASK*****\n", + "\u001b[0m\u001b[0m\n", + "3: Check the current UV index in San Francisco.\n", + "\u001b[93m\u001b[1m\n", + "*****TASK RESULT*****\n", + "\u001b[0m\u001b[0m\n", + "\n", + "\n", + "The current UV index in San Francisco is moderate. The UV index is expected to remain at moderate levels throughout the day. It is recommended to wear sunscreen and protective clothing when outdoors.\n", + "\u001b[91m\u001b[1m\n", + "*****TASK ENDING*****\n", + "\u001b[0m\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "{'objective': 'Write a weather report for SF today'}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "baby_agi({\"objective\": OBJECTIVE})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "898a210b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/use_cases/autonomous_agents/baby_agi_with_agent.ipynb b/docs/use_cases/autonomous_agents/baby_agi_with_agent.ipynb new file mode 100644 index 00000000..171d0a6d --- /dev/null +++ b/docs/use_cases/autonomous_agents/baby_agi_with_agent.ipynb @@ -0,0 +1,387 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "517a9fd4", + "metadata": {}, + "source": [ + "# BabyAGI with Tools\n", + "\n", + "This notebook builds on top of [baby agi](baby_agi.ipynb), but shows how you can swap out the execution chain. The previous execution chain was just an LLM which made stuff up. 
By swapping it out for an agent that has access to tools, we can hopefully get reliable, real-world information."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "556af556",
+ "metadata": {},
+ "source": [
+ "## Install and Import Required Modules"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "c8a354b6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from collections import deque\n",
+ "from typing import Dict, List, Optional, Any\n",
+ "\n",
+ "from langchain import LLMChain, OpenAI, PromptTemplate\n",
+ "from langchain.embeddings import OpenAIEmbeddings\n",
+ "from langchain.llms import BaseLLM\n",
+ "from langchain.vectorstores.base import VectorStore\n",
+ "from pydantic import BaseModel, Field\n",
+ "from langchain.chains.base import Chain\n",
+ "from langchain.experimental import BabyAGI"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "09f70772",
+ "metadata": {},
+ "source": [
+ "## Connect to the Vector Store\n",
+ "\n",
+ "Depending on what vectorstore you use, this step may look different."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "794045d4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Note: you may need to restart the kernel to use updated packages.\n",
+ "Note: you may need to restart the kernel to use updated packages.\n"
+ ]
+ }
+ ],
+ "source": [
+ "%pip install faiss-cpu > /dev/null\n",
+ "%pip install google-search-results > /dev/null\n",
+ "from langchain.vectorstores import FAISS\n",
+ "from langchain.docstore import InMemoryDocstore"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "6e0305eb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define your embedding model\n",
+ "embeddings_model = OpenAIEmbeddings()\n",
+ "# Initialize the vectorstore as empty\n",
+ "import faiss\n",
+ "\n",
+ "embedding_size = 1536\n",
+ "index = faiss.IndexFlatL2(embedding_size)\n",
+ "vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0f3b72bf",
+ "metadata": {},
+ "source": [
+ "## Define the Chains\n",
+ "\n",
+ "BabyAGI relies on three LLM chains:\n",
+ "- Task creation chain to select new tasks to add to the list\n",
+ "- Task prioritization chain to re-prioritize tasks\n",
+ "- Execution chain to execute the tasks\n",
+ "\n",
+ "\n",
+ "NOTE: in this notebook, the Execution chain will now be an agent."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "b43cd580",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.agents import ZeroShotAgent, Tool, AgentExecutor\n",
+ "from langchain import OpenAI, SerpAPIWrapper, LLMChain\n",
+ "\n",
+ "todo_prompt = PromptTemplate.from_template(\n",
+ " \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n",
+ ")\n",
+ "todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)\n",
+ "search = SerpAPIWrapper()\n",
+ "tools = [\n",
+ " Tool(\n",
+ " name=\"Search\",\n",
+ " func=search.run,\n",
+ " description=\"useful for when you need to answer questions about current events\",\n",
+ " ),\n",
+ " Tool(\n",
+ " name=\"TODO\",\n",
+ " func=todo_chain.run,\n",
+ " description=\"useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. 
Please be very clear what the objective is!\",\n", + " ),\n", + "]\n", + "\n", + "\n", + "prefix = \"\"\"You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\"\"\"\n", + "suffix = \"\"\"Question: {task}\n", + "{agent_scratchpad}\"\"\"\n", + "prompt = ZeroShotAgent.create_prompt(\n", + " tools,\n", + " prefix=prefix,\n", + " suffix=suffix,\n", + " input_variables=[\"objective\", \"task\", \"context\", \"agent_scratchpad\"],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "4b00ae2e", + "metadata": {}, + "outputs": [], + "source": [ + "llm = OpenAI(temperature=0)\n", + "llm_chain = LLMChain(llm=llm, prompt=prompt)\n", + "tool_names = [tool.name for tool in tools]\n", + "agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)\n", + "agent_executor = AgentExecutor.from_agent_and_tools(\n", + " agent=agent, tools=tools, verbose=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "05ba762e", + "metadata": {}, + "source": [ + "### Run the BabyAGI\n", + "\n", + "Now it's time to create the BabyAGI controller and watch it try to accomplish your objective." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "3d220b69", + "metadata": {}, + "outputs": [], + "source": [ + "OBJECTIVE = \"Write a weather report for SF today\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "3d69899b", + "metadata": {}, + "outputs": [], + "source": [ + "# Logging of LLMChains\n", + "verbose = False\n", + "# If None, will keep on going forever\n", + "max_iterations: Optional[int] = 3\n", + "baby_agi = BabyAGI.from_llm(\n", + " llm=llm, vectorstore=vectorstore, task_execution_chain=agent_executor, verbose=verbose, max_iterations=max_iterations\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "f7957b51", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[95m\u001b[1m\n", + "*****TASK LIST*****\n", + "\u001b[0m\u001b[0m\n", + "1: Make a todo list\n", + "\u001b[92m\u001b[1m\n", + "*****NEXT TASK*****\n", + "\u001b[0m\u001b[0m\n", + "1: Make a todo list\n", + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3mThought: I need to come up with a todo list\n", + "Action: TODO\n", + "Action Input: Write a weather report for SF today\u001b[0m\u001b[33;1m\u001b[1;3m\n", + "\n", + "1. Research current weather conditions in San Francisco\n", + "2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions\n", + "3. Analyze data to determine current weather trends\n", + "4. Write a brief introduction to the weather report\n", + "5. Describe current weather conditions in San Francisco\n", + "6. Discuss any upcoming weather changes\n", + "7. Summarize the weather report\n", + "8. Proofread and edit the report\n", + "9. Submit the report\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n", + "Final Answer: The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. 
Proofread and edit the report; 9. Submit the report.\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "\u001b[93m\u001b[1m\n", + "*****TASK RESULT*****\n", + "\u001b[0m\u001b[0m\n", + "The todo list for writing a weather report for SF today is: 1. Research current weather conditions in San Francisco; 2. Gather data on temperature, humidity, wind speed, and other relevant weather conditions; 3. Analyze data to determine current weather trends; 4. Write a brief introduction to the weather report; 5. Describe current weather conditions in San Francisco; 6. Discuss any upcoming weather changes; 7. Summarize the weather report; 8. Proofread and edit the report; 9. Submit the report.\n", + "\u001b[95m\u001b[1m\n", + "*****TASK LIST*****\n", + "\u001b[0m\u001b[0m\n", + "2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n", + "3: Analyze data to determine any upcoming weather changes;\n", + "4: Research current weather forecasts for San Francisco;\n", + "5: Create a visual representation of the weather report;\n", + "6: Include relevant images and graphics in the report;\n", + "7: Format the report for readability;\n", + "8: Publish the report online;\n", + "9: Monitor the report for accuracy.\n", + "\u001b[92m\u001b[1m\n", + "*****NEXT TASK*****\n", + "\u001b[0m\u001b[0m\n", + "2: Gather data on precipitation, cloud cover, and other relevant weather conditions;\n", + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3mThought: I need to search for current weather conditions in San Francisco\n", + "Action: Search\n", + "Action Input: Current weather conditions in San Francisco\u001b[0m\u001b[36;1m\u001b[1;3mCurrent Weather for Popular Cities ; San Francisco, CA 46 · Partly Cloudy ; Manhattan, NY warning 52 · Cloudy ; Schiller Park, IL (60176) 40 · Sunny ; Boston, MA 54 ...\u001b[0m\u001b[32;1m\u001b[1;3m I need to compile the data into a weather report\n", + "Action: TODO\n", + "Action Input: Compile data into a weather report\u001b[0m\u001b[33;1m\u001b[1;3m\n", + "\n", + "1. Gather data from reliable sources such as the National Weather Service, local weather stations, and other meteorological organizations.\n", + "\n", + "2. Analyze the data to identify trends and patterns.\n", + "\n", + "3. Create a chart or graph to visualize the data.\n", + "\n", + "4. Write a summary of the data and its implications.\n", + "\n", + "5. Compile the data into a report format.\n", + "\n", + "6. Proofread the report for accuracy and clarity.\n", + "\n", + "7. Publish the report to a website or other platform.\n", + "\n", + "8. Distribute the report to relevant stakeholders.\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n", + "Final Answer: Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. The forecast for the rest of the day is expected to remain partly cloudy.\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "\u001b[93m\u001b[1m\n", + "*****TASK RESULT*****\n", + "\u001b[0m\u001b[0m\n", + "Today in San Francisco, the temperature is 46 degrees Fahrenheit with partly cloudy skies. 
The forecast for the rest of the day is expected to remain partly cloudy.\n", + "\u001b[95m\u001b[1m\n", + "*****TASK LIST*****\n", + "\u001b[0m\u001b[0m\n", + "3: Format the report for readability;\n", + "4: Include relevant images and graphics in the report;\n", + "5: Compare the current weather conditions in San Francisco to the forecasted conditions;\n", + "6: Identify any potential weather-related hazards in the area;\n", + "7: Research historical weather patterns in San Francisco;\n", + "8: Identify any potential trends in the weather data;\n", + "9: Include relevant data sources in the report;\n", + "10: Summarize the weather report in a concise manner;\n", + "11: Include a summary of the forecasted weather conditions;\n", + "12: Include a summary of the current weather conditions;\n", + "13: Include a summary of the historical weather patterns;\n", + "14: Include a summary of the potential weather-related hazards;\n", + "15: Include a summary of the potential trends in the weather data;\n", + "16: Include a summary of the data sources used in the report;\n", + "17: Analyze data to determine any upcoming weather changes;\n", + "18: Research current weather forecasts for San Francisco;\n", + "19: Create a visual representation of the weather report;\n", + "20: Publish the report online;\n", + "21: Monitor the report for accuracy\n", + "\u001b[92m\u001b[1m\n", + "*****NEXT TASK*****\n", + "\u001b[0m\u001b[0m\n", + "3: Format the report for readability;\n", + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3mThought: I need to make sure the report is easy to read;\n", + "Action: TODO\n", + "Action Input: Make the report easy to read\u001b[0m\u001b[33;1m\u001b[1;3m\n", + "\n", + "1. Break up the report into sections with clear headings\n", + "2. Use bullet points and numbered lists to organize information\n", + "3. Use short, concise sentences\n", + "4. Use simple language and avoid jargon\n", + "5. Include visuals such as charts, graphs, and diagrams to illustrate points\n", + "6. Use bold and italicized text to emphasize key points\n", + "7. Include a table of contents and page numbers\n", + "8. Use a consistent font and font size throughout the report\n", + "9. Include a summary at the end of the report\n", + "10. 
Proofread the report for typos and errors\u001b[0m\u001b[32;1m\u001b[1;3m I now know the final answer\n", + "Final Answer: The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "\u001b[93m\u001b[1m\n", + "*****TASK RESULT*****\n", + "\u001b[0m\u001b[0m\n", + "The report should be formatted for readability by breaking it up into sections with clear headings, using bullet points and numbered lists to organize information, using short, concise sentences, using simple language and avoiding jargon, including visuals such as charts, graphs, and diagrams to illustrate points, using bold and italicized text to emphasize key points, including a table of contents and page numbers, using a consistent font and font size throughout the report, including a summary at the end of the report, and proofreading the report for typos and errors.\n", + "\u001b[91m\u001b[1m\n", + "*****TASK ENDING*****\n", + "\u001b[0m\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "{'objective': 'Write a weather report for SF today'}" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "baby_agi({\"objective\": OBJECTIVE})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "898a210b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/use_cases/personal_assistants.md b/docs/use_cases/personal_assistants.md index 0e5135fb..ebf17072 100644 --- a/docs/use_cases/personal_assistants.md +++ b/docs/use_cases/personal_assistants.md @@ -19,8 +19,4 @@ Highlighting specific parts: Specific examples of this include: -- [Baby AGI](agents/baby_agi.ipynb): a notebook implementing [BabyAGI](https://github.com/yoheinakajima/babyagi) by Yohei Nakajima as LLM Chains -- [Baby AGI with Tools](agents/baby_agi_with_agent.ipynb): building off the above notebook, this example substitutes in an agent with tools as the execution tools, allowing it to actually take actions. -- [CAMEL](agents/camel_role_playing.ipynb): an implementation of the CAMEL (Communicative Agents for “Mind” Exploration of Large Scale Language Model Society) paper, where two agents communicate with eachother. - [AI Plugins](agents/custom_agent_with_plugin_retrieval.ipynb): an implementation of an agent that is designed to be able to use all AI Plugins. -- [Generative Agents](agents/characters.ipynb): This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al. 
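The next hunk gives `Tool` an `args` property that exposes the tool's expected arguments as a JSON-schema `properties` dict, inferred from the wrapped function's signature when no explicit `args_schema` is provided. A minimal sketch of the intended behavior, assuming the patched `langchain.agents.Tool`; the `multiply` helper is a hypothetical example, not part of the library:

```python
from langchain.agents import Tool


def multiply(a: int, b: int) -> str:
    """Multiply two integers and return the product as a string."""
    return str(a * b)


# `multiply` is a toy function used only to illustrate schema inference.
tool = Tool(name="Multiply", func=multiply, description="Multiply two integers.")

# With no args_schema set, `args` is derived from multiply's signature via
# pydantic's validate_arguments, keyed by the function's parameter names.
print(tool.args)
# expected shape: {'a': {'title': 'A', 'type': 'integer'},
#                  'b': {'title': 'B', 'type': 'integer'}}
```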
diff --git a/langchain/agents/tools.py b/langchain/agents/tools.py index fa40bce8..c5aa094e 100644 --- a/langchain/agents/tools.py +++ b/langchain/agents/tools.py @@ -16,6 +16,16 @@ class Tool(BaseTool): coroutine: Optional[Callable[..., Awaitable[str]]] = None """The asynchronous version of the function.""" + @property + def args(self) -> dict: + if self.args_schema is not None: + return self.args_schema.schema()["properties"] + else: + inferred_model = validate_arguments(self.func).model # type: ignore + schema = inferred_model.schema()["properties"] + valid_keys = signature(self.func).parameters + return {k: schema[k] for k in valid_keys} + def _run(self, *args: Any, **kwargs: Any) -> str: """Use the tool.""" return self.func(*args, **kwargs) diff --git a/langchain/experimental/__init__.py b/langchain/experimental/__init__.py new file mode 100644 index 00000000..653100de --- /dev/null +++ b/langchain/experimental/__init__.py @@ -0,0 +1,3 @@ +from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI + +__all__ = ["BabyAGI"] diff --git a/langchain/experimental/autonomous_agents/__init__.py b/langchain/experimental/autonomous_agents/__init__.py new file mode 100644 index 00000000..653100de --- /dev/null +++ b/langchain/experimental/autonomous_agents/__init__.py @@ -0,0 +1,3 @@ +from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI + +__all__ = ["BabyAGI"] diff --git a/langchain/experimental/autonomous_agents/autogpt/__init__.py b/langchain/experimental/autonomous_agents/autogpt/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/langchain/experimental/autonomous_agents/autogpt/agent.py b/langchain/experimental/autonomous_agents/autogpt/agent.py new file mode 100644 index 00000000..ef0b71be --- /dev/null +++ b/langchain/experimental/autonomous_agents/autogpt/agent.py @@ -0,0 +1,135 @@ +from __future__ import annotations + +from typing import List, Optional + +from pydantic import ValidationError + +from langchain.chains.llm import LLMChain +from langchain.chat_models.base import BaseChatModel +from langchain.experimental.autonomous_agents.autogpt.output_parser import ( + AutoGPTOutputParser, + BaseAutoGPTOutputParser, +) +from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt +from langchain.experimental.autonomous_agents.autogpt.prompt_generator import ( + FINISH_NAME, +) +from langchain.schema import ( + AIMessage, + BaseMessage, + Document, + HumanMessage, + SystemMessage, +) +from langchain.tools.base import BaseTool +from langchain.tools.human.tool import HumanInputRun +from langchain.vectorstores.base import VectorStoreRetriever + + +class AutoGPT: + """Agent class for interacting with Auto-GPT.""" + + def __init__( + self, + ai_name: str, + memory: VectorStoreRetriever, + chain: LLMChain, + output_parser: BaseAutoGPTOutputParser, + tools: List[BaseTool], + feedback_tool: Optional[HumanInputRun] = None, + ): + self.ai_name = ai_name + self.memory = memory + self.full_message_history: List[BaseMessage] = [] + self.next_action_count = 0 + self.chain = chain + self.output_parser = output_parser + self.tools = tools + self.feedback_tool = feedback_tool + + @classmethod + def from_llm_and_tools( + cls, + ai_name: str, + ai_role: str, + memory: VectorStoreRetriever, + tools: List[BaseTool], + llm: BaseChatModel, + human_in_the_loop: bool = False, + output_parser: Optional[BaseAutoGPTOutputParser] = None, + ) -> AutoGPT: + prompt = AutoGPTPrompt( + ai_name=ai_name, + ai_role=ai_role, + tools=tools, + 
input_variables=["memory", "messages", "goals", "user_input"], + token_counter=llm.get_num_tokens, + ) + human_feedback_tool = HumanInputRun() if human_in_the_loop else None + chain = LLMChain(llm=llm, prompt=prompt) + return cls( + ai_name, + memory, + chain, + output_parser or AutoGPTOutputParser(), + tools, + feedback_tool=human_feedback_tool, + ) + + def run(self, goals: List[str]) -> str: + user_input = ( + "Determine which next command to use, " + "and respond using the format specified above:" + ) + # Interaction Loop + loop_count = 0 + while True: + # Discontinue if continuous limit is reached + loop_count += 1 + + # Send message to AI, get response + assistant_reply = self.chain.run( + goals=goals, + messages=self.full_message_history, + memory=self.memory, + user_input=user_input, + ) + + # Print Assistant thoughts + print(assistant_reply) + self.full_message_history.append(HumanMessage(content=user_input)) + self.full_message_history.append(AIMessage(content=assistant_reply)) + + # Get command name and arguments + action = self.output_parser.parse(assistant_reply) + tools = {t.name: t for t in self.tools} + if action.name == FINISH_NAME: + return action.args["response"] + if action.name in tools: + tool = tools[action.name] + try: + observation = tool.run(action.args) + except ValidationError as e: + observation = f"Error in args: {str(e)}" + result = f"Command {tool.name} returned: {observation}" + elif action.name == "ERROR": + result = f"Error: {action.args}. " + else: + result = ( + f"Unknown command '{action.name}'. " + f"Please refer to the 'COMMANDS' list for available " + f"commands and only respond in the specified JSON format." + ) + + memory_to_add = ( + f"Assistant Reply: {assistant_reply} " f"\nResult: {result} " + ) + if self.feedback_tool is not None: + feedback = f"\n{self.feedback_tool.run('Input: ')}" + if feedback in {"q", "stop"}: + print("EXITING") + return "EXITING" + memory_to_add += feedback + + self.memory.add_documents([Document(page_content=memory_to_add)]) + self.full_message_history.append(SystemMessage(content=result)) diff --git a/langchain/experimental/autonomous_agents/autogpt/memory.py b/langchain/experimental/autonomous_agents/autogpt/memory.py new file mode 100644 index 00000000..5c48674a --- /dev/null +++ b/langchain/experimental/autonomous_agents/autogpt/memory.py @@ -0,0 +1,30 @@ +from typing import Any, Dict, List + +from pydantic import Field + +from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key +from langchain.vectorstores.base import VectorStoreRetriever + + +class AutoGPTMemory(BaseChatMemory): + retriever: VectorStoreRetriever = Field(exclude=True) + """VectorStoreRetriever object to connect to.""" + + @property + def memory_variables(self) -> List[str]: + return ["chat_history", "relevant_context"] + + def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: + """Get the input key for the prompt.""" + if self.input_key is None: + return get_prompt_input_key(inputs, self.memory_variables) + return self.input_key + + def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + input_key = self._get_prompt_input_key(inputs) + query = inputs[input_key] + docs = self.retriever.get_relevant_documents(query) + return { + "chat_history": self.chat_memory.messages[-10:], + "relevant_context": docs, + } diff --git a/langchain/experimental/autonomous_agents/autogpt/output_parser.py b/langchain/experimental/autonomous_agents/autogpt/output_parser.py new file mode 100644 index 00000000..d2b98c39 
--- /dev/null
+++ b/langchain/experimental/autonomous_agents/autogpt/output_parser.py
@@ -0,0 +1,50 @@
+import json
+import re
+from abc import abstractmethod
+from typing import Dict, NamedTuple
+
+from langchain.schema import BaseOutputParser
+
+
+class AutoGPTAction(NamedTuple):
+ name: str
+ args: Dict
+
+
+class BaseAutoGPTOutputParser(BaseOutputParser):
+ @abstractmethod
+ def parse(self, text: str) -> AutoGPTAction:
+ """Return AutoGPTAction"""
+
+
+def preprocess_json_input(input_str: str) -> str:
+ # Replace single backslashes with double backslashes,
+ # while leaving already escaped ones intact
+ corrected_str = re.sub(
+ r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str
+ )
+ return corrected_str
+
+
+class AutoGPTOutputParser(BaseAutoGPTOutputParser):
+ def parse(self, text: str) -> AutoGPTAction:
+ try:
+ parsed = json.loads(text, strict=False)
+ except json.JSONDecodeError:
+ preprocessed_text = preprocess_json_input(text)
+ try:
+ parsed = json.loads(preprocessed_text, strict=False)
+ except Exception:
+ return AutoGPTAction(
+ name="ERROR",
+ args={"error": f"Could not parse invalid json: {text}"},
+ )
+ try:
+ return AutoGPTAction(
+ name=parsed["command"]["name"],
+ args=parsed["command"]["args"],
+ )
+ except KeyError:
+ return AutoGPTAction(
+ name="ERROR", args={"error": f"Incomplete command args: {parsed}"}
+ )
diff --git a/langchain/experimental/autonomous_agents/autogpt/prompt.py b/langchain/experimental/autonomous_agents/autogpt/prompt.py
new file mode 100644
index 00000000..a9a79e3e
--- /dev/null
+++ b/langchain/experimental/autonomous_agents/autogpt/prompt.py
@@ -0,0 +1,75 @@
+import time
+from typing import Any, Callable, List
+
+from pydantic import BaseModel
+
+from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
+from langchain.prompts.chat import (
+ BaseChatPromptTemplate,
+)
+from langchain.schema import BaseMessage, HumanMessage, SystemMessage
+from langchain.tools.base import BaseTool
+from langchain.vectorstores.base import VectorStoreRetriever
+
+
+class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):
+ ai_name: str
+ ai_role: str
+ tools: List[BaseTool]
+ token_counter: Callable[[str], int]
+ send_token_limit: int = 4196
+
+ def construct_full_prompt(self, goals: List[str]) -> str:
+ prompt_start = """Your decisions must always be made independently
+ without seeking user assistance. Play to your strengths
+ as an LLM and pursue simple strategies with no legal complications.
+ If you have completed all your tasks,
+ make sure to use the "finish" command."""
+
+ # Construct full prompt
+ full_prompt = (
+ f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+ )
+ for i, goal in enumerate(goals):
+ full_prompt += f"{i+1}. 
{goal}\n" + + full_prompt += f"\n\n{get_prompt(self.tools)}" + return full_prompt + + def format_messages(self, **kwargs: Any) -> List[BaseMessage]: + base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"])) + time_prompt = SystemMessage( + content=f"The current time and date is {time.strftime('%c')}" + ) + used_tokens = self.token_counter(base_prompt.content) + self.token_counter( + time_prompt.content + ) + memory: VectorStoreRetriever = kwargs["memory"] + previous_messages = kwargs["messages"] + relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:])) + relevant_memory = [d.page_content for d in relevant_docs] + relevant_memory_tokens = sum( + [self.token_counter(doc) for doc in relevant_memory] + ) + while used_tokens + relevant_memory_tokens > 2500: + relevant_memory = relevant_memory[:-1] + relevant_memory_tokens = sum( + [self.token_counter(doc) for doc in relevant_memory] + ) + content_format = ( + f"This reminds you of these events " + f"from your past:\n{relevant_memory}\n\n" + ) + memory_message = SystemMessage(content=content_format) + used_tokens += len(memory_message.content) + historical_messages: List[BaseMessage] = [] + for message in previous_messages[-10:][::-1]: + message_tokens = self.token_counter(message.content) + if used_tokens + message_tokens > self.send_token_limit - 1000: + break + historical_messages = [message] + historical_messages + input_message = HumanMessage(content=kwargs["user_input"]) + messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message] + messages += historical_messages + messages.append(input_message) + return messages diff --git a/langchain/experimental/autonomous_agents/autogpt/prompt_generator.py b/langchain/experimental/autonomous_agents/autogpt/prompt_generator.py new file mode 100644 index 00000000..b8014eb9 --- /dev/null +++ b/langchain/experimental/autonomous_agents/autogpt/prompt_generator.py @@ -0,0 +1,186 @@ +import json +from typing import List + +from langchain.tools.base import BaseTool + +FINISH_NAME = "finish" + + +class PromptGenerator: + """A class for generating custom prompt strings. + + Does this based on constraints, commands, resources, and performance evaluations. + """ + + def __init__(self) -> None: + """Initialize the PromptGenerator object. + + Starts with empty lists of constraints, commands, resources, + and performance evaluations. + """ + self.constraints: List[str] = [] + self.commands: List[BaseTool] = [] + self.resources: List[str] = [] + self.performance_evaluation: List[str] = [] + self.response_format = { + "thoughts": { + "text": "thought", + "reasoning": "reasoning", + "plan": "- short bulleted\n- list that conveys\n- long-term plan", + "criticism": "constructive self-criticism", + "speak": "thoughts summary to say to user", + }, + "command": {"name": "command name", "args": {"arg name": "value"}}, + } + + def add_constraint(self, constraint: str) -> None: + """ + Add a constraint to the constraints list. + + Args: + constraint (str): The constraint to be added. + """ + self.constraints.append(constraint) + + def add_tool(self, tool: BaseTool) -> None: + self.commands.append(tool) + + def _generate_command_string(self, tool: BaseTool) -> str: + output = f"{tool.name}: {tool.description}" + output += f", args json schema: {json.dumps(tool.args)}" + return output + + def add_resource(self, resource: str) -> None: + """ + Add a resource to the resources list. + + Args: + resource (str): The resource to be added. 
+ """ + self.resources.append(resource) + + def add_performance_evaluation(self, evaluation: str) -> None: + """ + Add a performance evaluation item to the performance_evaluation list. + + Args: + evaluation (str): The evaluation item to be added. + """ + self.performance_evaluation.append(evaluation) + + def _generate_numbered_list(self, items: list, item_type: str = "list") -> str: + """ + Generate a numbered list from given items based on the item_type. + + Args: + items (list): A list of items to be numbered. + item_type (str, optional): The type of items in the list. + Defaults to 'list'. + + Returns: + str: The formatted numbered list. + """ + if item_type == "command": + command_strings = [ + f"{i + 1}. {self._generate_command_string(item)}" + for i, item in enumerate(items) + ] + finish_description = ( + "use this to signal that you have finished all your objectives" + ) + finish_args = ( + '"response": "final response to let ' + 'people know you have finished your objectives"' + ) + finish_string = ( + f"{len(items) + 1}. {FINISH_NAME}: " + f"{finish_description}, args: {finish_args}" + ) + return "\n".join(command_strings + [finish_string]) + else: + return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) + + def generate_prompt_string(self) -> str: + """Generate a prompt string. + + Returns: + str: The generated prompt string. + """ + formatted_response_format = json.dumps(self.response_format, indent=4) + prompt_string = ( + f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" + f"Commands:\n" + f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" + f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" + f"Performance Evaluation:\n" + f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" + f"You should only respond in JSON format as described below " + f"\nResponse Format: \n{formatted_response_format} " + f"\nEnsure the response can be parsed by Python json.loads" + ) + + return prompt_string + + +def get_prompt(tools: List[BaseTool]) -> str: + """This function generates a prompt string. + + It includes various constraints, commands, resources, and performance evaluations. + + Returns: + str: The generated prompt string. + """ + + # Initialize the PromptGenerator object + prompt_generator = PromptGenerator() + + # Add constraints to the PromptGenerator object + prompt_generator.add_constraint( + "~4000 word limit for short term memory. " + "Your short term memory is short, " + "so immediately save important information to files." + ) + prompt_generator.add_constraint( + "If you are unsure how you previously did something " + "or want to recall past events, " + "thinking about similar events will help you remember." + ) + prompt_generator.add_constraint("No user assistance") + prompt_generator.add_constraint( + 'Exclusively use the commands listed in double quotes e.g. "command name"' + ) + + # Add commands to the PromptGenerator object + for tool in tools: + prompt_generator.add_tool(tool) + + # Add resources to the PromptGenerator object + prompt_generator.add_resource( + "Internet access for searches and information gathering." + ) + prompt_generator.add_resource("Long Term memory management.") + prompt_generator.add_resource( + "GPT-3.5 powered Agents for delegation of simple tasks." 
+ ) + prompt_generator.add_resource("File output.") + + # Add performance evaluations to the PromptGenerator object + prompt_generator.add_performance_evaluation( + "Continuously review and analyze your actions " + "to ensure you are performing to the best of your abilities." + ) + prompt_generator.add_performance_evaluation( + "Constructively self-criticize your big-picture behavior constantly." + ) + prompt_generator.add_performance_evaluation( + "Reflect on past decisions and strategies to refine your approach." + ) + prompt_generator.add_performance_evaluation( + "Every command has a cost, so be smart and efficient. " + "Aim to complete tasks in the least number of steps." + ) + + # Generate the prompt string + prompt_string = prompt_generator.generate_prompt_string() + + return prompt_string diff --git a/langchain/experimental/autonomous_agents/baby_agi/__init__.py b/langchain/experimental/autonomous_agents/baby_agi/__init__.py new file mode 100644 index 00000000..7d28e683 --- /dev/null +++ b/langchain/experimental/autonomous_agents/baby_agi/__init__.py @@ -0,0 +1,17 @@ +from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI +from langchain.experimental.autonomous_agents.baby_agi.task_creation import ( + TaskCreationChain, +) +from langchain.experimental.autonomous_agents.baby_agi.task_execution import ( + TaskExecutionChain, +) +from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import ( + TaskPrioritizationChain, +) + +__all__ = [ + "BabyAGI", + "TaskPrioritizationChain", + "TaskExecutionChain", + "TaskCreationChain", +] diff --git a/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py b/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py new file mode 100644 index 00000000..fffb413a --- /dev/null +++ b/langchain/experimental/autonomous_agents/baby_agi/baby_agi.py @@ -0,0 +1,181 @@ +from collections import deque +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, Field + +from langchain.chains.base import Chain +from langchain.experimental.autonomous_agents.baby_agi.task_creation import ( + TaskCreationChain, +) +from langchain.experimental.autonomous_agents.baby_agi.task_execution import ( + TaskExecutionChain, +) +from langchain.experimental.autonomous_agents.baby_agi.task_prioritization import ( + TaskPrioritizationChain, +) +from langchain.schema import BaseLanguageModel +from langchain.vectorstores.base import VectorStore + + +class BabyAGI(Chain, BaseModel): + """Controller model for the BabyAGI agent.""" + + task_list: deque = Field(default_factory=deque) + task_creation_chain: Chain = Field(...) + task_prioritization_chain: Chain = Field(...) + execution_chain: Chain = Field(...) 
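+ # Monotonically increasing counter used to assign ids to newly created tasks.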
+ task_id_counter: int = Field(1)
+ vectorstore: VectorStore = Field(init=False)
+ max_iterations: Optional[int] = None
+
+ class Config:
+ """Configuration for this pydantic object."""
+
+ arbitrary_types_allowed = True
+
+ def add_task(self, task: Dict) -> None:
+ self.task_list.append(task)
+
+ def print_task_list(self) -> None:
+ print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
+ for t in self.task_list:
+ print(str(t["task_id"]) + ": " + t["task_name"])
+
+ def print_next_task(self, task: Dict) -> None:
+ print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
+ print(str(task["task_id"]) + ": " + task["task_name"])
+
+ def print_task_result(self, result: str) -> None:
+ print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
+ print(result)
+
+ @property
+ def input_keys(self) -> List[str]:
+ return ["objective"]
+
+ @property
+ def output_keys(self) -> List[str]:
+ return []
+
+ def get_next_task(
+ self, result: str, task_description: str, objective: str
+ ) -> List[Dict]:
+ """Create new tasks based on the result of the last completed task."""
+ task_names = [t["task_name"] for t in self.task_list]
+
+ incomplete_tasks = ", ".join(task_names)
+ response = self.task_creation_chain.run(
+ result=result,
+ task_description=task_description,
+ incomplete_tasks=incomplete_tasks,
+ objective=objective,
+ )
+ new_tasks = response.split("\n")
+ return [
+ {"task_name": task_name} for task_name in new_tasks if task_name.strip()
+ ]
+
+ def prioritize_tasks(self, this_task_id: int, objective: str) -> List[Dict]:
+ """Prioritize tasks."""
+ task_names = [t["task_name"] for t in list(self.task_list)]
+ next_task_id = int(this_task_id) + 1
+ response = self.task_prioritization_chain.run(
+ task_names=", ".join(task_names),
+ next_task_id=str(next_task_id),
+ objective=objective,
+ )
+ new_tasks = response.split("\n")
+ prioritized_task_list = []
+ for task_string in new_tasks:
+ if not task_string.strip():
+ continue
+ task_parts = task_string.strip().split(".", 1)
+ if len(task_parts) == 2:
+ task_id = task_parts[0].strip()
+ task_name = task_parts[1].strip()
+ prioritized_task_list.append(
+ {"task_id": task_id, "task_name": task_name}
+ )
+ return prioritized_task_list
+
+ def _get_top_tasks(self, query: str, k: int) -> List[str]:
+ """Get the top k tasks based on the query."""
+ results = self.vectorstore.similarity_search(query, k=k)
+ if not results:
+ return []
+ return [str(item.metadata["task"]) for item in results]
+
+ def execute_task(self, objective: str, task: str, k: int = 5) -> str:
+ """Execute a task."""
+ context = self._get_top_tasks(query=objective, k=k)
+ return self.execution_chain.run(
+ objective=objective, context="\n".join(context), task=task
+ )
+
+ def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+ """Run the agent."""
+ objective = inputs["objective"]
+ first_task = inputs.get("first_task", "Make a todo list")
+ self.add_task({"task_id": 1, "task_name": first_task})
+ num_iters = 0
+ while True:
+ if self.task_list:
+ self.print_task_list()
+
+ # Step 1: Pull the first task
+ task = self.task_list.popleft()
+ self.print_next_task(task)
+
+ # Step 2: Execute the task
+ result = self.execute_task(objective, task["task_name"])
+ this_task_id = int(task["task_id"])
+ self.print_task_result(result)
+
+ # Step 3: Store the result in the vectorstore
+ result_id = f"result_{task['task_id']}"
+ self.vectorstore.add_texts(
+ texts=[result],
+ metadatas=[{"task": task["task_name"]}],
+ ids=[result_id],
+ )
+
+ # Step 4: Create new tasks and reprioritize the task list
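+ # The task-creation chain proposes follow-up tasks from the latest result;
+ # ids are assigned locally, then the prioritization chain reorders the queue.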
+ new_tasks = self.get_next_task(result, task["task_name"], objective)
+ for new_task in new_tasks:
+ self.task_id_counter += 1
+ new_task.update({"task_id": self.task_id_counter})
+ self.add_task(new_task)
+ self.task_list = deque(self.prioritize_tasks(this_task_id, objective))
+ num_iters += 1
+ if self.max_iterations is not None and num_iters == self.max_iterations:
+ print(
+ "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
+ )
+ break
+ return {}
+
+ @classmethod
+ def from_llm(
+ cls,
+ llm: BaseLanguageModel,
+ vectorstore: VectorStore,
+ verbose: bool = False,
+ task_execution_chain: Optional[Chain] = None,
+ **kwargs: Dict[str, Any],
+ ) -> "BabyAGI":
+ """Initialize the BabyAGI Controller."""
+ task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
+ task_prioritization_chain = TaskPrioritizationChain.from_llm(
+ llm, verbose=verbose
+ )
+ if task_execution_chain is None:
+ execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
+ else:
+ execution_chain = task_execution_chain
+ return cls(
+ task_creation_chain=task_creation_chain,
+ task_prioritization_chain=task_prioritization_chain,
+ execution_chain=execution_chain,
+ vectorstore=vectorstore,
+ **kwargs,
+ )
diff --git a/langchain/experimental/autonomous_agents/baby_agi/task_creation.py b/langchain/experimental/autonomous_agents/baby_agi/task_creation.py
new file mode 100644
index 00000000..122b0dbf
--- /dev/null
+++ b/langchain/experimental/autonomous_agents/baby_agi/task_creation.py
@@ -0,0 +1,30 @@
+from langchain import LLMChain, PromptTemplate
+from langchain.schema import BaseLanguageModel
+
+
+class TaskCreationChain(LLMChain):
+ """Chain to generate tasks."""
+
+ @classmethod
+ def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
+ """Create a TaskCreationChain from an LLM."""
+ task_creation_template = (
+ "You are a task creation AI that uses the result of an execution agent"
+ " to create new tasks with the following objective: {objective}."
+ " The last completed task has the result: {result}."
+ " This result was based on this task description: {task_description}."
+ " These are incomplete tasks: {incomplete_tasks}."
+ " Based on the result, create new tasks to be completed"
+ " by the AI system that do not overlap with incomplete tasks."
+ " Return the tasks as an array."
+ )
+ prompt = PromptTemplate(
+ template=task_creation_template,
+ input_variables=[
+ "result",
+ "task_description",
+ "incomplete_tasks",
+ "objective",
+ ],
+ )
+ return cls(prompt=prompt, llm=llm, verbose=verbose)
diff --git a/langchain/experimental/autonomous_agents/baby_agi/task_execution.py b/langchain/experimental/autonomous_agents/baby_agi/task_execution.py
new file mode 100644
index 00000000..b85619f8
--- /dev/null
+++ b/langchain/experimental/autonomous_agents/baby_agi/task_execution.py
@@ -0,0 +1,21 @@
+from langchain import LLMChain, PromptTemplate
+from langchain.schema import BaseLanguageModel
+
+
+class TaskExecutionChain(LLMChain):
+ """Chain to execute tasks."""
+
+ @classmethod
+ def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
+ """Create a TaskExecutionChain from an LLM."""
+ execution_template = (
+ "You are an AI who performs one task based on the following objective: "
+ "{objective}."
+ " Take into account these previously completed tasks: {context}."
+ " Your task: {task}. Response:"
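+ # Note: adjacent string literals are concatenated at compile time; the
+ # leading spaces above keep the sentences in the prompt separated.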
Response:" + ) + prompt = PromptTemplate( + template=execution_template, + input_variables=["objective", "context", "task"], + ) + return cls(prompt=prompt, llm=llm, verbose=verbose) diff --git a/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py b/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py new file mode 100644 index 00000000..19e9d79a --- /dev/null +++ b/langchain/experimental/autonomous_agents/baby_agi/task_prioritization.py @@ -0,0 +1,24 @@ +from langchain import LLMChain, PromptTemplate +from langchain.schema import BaseLanguageModel + + +class TaskPrioritizationChain(LLMChain): + """Chain to prioritize tasks.""" + + @classmethod + def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: + """Get the response parser.""" + task_prioritization_template = ( + "You are a task prioritization AI tasked with cleaning the formatting of " + "and reprioritizing the following tasks: {task_names}." + " Consider the ultimate objective of your team: {objective}." + " Do not remove any tasks. Return the result as a numbered list, like:" + " #. First task" + " #. Second task" + " Start the task list with number {next_task_id}." + ) + prompt = PromptTemplate( + template=task_prioritization_template, + input_variables=["task_names", "next_task_id", "objective"], + ) + return cls(prompt=prompt, llm=llm, verbose=verbose) diff --git a/langchain/tools/base.py b/langchain/tools/base.py index eb03dfdd..e8cd3b0c 100644 --- a/langchain/tools/base.py +++ b/langchain/tools/base.py @@ -1,9 +1,10 @@ """Base implementation for tools or skills.""" from abc import ABC, abstractmethod +from inspect import signature from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union -from pydantic import BaseModel, Extra, Field, validator +from pydantic import BaseModel, Extra, Field, validate_arguments, validator from langchain.callbacks import get_callback_manager from langchain.callbacks.base import BaseCallbackManager @@ -36,28 +37,28 @@ class BaseTool(ABC, BaseModel): arbitrary_types_allowed = True @property - def args(self) -> Union[Type[BaseModel], Type[str]]: - """Generate an input pydantic model.""" - return str if self.args_schema is None else self.args_schema + def args(self) -> dict: + if self.args_schema is not None: + return self.args_schema.schema()["properties"] + else: + inferred_model = validate_arguments(self._run).model # type: ignore + schema = inferred_model.schema()["properties"] + valid_keys = signature(self._run).parameters + return {k: schema[k] for k in valid_keys} def _parse_input( self, tool_input: Union[str, Dict], ) -> None: """Convert tool input to pydantic model.""" - input_args = self.args + input_args = self.args_schema if isinstance(tool_input, str): - if issubclass(input_args, BaseModel): + if input_args is not None: key_ = next(iter(input_args.__fields__.keys())) input_args.validate({key_: tool_input}) else: - if issubclass(input_args, BaseModel): + if input_args is not None: input_args.validate(tool_input) - else: - raise ValueError( - f"args_schema required for tool {self.name} in order to" - f" accept input of type {type(tool_input)}" - ) @validator("callback_manager", pre=True, always=True) def set_callback_manager( diff --git a/langchain/tools/file_management/read.py b/langchain/tools/file_management/read.py index a42df67e..229bd63e 100644 --- a/langchain/tools/file_management/read.py +++ b/langchain/tools/file_management/read.py @@ -13,7 +13,7 @@ class ReadFileInput(BaseModel): class 
ReadFileTool(BaseTool): name: str = "read_file" - tool_args: Type[BaseModel] = ReadFileInput + args_schema: Type[BaseModel] = ReadFileInput description: str = "Read file from disk" def _run(self, file_path: str) -> str: diff --git a/langchain/tools/file_management/write.py b/langchain/tools/file_management/write.py index 0748ee82..4c3c1c0a 100644 --- a/langchain/tools/file_management/write.py +++ b/langchain/tools/file_management/write.py @@ -15,7 +15,7 @@ class WriteFileInput(BaseModel): class WriteFileTool(BaseTool): name: str = "write_file" - tool_args: Type[BaseModel] = WriteFileInput + args_schema: Type[BaseModel] = WriteFileInput description: str = "Write file to disk" def _run(self, file_path: str, text: str) -> str:
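Together with the new `args` property, the `tool_args` to `args_schema` rename in these last two hunks standardizes how tools declare structured inputs. A minimal sketch of a custom tool written against the patched `BaseTool`; the `EchoTool` itself is hypothetical, not part of this PR:

```python
from typing import Type

from pydantic import BaseModel, Field

from langchain.tools.base import BaseTool


class EchoInput(BaseModel):
    text: str = Field(description="text to echo back")


class EchoTool(BaseTool):
    """Toy tool used only to illustrate the args_schema contract."""

    name: str = "echo"
    description: str = "Echo the input text back to the caller."
    args_schema: Type[BaseModel] = EchoInput  # renamed from tool_args in this PR

    def _run(self, text: str) -> str:
        return text

    async def _arun(self, text: str) -> str:
        return self._run(text)


tool = EchoTool()
# With args_schema set, `args` comes from EchoInput.schema()["properties"].
print(tool.args)
# expected shape: {'text': {'title': 'Text',
#                           'description': 'text to echo back',
#                           'type': 'string'}}
```

Because `_parse_input` now validates against `args_schema` only when one is set, tools without a schema fall back to signature inference rather than raising, which is what lets plain function-based `Tool` instances keep working unchanged.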