diff --git a/docs/reference.rst b/docs/reference.rst index fc19ba72..ba9cb6f1 100644 --- a/docs/reference.rst +++ b/docs/reference.rst @@ -14,3 +14,4 @@ Full documentation on all methods, classes, and APIs in LangChain. ./reference/modules/chains.rst ./reference/agents.rst ./reference/modules/utilities.rst + ./reference/modules/experimental.rst diff --git a/docs/reference/modules/experimental.rst b/docs/reference/modules/experimental.rst new file mode 100644 index 00000000..22c124b9 --- /dev/null +++ b/docs/reference/modules/experimental.rst @@ -0,0 +1,28 @@ +========== +Experimental Modules +========== + +This module contains experimental modules and reproductions of existing work using LangChain primitives. + +Autonomous Agents +------------------ + +Here, we document the BabyAGI and AutoGPT classes from the langchain.experimental module. + +.. autoclass:: langchain.experimental.BabyAGI + :members: + +.. autoclass:: langchain.experimental.AutoGPT + :members: + + +Generative Agents +------------------ + +Here, we document the GenerativeAgent and GenerativeAgentMemory classes from the langchain.experimental module. + +.. autoclass:: langchain.experimental.GenerativeAgent + :members: + +.. autoclass:: langchain.experimental.GenerativeAgentMemory + :members: diff --git a/docs/use_cases/agent_simulations/characters.ipynb b/docs/use_cases/agent_simulations/characters.ipynb index 8de85cdf..b3bf6054 100644 --- a/docs/use_cases/agent_simulations/characters.ipynb +++ b/docs/use_cases/agent_simulations/characters.ipynb @@ -26,32 +26,38 @@ { "cell_type": "code", "execution_count": 2, + "id": "3128fc21", + "metadata": {}, + "outputs": [], + "source": [ + "import logging\n", + "logging.basicConfig(level=logging.ERROR)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "id": "8851c370-b395-4b80-a79d-486a38ffc244", "metadata": { "tags": [] }, "outputs": [], "source": [ - "import re\n", "from datetime import datetime, timedelta\n", - "from typing import List, Optional, Tuple\n", + "from typing import List\n", "from termcolor import colored\n", "\n", - "from pydantic import BaseModel, Field\n", "\n", - "from langchain import LLMChain\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore import InMemoryDocstore\n", "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", - "from langchain.schema import BaseLanguageModel, Document\n", "from langchain.vectorstores import FAISS\n" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "81824e76", "metadata": { "tags": [] @@ -63,6 +69,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "c3da1649-d88f-4973-b655-7042975cde7e", "metadata": {}, @@ -77,310 +84,24 @@ " 1. Observations - from dialogues or interactions with the virtual world, about self or others\n", " 2. Reflections - resurfaced and summarized core memories\n", "\n", + "\n", "2. **Memory Recall**\n", "\n", " Memories are retrieved using a weighted sum of salience, recency, and importance.\n", "\n", - "Review the definition below, focusing on `add_memory` and `summarize_related_memories` methods." + "You can review the definitions of the `GenerativeAgent` and `GenerativeAgentMemory` in the [reference documentation](\"../../reference/modules/experimental\") for the following imports, focusing on `add_memory` and `summarize_related_memories` methods." 
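The hunks above replace the notebook's inline agent implementation with the packaged `langchain.experimental` classes, but only the new import cell is shown; the `create_new_memory_retriever()` helper that later cells hand to `GenerativeAgentMemory` appears without its body. A minimal sketch of such a factory, built only from the imports the notebook retains (FAISS, `InMemoryDocstore`, `OpenAIEmbeddings`, and `TimeWeightedVectorStoreRetriever`), might look like the following; the `import faiss`, the 1536-dimension index, and `k=15` are illustrative assumptions rather than values taken from this diff, and the positional `FAISS(...)` constructor reflects the LangChain API of this era.

```python
import faiss  # assumed dependency; the FAISS vector store wraps a raw faiss index

from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS


def create_new_memory_retriever():
    """Create a fresh, empty time-weighted retriever for one agent's memory stream."""
    embeddings_model = OpenAIEmbeddings()
    embedding_size = 1536  # dimensionality of OpenAI embeddings (assumption)
    index = faiss.IndexFlatL2(embedding_size)
    vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
    # Retrieval scores combine semantic similarity (salience), a recency decay,
    # and any extra metadata keys, here the "importance" score that
    # GenerativeAgentMemory attaches to each stored memory.
    return TimeWeightedVectorStoreRetriever(
        vectorstore=vectorstore, other_score_keys=["importance"], k=15
    )
```

Whatever the exact retriever, the pattern in the updated cells below is that it is passed to `GenerativeAgentMemory` together with the LLM and a `reflection_threshold`, and the memory object rather than the agent now owns `add_memory`, which is why the seeding loops change from `tommie.add_memory(...)` to `tommie.memory.add_memory(...)`.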
] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "043e5203-6a41-431c-9efa-3e1743d7d25a", "metadata": { "tags": [] }, "outputs": [], "source": [ - "class GenerativeAgent(BaseModel):\n", - " \"\"\"A character with memory and innate characteristics.\"\"\"\n", - " \n", - " name: str\n", - " age: int\n", - " traits: str\n", - " \"\"\"The traits of the character you wish not to change.\"\"\"\n", - " status: str\n", - " \"\"\"Current activities of the character.\"\"\"\n", - " llm: BaseLanguageModel\n", - " memory_retriever: TimeWeightedVectorStoreRetriever\n", - " \"\"\"The retriever to fetch related memories.\"\"\"\n", - " verbose: bool = False\n", - " \n", - " reflection_threshold: Optional[float] = None\n", - " \"\"\"When the total 'importance' of memories exceeds the above threshold, stop to reflect.\"\"\"\n", - " \n", - " current_plan: List[str] = []\n", - " \"\"\"The current plan of the agent.\"\"\"\n", - " \n", - " summary: str = \"\" #: :meta private:\n", - " summary_refresh_seconds: int= 3600 #: :meta private:\n", - " last_refreshed: datetime =Field(default_factory=datetime.now) #: :meta private:\n", - " daily_summaries: List[str] #: :meta private:\n", - " memory_importance: float = 0.0 #: :meta private:\n", - " max_tokens_limit: int = 1200 #: :meta private:\n", - " \n", - " class Config:\n", - " \"\"\"Configuration for this pydantic object.\"\"\"\n", - "\n", - " arbitrary_types_allowed = True\n", - "\n", - " @staticmethod\n", - " def _parse_list(text: str) -> List[str]:\n", - " \"\"\"Parse a newline-separated string into a list of strings.\"\"\"\n", - " lines = re.split(r'\\n', text.strip())\n", - " return [re.sub(r'^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]\n", - "\n", - "\n", - " def _compute_agent_summary(self):\n", - " \"\"\"\"\"\"\n", - " prompt = PromptTemplate.from_template(\n", - " \"How would you summarize {name}'s core characteristics given the\"\n", - " +\" following statements:\\n\"\n", - " +\"{related_memories}\"\n", - " + \"Do not embellish.\"\n", - " +\"\\n\\nSummary: \"\n", - " )\n", - " # The agent seeks to think about their core characteristics.\n", - " relevant_memories = self.fetch_memories(f\"{self.name}'s core characteristics\")\n", - " relevant_memories_str = \"\\n\".join([f\"{mem.page_content}\" for mem in relevant_memories])\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(name=self.name, related_memories=relevant_memories_str).strip()\n", - " \n", - " def _get_topics_of_reflection(self, last_k: int = 50) -> Tuple[str, str, str]:\n", - " \"\"\"Return the 3 most salient high-level questions about recent observations.\"\"\"\n", - " prompt = PromptTemplate.from_template(\n", - " \"{observations}\\n\\n\"\n", - " + \"Given only the information above, what are the 3 most salient\"\n", - " + \" high-level questions we can answer about the subjects in the statements?\"\n", - " + \" Provide each question on a new line.\\n\\n\"\n", - " )\n", - " reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " observations = self.memory_retriever.memory_stream[-last_k:]\n", - " observation_str = \"\\n\".join([o.page_content for o in observations])\n", - " result = reflection_chain.run(observations=observation_str)\n", - " return self._parse_list(result)\n", - " \n", - " def _get_insights_on_topic(self, topic: str) -> List[str]:\n", - " \"\"\"Generate 'insights' on a topic of reflection, based on pertinent memories.\"\"\"\n", - " prompt = 
PromptTemplate.from_template(\n", - " \"Statements about {topic}\\n\"\n", - " +\"{related_statements}\\n\\n\"\n", - " + \"What 5 high-level insights can you infer from the above statements?\"\n", - " + \" (example format: insight (because of 1, 5, 3))\"\n", - " )\n", - " related_memories = self.fetch_memories(topic)\n", - " related_statements = \"\\n\".join([f\"{i+1}. {memory.page_content}\" \n", - " for i, memory in \n", - " enumerate(related_memories)])\n", - " reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " result = reflection_chain.run(topic=topic, related_statements=related_statements)\n", - " # TODO: Parse the connections between memories and insights\n", - " return self._parse_list(result)\n", - " \n", - " def pause_to_reflect(self) -> List[str]:\n", - " \"\"\"Reflect on recent observations and generate 'insights'.\"\"\"\n", - " print(colored(f\"Character {self.name} is reflecting\", \"blue\"))\n", - " new_insights = []\n", - " topics = self._get_topics_of_reflection()\n", - " for topic in topics:\n", - " insights = self._get_insights_on_topic( topic)\n", - " for insight in insights:\n", - " self.add_memory(insight)\n", - " new_insights.extend(insights)\n", - " return new_insights\n", - " \n", - " def _score_memory_importance(self, memory_content: str, weight: float = 0.15) -> float:\n", - " \"\"\"Score the absolute importance of the given memory.\"\"\"\n", - " # A weight of 0.25 makes this less important than it\n", - " # would be otherwise, relative to salience and time\n", - " prompt = PromptTemplate.from_template(\n", - " \"On the scale of 1 to 10, where 1 is purely mundane\"\n", - " +\" (e.g., brushing teeth, making bed) and 10 is\"\n", - " + \" extremely poignant (e.g., a break up, college\"\n", - " + \" acceptance), rate the likely poignancy of the\"\n", - " + \" following piece of memory. 
Respond with a single integer.\"\n", - " + \"\\nMemory: {memory_content}\"\n", - " + \"\\nRating: \"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " score = chain.run(memory_content=memory_content).strip()\n", - " match = re.search(r\"^\\D*(\\d+)\", score)\n", - " if match:\n", - " return (float(score[0]) / 10) * weight\n", - " else:\n", - " return 0.0\n", - "\n", - "\n", - " def add_memory(self, memory_content: str) -> List[str]:\n", - " \"\"\"Add an observation or memory to the agent's memory.\"\"\"\n", - " importance_score = self._score_memory_importance(memory_content)\n", - " self.memory_importance += importance_score\n", - " document = Document(page_content=memory_content, metadata={\"importance\": importance_score})\n", - " result = self.memory_retriever.add_documents([document])\n", - "\n", - " # After an agent has processed a certain amount of memories (as measured by\n", - " # aggregate importance), it is time to reflect on recent events to add\n", - " # more synthesized memories to the agent's memory stream.\n", - " if (self.reflection_threshold is not None \n", - " and self.memory_importance > self.reflection_threshold\n", - " and self.status != \"Reflecting\"):\n", - " old_status = self.status\n", - " self.status = \"Reflecting\"\n", - " self.pause_to_reflect()\n", - " # Hack to clear the importance from reflection\n", - " self.memory_importance = 0.0\n", - " self.status = old_status\n", - " return result\n", - " \n", - " def fetch_memories(self, observation: str) -> List[Document]:\n", - " \"\"\"Fetch related memories.\"\"\"\n", - " return self.memory_retriever.get_relevant_documents(observation)\n", - " \n", - " \n", - " def get_summary(self, force_refresh: bool = False) -> str:\n", - " \"\"\"Return a descriptive summary of the agent.\"\"\"\n", - " current_time = datetime.now()\n", - " since_refresh = (current_time - self.last_refreshed).seconds\n", - " if not self.summary or since_refresh >= self.summary_refresh_seconds or force_refresh:\n", - " self.summary = self._compute_agent_summary()\n", - " self.last_refreshed = current_time\n", - " return (\n", - " f\"Name: {self.name} (age: {self.age})\"\n", - " +f\"\\nInnate traits: {self.traits}\"\n", - " +f\"\\n{self.summary}\"\n", - " )\n", - " \n", - " def get_full_header(self, force_refresh: bool = False) -> str:\n", - " \"\"\"Return a full header of the agent's status, summary, and current time.\"\"\"\n", - " summary = self.get_summary(force_refresh=force_refresh)\n", - " current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n", - " return f\"{summary}\\nIt is {current_time_str}.\\n{self.name}'s status: {self.status}\"\n", - "\n", - " \n", - " \n", - " def _get_entity_from_observation(self, observation: str) -> str:\n", - " prompt = PromptTemplate.from_template(\n", - " \"What is the observed entity in the following observation? {observation}\"\n", - " +\"\\nEntity=\"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(observation=observation).strip()\n", - "\n", - " def _get_entity_action(self, observation: str, entity_name: str) -> str:\n", - " prompt = PromptTemplate.from_template(\n", - " \"What is the {entity} doing in the following observation? 
{observation}\"\n", - " +\"\\nThe {entity} is\"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(entity=entity_name, observation=observation).strip()\n", - " \n", - " def _format_memories_to_summarize(self, relevant_memories: List[Document]) -> str:\n", - " content_strs = set()\n", - " content = []\n", - " for mem in relevant_memories:\n", - " if mem.page_content in content_strs:\n", - " continue\n", - " content_strs.add(mem.page_content)\n", - " created_time = mem.metadata[\"created_at\"].strftime(\"%B %d, %Y, %I:%M %p\")\n", - " content.append(f\"- {created_time}: {mem.page_content.strip()}\")\n", - " return \"\\n\".join([f\"{mem}\" for mem in content])\n", - " \n", - " def summarize_related_memories(self, observation: str) -> str:\n", - " \"\"\"Summarize memories that are most relevant to an observation.\"\"\"\n", - " entity_name = self._get_entity_from_observation(observation)\n", - " entity_action = self._get_entity_action(observation, entity_name)\n", - " q1 = f\"What is the relationship between {self.name} and {entity_name}\"\n", - " relevant_memories = self.fetch_memories(q1) # Fetch memories related to the agent's relationship with the entity\n", - " q2 = f\"{entity_name} is {entity_action}\"\n", - " relevant_memories += self.fetch_memories(q2) # Fetch things related to the entity-action pair\n", - " context_str = self._format_memories_to_summarize(relevant_memories)\n", - " prompt = PromptTemplate.from_template(\n", - " \"{q1}?\\nContext from memory:\\n{context_str}\\nRelevant context: \"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(q1=q1, context_str=context_str.strip()).strip()\n", - " \n", - " def _get_memories_until_limit(self, consumed_tokens: int) -> str:\n", - " \"\"\"Reduce the number of tokens in the documents.\"\"\"\n", - " result = []\n", - " for doc in self.memory_retriever.memory_stream[::-1]:\n", - " if consumed_tokens >= self.max_tokens_limit:\n", - " break\n", - " consumed_tokens += self.llm.get_num_tokens(doc.page_content)\n", - " if consumed_tokens < self.max_tokens_limit:\n", - " result.append(doc.page_content) \n", - " return \"; \".join(result[::-1])\n", - " \n", - " def _generate_reaction(\n", - " self,\n", - " observation: str,\n", - " suffix: str\n", - " ) -> str:\n", - " \"\"\"React to a given observation.\"\"\"\n", - " prompt = PromptTemplate.from_template(\n", - " \"{agent_summary_description}\"\n", - " +\"\\nIt is {current_time}.\"\n", - " +\"\\n{agent_name}'s status: {agent_status}\"\n", - " + \"\\nSummary of relevant context from {agent_name}'s memory:\"\n", - " +\"\\n{relevant_memories}\"\n", - " +\"\\nMost recent observations: {recent_observations}\"\n", - " + \"\\nObservation: {observation}\"\n", - " + \"\\n\\n\" + suffix\n", - " )\n", - " agent_summary_description = self.get_summary()\n", - " relevant_memories_str = self.summarize_related_memories(observation)\n", - " current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n", - " kwargs = dict(agent_summary_description=agent_summary_description,\n", - " current_time=current_time_str,\n", - " relevant_memories=relevant_memories_str,\n", - " agent_name=self.name,\n", - " observation=observation,\n", - " agent_status=self.status)\n", - " consumed_tokens = self.llm.get_num_tokens(prompt.format(recent_observations=\"\", **kwargs))\n", - " kwargs[\"recent_observations\"] = self._get_memories_until_limit(consumed_tokens)\n", - " action_prediction_chain = 
LLMChain(llm=self.llm, prompt=prompt)\n", - " result = action_prediction_chain.run(**kwargs)\n", - " return result.strip()\n", - " \n", - " def generate_reaction(self, observation: str) -> Tuple[bool, str]:\n", - " \"\"\"React to a given observation.\"\"\"\n", - " call_to_action_template = (\n", - " \"Should {agent_name} react to the observation, and if so,\"\n", - " +\" what would be an appropriate reaction? Respond in one line.\"\n", - " +' If the action is to engage in dialogue, write:\\nSAY: \"what to say\"'\n", - " +\"\\notherwise, write:\\nREACT: {agent_name}'s reaction (if anything).\"\n", - " + \"\\nEither do nothing, react, or say something but not both.\\n\\n\"\n", - " )\n", - " full_result = self._generate_reaction(observation, call_to_action_template)\n", - " result = full_result.strip().split('\\n')[0]\n", - " self.add_memory(f\"{self.name} observed {observation} and reacted by {result}\")\n", - " if \"REACT:\" in result:\n", - " reaction = result.split(\"REACT:\")[-1].strip()\n", - " return False, f\"{self.name} {reaction}\"\n", - " if \"SAY:\" in result:\n", - " said_value = result.split(\"SAY:\")[-1].strip()\n", - " return True, f\"{self.name} said {said_value}\"\n", - " else:\n", - " return False, result\n", - "\n", - " def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:\n", - " \"\"\"React to a given observation.\"\"\"\n", - " call_to_action_template = (\n", - " 'What would {agent_name} say? To end the conversation, write: GOODBYE: \"what to say\". Otherwise to continue the conversation, write: SAY: \"what to say next\"\\n\\n'\n", - " )\n", - " full_result = self._generate_reaction(observation, call_to_action_template)\n", - " result = full_result.strip().split('\\n')[0]\n", - " if \"GOODBYE:\" in result:\n", - " farewell = result.split(\"GOODBYE:\")[-1].strip()\n", - " self.add_memory(f\"{self.name} observed {observation} and said {farewell}\")\n", - " return False, f\"{self.name} said {farewell}\"\n", - " if \"SAY:\" in result:\n", - " response_text = result.split(\"SAY:\")[-1].strip()\n", - " self.add_memory(f\"{self.name} observed {observation} and said {response_text}\")\n", - " return True, f\"{self.name} said {response_text}\"\n", - " else:\n", - " return False, result" + "from langchain.experimental.generative_agents import GenerativeAgent, GenerativeAgentMemory" ] }, { @@ -393,7 +114,7 @@ "source": [ "## Memory Lifecycle\n", "\n", - "Summarizing the above key methods: `add_memory` and `summarize_related_memories`.\n", + "Summarizing the key methods in the above: `add_memory` and `summarize_related_memories`.\n", "\n", "When an agent makes an observation, it stores the memory:\n", " \n", @@ -421,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "ee9c1a1d-c311-4f1c-8131-75fccd9025b1", "metadata": { "tags": [] @@ -454,29 +175,33 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "7884f9dd-c597-4c27-8c77-1402c71bc2f8", "metadata": { "tags": [] }, "outputs": [], "source": [ + "tommies_memory = GenerativeAgentMemory(\n", + " llm=LLM,\n", + " memory_retriever=create_new_memory_retriever(),\n", + " verbose=False,\n", + " reflection_threshold=8 # we will give this a relatively low number to show how reflection works\n", + ")\n", + "\n", "tommie = GenerativeAgent(name=\"Tommie\", \n", " age=25,\n", - " traits=\"anxious, likes design\", # You can add more persistent traits here \n", + " traits=\"anxious, likes design, talkative\", # You can add more persistent traits here \n", 
" status=\"looking for a job\", # When connected to a virtual world, we can have the characters update their status\n", " memory_retriever=create_new_memory_retriever(),\n", " llm=LLM,\n", - " daily_summaries = [\n", - " \"Drove across state to move to a new town but doesn't have a job yet.\"\n", - " ],\n", - " reflection_threshold = 8, # we will give this a relatively low number to show how reflection works\n", + " memory=tommies_memory\n", " )" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "c524d529", "metadata": { "tags": [] @@ -487,8 +212,8 @@ "output_type": "stream", "text": [ "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Unfortunately, there are no statements provided to summarize Tommie's core characteristics.\n" + "Innate traits: anxious, likes design, talkative\n", + "No statements were provided about Tommie's core characteristics.\n" ] } ], @@ -500,15 +225,15 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "id": "4be60979-d56e-4abf-a636-b34ffa8b7fba", "metadata": { "tags": [] }, "outputs": [], "source": [ - "# We can give the character memories directly\n", - "tommie_memories = [\n", + "# We can add memories directly to the memory object\n", + "tommie_observations = [\n", " \"Tommie remembers his dog, Bruno, from when he was a kid\",\n", " \"Tommie feels tired from driving so far\",\n", " \"Tommie sees the new home\",\n", @@ -517,13 +242,13 @@ " \"Tommie is hungry\",\n", " \"Tommie tries to get some rest.\",\n", "]\n", - "for memory in tommie_memories:\n", - " tommie.add_memory(memory)" + "for observation in tommie_observations:\n", + " tommie.memory.add_memory(observation)" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "6992b48b-697f-4973-9560-142ef85357d7", "metadata": { "tags": [] @@ -534,8 +259,8 @@ "output_type": "stream", "text": [ "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Tommie is observant, nostalgic, tired, and hungry.\n" + "Innate traits: anxious, likes design, talkative\n", + "Tommie is a tired and hungry person who is moving into a new home. He remembers his childhood dog and is aware of the new neighbors' cat. He is trying to get some rest despite the noisy road.\n" ] } ], @@ -559,7 +284,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "id": "eaf125d8-f54c-4c5f-b6af-32789b1f7d3a", "metadata": { "tags": [] @@ -574,7 +299,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "id": "54024d41-6e83-4914-91e5-73140e2dd9c8", "metadata": { "tags": [] @@ -583,10 +308,10 @@ { "data": { "text/plain": [ - "'Tommie said \"I really enjoy design, especially interior design. I find it calming and rewarding to create a space that is both functional and aesthetically pleasing. Unfortunately, I haven\\'t been able to find a job in that field yet.\"'" + "'Tommie said \"I really enjoy design and have been working on some projects in my free time. I\\'m also quite talkative and enjoy meeting new people. What about you?\"'" ] }, - "execution_count": 11, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -597,7 +322,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "id": "71e2e8cc-921e-4816-82f1-66962b2c1055", "metadata": { "tags": [] @@ -606,10 +331,10 @@ { "data": { "text/plain": [ - "'Tommie said \"Well, I\\'m actually on the hunt for a job right now. 
I\\'m hoping to find something in the design field, but I\\'m open to exploring other options as well. How about you, what are your plans for the day?\"'" + "'Tommie said \"Well, today I\\'m mostly focused on getting settled into my new home. But once that\\'s taken care of, I\\'m looking forward to exploring the neighborhood and finding some new design inspiration. What about you?\"'" ] }, - "execution_count": 12, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -620,7 +345,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "id": "a2521ffc-7050-4ac3-9a18-4cccfc798c31", "metadata": { "tags": [] @@ -629,10 +354,10 @@ { "data": { "text/plain": [ - "'Tommie said \"Honestly, I\\'m feeling pretty anxious about finding a job. It\\'s been a bit of a struggle and I\\'m not sure what my next step should be. But I\\'m trying to stay positive and keep pushing forward.\"'" + "'Tommie said \"Honestly, I\\'m a bit anxious about finding a job in this new area. But I\\'m trying to focus on settling in first and then I\\'ll start my job search. How about you?\"'" ] }, - "execution_count": 13, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -651,7 +376,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "id": "154dee3d-bfe0-4828-b963-ed7e885799b3", "metadata": { "tags": [] @@ -693,7 +418,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "id": "238be49c-edb3-4e26-a2b6-98777ba8de86", "metadata": { "tags": [] @@ -703,40 +428,40 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie Tommie groans and covers their head with a pillow, trying to block out the noise.\n", - "\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie Tommie starts making coffee, feeling grateful for the little bit of energy it will give him.\n", - "\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie Tommie sighs in frustration and continues to search for the coffee filters.\n", - "\u001b[32mTommie finally finds the filters and makes himself a cup of coffee.\u001b[0m Tommie Tommie takes a sip of the coffee and feels a little more awake.\n", - "\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie Tommie grimaces at the taste of the coffee and decides to make a mental note to buy a better brand next time.\n", - "\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie Tommie feels disappointed and discouraged, but tries to stay positive and continue the job search.\n", - "\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie Tommie feels determined to keep working on his job search.\n", - "\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie Tommie feels hopeful but also anxious as he heads out to explore the city and look for job openings.\n", - "\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie said \"That job fair could be a great opportunity to meet potential employers.\"\n", - "\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie Tommie feels frustrated and restless while waiting in line.\n", - "\u001b[32mTommie meets several potential 
employers at the job fair but doesn't receive any offers.\u001b[0m Tommie Tommie feels disappointed but remains determined to keep searching for job openings.\n", - "\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie Tommie feels discouraged but remains determined to keep searching for job openings.\n", - "\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie Tommie feels relieved to take a break from job searching and enjoy a meal.\n", - "\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie Tommie feels impatient and frustrated while waiting for his food.\n", - "\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear about the job opening. Could you tell me more about it?\"\n", - "\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Could you tell me more about it?\"\n", - "\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie said \"Thank you for the information, I'll definitely apply for the job and keep my fingers crossed.\"\n", - "\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie Tommie feels hopeful but also anxious as he continues his search for job openings and drops off his resume at several local businesses.\n", - "\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie Tommie takes a deep breath and enjoys the fresh air in the park.\n", - "\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie Tommie smiles and enjoys the momentary distraction from his job search.\n", + "\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie groans and covers his head with a pillow to try and block out the noise.\n", + "\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie stretches his arms and yawns before making his way to the kitchen.\n", + "\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie sighs in frustration but continues to search through the boxes.\n", + "\u001b[32mTommie finally finds the filters and makes himself a cup of coffee.\u001b[0m Tommie takes a sip of the coffee and smiles, feeling a bit more awake and energized.\n", + "\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie grimaces and sets down the coffee, disappointed in the taste.\n", + "\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie Tommie's shoulders slump and he sighs, feeling discouraged.\n", + "\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie nods to himself, feeling productive and hopeful.\n", + "\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie said \"Do you have any recommendations for good places to look for job openings in the area?\"\n", + "\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie said \"That job fair could be a great opportunity for me to network and find some job leads. 
Thanks for letting me know.\"\n", + "\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie sighs and looks around, feeling impatient and frustrated.\n", + "\u001b[32mTommie meets several potential employers at the job fair but doesn't receive any offers.\u001b[0m Tommie Tommie's shoulders slump and he sighs, feeling discouraged.\n", + "\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie Tommie's shoulders slump and he sighs, feeling discouraged.\n", + "\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie said \"Can I get a burger and fries to go, please?\"\n", + "\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie sighs and looks at his phone, feeling impatient.\n", + "\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear your conversation about the job opening. Do you have any more information about it?\"\n", + "\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Thank you for the information, I will definitely look into that company.\"\n", + "\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie nods to himself, feeling hopeful and motivated.\n", + "\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie nods to himself, feeling proactive and hopeful.\n", + "\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie takes a deep breath of fresh air and feels a sense of calm.\n", + "\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie smiles and enjoys the moment of affection from the dog.\n", "****************************************\n", "\u001b[34mAfter 20 observations, Tommie's summary is:\n", "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Tommie is a determined individual who is actively searching for job opportunities. He feels both hopeful and anxious about his search and remains positive despite facing disappointments. He takes breaks to rest and enjoy the little things in life, like going for a walk or grabbing a meal. Tommie is also open to asking for help and seeking information about potential job openings. He is grateful for the little things that give him energy and tries to stay positive even when faced with discouragement. Overall, Tommie's core characteristics include determination, positivity, and a willingness to seek help and take breaks when needed.\u001b[0m\n", + "Innate traits: anxious, likes design, talkative\n", + "Tommie is hopeful and proactive in his job search, but easily becomes discouraged when faced with setbacks. He enjoys spending time outdoors and interacting with animals. Tommie is also productive and enjoys updating his resume and cover letter. He is talkative, enjoys meeting new people, and has an interest in design. 
Tommie is also a coffee drinker and seeks advice from others on finding job openings.\u001b[0m\n", "****************************************\n", - "\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Tommie said \"Mind if I join in on the game?\"\n", - "\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie Tommie winces in pain and puts his hand to his nose to check for any bleeding.\n", - "\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie Tommie takes a deep breath and sits down to rest for a bit.\n", - "\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie Tommie sighs and grabs a broom to clean up the mess.\n", - "\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie Tommie takes a deep breath and reminds himself to stay positive and keep searching for job opportunities.\n", - "\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I vent to you for a bit about my job search struggles?\"\n", - "\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you for the encouragement, it means a lot. I'll keep trying.\"\n", - "\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie said \"Thank you for your support, it really means a lot to me.\"\n" + "\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Do nothing.\n", + "\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie winces and touches his nose, feeling a bit of pain.\n", + "\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie takes a deep breath and sinks into his couch, feeling grateful for a moment of relaxation.\n", + "\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie sighs and grabs a broom and dustpan to clean up the mess.\n", + "\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie sighs and feels discouraged.\n", + "\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I vent to you for a bit about my job search? I'm feeling pretty discouraged.\"\n", + "\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you for the encouragement, it means a lot to me.\"\n", + "\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie nods to himself, feeling grateful for the support from his friend.\n" ] } ], @@ -761,7 +486,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 17, "id": "6336ab5d-3074-4831-951f-c9e2cba5dfb5", "metadata": { "tags": [] @@ -770,10 +495,10 @@ { "data": { "text/plain": [ - "'Tommie said \"It\\'s been a bit of a rollercoaster, to be honest. I went to a job fair and met some potential employers, but didn\\'t get any offers. But then I overheard about a job opening at a diner and applied for it. I also took a break to go for a walk in the park and played frisbee with some people, which was a nice distraction. Overall, it\\'s been a bit frustrating, but I\\'m trying to stay positive and keep searching for job opportunities.\"'" + "'Tommie said \"Well, it\\'s been a bit of a mixed day. 
I\\'ve had some setbacks in my job search, but I also had some fun playing frisbee and spending time outdoors. How about you?\"'" ] }, - "execution_count": 16, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -784,7 +509,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 18, "id": "809ac906-69b7-4326-99ec-af638d32bb20", "metadata": { "tags": [] @@ -793,10 +518,10 @@ { "data": { "text/plain": [ - "'Tommie would say: \"I rely on coffee to give me a little boost, but I regret not buying a better brand lately. The taste has been pretty bitter. But overall, it\\'s not a huge factor in my life.\" '" + "'Tommie said \"I really enjoy coffee, it helps me feel more awake and energized. But sometimes I regret not buying a better brand and finding the taste bitter. How about you?\"'" ] }, - "execution_count": 17, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -807,7 +532,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 19, "id": "f733a431-19ea-421a-9101-ae2593a8c626", "metadata": { "tags": [] @@ -816,10 +541,10 @@ { "data": { "text/plain": [ - "'Tommie said \"Oh, I actually don\\'t have a childhood dog, but I do love animals. Have you had any pets?\"'" + "'Tommie said \"I actually didn\\'t have a childhood dog, but I\\'ve always loved animals. Do you have any pets?\"'" ] }, - "execution_count": 18, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -840,29 +565,36 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 20, "id": "ec8bbe18-a021-419c-bf1f-23d34732cd99", "metadata": { "tags": [] }, "outputs": [], "source": [ + "eves_memory = GenerativeAgentMemory(\n", + " llm=LLM,\n", + " memory_retriever=create_new_memory_retriever(),\n", + " verbose=False,\n", + " reflection_threshold=5\n", + ")\n", + "\n", + "\n", "eve = GenerativeAgent(name=\"Eve\", \n", " age=34, \n", " traits=\"curious, helpful\", # You can add more persistent traits here \n", " status=\"N/A\", # When connected to a virtual world, we can have the characters update their status\n", - " memory_retriever=create_new_memory_retriever(),\n", " llm=LLM,\n", " daily_summaries = [\n", " (\"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.\")\n", " ],\n", - " reflection_threshold = 5,\n", + " memory=eves_memory\n", " )" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 21, "id": "1e2745f5-e0da-4abd-98b4-830802ce6698", "metadata": { "tags": [] @@ -870,22 +602,21 @@ "outputs": [], "source": [ "yesterday = (datetime.now() - timedelta(days=1)).strftime(\"%A %B %d\")\n", - "eve_memories = [\n", + "eve_observations = [\n", " \"Eve overhears her colleague say something about a new client being hard to work with\",\n", " \"Eve wakes up and hear's the alarm\",\n", " \"Eve eats a boal of porridge\",\n", " \"Eve helps a coworker on a task\",\n", " \"Eve plays tennis with her friend Xu before going to work\",\n", " \"Eve overhears her colleague say something about Tommie being hard to work with\",\n", - " \n", "]\n", - "for memory in eve_memories:\n", - " eve.add_memory(memory)" + "for observation in eve_observations:\n", + " eve.memory.add_memory(observation)" ] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 22, "id": "de4726e3-4bb1-47da-8fd9-f317a036fe0f", "metadata": { "tags": [] @@ -897,7 +628,7 @@ "text": [ "Name: Eve (age: 34)\n", "Innate traits: curious, helpful\n", - "Eve is 
helpful, active, eats breakfast, is attentive to her surroundings, and works with colleagues.\n" + "Eve is a helpful and active person who enjoys playing tennis, maintaining a healthy diet, and staying aware of her surroundings. She is a responsible employee who is attentive to her coworkers' comments and willing to assist them with tasks.\n" ] } ], @@ -918,7 +649,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 23, "id": "6cda916d-800c-47bc-a7f9-6a2f19187472", "metadata": { "tags": [] @@ -927,10 +658,10 @@ { "data": { "text/plain": [ - "'Eve said \"I\\'m feeling curious about what\\'s on the agenda for today. Anything special we should be aware of?\"'" + "'Eve said \"I\\'m feeling pretty good, thanks for asking! How about you?\"'" ] }, - "execution_count": 22, + "execution_count": 23, "metadata": {}, "output_type": "execute_result" } @@ -941,7 +672,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 24, "id": "448ae644-0a66-4eb2-a03a-319f36948b37", "metadata": { "tags": [] @@ -950,10 +681,10 @@ { "data": { "text/plain": [ - "'Eve said \"I overheard someone say Tommie is hard to work with. Is there something I can help with?\"'" + "'Eve said \"I don\\'t know much about Tommie, why do you ask?\"'" ] }, - "execution_count": 23, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -964,7 +695,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 25, "id": "493fc5b8-8730-4ef8-9820-0f1769ce1691", "metadata": { "tags": [] @@ -973,10 +704,10 @@ { "data": { "text/plain": [ - "'Eve said \"Oh, I didn\\'t realize Tommie was looking for a new job. Is there anything I can do to help? Maybe I could introduce him to some people in my network or help him with his resume.\"'" + "'Eve said \"That\\'s interesting. I don\\'t know much about Tommie, but if I had the chance, I would ask him about his previous work experience and what kind of job he\\'s looking for. What about you, what would you ask him?\"'" ] }, - "execution_count": 24, + "execution_count": 25, "metadata": {}, "output_type": "execute_result" } @@ -987,7 +718,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 26, "id": "4b46452a-6c54-4db2-9d87-18597f70fec8", "metadata": { "tags": [] @@ -996,10 +727,10 @@ { "data": { "text/plain": [ - "'Eve said \"Sure, I can definitely help keep the conversation going and ask him plenty of questions. Is there anything specific you would like me to ask him about his skills or experience? I want to make sure the conversation is productive.\"'" + "'Eve said \"Sure, I can definitely ask him a lot of questions to keep the conversation going. Thanks for the heads up about his anxiety.\"'" ] }, - "execution_count": 25, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } @@ -1020,7 +751,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 27, "id": "042ea271-4bf1-4247-9082-239a6fea43b8", "metadata": { "tags": [] @@ -1047,7 +778,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 28, "id": "d5462b14-218e-4d85-b035-df57ea8e0f80", "metadata": { "tags": [] @@ -1057,25 +788,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "Eve said \"Of course, Tommie! I'm happy to help in any way I can. What specifically would you like advice on?\"\n", - "Tommie said \"Thank you, Eve. I was actually wondering if you knew of any job openings in the design field? 
That's my area of interest and expertise.\"\n", - "Eve said \"Sure, Tommie! I can definitely keep an eye out for any job openings in the design field and let you know if I come across anything. Have you updated your resume and portfolio recently? I could take a look and provide some feedback if you'd like.\"\n", - "Tommie said \"Thank you so much, Eve! That would be incredibly helpful. And yes, I have updated my resume and portfolio recently, but I would love any feedback you can provide. I really appreciate your support and willingness to help.\"\n", - "Eve said \"Great, Tommie! I'll take a look at your resume and portfolio and provide you with some feedback. Do you have any specific companies or types of design jobs that you're interested in? I might be able to provide more targeted advice and resources based on that information.\"\n", - "Tommie said \"Thank you, Eve! I'm primarily interested in graphic design and UI/UX design positions, but I'm open to other opportunities as well. I appreciate any advice or resources you can provide. Thanks again for your help.\"\n", - "Eve said \"Great, Tommie! Those are both really interesting fields. I'll definitely keep an eye out for any job openings or resources related to graphic design and UI/UX design. In the meantime, I can take a look at your resume and portfolio and provide you with some feedback. Would you like me to email you my feedback or would you prefer to discuss it in person?\"\n", - "Tommie said \"Thank you, Eve! I would really appreciate discussing your feedback in person. When would be a good time for us to meet?\"\n", - "Eve said \"Sure, Tommie! How about we schedule a time to meet next week? I have some availability on Tuesday or Thursday afternoon. What works best for you?\"\n", - "Tommie said \"Thank you, Eve! Tuesday afternoon works better for me. How about 2 PM?\"\n", - "Eve said \"Great, Tommie! Tuesday at 2 PM works for me as well. Let's plan to meet then. In the meantime, if you have any questions or if there's anything else I can help with, please don't hesitate to reach out. Good luck with your job search!\"\n", - "Tommie said \"Thank you so much, Eve! I really appreciate your support and willingness to help. I'll see you next Tuesday at 2 PM. Have a great day!\"\n", - "Eve said \"You're welcome, Tommie! Looking forward to meeting with you on Tuesday. Have a great day and good luck with your job search!\"\n" + "Eve said \"Of course, Tommie. I'd be happy to share about my experience. What specific questions do you have?\"\n", + "Tommie said \"Thank you, Eve. I'm curious about what strategies you used in your own job search. Did you have any specific tactics that helped you stand out to employers?\"\n", + "Eve said \"Sure, Tommie. I found that networking and reaching out to professionals in my field was really helpful. I also made sure to tailor my resume and cover letter to each job I applied to. Do you have any specific questions about those strategies?\"\n", + "Tommie said \"Thank you, Eve. That's really helpful advice. Did you have any specific ways of networking that worked well for you?\"\n", + "Eve said \"Sure, Tommie. I found that attending industry events and connecting with professionals on LinkedIn were both great ways to network. Do you have any specific questions about those tactics?\"\n", + "Tommie said \"That's really helpful, thank you for sharing. Did you find that you were able to make meaningful connections through LinkedIn?\"\n", + "Eve said \"Yes, definitely. 
I was able to connect with several professionals in my field and even landed a job through a LinkedIn connection. Have you had any luck with networking on LinkedIn?\"\n", + "Tommie said \"That's really impressive! I haven't had much luck yet, but I'll definitely keep trying. Thank you for the advice, Eve.\"\n", + "Eve said \"Glad I could help, Tommie. Is there anything else you want to know?\"\n", + "Tommie said \"Thanks again, Eve. I really appreciate your advice and I'll definitely put it into practice. Have a great day!\"\n", + "Eve said \"You're welcome, Tommie! Don't hesitate to reach out if you have any more questions. Have a great day too!\"\n" ] } ], "source": [ "agents = [tommie, eve]\n", - "run_conversation(agents, \"Tommie said: Hi, Eve. Thanks for agreeing to share your story with me and give me advice. I have a bunch of questions.\")" + "run_conversation(agents, \"Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?\")" ] }, { @@ -1092,7 +821,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 29, "id": "c4d252f3-fcc1-474c-846e-a7605a6b4ce7", "metadata": { "tags": [] @@ -1103,8 +832,8 @@ "output_type": "stream", "text": [ "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Tommie is a determined person who is actively searching for job opportunities. He feels both hopeful and anxious about his job search, and remains persistent despite facing disappointment and discouragement. He seeks support from friends and takes breaks to recharge. He tries to stay positive and continues to work on improving his resume and cover letter. He also values the importance of self-care and takes breaks to rest and enjoy nature.\n" + "Innate traits: anxious, likes design, talkative\n", + "Tommie is a hopeful and proactive individual who is searching for a job. He becomes discouraged when he doesn't receive any offers or positive responses, but he tries to stay productive and calm by updating his resume, going for walks, and talking to friends for support. He is also grateful for any encouragement he receives and is motivated to continue his job search. Additionally, he has a fond memory of his childhood pet and enjoys taking breaks to relax.\n" ] } ], @@ -1116,7 +845,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 30, "id": "c04db9a4", "metadata": { "tags": [] @@ -1128,7 +857,7 @@ "text": [ "Name: Eve (age: 34)\n", "Innate traits: curious, helpful\n", - "Eve is a helpful and proactive coworker who values relationships and communication. She is attentive to her colleagues' needs and willing to offer support and assistance. She is also curious and interested in learning more about her work and the people around her. Overall, Eve demonstrates a strong sense of empathy and collaboration in her interactions with others.\n" + "Eve is a helpful and friendly coworker who enjoys playing tennis and eating breakfast. She is attentive and observant, often overhearing conversations around her. She is also proactive and willing to offer advice and assistance to colleagues, particularly in job searching and networking. 
She is considerate of others' feelings and strives to keep conversations going to make others feel comfortable.\n" ] } ], @@ -1136,33 +865,10 @@ "print(eve.get_summary(force_refresh=True))" ] }, - { - "cell_type": "code", - "execution_count": 30, - "id": "71762558-8fb6-44d7-8483-f5b47fb2a862", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"It was really helpful! Eve offered to provide feedback on my resume and portfolio, and she\\'s going to keep an eye out for job openings in the design field. We\\'re planning to meet next Tuesday to discuss her feedback. Thanks for asking!\"'" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"How was your conversation with Eve?\")" - ] - }, { "cell_type": "code", "execution_count": 31, - "id": "085af3d8-ac21-41ea-8f8b-055c56976a67", + "id": "71762558-8fb6-44d7-8483-f5b47fb2a862", "metadata": { "tags": [] }, @@ -1170,7 +876,7 @@ { "data": { "text/plain": [ - "'Eve said \"It was really productive! Tommie is interested in graphic design and UI/UX design positions, so I\\'m going to keep an eye out for any job openings or resources related to those fields. I\\'m also going to provide him with some feedback on his resume and portfolio. We\\'re scheduled to meet next Tuesday at 2 PM to discuss everything in person. Is there anything else you would like me to ask him or anything else I can do to help?\".'" + "'Tommie said \"It was really helpful actually! Eve gave me some great advice on job search strategies and networking. Have you ever tried networking on LinkedIn?\"'" ] }, "execution_count": 31, @@ -1179,13 +885,13 @@ } ], "source": [ - "interview_agent(eve, \"How was your conversation with Tommie?\")" + "interview_agent(tommie, \"How was your conversation with Eve?\")" ] }, { "cell_type": "code", "execution_count": 32, - "id": "5b439f3c-7849-4432-a697-2bcc85b89dae", + "id": "085af3d8-ac21-41ea-8f8b-055c56976a67", "metadata": { "tags": [] }, @@ -1193,7 +899,7 @@ { "data": { "text/plain": [ - "'Eve said \"I feel like I covered everything I wanted to with Tommie, but thank you for asking! If there\\'s anything else that comes up or if you have any further questions, please let me know.\"'" + "'Eve said \"It was great, thanks for asking! Tommie had some really insightful questions about job searching and networking, and I was happy to offer my advice. How about you, have you had a chance to speak with Tommie recently?\"'" ] }, "execution_count": 32, @@ -1202,13 +908,13 @@ } ], "source": [ - "interview_agent(eve, \"What do you wish you would have said to Tommie?\")" + "interview_agent(eve, \"How was your conversation with Tommie?\")" ] }, { "cell_type": "code", "execution_count": 33, - "id": "526e8863-8b32-4216-8e61-2dfe82e3fb47", + "id": "5b439f3c-7849-4432-a697-2bcc85b89dae", "metadata": { "tags": [] }, @@ -1216,7 +922,7 @@ { "data": { "text/plain": [ - "'Tommie said \"Oh, I actually forgot to buy coffee filters yesterday, so I couldn\\'t make coffee this morning. But I\\'m planning to grab some later today. Thanks for asking!\"'" + "'Eve said \"Well, I think I covered most of the topics Tommie was interested in, but if I had to add one thing, it would be to make sure to follow up with any connections you make during your job search. It\\'s important to maintain those relationships and keep them updated on your progress. 
Did you have any other questions, Person A?\"'" ] }, "execution_count": 33, @@ -1225,7 +931,7 @@ } ], "source": [ - "interview_agent(tommie, \"What happened with your coffee this morning?\")" + "interview_agent(eve, \"What do you wish you would have said to Tommie?\")" ] }, { diff --git a/docs/use_cases/agents/characters.ipynb b/docs/use_cases/agents/characters.ipynb deleted file mode 100644 index 8de85cdf..00000000 --- a/docs/use_cases/agents/characters.ipynb +++ /dev/null @@ -1,1261 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "e9732067-71c7-46f7-ad09-381b3bf21a27", - "metadata": {}, - "source": [ - "# Generative Agents in LangChain\n", - "\n", - "This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al.\n", - "\n", - "In it, we leverage a time-weighted Memory object backed by a LangChain Retriever." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "53f81c37-db45-4fdc-843c-aa8fd2a9e99d", - "metadata": {}, - "outputs": [], - "source": [ - "# Use termcolor to make it easy to colorize the outputs.\n", - "!pip install termcolor > /dev/null" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "8851c370-b395-4b80-a79d-486a38ffc244", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import re\n", - "from datetime import datetime, timedelta\n", - "from typing import List, Optional, Tuple\n", - "from termcolor import colored\n", - "\n", - "from pydantic import BaseModel, Field\n", - "\n", - "from langchain import LLMChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import PromptTemplate\n", - "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", - "from langchain.schema import BaseLanguageModel, Document\n", - "from langchain.vectorstores import FAISS\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "81824e76", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "USER_NAME = \"Person A\" # The name you want to use when interviewing the agent.\n", - "LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want." - ] - }, - { - "cell_type": "markdown", - "id": "c3da1649-d88f-4973-b655-7042975cde7e", - "metadata": {}, - "source": [ - "### Generative Agent Memory Components\n", - "\n", - "This tutorial highlights the memory of generative agents and its impact on their behavior. The memory varies from standard LangChain Chat memory in two aspects:\n", - "\n", - "1. **Memory Formation**\n", - "\n", - " Generative Agents have extended memories, stored in a single stream:\n", - " 1. Observations - from dialogues or interactions with the virtual world, about self or others\n", - " 2. Reflections - resurfaced and summarized core memories\n", - "\n", - "2. **Memory Recall**\n", - "\n", - " Memories are retrieved using a weighted sum of salience, recency, and importance.\n", - "\n", - "Review the definition below, focusing on `add_memory` and `summarize_related_memories` methods." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "043e5203-6a41-431c-9efa-3e1743d7d25a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "class GenerativeAgent(BaseModel):\n", - " \"\"\"A character with memory and innate characteristics.\"\"\"\n", - " \n", - " name: str\n", - " age: int\n", - " traits: str\n", - " \"\"\"The traits of the character you wish not to change.\"\"\"\n", - " status: str\n", - " \"\"\"Current activities of the character.\"\"\"\n", - " llm: BaseLanguageModel\n", - " memory_retriever: TimeWeightedVectorStoreRetriever\n", - " \"\"\"The retriever to fetch related memories.\"\"\"\n", - " verbose: bool = False\n", - " \n", - " reflection_threshold: Optional[float] = None\n", - " \"\"\"When the total 'importance' of memories exceeds the above threshold, stop to reflect.\"\"\"\n", - " \n", - " current_plan: List[str] = []\n", - " \"\"\"The current plan of the agent.\"\"\"\n", - " \n", - " summary: str = \"\" #: :meta private:\n", - " summary_refresh_seconds: int= 3600 #: :meta private:\n", - " last_refreshed: datetime =Field(default_factory=datetime.now) #: :meta private:\n", - " daily_summaries: List[str] #: :meta private:\n", - " memory_importance: float = 0.0 #: :meta private:\n", - " max_tokens_limit: int = 1200 #: :meta private:\n", - " \n", - " class Config:\n", - " \"\"\"Configuration for this pydantic object.\"\"\"\n", - "\n", - " arbitrary_types_allowed = True\n", - "\n", - " @staticmethod\n", - " def _parse_list(text: str) -> List[str]:\n", - " \"\"\"Parse a newline-separated string into a list of strings.\"\"\"\n", - " lines = re.split(r'\\n', text.strip())\n", - " return [re.sub(r'^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]\n", - "\n", - "\n", - " def _compute_agent_summary(self):\n", - " \"\"\"\"\"\"\n", - " prompt = PromptTemplate.from_template(\n", - " \"How would you summarize {name}'s core characteristics given the\"\n", - " +\" following statements:\\n\"\n", - " +\"{related_memories}\"\n", - " + \"Do not embellish.\"\n", - " +\"\\n\\nSummary: \"\n", - " )\n", - " # The agent seeks to think about their core characteristics.\n", - " relevant_memories = self.fetch_memories(f\"{self.name}'s core characteristics\")\n", - " relevant_memories_str = \"\\n\".join([f\"{mem.page_content}\" for mem in relevant_memories])\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(name=self.name, related_memories=relevant_memories_str).strip()\n", - " \n", - " def _get_topics_of_reflection(self, last_k: int = 50) -> Tuple[str, str, str]:\n", - " \"\"\"Return the 3 most salient high-level questions about recent observations.\"\"\"\n", - " prompt = PromptTemplate.from_template(\n", - " \"{observations}\\n\\n\"\n", - " + \"Given only the information above, what are the 3 most salient\"\n", - " + \" high-level questions we can answer about the subjects in the statements?\"\n", - " + \" Provide each question on a new line.\\n\\n\"\n", - " )\n", - " reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " observations = self.memory_retriever.memory_stream[-last_k:]\n", - " observation_str = \"\\n\".join([o.page_content for o in observations])\n", - " result = reflection_chain.run(observations=observation_str)\n", - " return self._parse_list(result)\n", - " \n", - " def _get_insights_on_topic(self, topic: str) -> List[str]:\n", - " \"\"\"Generate 'insights' on a topic of reflection, based on pertinent memories.\"\"\"\n", - " prompt = 
PromptTemplate.from_template(\n", - " \"Statements about {topic}\\n\"\n", - " +\"{related_statements}\\n\\n\"\n", - " + \"What 5 high-level insights can you infer from the above statements?\"\n", - " + \" (example format: insight (because of 1, 5, 3))\"\n", - " )\n", - " related_memories = self.fetch_memories(topic)\n", - " related_statements = \"\\n\".join([f\"{i+1}. {memory.page_content}\" \n", - " for i, memory in \n", - " enumerate(related_memories)])\n", - " reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " result = reflection_chain.run(topic=topic, related_statements=related_statements)\n", - " # TODO: Parse the connections between memories and insights\n", - " return self._parse_list(result)\n", - " \n", - " def pause_to_reflect(self) -> List[str]:\n", - " \"\"\"Reflect on recent observations and generate 'insights'.\"\"\"\n", - " print(colored(f\"Character {self.name} is reflecting\", \"blue\"))\n", - " new_insights = []\n", - " topics = self._get_topics_of_reflection()\n", - " for topic in topics:\n", - " insights = self._get_insights_on_topic( topic)\n", - " for insight in insights:\n", - " self.add_memory(insight)\n", - " new_insights.extend(insights)\n", - " return new_insights\n", - " \n", - " def _score_memory_importance(self, memory_content: str, weight: float = 0.15) -> float:\n", - " \"\"\"Score the absolute importance of the given memory.\"\"\"\n", - " # A weight of 0.25 makes this less important than it\n", - " # would be otherwise, relative to salience and time\n", - " prompt = PromptTemplate.from_template(\n", - " \"On the scale of 1 to 10, where 1 is purely mundane\"\n", - " +\" (e.g., brushing teeth, making bed) and 10 is\"\n", - " + \" extremely poignant (e.g., a break up, college\"\n", - " + \" acceptance), rate the likely poignancy of the\"\n", - " + \" following piece of memory. 
Respond with a single integer.\"\n", - " + \"\\nMemory: {memory_content}\"\n", - " + \"\\nRating: \"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " score = chain.run(memory_content=memory_content).strip()\n", - " match = re.search(r\"^\\D*(\\d+)\", score)\n", - " if match:\n", - " return (float(score[0]) / 10) * weight\n", - " else:\n", - " return 0.0\n", - "\n", - "\n", - " def add_memory(self, memory_content: str) -> List[str]:\n", - " \"\"\"Add an observation or memory to the agent's memory.\"\"\"\n", - " importance_score = self._score_memory_importance(memory_content)\n", - " self.memory_importance += importance_score\n", - " document = Document(page_content=memory_content, metadata={\"importance\": importance_score})\n", - " result = self.memory_retriever.add_documents([document])\n", - "\n", - " # After an agent has processed a certain amount of memories (as measured by\n", - " # aggregate importance), it is time to reflect on recent events to add\n", - " # more synthesized memories to the agent's memory stream.\n", - " if (self.reflection_threshold is not None \n", - " and self.memory_importance > self.reflection_threshold\n", - " and self.status != \"Reflecting\"):\n", - " old_status = self.status\n", - " self.status = \"Reflecting\"\n", - " self.pause_to_reflect()\n", - " # Hack to clear the importance from reflection\n", - " self.memory_importance = 0.0\n", - " self.status = old_status\n", - " return result\n", - " \n", - " def fetch_memories(self, observation: str) -> List[Document]:\n", - " \"\"\"Fetch related memories.\"\"\"\n", - " return self.memory_retriever.get_relevant_documents(observation)\n", - " \n", - " \n", - " def get_summary(self, force_refresh: bool = False) -> str:\n", - " \"\"\"Return a descriptive summary of the agent.\"\"\"\n", - " current_time = datetime.now()\n", - " since_refresh = (current_time - self.last_refreshed).seconds\n", - " if not self.summary or since_refresh >= self.summary_refresh_seconds or force_refresh:\n", - " self.summary = self._compute_agent_summary()\n", - " self.last_refreshed = current_time\n", - " return (\n", - " f\"Name: {self.name} (age: {self.age})\"\n", - " +f\"\\nInnate traits: {self.traits}\"\n", - " +f\"\\n{self.summary}\"\n", - " )\n", - " \n", - " def get_full_header(self, force_refresh: bool = False) -> str:\n", - " \"\"\"Return a full header of the agent's status, summary, and current time.\"\"\"\n", - " summary = self.get_summary(force_refresh=force_refresh)\n", - " current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n", - " return f\"{summary}\\nIt is {current_time_str}.\\n{self.name}'s status: {self.status}\"\n", - "\n", - " \n", - " \n", - " def _get_entity_from_observation(self, observation: str) -> str:\n", - " prompt = PromptTemplate.from_template(\n", - " \"What is the observed entity in the following observation? {observation}\"\n", - " +\"\\nEntity=\"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(observation=observation).strip()\n", - "\n", - " def _get_entity_action(self, observation: str, entity_name: str) -> str:\n", - " prompt = PromptTemplate.from_template(\n", - " \"What is the {entity} doing in the following observation? 
{observation}\"\n", - " +\"\\nThe {entity} is\"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(entity=entity_name, observation=observation).strip()\n", - " \n", - " def _format_memories_to_summarize(self, relevant_memories: List[Document]) -> str:\n", - " content_strs = set()\n", - " content = []\n", - " for mem in relevant_memories:\n", - " if mem.page_content in content_strs:\n", - " continue\n", - " content_strs.add(mem.page_content)\n", - " created_time = mem.metadata[\"created_at\"].strftime(\"%B %d, %Y, %I:%M %p\")\n", - " content.append(f\"- {created_time}: {mem.page_content.strip()}\")\n", - " return \"\\n\".join([f\"{mem}\" for mem in content])\n", - " \n", - " def summarize_related_memories(self, observation: str) -> str:\n", - " \"\"\"Summarize memories that are most relevant to an observation.\"\"\"\n", - " entity_name = self._get_entity_from_observation(observation)\n", - " entity_action = self._get_entity_action(observation, entity_name)\n", - " q1 = f\"What is the relationship between {self.name} and {entity_name}\"\n", - " relevant_memories = self.fetch_memories(q1) # Fetch memories related to the agent's relationship with the entity\n", - " q2 = f\"{entity_name} is {entity_action}\"\n", - " relevant_memories += self.fetch_memories(q2) # Fetch things related to the entity-action pair\n", - " context_str = self._format_memories_to_summarize(relevant_memories)\n", - " prompt = PromptTemplate.from_template(\n", - " \"{q1}?\\nContext from memory:\\n{context_str}\\nRelevant context: \"\n", - " )\n", - " chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n", - " return chain.run(q1=q1, context_str=context_str.strip()).strip()\n", - " \n", - " def _get_memories_until_limit(self, consumed_tokens: int) -> str:\n", - " \"\"\"Reduce the number of tokens in the documents.\"\"\"\n", - " result = []\n", - " for doc in self.memory_retriever.memory_stream[::-1]:\n", - " if consumed_tokens >= self.max_tokens_limit:\n", - " break\n", - " consumed_tokens += self.llm.get_num_tokens(doc.page_content)\n", - " if consumed_tokens < self.max_tokens_limit:\n", - " result.append(doc.page_content) \n", - " return \"; \".join(result[::-1])\n", - " \n", - " def _generate_reaction(\n", - " self,\n", - " observation: str,\n", - " suffix: str\n", - " ) -> str:\n", - " \"\"\"React to a given observation.\"\"\"\n", - " prompt = PromptTemplate.from_template(\n", - " \"{agent_summary_description}\"\n", - " +\"\\nIt is {current_time}.\"\n", - " +\"\\n{agent_name}'s status: {agent_status}\"\n", - " + \"\\nSummary of relevant context from {agent_name}'s memory:\"\n", - " +\"\\n{relevant_memories}\"\n", - " +\"\\nMost recent observations: {recent_observations}\"\n", - " + \"\\nObservation: {observation}\"\n", - " + \"\\n\\n\" + suffix\n", - " )\n", - " agent_summary_description = self.get_summary()\n", - " relevant_memories_str = self.summarize_related_memories(observation)\n", - " current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n", - " kwargs = dict(agent_summary_description=agent_summary_description,\n", - " current_time=current_time_str,\n", - " relevant_memories=relevant_memories_str,\n", - " agent_name=self.name,\n", - " observation=observation,\n", - " agent_status=self.status)\n", - " consumed_tokens = self.llm.get_num_tokens(prompt.format(recent_observations=\"\", **kwargs))\n", - " kwargs[\"recent_observations\"] = self._get_memories_until_limit(consumed_tokens)\n", - " action_prediction_chain = 
LLMChain(llm=self.llm, prompt=prompt)\n", - " result = action_prediction_chain.run(**kwargs)\n", - " return result.strip()\n", - " \n", - " def generate_reaction(self, observation: str) -> Tuple[bool, str]:\n", - " \"\"\"React to a given observation.\"\"\"\n", - " call_to_action_template = (\n", - " \"Should {agent_name} react to the observation, and if so,\"\n", - " +\" what would be an appropriate reaction? Respond in one line.\"\n", - " +' If the action is to engage in dialogue, write:\\nSAY: \"what to say\"'\n", - " +\"\\notherwise, write:\\nREACT: {agent_name}'s reaction (if anything).\"\n", - " + \"\\nEither do nothing, react, or say something but not both.\\n\\n\"\n", - " )\n", - " full_result = self._generate_reaction(observation, call_to_action_template)\n", - " result = full_result.strip().split('\\n')[0]\n", - " self.add_memory(f\"{self.name} observed {observation} and reacted by {result}\")\n", - " if \"REACT:\" in result:\n", - " reaction = result.split(\"REACT:\")[-1].strip()\n", - " return False, f\"{self.name} {reaction}\"\n", - " if \"SAY:\" in result:\n", - " said_value = result.split(\"SAY:\")[-1].strip()\n", - " return True, f\"{self.name} said {said_value}\"\n", - " else:\n", - " return False, result\n", - "\n", - " def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:\n", - " \"\"\"React to a given observation.\"\"\"\n", - " call_to_action_template = (\n", - " 'What would {agent_name} say? To end the conversation, write: GOODBYE: \"what to say\". Otherwise to continue the conversation, write: SAY: \"what to say next\"\\n\\n'\n", - " )\n", - " full_result = self._generate_reaction(observation, call_to_action_template)\n", - " result = full_result.strip().split('\\n')[0]\n", - " if \"GOODBYE:\" in result:\n", - " farewell = result.split(\"GOODBYE:\")[-1].strip()\n", - " self.add_memory(f\"{self.name} observed {observation} and said {farewell}\")\n", - " return False, f\"{self.name} said {farewell}\"\n", - " if \"SAY:\" in result:\n", - " response_text = result.split(\"SAY:\")[-1].strip()\n", - " self.add_memory(f\"{self.name} observed {observation} and said {response_text}\")\n", - " return True, f\"{self.name} said {response_text}\"\n", - " else:\n", - " return False, result" - ] - }, - { - "cell_type": "markdown", - "id": "361bd49e", - "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] - }, - "source": [ - "## Memory Lifecycle\n", - "\n", - "Summarizing the above key methods: `add_memory` and `summarize_related_memories`.\n", - "\n", - "When an agent makes an observation, it stores the memory:\n", - " \n", - "1. Language model scores the memory's importance (1 for mundane, 10 for poignant)\n", - "2. Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a `last_accessed_time`.\n", - "\n", - "When an agent responds to an observation:\n", - "\n", - "1. Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.\n", - "2. Summarizes the retrieved information\n", - "3. Updates the `last_accessed_time` for the used documents.\n" - ] - }, - { - "cell_type": "markdown", - "id": "2fa3ca02", - "metadata": {}, - "source": [ - "## Create a Generative Character\n", - "\n", - "\n", - "\n", - "Now that we've walked through the definition, we will create two characters named \"Tommie\" and \"Eve\"." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "ee9c1a1d-c311-4f1c-8131-75fccd9025b1", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "import math\n", - "import faiss\n", - "\n", - "def relevance_score_fn(score: float) -> float:\n", - " \"\"\"Return a similarity score on a scale [0, 1].\"\"\"\n", - " # This will differ depending on a few things:\n", - " # - the distance / similarity metric used by the VectorStore\n", - " # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)\n", - " # This function converts the euclidean norm of normalized embeddings\n", - " # (0 is most similar, sqrt(2) most dissimilar)\n", - " # to a similarity function (0 to 1)\n", - " return 1.0 - score / math.sqrt(2)\n", - "\n", - "def create_new_memory_retriever():\n", - " \"\"\"Create a new vector store retriever unique to the agent.\"\"\"\n", - " # Define your embedding model\n", - " embeddings_model = OpenAIEmbeddings()\n", - " # Initialize the vectorstore as empty\n", - " embedding_size = 1536\n", - " index = faiss.IndexFlatL2(embedding_size)\n", - " vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=relevance_score_fn)\n", - " return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=[\"importance\"], k=15) " - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "7884f9dd-c597-4c27-8c77-1402c71bc2f8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "tommie = GenerativeAgent(name=\"Tommie\", \n", - " age=25,\n", - " traits=\"anxious, likes design\", # You can add more persistent traits here \n", - " status=\"looking for a job\", # When connected to a virtual world, we can have the characters update their status\n", - " memory_retriever=create_new_memory_retriever(),\n", - " llm=LLM,\n", - " daily_summaries = [\n", - " \"Drove across state to move to a new town but doesn't have a job yet.\"\n", - " ],\n", - " reflection_threshold = 8, # we will give this a relatively low number to show how reflection works\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "c524d529", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Unfortunately, there are no statements provided to summarize Tommie's core characteristics.\n" - ] - } - ], - "source": [ - "# The current \"Summary\" of a character can't be made because the agent hasn't made\n", - "# any observations yet.\n", - "print(tommie.get_summary())" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "4be60979-d56e-4abf-a636-b34ffa8b7fba", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# We can give the character memories directly\n", - "tommie_memories = [\n", - " \"Tommie remembers his dog, Bruno, from when he was a kid\",\n", - " \"Tommie feels tired from driving so far\",\n", - " \"Tommie sees the new home\",\n", - " \"The new neighbors have a cat\",\n", - " \"The road is noisy at night\",\n", - " \"Tommie is hungry\",\n", - " \"Tommie tries to get some rest.\",\n", - "]\n", - "for memory in tommie_memories:\n", - " tommie.add_memory(memory)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "6992b48b-697f-4973-9560-142ef85357d7", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Name: Tommie (age: 25)\n", - "Innate 
traits: anxious, likes design\n", - "Tommie is observant, nostalgic, tired, and hungry.\n" - ] - } - ], - "source": [ - "# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.\n", - "# We will see how this summary updates after more observations to create a more rich description.\n", - "print(tommie.get_summary(force_refresh=True))" - ] - }, - { - "cell_type": "markdown", - "id": "40d39a32-838c-4a03-8b27-a52c76c402e7", - "metadata": { - "tags": [] - }, - "source": [ - "## Pre-Interview with Character\n", - "\n", - "Before sending our character on their way, let's ask them a few questions." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "eaf125d8-f54c-4c5f-b6af-32789b1f7d3a", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "def interview_agent(agent: GenerativeAgent, message: str) -> str:\n", - " \"\"\"Help the notebook user interact with the agent.\"\"\"\n", - " new_message = f\"{USER_NAME} says {message}\"\n", - " return agent.generate_dialogue_response(new_message)[1]\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "54024d41-6e83-4914-91e5-73140e2dd9c8", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"I really enjoy design, especially interior design. I find it calming and rewarding to create a space that is both functional and aesthetically pleasing. Unfortunately, I haven\\'t been able to find a job in that field yet.\"'" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"What do you like to do?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "71e2e8cc-921e-4816-82f1-66962b2c1055", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"Well, I\\'m actually on the hunt for a job right now. I\\'m hoping to find something in the design field, but I\\'m open to exploring other options as well. How about you, what are your plans for the day?\"'" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"What are you looking forward to doing today?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "a2521ffc-7050-4ac3-9a18-4cccfc798c31", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"Honestly, I\\'m feeling pretty anxious about finding a job. It\\'s been a bit of a struggle and I\\'m not sure what my next step should be. But I\\'m trying to stay positive and keep pushing forward.\"'" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"What are you most worried about today?\")" - ] - }, - { - "cell_type": "markdown", - "id": "e509c468-f7cd-4d72-9f3a-f4aba28b1eea", - "metadata": {}, - "source": [ - "## Step through the day's observations." 
- ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "154dee3d-bfe0-4828-b963-ed7e885799b3", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Let's have Tommie start going through a day in the life.\n", - "observations = [\n", - " \"Tommie wakes up to the sound of a noisy construction site outside his window.\",\n", - " \"Tommie gets out of bed and heads to the kitchen to make himself some coffee.\",\n", - " \"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\",\n", - " \"Tommie finally finds the filters and makes himself a cup of coffee.\",\n", - " \"The coffee tastes bitter, and Tommie regrets not buying a better brand.\",\n", - " \"Tommie checks his email and sees that he has no job offers yet.\",\n", - " \"Tommie spends some time updating his resume and cover letter.\",\n", - " \"Tommie heads out to explore the city and look for job openings.\",\n", - " \"Tommie sees a sign for a job fair and decides to attend.\",\n", - " \"The line to get in is long, and Tommie has to wait for an hour.\",\n", - " \"Tommie meets several potential employers at the job fair but doesn't receive any offers.\",\n", - " \"Tommie leaves the job fair feeling disappointed.\",\n", - " \"Tommie stops by a local diner to grab some lunch.\",\n", - " \"The service is slow, and Tommie has to wait for 30 minutes to get his food.\",\n", - " \"Tommie overhears a conversation at the next table about a job opening.\",\n", - " \"Tommie asks the diners about the job opening and gets some information about the company.\",\n", - " \"Tommie decides to apply for the job and sends his resume and cover letter.\",\n", - " \"Tommie continues his search for job openings and drops off his resume at several local businesses.\",\n", - " \"Tommie takes a break from his job search to go for a walk in a nearby park.\",\n", - " \"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.\",\n", - " \"Tommie sees a group of people playing frisbee and decides to join in.\",\n", - " \"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\",\n", - " \"Tommie goes back to his apartment to rest for a bit.\",\n", - " \"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\",\n", - " \"Tommie starts to feel frustrated with his job search.\",\n", - " \"Tommie calls his best friend to vent about his struggles.\",\n", - " \"Tommie's friend offers some words of encouragement and tells him to keep trying.\",\n", - " \"Tommie feels slightly better after talking to his friend.\",\n", - "]\n" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "238be49c-edb3-4e26-a2b6-98777ba8de86", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie Tommie groans and covers their head with a pillow, trying to block out the noise.\n", - "\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie Tommie starts making coffee, feeling grateful for the little bit of energy it will give him.\n", - "\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie Tommie sighs in frustration and continues to search for the coffee filters.\n", - "\u001b[32mTommie finally finds the filters and makes 
himself a cup of coffee.\u001b[0m Tommie Tommie takes a sip of the coffee and feels a little more awake.\n", - "\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie Tommie grimaces at the taste of the coffee and decides to make a mental note to buy a better brand next time.\n", - "\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie Tommie feels disappointed and discouraged, but tries to stay positive and continue the job search.\n", - "\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie Tommie feels determined to keep working on his job search.\n", - "\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie Tommie feels hopeful but also anxious as he heads out to explore the city and look for job openings.\n", - "\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie said \"That job fair could be a great opportunity to meet potential employers.\"\n", - "\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie Tommie feels frustrated and restless while waiting in line.\n", - "\u001b[32mTommie meets several potential employers at the job fair but doesn't receive any offers.\u001b[0m Tommie Tommie feels disappointed but remains determined to keep searching for job openings.\n", - "\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie Tommie feels discouraged but remains determined to keep searching for job openings.\n", - "\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie Tommie feels relieved to take a break from job searching and enjoy a meal.\n", - "\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie Tommie feels impatient and frustrated while waiting for his food.\n", - "\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear about the job opening. Could you tell me more about it?\"\n", - "\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Could you tell me more about it?\"\n", - "\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie said \"Thank you for the information, I'll definitely apply for the job and keep my fingers crossed.\"\n", - "\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie Tommie feels hopeful but also anxious as he continues his search for job openings and drops off his resume at several local businesses.\n", - "\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie Tommie takes a deep breath and enjoys the fresh air in the park.\n", - "\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie Tommie smiles and enjoys the momentary distraction from his job search.\n", - "****************************************\n", - "\u001b[34mAfter 20 observations, Tommie's summary is:\n", - "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Tommie is a determined individual who is actively searching for job opportunities. He feels both hopeful and anxious about his search and remains positive despite facing disappointments. 
He takes breaks to rest and enjoy the little things in life, like going for a walk or grabbing a meal. Tommie is also open to asking for help and seeking information about potential job openings. He is grateful for the little things that give him energy and tries to stay positive even when faced with discouragement. Overall, Tommie's core characteristics include determination, positivity, and a willingness to seek help and take breaks when needed.\u001b[0m\n", - "****************************************\n", - "\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Tommie said \"Mind if I join in on the game?\"\n", - "\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie Tommie winces in pain and puts his hand to his nose to check for any bleeding.\n", - "\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie Tommie takes a deep breath and sits down to rest for a bit.\n", - "\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie Tommie sighs and grabs a broom to clean up the mess.\n", - "\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie Tommie takes a deep breath and reminds himself to stay positive and keep searching for job opportunities.\n", - "\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I vent to you for a bit about my job search struggles?\"\n", - "\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you for the encouragement, it means a lot. I'll keep trying.\"\n", - "\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie said \"Thank you for your support, it really means a lot to me.\"\n" - ] - } - ], - "source": [ - "# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve\n", - "for i, observation in enumerate(observations):\n", - " _, reaction = tommie.generate_reaction(observation)\n", - " print(colored(observation, \"green\"), reaction)\n", - " if ((i+1) % 20) == 0:\n", - " print('*'*40)\n", - " print(colored(f\"After {i+1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\", \"blue\"))\n", - " print('*'*40)" - ] - }, - { - "cell_type": "markdown", - "id": "dd62a275-7290-43ca-aa0f-504f3a706d09", - "metadata": {}, - "source": [ - "## Interview after the day" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "id": "6336ab5d-3074-4831-951f-c9e2cba5dfb5", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"It\\'s been a bit of a rollercoaster, to be honest. I went to a job fair and met some potential employers, but didn\\'t get any offers. But then I overheard about a job opening at a diner and applied for it. I also took a break to go for a walk in the park and played frisbee with some people, which was a nice distraction. 
Overall, it\\'s been a bit frustrating, but I\\'m trying to stay positive and keep searching for job opportunities.\"'" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"Tell me about how your day has been going\")" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "809ac906-69b7-4326-99ec-af638d32bb20", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie would say: \"I rely on coffee to give me a little boost, but I regret not buying a better brand lately. The taste has been pretty bitter. But overall, it\\'s not a huge factor in my life.\" '" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"How do you feel about coffee?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "f733a431-19ea-421a-9101-ae2593a8c626", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"Oh, I actually don\\'t have a childhood dog, but I do love animals. Have you had any pets?\"'" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"Tell me about your childhood dog!\")" - ] - }, - { - "cell_type": "markdown", - "id": "c9261428-778a-4c0b-b725-bc9e91b71391", - "metadata": {}, - "source": [ - "## Adding Multiple Characters\n", - "\n", - "Let's add a second character to have a conversation with Tommie. Feel free to configure different traits." - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "ec8bbe18-a021-419c-bf1f-23d34732cd99", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "eve = GenerativeAgent(name=\"Eve\", \n", - " age=34, \n", - " traits=\"curious, helpful\", # You can add more persistent traits here \n", - " status=\"N/A\", # When connected to a virtual world, we can have the characters update their status\n", - " memory_retriever=create_new_memory_retriever(),\n", - " llm=LLM,\n", - " daily_summaries = [\n", - " (\"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.\")\n", - " ],\n", - " reflection_threshold = 5,\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 20, - "id": "1e2745f5-e0da-4abd-98b4-830802ce6698", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "yesterday = (datetime.now() - timedelta(days=1)).strftime(\"%A %B %d\")\n", - "eve_memories = [\n", - " \"Eve overhears her colleague say something about a new client being hard to work with\",\n", - " \"Eve wakes up and hear's the alarm\",\n", - " \"Eve eats a boal of porridge\",\n", - " \"Eve helps a coworker on a task\",\n", - " \"Eve plays tennis with her friend Xu before going to work\",\n", - " \"Eve overhears her colleague say something about Tommie being hard to work with\",\n", - " \n", - "]\n", - "for memory in eve_memories:\n", - " eve.add_memory(memory)" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "id": "de4726e3-4bb1-47da-8fd9-f317a036fe0f", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Name: Eve (age: 34)\n", - "Innate traits: curious, helpful\n", - "Eve is helpful, active, eats breakfast, is attentive to her surroundings, and works with colleagues.\n" - ] - } - ], - "source": [ - "print(eve.get_summary())" - ] - }, - { - 
"cell_type": "markdown", - "id": "837524e9-7f7e-4e9f-b610-f454062f5915", - "metadata": {}, - "source": [ - "## Pre-conversation interviews\n", - "\n", - "\n", - "Let's \"Interview\" Eve before she speaks with Tommie." - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "6cda916d-800c-47bc-a7f9-6a2f19187472", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Eve said \"I\\'m feeling curious about what\\'s on the agenda for today. Anything special we should be aware of?\"'" - ] - }, - "execution_count": 22, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(eve, \"How are you feeling about today?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "id": "448ae644-0a66-4eb2-a03a-319f36948b37", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Eve said \"I overheard someone say Tommie is hard to work with. Is there something I can help with?\"'" - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(eve, \"What do you know about Tommie?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "id": "493fc5b8-8730-4ef8-9820-0f1769ce1691", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Eve said \"Oh, I didn\\'t realize Tommie was looking for a new job. Is there anything I can do to help? Maybe I could introduce him to some people in my network or help him with his resume.\"'" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(eve, \"Tommie is looking to find a job. What are are some things you'd like to ask him?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "id": "4b46452a-6c54-4db2-9d87-18597f70fec8", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Eve said \"Sure, I can definitely help keep the conversation going and ask him plenty of questions. Is there anything specific you would like me to ask him about his skills or experience? I want to make sure the conversation is productive.\"'" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(eve, \"You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.\")" - ] - }, - { - "cell_type": "markdown", - "id": "dd780655-1d73-4fcb-a78d-79fd46a20636", - "metadata": {}, - "source": [ - "## Dialogue between Generative Agents\n", - "\n", - "Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve." 
- ] - }, - { - "cell_type": "code", - "execution_count": 26, - "id": "042ea271-4bf1-4247-9082-239a6fea43b8", - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:\n", - " \"\"\"Runs a conversation between agents.\"\"\"\n", - " _, observation = agents[1].generate_reaction(initial_observation)\n", - " print(observation)\n", - " turns = 0\n", - " while True:\n", - " break_dialogue = False\n", - " for agent in agents:\n", - " stay_in_dialogue, observation = agent.generate_dialogue_response(observation)\n", - " print(observation)\n", - " # observation = f\"{agent.name} said {reaction}\"\n", - " if not stay_in_dialogue:\n", - " break_dialogue = True \n", - " if break_dialogue:\n", - " break\n", - " turns += 1\n" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "id": "d5462b14-218e-4d85-b035-df57ea8e0f80", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Eve said \"Of course, Tommie! I'm happy to help in any way I can. What specifically would you like advice on?\"\n", - "Tommie said \"Thank you, Eve. I was actually wondering if you knew of any job openings in the design field? That's my area of interest and expertise.\"\n", - "Eve said \"Sure, Tommie! I can definitely keep an eye out for any job openings in the design field and let you know if I come across anything. Have you updated your resume and portfolio recently? I could take a look and provide some feedback if you'd like.\"\n", - "Tommie said \"Thank you so much, Eve! That would be incredibly helpful. And yes, I have updated my resume and portfolio recently, but I would love any feedback you can provide. I really appreciate your support and willingness to help.\"\n", - "Eve said \"Great, Tommie! I'll take a look at your resume and portfolio and provide you with some feedback. Do you have any specific companies or types of design jobs that you're interested in? I might be able to provide more targeted advice and resources based on that information.\"\n", - "Tommie said \"Thank you, Eve! I'm primarily interested in graphic design and UI/UX design positions, but I'm open to other opportunities as well. I appreciate any advice or resources you can provide. Thanks again for your help.\"\n", - "Eve said \"Great, Tommie! Those are both really interesting fields. I'll definitely keep an eye out for any job openings or resources related to graphic design and UI/UX design. In the meantime, I can take a look at your resume and portfolio and provide you with some feedback. Would you like me to email you my feedback or would you prefer to discuss it in person?\"\n", - "Tommie said \"Thank you, Eve! I would really appreciate discussing your feedback in person. When would be a good time for us to meet?\"\n", - "Eve said \"Sure, Tommie! How about we schedule a time to meet next week? I have some availability on Tuesday or Thursday afternoon. What works best for you?\"\n", - "Tommie said \"Thank you, Eve! Tuesday afternoon works better for me. How about 2 PM?\"\n", - "Eve said \"Great, Tommie! Tuesday at 2 PM works for me as well. Let's plan to meet then. In the meantime, if you have any questions or if there's anything else I can help with, please don't hesitate to reach out. Good luck with your job search!\"\n", - "Tommie said \"Thank you so much, Eve! I really appreciate your support and willingness to help. I'll see you next Tuesday at 2 PM. 
Have a great day!\"\n", - "Eve said \"You're welcome, Tommie! Looking forward to meeting with you on Tuesday. Have a great day and good luck with your job search!\"\n" - ] - } - ], - "source": [ - "agents = [tommie, eve]\n", - "run_conversation(agents, \"Tommie said: Hi, Eve. Thanks for agreeing to share your story with me and give me advice. I have a bunch of questions.\")" - ] - }, - { - "cell_type": "markdown", - "id": "1b28fe80-03dc-4399-961d-6e9ee1980216", - "metadata": { - "tags": [] - }, - "source": [ - "## Let's interview our agents after their conversation\n", - "\n", - "Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memoreis." - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "id": "c4d252f3-fcc1-474c-846e-a7605a6b4ce7", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Name: Tommie (age: 25)\n", - "Innate traits: anxious, likes design\n", - "Tommie is a determined person who is actively searching for job opportunities. He feels both hopeful and anxious about his job search, and remains persistent despite facing disappointment and discouragement. He seeks support from friends and takes breaks to recharge. He tries to stay positive and continues to work on improving his resume and cover letter. He also values the importance of self-care and takes breaks to rest and enjoy nature.\n" - ] - } - ], - "source": [ - "# We can see a current \"Summary\" of a character based on their own perception of self\n", - "# has changed\n", - "print(tommie.get_summary(force_refresh=True))" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "id": "c04db9a4", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Name: Eve (age: 34)\n", - "Innate traits: curious, helpful\n", - "Eve is a helpful and proactive coworker who values relationships and communication. She is attentive to her colleagues' needs and willing to offer support and assistance. She is also curious and interested in learning more about her work and the people around her. Overall, Eve demonstrates a strong sense of empathy and collaboration in her interactions with others.\n" - ] - } - ], - "source": [ - "print(eve.get_summary(force_refresh=True))" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "id": "71762558-8fb6-44d7-8483-f5b47fb2a862", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"It was really helpful! Eve offered to provide feedback on my resume and portfolio, and she\\'s going to keep an eye out for job openings in the design field. We\\'re planning to meet next Tuesday to discuss her feedback. Thanks for asking!\"'" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"How was your conversation with Eve?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "085af3d8-ac21-41ea-8f8b-055c56976a67", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Eve said \"It was really productive! Tommie is interested in graphic design and UI/UX design positions, so I\\'m going to keep an eye out for any job openings or resources related to those fields. I\\'m also going to provide him with some feedback on his resume and portfolio. We\\'re scheduled to meet next Tuesday at 2 PM to discuss everything in person. 
Is there anything else you would like me to ask him or anything else I can do to help?\".'" - ] - }, - "execution_count": 31, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(eve, \"How was your conversation with Tommie?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "5b439f3c-7849-4432-a697-2bcc85b89dae", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Eve said \"I feel like I covered everything I wanted to with Tommie, but thank you for asking! If there\\'s anything else that comes up or if you have any further questions, please let me know.\"'" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(eve, \"What do you wish you would have said to Tommie?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "526e8863-8b32-4216-8e61-2dfe82e3fb47", - "metadata": { - "tags": [] - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Tommie said \"Oh, I actually forgot to buy coffee filters yesterday, so I couldn\\'t make coffee this morning. But I\\'m planning to grab some later today. Thanks for asking!\"'" - ] - }, - "execution_count": 33, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "interview_agent(tommie, \"What happened with your coffee this morning?\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "a17ff5bc-5ad9-4184-8f80-33643e06c589", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/langchain/experimental/__init__.py b/langchain/experimental/__init__.py index 867cbac8..2443e9d1 100644 --- a/langchain/experimental/__init__.py +++ b/langchain/experimental/__init__.py @@ -1,4 +1,6 @@ from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT from langchain.experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI +from langchain.experimental.generative_agents.generative_agent import GenerativeAgent +from langchain.experimental.generative_agents.memory import GenerativeAgentMemory -__all__ = ["BabyAGI", "AutoGPT"] +__all__ = ["BabyAGI", "AutoGPT", "GenerativeAgent", "GenerativeAgentMemory"] diff --git a/langchain/experimental/generative_agents/__init__.py b/langchain/experimental/generative_agents/__init__.py new file mode 100644 index 00000000..a46082cf --- /dev/null +++ b/langchain/experimental/generative_agents/__init__.py @@ -0,0 +1,5 @@ +"""Generative Agents primitives.""" +from langchain.experimental.generative_agents.generative_agent import GenerativeAgent +from langchain.experimental.generative_agents.memory import GenerativeAgentMemory + +__all__ = ["GenerativeAgent", "GenerativeAgentMemory"] diff --git a/langchain/experimental/generative_agents/generative_agent.py b/langchain/experimental/generative_agents/generative_agent.py new file mode 100644 index 00000000..ac5d951a --- /dev/null +++ b/langchain/experimental/generative_agents/generative_agent.py @@ -0,0 +1,230 @@ +import re +from datetime import datetime +from typing import Any, Dict, List, Optional, 
Tuple + +from pydantic import BaseModel, Field + +from langchain import LLMChain +from langchain.experimental.generative_agents.memory import GenerativeAgentMemory +from langchain.prompts import PromptTemplate +from langchain.schema import BaseLanguageModel + + +class GenerativeAgent(BaseModel): + """A character with memory and innate characteristics.""" + + name: str + """The character's name.""" + + age: Optional[int] = None + """The optional age of the character.""" + traits: str = "N/A" + """Permanent traits to ascribe to the character.""" + status: str + """The traits of the character you wish not to change.""" + memory: GenerativeAgentMemory + """The memory object that combines relevance, recency, and 'importance'.""" + llm: BaseLanguageModel + """The underlying language model.""" + verbose: bool = False + summary: str = "" #: :meta private: + """Stateful self-summary generated via reflection on the character's memory.""" + + summary_refresh_seconds: int = 3600 #: :meta private: + """How frequently to re-generate the summary.""" + + last_refreshed: datetime = Field(default_factory=datetime.now) # : :meta private: + """The last time the character's summary was regenerated.""" + + daily_summaries: List[str] = Field(default_factory=list) # : :meta private: + """Summary of the events in the plan that the agent took.""" + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True + + # LLM-related methods + @staticmethod + def _parse_list(text: str) -> List[str]: + """Parse a newline-separated string into a list of strings.""" + lines = re.split(r"\n", text.strip()) + return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines] + + def chain(self, prompt: PromptTemplate) -> LLMChain: + return LLMChain( + llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory + ) + + def _get_entity_from_observation(self, observation: str) -> str: + prompt = PromptTemplate.from_template( + "What is the observed entity in the following observation? {observation}" + + "\nEntity=" + ) + return self.chain(prompt).run(observation=observation).strip() + + def _get_entity_action(self, observation: str, entity_name: str) -> str: + prompt = PromptTemplate.from_template( + "What is the {entity} doing in the following observation? {observation}" + + "\nThe {entity} is" + ) + return ( + self.chain(prompt).run(entity=entity_name, observation=observation).strip() + ) + + def summarize_related_memories(self, observation: str) -> str: + """Summarize memories that are most relevant to an observation.""" + prompt = PromptTemplate.from_template( + """ +{q1}? +Context from memory: +{relevant_memories} +Relevant context: +""" + ) + entity_name = self._get_entity_from_observation(observation) + entity_action = self._get_entity_action(observation, entity_name) + q1 = f"What is the relationship between {self.name} and {entity_name}" + q2 = f"{entity_name} is {entity_action}" + return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip() + + def _generate_reaction(self, observation: str, suffix: str) -> str: + """React to a given observation or dialogue act.""" + prompt = PromptTemplate.from_template( + "{agent_summary_description}" + + "\nIt is {current_time}." 
+ + "\n{agent_name}'s status: {agent_status}" + + "\nSummary of relevant context from {agent_name}'s memory:" + + "\n{relevant_memories}" + + "\nMost recent observations: {most_recent_memories}" + + "\nObservation: {observation}" + + "\n\n" + + suffix + ) + agent_summary_description = self.get_summary() + relevant_memories_str = self.summarize_related_memories(observation) + current_time_str = datetime.now().strftime("%B %d, %Y, %I:%M %p") + kwargs: Dict[str, Any] = dict( + agent_summary_description=agent_summary_description, + current_time=current_time_str, + relevant_memories=relevant_memories_str, + agent_name=self.name, + observation=observation, + agent_status=self.status, + ) + consumed_tokens = self.llm.get_num_tokens( + prompt.format(most_recent_memories="", **kwargs) + ) + kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens + return self.chain(prompt=prompt).run(**kwargs).strip() + + def _clean_response(self, text: str) -> str: + return re.sub(f"^{self.name} ", "", text.strip()).strip() + + def generate_reaction(self, observation: str) -> Tuple[bool, str]: + """React to a given observation.""" + call_to_action_template = ( + "Should {agent_name} react to the observation, and if so," + + " what would be an appropriate reaction? Respond in one line." + + ' If the action is to engage in dialogue, write:\nSAY: "what to say"' + + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)." + + "\nEither do nothing, react, or say something but not both.\n\n" + ) + full_result = self._generate_reaction(observation, call_to_action_template) + result = full_result.strip().split("\n")[0] + # AAA + self.memory.save_context( + {}, + { + self.memory.add_memory_key: f"{self.name} observed " + f"{observation} and reacted by {result}" + }, + ) + if "REACT:" in result: + reaction = self._clean_response(result.split("REACT:")[-1]) + return False, f"{self.name} {reaction}" + if "SAY:" in result: + said_value = self._clean_response(result.split("SAY:")[-1]) + return True, f"{self.name} said {said_value}" + else: + return False, result + + def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]: + """React to a given observation.""" + call_to_action_template = ( + "What would {agent_name} say? To end the conversation, write:" + ' GOODBYE: "what to say". Otherwise to continue the conversation,' + ' write: SAY: "what to say next"\n\n' + ) + full_result = self._generate_reaction(observation, call_to_action_template) + result = full_result.strip().split("\n")[0] + if "GOODBYE:" in result: + farewell = self._clean_response(result.split("GOODBYE:")[-1]) + self.memory.save_context( + {}, + { + self.memory.add_memory_key: f"{self.name} observed " + f"{observation} and said {farewell}" + }, + ) + return False, f"{self.name} said {farewell}" + if "SAY:" in result: + response_text = self._clean_response(result.split("SAY:")[-1]) + self.memory.save_context( + {}, + { + self.memory.add_memory_key: f"{self.name} observed " + f"{observation} and said {response_text}" + }, + ) + return True, f"{self.name} said {response_text}" + else: + return False, result + + ###################################################### + # Agent stateful' summary methods. # + # Each dialog or response prompt includes a header # + # summarizing the agent's self-description. 
This is # + # updated periodically through probing its memories # + ###################################################### + def _compute_agent_summary(self) -> str: + """""" + prompt = PromptTemplate.from_template( + "How would you summarize {name}'s core characteristics given the" + + " following statements:\n" + + "{relevant_memories}" + + "Do not embellish." + + "\n\nSummary: " + ) + # The agent seeks to think about their core characteristics. + return ( + self.chain(prompt) + .run(name=self.name, queries=[f"{self.name}'s core characteristics"]) + .strip() + ) + + def get_summary(self, force_refresh: bool = False) -> str: + """Return a descriptive summary of the agent.""" + current_time = datetime.now() + since_refresh = (current_time - self.last_refreshed).seconds + if ( + not self.summary + or since_refresh >= self.summary_refresh_seconds + or force_refresh + ): + self.summary = self._compute_agent_summary() + self.last_refreshed = current_time + age = self.age if self.age is not None else "N/A" + return ( + f"Name: {self.name} (age: {age})" + + f"\nInnate traits: {self.traits}" + + f"\n{self.summary}" + ) + + def get_full_header(self, force_refresh: bool = False) -> str: + """Return a full header of the agent's status, summary, and current time.""" + summary = self.get_summary(force_refresh=force_refresh) + current_time_str = datetime.now().strftime("%B %d, %Y, %I:%M %p") + return ( + f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}" + ) diff --git a/langchain/experimental/generative_agents/memory.py b/langchain/experimental/generative_agents/memory.py new file mode 100644 index 00000000..8719d1bf --- /dev/null +++ b/langchain/experimental/generative_agents/memory.py @@ -0,0 +1,212 @@ +import logging +import re +from typing import Any, Dict, List, Optional + +from langchain import LLMChain +from langchain.prompts import PromptTemplate +from langchain.retrievers import TimeWeightedVectorStoreRetriever +from langchain.schema import BaseLanguageModel, BaseMemory, Document + +logger = logging.getLogger(__name__) + + +class GenerativeAgentMemory(BaseMemory): + llm: BaseLanguageModel + """The core language model.""" + + memory_retriever: TimeWeightedVectorStoreRetriever + """The retriever to fetch related memories.""" + verbose: bool = False + + reflection_threshold: Optional[float] = None + """When aggregate_importance exceeds reflection_threshold, stop to reflect.""" + + current_plan: List[str] = [] + """The current plan of the agent.""" + + # A weight of 0.15 makes this less important than it + # would be otherwise, relative to salience and time + importance_weight: float = 0.15 + """How much weight to assign the memory importance.""" + + aggregate_importance: float = 0.0 # : :meta private: + """Track the sum of the 'importance' of recent memories. 
+
+    Triggers reflection when it reaches reflection_threshold."""
+
+    max_tokens_limit: int = 1200  # : :meta private:
+    # input keys
+    queries_key: str = "queries"
+    most_recent_memories_token_key: str = "recent_memories_token"
+    add_memory_key: str = "add_memory"
+    # output keys
+    relevant_memories_key: str = "relevant_memories"
+    relevant_memories_simple_key: str = "relevant_memories_simple"
+    most_recent_memories_key: str = "most_recent_memories"
+
+    def chain(self, prompt: PromptTemplate) -> LLMChain:
+        return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)
+
+    @staticmethod
+    def _parse_list(text: str) -> List[str]:
+        """Parse a newline-separated string into a list of strings."""
+        lines = re.split(r"\n", text.strip())
+        return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines]
+
+    def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]:
+        """Return the 3 most salient high-level questions about recent observations."""
+        prompt = PromptTemplate.from_template(
+            "{observations}\n\n"
+            + "Given only the information above, what are the 3 most salient"
+            + " high-level questions we can answer about the subjects in"
+            + " the statements? Provide each question on a new line.\n\n"
+        )
+        observations = self.memory_retriever.memory_stream[-last_k:]
+        observation_str = "\n".join([o.page_content for o in observations])
+        result = self.chain(prompt).run(observations=observation_str)
+        return self._parse_list(result)
+
+    def _get_insights_on_topic(self, topic: str) -> List[str]:
+        """Generate 'insights' on a topic of reflection, based on pertinent memories."""
+        prompt = PromptTemplate.from_template(
+            "Statements about {topic}\n"
+            + "{related_statements}\n\n"
+            + "What 5 high-level insights can you infer from the above statements?"
+            + " (example format: insight (because of 1, 5, 3))"
+        )
+        related_memories = self.fetch_memories(topic)
+        related_statements = "\n".join(
+            [
+                f"{i+1}. {memory.page_content}"
+                for i, memory in enumerate(related_memories)
+            ]
+        )
+        result = self.chain(prompt).run(
+            topic=topic, related_statements=related_statements
+        )
+        # TODO: Parse the connections between memories and insights
+        return self._parse_list(result)
+
+    def pause_to_reflect(self) -> List[str]:
+        """Reflect on recent observations and generate 'insights'."""
+        if self.verbose:
+            logger.info("Character is reflecting")
+        new_insights = []
+        topics = self._get_topics_of_reflection()
+        for topic in topics:
+            insights = self._get_insights_on_topic(topic)
+            for insight in insights:
+                self.add_memory(insight)
+            new_insights.extend(insights)
+        return new_insights
+
+    def _score_memory_importance(self, memory_content: str) -> float:
+        """Score the absolute importance of the given memory."""
+        prompt = PromptTemplate.from_template(
+            "On the scale of 1 to 10, where 1 is purely mundane"
+            + " (e.g., brushing teeth, making bed) and 10 is"
+            + " extremely poignant (e.g., a break up, college"
+            + " acceptance), rate the likely poignancy of the"
+            + " following piece of memory. Respond with a single integer."
+            + "\nMemory: {memory_content}"
+            + "\nRating: "
+        )
+        score = self.chain(prompt).run(memory_content=memory_content).strip()
+        if self.verbose:
+            logger.info(f"Importance score: {score}")
+        match = re.search(r"^\D*(\d+)", score)
+        if match:
+            return (float(match.group(1)) / 10) * self.importance_weight
+        else:
+            return 0.0
+
+    def add_memory(self, memory_content: str) -> List[str]:
+        """Add an observation or memory to the agent's memory."""
+        importance_score = self._score_memory_importance(memory_content)
+        self.aggregate_importance += importance_score
+        document = Document(
+            page_content=memory_content, metadata={"importance": importance_score}
+        )
+        result = self.memory_retriever.add_documents([document])
+
+        # After an agent has processed a certain amount of memories (as measured by
+        # aggregate importance), it is time to reflect on recent events to add
+        # more synthesized memories to the agent's memory stream.
+        if (
+            self.reflection_threshold is not None
+            and self.aggregate_importance > self.reflection_threshold
+        ):
+            self.pause_to_reflect()
+            # Hack to clear the importance from reflection
+            self.aggregate_importance = 0.0
+        return result
+
+    def fetch_memories(self, observation: str) -> List[Document]:
+        """Fetch related memories."""
+        return self.memory_retriever.get_relevant_documents(observation)
+
+    def format_memories_detail(self, relevant_memories: List[Document]) -> str:
+        content_strs = set()
+        content = []
+        for mem in relevant_memories:
+            if mem.page_content in content_strs:
+                continue
+            content_strs.add(mem.page_content)
+            created_time = mem.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p")
+            content.append(f"- {created_time}: {mem.page_content.strip()}")
+        return "\n".join([f"{mem}" for mem in content])
+
+    def format_memories_simple(self, relevant_memories: List[Document]) -> str:
+        return "; ".join([f"{mem.page_content}" for mem in relevant_memories])
+
+    def _get_memories_until_limit(self, consumed_tokens: int) -> str:
+        """Reduce the number of tokens in the documents."""
+        result = []
+        for doc in self.memory_retriever.memory_stream[::-1]:
+            if consumed_tokens >= self.max_tokens_limit:
+                break
+            consumed_tokens += self.llm.get_num_tokens(doc.page_content)
+            if consumed_tokens < self.max_tokens_limit:
+                result.append(doc)
+        return self.format_memories_simple(result)
+
+    @property
+    def memory_variables(self) -> List[str]:
+        """Input keys this memory class will load dynamically."""
+        return []
+
+    def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
+        """Return key-value pairs given the text input to the chain."""
+        queries = inputs.get(self.queries_key)
+        if queries is not None:
+            relevant_memories = [
+                mem for query in queries for mem in self.fetch_memories(query)
+            ]
+            return {
+                self.relevant_memories_key: self.format_memories_detail(
+                    relevant_memories
+                ),
+                self.relevant_memories_simple_key: self.format_memories_simple(
+                    relevant_memories
+                ),
+            }
+
+        most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
+        if most_recent_memories_token is not None:
+            return {
+                self.most_recent_memories_key: self._get_memories_until_limit(
+                    most_recent_memories_token
+                )
+            }
+        return {}
+
+    def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
+        """Save the context of this model run to memory."""
+        # TODO: fix the save memory key
+        mem = outputs.get(self.add_memory_key)
+        if mem:
+            self.add_memory(mem)
+
+    def clear(self) -> None:
+        """Clear memory contents."""
+        # TODO
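A usage sketch for the new memory module (not part of the diff above): the snippet below shows one way GenerativeAgentMemory might be wired to a time-weighted FAISS retriever and exercised directly through add_memory and load_memory_variables. It is a minimal, hypothetical example: it assumes the faiss package is installed and an OpenAI API key is configured, relies on langchain.experimental exporting GenerativeAgentMemory (as the reference page added earlier documents), and the embedding size, relevance_score_fn normalization, k, reflection_threshold, and sample strings are illustrative choices rather than values taken from this PR.

import math

import faiss

from langchain.chat_models import ChatOpenAI
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.experimental import GenerativeAgentMemory
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.vectorstores import FAISS


def relevance_score_fn(score: float) -> float:
    # Map FAISS euclidean distance to a similarity score in [0, 1].
    return 1.0 - score / math.sqrt(2)


embeddings = OpenAIEmbeddings()
# 1536 matches the dimensionality of OpenAI's default embedding model.
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(
    embeddings.embed_query,
    index,
    InMemoryDocstore({}),
    {},
    relevance_score_fn=relevance_score_fn,
)
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, other_score_keys=["importance"], k=5
)

memory = GenerativeAgentMemory(
    llm=ChatOpenAI(),
    memory_retriever=retriever,
    reflection_threshold=8,  # pause_to_reflect() runs once aggregate importance crosses this
    verbose=False,
)

# add_memory scores the observation's importance with the LLM and stores it.
memory.add_memory("Tommie remembers his dog, Bruno, from when he was a kid")
# Passing the "queries" input key returns the formatted relevant memories.
print(memory.load_memory_variables({"queries": ["What does Tommie remember?"]}))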