{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "e9732067-71c7-46f7-ad09-381b3bf21a27",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Generative Agents in LangChain\n",
|
|
"\n",
|
|
"This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park, et. al.\n",
|
|
"\n",
|
|
"In it, we leverage a time-weighted Memory object backed by a LangChain Retriever."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"id": "53f81c37-db45-4fdc-843c-aa8fd2a9e99d",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Use termcolor to make it easy to colorize the outputs.\n",
|
|
"!pip install termcolor > /dev/null"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 2,
|
|
"id": "8851c370-b395-4b80-a79d-486a38ffc244",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"import re\n",
|
|
"from datetime import datetime, timedelta\n",
|
|
"from typing import List, Optional, Tuple\n",
|
|
"from termcolor import colored\n",
|
|
"\n",
|
|
"from pydantic import BaseModel, Field\n",
|
|
"\n",
|
|
"from langchain import LLMChain\n",
|
|
"from langchain.chat_models import ChatOpenAI\n",
|
|
"from langchain.docstore import InMemoryDocstore\n",
|
|
"from langchain.embeddings import OpenAIEmbeddings\n",
|
|
"from langchain.prompts import PromptTemplate\n",
|
|
"from langchain.retrievers import TimeWeightedVectorStoreRetriever\n",
|
|
"from langchain.schema import BaseLanguageModel, Document\n",
|
|
"from langchain.vectorstores import FAISS\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 3,
|
|
"id": "81824e76",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"USER_NAME = \"Person A\" # The name you want to use when interviewing the agent.\n",
|
|
"LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "c3da1649-d88f-4973-b655-7042975cde7e",
|
|
"metadata": {},
|
|
"source": [
|
|
"### Generative Agent Memory Components\n",
|
|
"\n",
|
|
"This tutorial highlights the memory of generative agents and its impact on their behavior. The memory varies from standard LangChain Chat memory in two aspects:\n",
|
|
"\n",
|
|
"1. **Memory Formation**\n",
|
|
"\n",
|
|
" Generative Agents have extended memories, stored in a single stream:\n",
|
|
" 1. Observations - from dialogues or interactions with the virtual world, about self or others\n",
|
|
" 2. Reflections - resurfaced and summarized core memories\n",
|
|
"\n",
|
|
"2. **Memory Recall**\n",
|
|
"\n",
|
|
" Memories are retrieved using a weighted sum of salience, recency, and importance.\n",
|
|
"\n",
|
|
"Review the definition below, focusing on `add_memory` and `summarize_related_memories` methods."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 4,
|
|
"id": "043e5203-6a41-431c-9efa-3e1743d7d25a",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"class GenerativeAgent(BaseModel):\n",
|
|
" \"\"\"A character with memory and innate characteristics.\"\"\"\n",
|
|
" \n",
|
|
" name: str\n",
|
|
" age: int\n",
|
|
" traits: str\n",
|
|
" \"\"\"The traits of the character you wish not to change.\"\"\"\n",
|
|
" status: str\n",
|
|
" \"\"\"Current activities of the character.\"\"\"\n",
|
|
" llm: BaseLanguageModel\n",
|
|
" memory_retriever: TimeWeightedVectorStoreRetriever\n",
|
|
" \"\"\"The retriever to fetch related memories.\"\"\"\n",
|
|
" verbose: bool = False\n",
|
|
" \n",
|
|
" reflection_threshold: Optional[float] = None\n",
|
|
" \"\"\"When the total 'importance' of memories exceeds the above threshold, stop to reflect.\"\"\"\n",
|
|
" \n",
|
|
" current_plan: List[str] = []\n",
|
|
" \"\"\"The current plan of the agent.\"\"\"\n",
|
|
" \n",
|
|
" summary: str = \"\" #: :meta private:\n",
|
|
" summary_refresh_seconds: int= 3600 #: :meta private:\n",
|
|
" last_refreshed: datetime =Field(default_factory=datetime.now) #: :meta private:\n",
|
|
" daily_summaries: List[str] #: :meta private:\n",
|
|
" memory_importance: float = 0.0 #: :meta private:\n",
|
|
" max_tokens_limit: int = 1200 #: :meta private:\n",
|
|
" \n",
|
|
" class Config:\n",
|
|
" \"\"\"Configuration for this pydantic object.\"\"\"\n",
|
|
"\n",
|
|
" arbitrary_types_allowed = True\n",
|
|
"\n",
|
|
" @staticmethod\n",
|
|
" def _parse_list(text: str) -> List[str]:\n",
|
|
" \"\"\"Parse a newline-separated string into a list of strings.\"\"\"\n",
|
|
" lines = re.split(r'\\n', text.strip())\n",
|
|
" return [re.sub(r'^\\s*\\d+\\.\\s*', '', line).strip() for line in lines]\n",
|
|
"\n",
|
|
"\n",
|
|
" def _compute_agent_summary(self):\n",
|
|
" \"\"\"\"\"\"\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"How would you summarize {name}'s core characteristics given the\"\n",
|
|
" +\" following statements:\\n\"\n",
|
|
" +\"{related_memories}\"\n",
|
|
" + \"Do not embellish.\"\n",
|
|
" +\"\\n\\nSummary: \"\n",
|
|
" )\n",
|
|
" # The agent seeks to think about their core characteristics.\n",
|
|
" relevant_memories = self.fetch_memories(f\"{self.name}'s core characteristics\")\n",
|
|
" relevant_memories_str = \"\\n\".join([f\"{mem.page_content}\" for mem in relevant_memories])\n",
|
|
" chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" return chain.run(name=self.name, related_memories=relevant_memories_str).strip()\n",
|
|
" \n",
|
|
" def _get_topics_of_reflection(self, last_k: int = 50) -> Tuple[str, str, str]:\n",
|
|
" \"\"\"Return the 3 most salient high-level questions about recent observations.\"\"\"\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"{observations}\\n\\n\"\n",
|
|
" + \"Given only the information above, what are the 3 most salient\"\n",
|
|
" + \" high-level questions we can answer about the subjects in the statements?\"\n",
|
|
" + \" Provide each question on a new line.\\n\\n\"\n",
|
|
" )\n",
|
|
" reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" observations = self.memory_retriever.memory_stream[-last_k:]\n",
|
|
" observation_str = \"\\n\".join([o.page_content for o in observations])\n",
|
|
" result = reflection_chain.run(observations=observation_str)\n",
|
|
" return self._parse_list(result)\n",
|
|
" \n",
|
|
" def _get_insights_on_topic(self, topic: str) -> List[str]:\n",
|
|
" \"\"\"Generate 'insights' on a topic of reflection, based on pertinent memories.\"\"\"\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"Statements about {topic}\\n\"\n",
|
|
" +\"{related_statements}\\n\\n\"\n",
|
|
" + \"What 5 high-level insights can you infer from the above statements?\"\n",
|
|
" + \" (example format: insight (because of 1, 5, 3))\"\n",
|
|
" )\n",
|
|
" related_memories = self.fetch_memories(topic)\n",
|
|
" related_statements = \"\\n\".join([f\"{i+1}. {memory.page_content}\" \n",
|
|
" for i, memory in \n",
|
|
" enumerate(related_memories)])\n",
|
|
" reflection_chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" result = reflection_chain.run(topic=topic, related_statements=related_statements)\n",
|
|
" # TODO: Parse the connections between memories and insights\n",
|
|
" return self._parse_list(result)\n",
|
|
" \n",
|
|
" def pause_to_reflect(self) -> List[str]:\n",
|
|
" \"\"\"Reflect on recent observations and generate 'insights'.\"\"\"\n",
|
|
" print(colored(f\"Character {self.name} is reflecting\", \"blue\"))\n",
|
|
" new_insights = []\n",
|
|
" topics = self._get_topics_of_reflection()\n",
|
|
" for topic in topics:\n",
|
|
" insights = self._get_insights_on_topic( topic)\n",
|
|
" for insight in insights:\n",
|
|
" self.add_memory(insight)\n",
|
|
" new_insights.extend(insights)\n",
|
|
" return new_insights\n",
|
|
" \n",
|
|
" def _score_memory_importance(self, memory_content: str, weight: float = 0.15) -> float:\n",
|
|
" \"\"\"Score the absolute importance of the given memory.\"\"\"\n",
|
|
" # A weight of 0.25 makes this less important than it\n",
|
|
" # would be otherwise, relative to salience and time\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"On the scale of 1 to 10, where 1 is purely mundane\"\n",
|
|
" +\" (e.g., brushing teeth, making bed) and 10 is\"\n",
|
|
" + \" extremely poignant (e.g., a break up, college\"\n",
|
|
" + \" acceptance), rate the likely poignancy of the\"\n",
|
|
" + \" following piece of memory. Respond with a single integer.\"\n",
|
|
" + \"\\nMemory: {memory_content}\"\n",
|
|
" + \"\\nRating: \"\n",
|
|
" )\n",
|
|
" chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" score = chain.run(memory_content=memory_content).strip()\n",
|
|
" match = re.search(r\"^\\D*(\\d+)\", score)\n",
|
|
" if match:\n",
|
|
" return (float(score[0]) / 10) * weight\n",
|
|
" else:\n",
|
|
" return 0.0\n",
|
|
"\n",
|
|
"\n",
|
|
" def add_memory(self, memory_content: str) -> List[str]:\n",
|
|
" \"\"\"Add an observation or memory to the agent's memory.\"\"\"\n",
|
|
" importance_score = self._score_memory_importance(memory_content)\n",
|
|
" self.memory_importance += importance_score\n",
|
|
" document = Document(page_content=memory_content, metadata={\"importance\": importance_score})\n",
|
|
" result = self.memory_retriever.add_documents([document])\n",
|
|
"\n",
|
|
" # After an agent has processed a certain amount of memories (as measured by\n",
|
|
" # aggregate importance), it is time to reflect on recent events to add\n",
|
|
" # more synthesized memories to the agent's memory stream.\n",
|
|
" if (self.reflection_threshold is not None \n",
|
|
" and self.memory_importance > self.reflection_threshold\n",
|
|
" and self.status != \"Reflecting\"):\n",
|
|
" old_status = self.status\n",
|
|
" self.status = \"Reflecting\"\n",
|
|
" self.pause_to_reflect()\n",
|
|
" # Hack to clear the importance from reflection\n",
|
|
" self.memory_importance = 0.0\n",
|
|
" self.status = old_status\n",
|
|
" return result\n",
|
|
" \n",
|
|
" def fetch_memories(self, observation: str) -> List[Document]:\n",
|
|
" \"\"\"Fetch related memories.\"\"\"\n",
|
|
" return self.memory_retriever.get_relevant_documents(observation)\n",
|
|
" \n",
|
|
" \n",
|
|
" def get_summary(self, force_refresh: bool = False) -> str:\n",
|
|
" \"\"\"Return a descriptive summary of the agent.\"\"\"\n",
|
|
" current_time = datetime.now()\n",
|
|
" since_refresh = (current_time - self.last_refreshed).seconds\n",
|
|
" if not self.summary or since_refresh >= self.summary_refresh_seconds or force_refresh:\n",
|
|
" self.summary = self._compute_agent_summary()\n",
|
|
" self.last_refreshed = current_time\n",
|
|
" return (\n",
|
|
" f\"Name: {self.name} (age: {self.age})\"\n",
|
|
" +f\"\\nInnate traits: {self.traits}\"\n",
|
|
" +f\"\\n{self.summary}\"\n",
|
|
" )\n",
|
|
" \n",
|
|
" def get_full_header(self, force_refresh: bool = False) -> str:\n",
|
|
" \"\"\"Return a full header of the agent's status, summary, and current time.\"\"\"\n",
|
|
" summary = self.get_summary(force_refresh=force_refresh)\n",
|
|
" current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n",
|
|
" return f\"{summary}\\nIt is {current_time_str}.\\n{self.name}'s status: {self.status}\"\n",
|
|
"\n",
|
|
" \n",
|
|
" \n",
|
|
" def _get_entity_from_observation(self, observation: str) -> str:\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"What is the observed entity in the following observation? {observation}\"\n",
|
|
" +\"\\nEntity=\"\n",
|
|
" )\n",
|
|
" chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" return chain.run(observation=observation).strip()\n",
|
|
"\n",
|
|
" def _get_entity_action(self, observation: str, entity_name: str) -> str:\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"What is the {entity} doing in the following observation? {observation}\"\n",
|
|
" +\"\\nThe {entity} is\"\n",
|
|
" )\n",
|
|
" chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" return chain.run(entity=entity_name, observation=observation).strip()\n",
|
|
" \n",
|
|
" def _format_memories_to_summarize(self, relevant_memories: List[Document]) -> str:\n",
|
|
" content_strs = set()\n",
|
|
" content = []\n",
|
|
" for mem in relevant_memories:\n",
|
|
" if mem.page_content in content_strs:\n",
|
|
" continue\n",
|
|
" content_strs.add(mem.page_content)\n",
|
|
" created_time = mem.metadata[\"created_at\"].strftime(\"%B %d, %Y, %I:%M %p\")\n",
|
|
" content.append(f\"- {created_time}: {mem.page_content.strip()}\")\n",
|
|
" return \"\\n\".join([f\"{mem}\" for mem in content])\n",
|
|
" \n",
|
|
" def summarize_related_memories(self, observation: str) -> str:\n",
|
|
" \"\"\"Summarize memories that are most relevant to an observation.\"\"\"\n",
|
|
" entity_name = self._get_entity_from_observation(observation)\n",
|
|
" entity_action = self._get_entity_action(observation, entity_name)\n",
|
|
" q1 = f\"What is the relationship between {self.name} and {entity_name}\"\n",
|
|
" relevant_memories = self.fetch_memories(q1) # Fetch memories related to the agent's relationship with the entity\n",
|
|
" q2 = f\"{entity_name} is {entity_action}\"\n",
|
|
" relevant_memories += self.fetch_memories(q2) # Fetch things related to the entity-action pair\n",
|
|
" context_str = self._format_memories_to_summarize(relevant_memories)\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"{q1}?\\nContext from memory:\\n{context_str}\\nRelevant context: \"\n",
|
|
" )\n",
|
|
" chain = LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose)\n",
|
|
" return chain.run(q1=q1, context_str=context_str.strip()).strip()\n",
|
|
" \n",
|
|
" def _get_memories_until_limit(self, consumed_tokens: int) -> str:\n",
|
|
" \"\"\"Reduce the number of tokens in the documents.\"\"\"\n",
|
|
" result = []\n",
|
|
" for doc in self.memory_retriever.memory_stream[::-1]:\n",
|
|
" if consumed_tokens >= self.max_tokens_limit:\n",
|
|
" break\n",
|
|
" consumed_tokens += self.llm.get_num_tokens(doc.page_content)\n",
|
|
" if consumed_tokens < self.max_tokens_limit:\n",
|
|
" result.append(doc.page_content) \n",
|
|
" return \"; \".join(result[::-1])\n",
|
|
" \n",
|
|
" def _generate_reaction(\n",
|
|
" self,\n",
|
|
" observation: str,\n",
|
|
" suffix: str\n",
|
|
" ) -> str:\n",
|
|
" \"\"\"React to a given observation.\"\"\"\n",
|
|
" prompt = PromptTemplate.from_template(\n",
|
|
" \"{agent_summary_description}\"\n",
|
|
" +\"\\nIt is {current_time}.\"\n",
|
|
" +\"\\n{agent_name}'s status: {agent_status}\"\n",
|
|
" + \"\\nSummary of relevant context from {agent_name}'s memory:\"\n",
|
|
" +\"\\n{relevant_memories}\"\n",
|
|
" +\"\\nMost recent observations: {recent_observations}\"\n",
|
|
" + \"\\nObservation: {observation}\"\n",
|
|
" + \"\\n\\n\" + suffix\n",
|
|
" )\n",
|
|
" agent_summary_description = self.get_summary()\n",
|
|
" relevant_memories_str = self.summarize_related_memories(observation)\n",
|
|
" current_time_str = datetime.now().strftime(\"%B %d, %Y, %I:%M %p\")\n",
|
|
" kwargs = dict(agent_summary_description=agent_summary_description,\n",
|
|
" current_time=current_time_str,\n",
|
|
" relevant_memories=relevant_memories_str,\n",
|
|
" agent_name=self.name,\n",
|
|
" observation=observation,\n",
|
|
" agent_status=self.status)\n",
|
|
" consumed_tokens = self.llm.get_num_tokens(prompt.format(recent_observations=\"\", **kwargs))\n",
|
|
" kwargs[\"recent_observations\"] = self._get_memories_until_limit(consumed_tokens)\n",
|
|
" action_prediction_chain = LLMChain(llm=self.llm, prompt=prompt)\n",
|
|
" result = action_prediction_chain.run(**kwargs)\n",
|
|
" return result.strip()\n",
|
|
" \n",
|
|
" def generate_reaction(self, observation: str) -> Tuple[bool, str]:\n",
|
|
" \"\"\"React to a given observation.\"\"\"\n",
|
|
" call_to_action_template = (\n",
|
|
" \"Should {agent_name} react to the observation, and if so,\"\n",
|
|
" +\" what would be an appropriate reaction? Respond in one line.\"\n",
|
|
" +' If the action is to engage in dialogue, write:\\nSAY: \"what to say\"'\n",
|
|
" +\"\\notherwise, write:\\nREACT: {agent_name}'s reaction (if anything).\"\n",
|
|
" + \"\\nEither do nothing, react, or say something but not both.\\n\\n\"\n",
|
|
" )\n",
|
|
" full_result = self._generate_reaction(observation, call_to_action_template)\n",
|
|
" result = full_result.strip().split('\\n')[0]\n",
|
|
" self.add_memory(f\"{self.name} observed {observation} and reacted by {result}\")\n",
|
|
" if \"REACT:\" in result:\n",
|
|
" reaction = result.split(\"REACT:\")[-1].strip()\n",
|
|
" return False, f\"{self.name} {reaction}\"\n",
|
|
" if \"SAY:\" in result:\n",
|
|
" said_value = result.split(\"SAY:\")[-1].strip()\n",
|
|
" return True, f\"{self.name} said {said_value}\"\n",
|
|
" else:\n",
|
|
" return False, result\n",
|
|
"\n",
|
|
" def generate_dialogue_response(self, observation: str) -> Tuple[bool, str]:\n",
|
|
" \"\"\"React to a given observation.\"\"\"\n",
|
|
" call_to_action_template = (\n",
|
|
" 'What would {agent_name} say? To end the conversation, write: GOODBYE: \"what to say\". Otherwise to continue the conversation, write: SAY: \"what to say next\"\\n\\n'\n",
|
|
" )\n",
|
|
" full_result = self._generate_reaction(observation, call_to_action_template)\n",
|
|
" result = full_result.strip().split('\\n')[0]\n",
|
|
" if \"GOODBYE:\" in result:\n",
|
|
" farewell = result.split(\"GOODBYE:\")[-1].strip()\n",
|
|
" self.add_memory(f\"{self.name} observed {observation} and said {farewell}\")\n",
|
|
" return False, f\"{self.name} said {farewell}\"\n",
|
|
" if \"SAY:\" in result:\n",
|
|
" response_text = result.split(\"SAY:\")[-1].strip()\n",
|
|
" self.add_memory(f\"{self.name} observed {observation} and said {response_text}\")\n",
|
|
" return True, f\"{self.name} said {response_text}\"\n",
|
|
" else:\n",
|
|
" return False, result"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "361bd49e",
|
|
"metadata": {
|
|
"jp-MarkdownHeadingCollapsed": true,
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Memory Lifecycle\n",
|
|
"\n",
|
|
"Summarizing the above key methods: `add_memory` and `summarize_related_memories`.\n",
|
|
"\n",
|
|
"When an agent makes an observation, it stores the memory:\n",
|
|
" \n",
|
|
"1. Language model scores the memory's importance (1 for mundane, 10 for poignant)\n",
|
|
"2. Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a `last_accessed_time`.\n",
|
|
"\n",
|
|
"When an agent responds to an observation:\n",
|
|
"\n",
|
|
"1. Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.\n",
|
|
"2. Summarizes the retrieved information\n",
|
|
"3. Updates the `last_accessed_time` for the used documents.\n"
|
|
]
|
|
},
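{
"cell_type": "markdown",
"id": "scoring-sketch-md",
"metadata": {},
"source": [
"To make the recall step above concrete, the cell below sketches, in plain Python, how relevance, recency, and importance might combine into a single retrieval score. This is only an illustrative approximation: the real weighting happens inside LangChain's `TimeWeightedVectorStoreRetriever` (with `importance` supplied via `other_score_keys`), and its exact formula may differ from this toy version."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "scoring-sketch-code",
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sketch only; not the actual LangChain implementation.\n",
"def toy_retrieval_score(relevance: float, hours_since_access: float,\n",
"                        importance: float, decay_rate: float = 0.01) -> float:\n",
"    \"\"\"Combine relevance, recency, and importance into one retrieval score.\"\"\"\n",
"    recency = (1.0 - decay_rate) ** hours_since_access  # decays toward 0 over time\n",
"    return relevance + recency + importance\n",
"\n",
"# A recently accessed, important memory can outrank an older, mundane one\n",
"# even when its semantic relevance is slightly lower.\n",
"print(toy_retrieval_score(relevance=0.70, hours_since_access=1, importance=0.09))\n",
"print(toy_retrieval_score(relevance=0.75, hours_since_access=48, importance=0.01))"
]
},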
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "2fa3ca02",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Create a Generative Character\n",
|
|
"\n",
|
|
"\n",
|
|
"\n",
|
|
"Now that we've walked through the definition, we will create two characters named \"Tommie\" and \"Eve\"."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"id": "ee9c1a1d-c311-4f1c-8131-75fccd9025b1",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"import math\n",
|
|
"import faiss\n",
|
|
"\n",
|
|
"def relevance_score_fn(score: float) -> float:\n",
|
|
" \"\"\"Return a similarity score on a scale [0, 1].\"\"\"\n",
|
|
" # This will differ depending on a few things:\n",
|
|
" # - the distance / similarity metric used by the VectorStore\n",
|
|
" # - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)\n",
|
|
" # This function converts the euclidean norm of normalized embeddings\n",
|
|
" # (0 is most similar, sqrt(2) most dissimilar)\n",
|
|
" # to a similarity function (0 to 1)\n",
|
|
" return 1.0 - score / math.sqrt(2)\n",
|
|
"\n",
|
|
"def create_new_memory_retriever():\n",
|
|
" \"\"\"Create a new vector store retriever unique to the agent.\"\"\"\n",
|
|
" # Define your embedding model\n",
|
|
" embeddings_model = OpenAIEmbeddings()\n",
|
|
" # Initialize the vectorstore as empty\n",
|
|
" embedding_size = 1536\n",
|
|
" index = faiss.IndexFlatL2(embedding_size)\n",
|
|
" vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {}, relevance_score_fn=relevance_score_fn)\n",
|
|
" return TimeWeightedVectorStoreRetriever(vectorstore=vectorstore, other_score_keys=[\"importance\"], k=15) "
|
|
]
|
|
},
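{
"cell_type": "markdown",
"id": "relevance-check-md",
"metadata": {},
"source": [
"As a quick illustrative check of `relevance_score_fn` (assuming unit-norm embeddings, so L2 distances lie in [0, sqrt(2)]), the two extremes should map to similarities of 1.0 and roughly 0.0."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "relevance-check-code",
"metadata": {},
"outputs": [],
"source": [
"# Sanity-check the distance-to-similarity conversion at its two extremes.\n",
"print(relevance_score_fn(0.0))           # -> 1.0 (identical embeddings)\n",
"print(relevance_score_fn(math.sqrt(2)))  # -> ~0.0 (maximally dissimilar)"
]
},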
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 6,
|
|
"id": "7884f9dd-c597-4c27-8c77-1402c71bc2f8",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"tommie = GenerativeAgent(name=\"Tommie\", \n",
|
|
" age=25,\n",
|
|
" traits=\"anxious, likes design\", # You can add more persistent traits here \n",
|
|
" status=\"looking for a job\", # When connected to a virtual world, we can have the characters update their status\n",
|
|
" memory_retriever=create_new_memory_retriever(),\n",
|
|
" llm=LLM,\n",
|
|
" daily_summaries = [\n",
|
|
" \"Drove across state to move to a new town but doesn't have a job yet.\"\n",
|
|
" ],\n",
|
|
" reflection_threshold = 8, # we will give this a relatively low number to show how reflection works\n",
|
|
" )"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 7,
|
|
"id": "c524d529",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Name: Tommie (age: 25)\n",
|
|
"Innate traits: anxious, likes design\n",
|
|
"Unfortunately, there are no statements provided to summarize Tommie's core characteristics.\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"# The current \"Summary\" of a character can't be made because the agent hasn't made\n",
|
|
"# any observations yet.\n",
|
|
"print(tommie.get_summary())"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 8,
|
|
"id": "4be60979-d56e-4abf-a636-b34ffa8b7fba",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"# We can give the character memories directly\n",
|
|
"tommie_memories = [\n",
|
|
" \"Tommie remembers his dog, Bruno, from when he was a kid\",\n",
|
|
" \"Tommie feels tired from driving so far\",\n",
|
|
" \"Tommie sees the new home\",\n",
|
|
" \"The new neighbors have a cat\",\n",
|
|
" \"The road is noisy at night\",\n",
|
|
" \"Tommie is hungry\",\n",
|
|
" \"Tommie tries to get some rest.\",\n",
|
|
"]\n",
|
|
"for memory in tommie_memories:\n",
|
|
" tommie.add_memory(memory)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 9,
|
|
"id": "6992b48b-697f-4973-9560-142ef85357d7",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Name: Tommie (age: 25)\n",
|
|
"Innate traits: anxious, likes design\n",
|
|
"Tommie is observant, nostalgic, tired, and hungry.\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.\n",
|
|
"# We will see how this summary updates after more observations to create a more rich description.\n",
|
|
"print(tommie.get_summary(force_refresh=True))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "40d39a32-838c-4a03-8b27-a52c76c402e7",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Pre-Interview with Character\n",
|
|
"\n",
|
|
"Before sending our character on their way, let's ask them a few questions."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 10,
|
|
"id": "eaf125d8-f54c-4c5f-b6af-32789b1f7d3a",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"def interview_agent(agent: GenerativeAgent, message: str) -> str:\n",
|
|
" \"\"\"Help the notebook user interact with the agent.\"\"\"\n",
|
|
" new_message = f\"{USER_NAME} says {message}\"\n",
|
|
" return agent.generate_dialogue_response(new_message)[1]\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 11,
|
|
"id": "54024d41-6e83-4914-91e5-73140e2dd9c8",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"I really enjoy design, especially interior design. I find it calming and rewarding to create a space that is both functional and aesthetically pleasing. Unfortunately, I haven\\'t been able to find a job in that field yet.\"'"
|
|
]
|
|
},
|
|
"execution_count": 11,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"What do you like to do?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 12,
|
|
"id": "71e2e8cc-921e-4816-82f1-66962b2c1055",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"Well, I\\'m actually on the hunt for a job right now. I\\'m hoping to find something in the design field, but I\\'m open to exploring other options as well. How about you, what are your plans for the day?\"'"
|
|
]
|
|
},
|
|
"execution_count": 12,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"What are you looking forward to doing today?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 13,
|
|
"id": "a2521ffc-7050-4ac3-9a18-4cccfc798c31",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"Honestly, I\\'m feeling pretty anxious about finding a job. It\\'s been a bit of a struggle and I\\'m not sure what my next step should be. But I\\'m trying to stay positive and keep pushing forward.\"'"
|
|
]
|
|
},
|
|
"execution_count": 13,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"What are you most worried about today?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "e509c468-f7cd-4d72-9f3a-f4aba28b1eea",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Step through the day's observations."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 14,
|
|
"id": "154dee3d-bfe0-4828-b963-ed7e885799b3",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Let's have Tommie start going through a day in the life.\n",
|
|
"observations = [\n",
|
|
" \"Tommie wakes up to the sound of a noisy construction site outside his window.\",\n",
|
|
" \"Tommie gets out of bed and heads to the kitchen to make himself some coffee.\",\n",
|
|
" \"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\",\n",
|
|
" \"Tommie finally finds the filters and makes himself a cup of coffee.\",\n",
|
|
" \"The coffee tastes bitter, and Tommie regrets not buying a better brand.\",\n",
|
|
" \"Tommie checks his email and sees that he has no job offers yet.\",\n",
|
|
" \"Tommie spends some time updating his resume and cover letter.\",\n",
|
|
" \"Tommie heads out to explore the city and look for job openings.\",\n",
|
|
" \"Tommie sees a sign for a job fair and decides to attend.\",\n",
|
|
" \"The line to get in is long, and Tommie has to wait for an hour.\",\n",
|
|
" \"Tommie meets several potential employers at the job fair but doesn't receive any offers.\",\n",
|
|
" \"Tommie leaves the job fair feeling disappointed.\",\n",
|
|
" \"Tommie stops by a local diner to grab some lunch.\",\n",
|
|
" \"The service is slow, and Tommie has to wait for 30 minutes to get his food.\",\n",
|
|
" \"Tommie overhears a conversation at the next table about a job opening.\",\n",
|
|
" \"Tommie asks the diners about the job opening and gets some information about the company.\",\n",
|
|
" \"Tommie decides to apply for the job and sends his resume and cover letter.\",\n",
|
|
" \"Tommie continues his search for job openings and drops off his resume at several local businesses.\",\n",
|
|
" \"Tommie takes a break from his job search to go for a walk in a nearby park.\",\n",
|
|
" \"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.\",\n",
|
|
" \"Tommie sees a group of people playing frisbee and decides to join in.\",\n",
|
|
" \"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\",\n",
|
|
" \"Tommie goes back to his apartment to rest for a bit.\",\n",
|
|
" \"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\",\n",
|
|
" \"Tommie starts to feel frustrated with his job search.\",\n",
|
|
" \"Tommie calls his best friend to vent about his struggles.\",\n",
|
|
" \"Tommie's friend offers some words of encouragement and tells him to keep trying.\",\n",
|
|
" \"Tommie feels slightly better after talking to his friend.\",\n",
|
|
"]\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 15,
|
|
"id": "238be49c-edb3-4e26-a2b6-98777ba8de86",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"\u001b[32mTommie wakes up to the sound of a noisy construction site outside his window.\u001b[0m Tommie Tommie groans and covers their head with a pillow, trying to block out the noise.\n",
|
|
"\u001b[32mTommie gets out of bed and heads to the kitchen to make himself some coffee.\u001b[0m Tommie Tommie starts making coffee, feeling grateful for the little bit of energy it will give him.\n",
|
|
"\u001b[32mTommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.\u001b[0m Tommie Tommie sighs in frustration and continues to search for the coffee filters.\n",
|
|
"\u001b[32mTommie finally finds the filters and makes himself a cup of coffee.\u001b[0m Tommie Tommie takes a sip of the coffee and feels a little more awake.\n",
|
|
"\u001b[32mThe coffee tastes bitter, and Tommie regrets not buying a better brand.\u001b[0m Tommie Tommie grimaces at the taste of the coffee and decides to make a mental note to buy a better brand next time.\n",
|
|
"\u001b[32mTommie checks his email and sees that he has no job offers yet.\u001b[0m Tommie Tommie feels disappointed and discouraged, but tries to stay positive and continue the job search.\n",
|
|
"\u001b[32mTommie spends some time updating his resume and cover letter.\u001b[0m Tommie Tommie feels determined to keep working on his job search.\n",
|
|
"\u001b[32mTommie heads out to explore the city and look for job openings.\u001b[0m Tommie Tommie feels hopeful but also anxious as he heads out to explore the city and look for job openings.\n",
|
|
"\u001b[32mTommie sees a sign for a job fair and decides to attend.\u001b[0m Tommie said \"That job fair could be a great opportunity to meet potential employers.\"\n",
|
|
"\u001b[32mThe line to get in is long, and Tommie has to wait for an hour.\u001b[0m Tommie Tommie feels frustrated and restless while waiting in line.\n",
|
|
"\u001b[32mTommie meets several potential employers at the job fair but doesn't receive any offers.\u001b[0m Tommie Tommie feels disappointed but remains determined to keep searching for job openings.\n",
|
|
"\u001b[32mTommie leaves the job fair feeling disappointed.\u001b[0m Tommie Tommie feels discouraged but remains determined to keep searching for job openings.\n",
|
|
"\u001b[32mTommie stops by a local diner to grab some lunch.\u001b[0m Tommie Tommie feels relieved to take a break from job searching and enjoy a meal.\n",
|
|
"\u001b[32mThe service is slow, and Tommie has to wait for 30 minutes to get his food.\u001b[0m Tommie Tommie feels impatient and frustrated while waiting for his food.\n",
|
|
"\u001b[32mTommie overhears a conversation at the next table about a job opening.\u001b[0m Tommie said \"Excuse me, I couldn't help but overhear about the job opening. Could you tell me more about it?\"\n",
|
|
"\u001b[32mTommie asks the diners about the job opening and gets some information about the company.\u001b[0m Tommie said \"Could you tell me more about it?\"\n",
|
|
"\u001b[32mTommie decides to apply for the job and sends his resume and cover letter.\u001b[0m Tommie said \"Thank you for the information, I'll definitely apply for the job and keep my fingers crossed.\"\n",
|
|
"\u001b[32mTommie continues his search for job openings and drops off his resume at several local businesses.\u001b[0m Tommie Tommie feels hopeful but also anxious as he continues his search for job openings and drops off his resume at several local businesses.\n",
|
|
"\u001b[32mTommie takes a break from his job search to go for a walk in a nearby park.\u001b[0m Tommie Tommie takes a deep breath and enjoys the fresh air in the park.\n",
|
|
"\u001b[32mA dog approaches and licks Tommie's feet, and he pets it for a few minutes.\u001b[0m Tommie Tommie smiles and enjoys the momentary distraction from his job search.\n",
|
|
"****************************************\n",
|
|
"\u001b[34mAfter 20 observations, Tommie's summary is:\n",
|
|
"Name: Tommie (age: 25)\n",
|
|
"Innate traits: anxious, likes design\n",
|
|
"Tommie is a determined individual who is actively searching for job opportunities. He feels both hopeful and anxious about his search and remains positive despite facing disappointments. He takes breaks to rest and enjoy the little things in life, like going for a walk or grabbing a meal. Tommie is also open to asking for help and seeking information about potential job openings. He is grateful for the little things that give him energy and tries to stay positive even when faced with discouragement. Overall, Tommie's core characteristics include determination, positivity, and a willingness to seek help and take breaks when needed.\u001b[0m\n",
|
|
"****************************************\n",
|
|
"\u001b[32mTommie sees a group of people playing frisbee and decides to join in.\u001b[0m Tommie said \"Mind if I join in on the game?\"\n",
|
|
"\u001b[32mTommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.\u001b[0m Tommie Tommie winces in pain and puts his hand to his nose to check for any bleeding.\n",
|
|
"\u001b[32mTommie goes back to his apartment to rest for a bit.\u001b[0m Tommie Tommie takes a deep breath and sits down to rest for a bit.\n",
|
|
"\u001b[32mA raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.\u001b[0m Tommie Tommie sighs and grabs a broom to clean up the mess.\n",
|
|
"\u001b[32mTommie starts to feel frustrated with his job search.\u001b[0m Tommie Tommie takes a deep breath and reminds himself to stay positive and keep searching for job opportunities.\n",
|
|
"\u001b[32mTommie calls his best friend to vent about his struggles.\u001b[0m Tommie said \"Hey, can I vent to you for a bit about my job search struggles?\"\n",
|
|
"\u001b[32mTommie's friend offers some words of encouragement and tells him to keep trying.\u001b[0m Tommie said \"Thank you for the encouragement, it means a lot. I'll keep trying.\"\n",
|
|
"\u001b[32mTommie feels slightly better after talking to his friend.\u001b[0m Tommie said \"Thank you for your support, it really means a lot to me.\"\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve\n",
|
|
"for i, observation in enumerate(observations):\n",
|
|
" _, reaction = tommie.generate_reaction(observation)\n",
|
|
" print(colored(observation, \"green\"), reaction)\n",
|
|
" if ((i+1) % 20) == 0:\n",
|
|
" print('*'*40)\n",
|
|
" print(colored(f\"After {i+1} observations, Tommie's summary is:\\n{tommie.get_summary(force_refresh=True)}\", \"blue\"))\n",
|
|
" print('*'*40)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "dd62a275-7290-43ca-aa0f-504f3a706d09",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Interview after the day"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 16,
|
|
"id": "6336ab5d-3074-4831-951f-c9e2cba5dfb5",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"It\\'s been a bit of a rollercoaster, to be honest. I went to a job fair and met some potential employers, but didn\\'t get any offers. But then I overheard about a job opening at a diner and applied for it. I also took a break to go for a walk in the park and played frisbee with some people, which was a nice distraction. Overall, it\\'s been a bit frustrating, but I\\'m trying to stay positive and keep searching for job opportunities.\"'"
|
|
]
|
|
},
|
|
"execution_count": 16,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"Tell me about how your day has been going\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 17,
|
|
"id": "809ac906-69b7-4326-99ec-af638d32bb20",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie would say: \"I rely on coffee to give me a little boost, but I regret not buying a better brand lately. The taste has been pretty bitter. But overall, it\\'s not a huge factor in my life.\" '"
|
|
]
|
|
},
|
|
"execution_count": 17,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"How do you feel about coffee?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 18,
|
|
"id": "f733a431-19ea-421a-9101-ae2593a8c626",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"Oh, I actually don\\'t have a childhood dog, but I do love animals. Have you had any pets?\"'"
|
|
]
|
|
},
|
|
"execution_count": 18,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"Tell me about your childhood dog!\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "c9261428-778a-4c0b-b725-bc9e91b71391",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Adding Multiple Characters\n",
|
|
"\n",
|
|
"Let's add a second character to have a conversation with Tommie. Feel free to configure different traits."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 19,
|
|
"id": "ec8bbe18-a021-419c-bf1f-23d34732cd99",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"eve = GenerativeAgent(name=\"Eve\", \n",
|
|
" age=34, \n",
|
|
" traits=\"curious, helpful\", # You can add more persistent traits here \n",
|
|
" status=\"N/A\", # When connected to a virtual world, we can have the characters update their status\n",
|
|
" memory_retriever=create_new_memory_retriever(),\n",
|
|
" llm=LLM,\n",
|
|
" daily_summaries = [\n",
|
|
" (\"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie.\")\n",
|
|
" ],\n",
|
|
" reflection_threshold = 5,\n",
|
|
" )"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 20,
|
|
"id": "1e2745f5-e0da-4abd-98b4-830802ce6698",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"yesterday = (datetime.now() - timedelta(days=1)).strftime(\"%A %B %d\")\n",
|
|
"eve_memories = [\n",
|
|
" \"Eve overhears her colleague say something about a new client being hard to work with\",\n",
|
|
" \"Eve wakes up and hear's the alarm\",\n",
|
|
" \"Eve eats a boal of porridge\",\n",
|
|
" \"Eve helps a coworker on a task\",\n",
|
|
" \"Eve plays tennis with her friend Xu before going to work\",\n",
|
|
" \"Eve overhears her colleague say something about Tommie being hard to work with\",\n",
|
|
" \n",
|
|
"]\n",
|
|
"for memory in eve_memories:\n",
|
|
" eve.add_memory(memory)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 21,
|
|
"id": "de4726e3-4bb1-47da-8fd9-f317a036fe0f",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Name: Eve (age: 34)\n",
|
|
"Innate traits: curious, helpful\n",
|
|
"Eve is helpful, active, eats breakfast, is attentive to her surroundings, and works with colleagues.\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"print(eve.get_summary())"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "837524e9-7f7e-4e9f-b610-f454062f5915",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Pre-conversation interviews\n",
|
|
"\n",
|
|
"\n",
|
|
"Let's \"Interview\" Eve before she speaks with Tommie."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 22,
|
|
"id": "6cda916d-800c-47bc-a7f9-6a2f19187472",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Eve said \"I\\'m feeling curious about what\\'s on the agenda for today. Anything special we should be aware of?\"'"
|
|
]
|
|
},
|
|
"execution_count": 22,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(eve, \"How are you feeling about today?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 23,
|
|
"id": "448ae644-0a66-4eb2-a03a-319f36948b37",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Eve said \"I overheard someone say Tommie is hard to work with. Is there something I can help with?\"'"
|
|
]
|
|
},
|
|
"execution_count": 23,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(eve, \"What do you know about Tommie?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 24,
|
|
"id": "493fc5b8-8730-4ef8-9820-0f1769ce1691",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Eve said \"Oh, I didn\\'t realize Tommie was looking for a new job. Is there anything I can do to help? Maybe I could introduce him to some people in my network or help him with his resume.\"'"
|
|
]
|
|
},
|
|
"execution_count": 24,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(eve, \"Tommie is looking to find a job. What are are some things you'd like to ask him?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 25,
|
|
"id": "4b46452a-6c54-4db2-9d87-18597f70fec8",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Eve said \"Sure, I can definitely help keep the conversation going and ask him plenty of questions. Is there anything specific you would like me to ask him about his skills or experience? I want to make sure the conversation is productive.\"'"
|
|
]
|
|
},
|
|
"execution_count": 25,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(eve, \"You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "dd780655-1d73-4fcb-a78d-79fd46a20636",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Dialogue between Generative Agents\n",
|
|
"\n",
|
|
"Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 26,
|
|
"id": "042ea271-4bf1-4247-9082-239a6fea43b8",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:\n",
|
|
" \"\"\"Runs a conversation between agents.\"\"\"\n",
|
|
" _, observation = agents[1].generate_reaction(initial_observation)\n",
|
|
" print(observation)\n",
|
|
" turns = 0\n",
|
|
" while True:\n",
|
|
" break_dialogue = False\n",
|
|
" for agent in agents:\n",
|
|
" stay_in_dialogue, observation = agent.generate_dialogue_response(observation)\n",
|
|
" print(observation)\n",
|
|
" # observation = f\"{agent.name} said {reaction}\"\n",
|
|
" if not stay_in_dialogue:\n",
|
|
" break_dialogue = True \n",
|
|
" if break_dialogue:\n",
|
|
" break\n",
|
|
" turns += 1\n"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 27,
|
|
"id": "d5462b14-218e-4d85-b035-df57ea8e0f80",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Eve said \"Of course, Tommie! I'm happy to help in any way I can. What specifically would you like advice on?\"\n",
|
|
"Tommie said \"Thank you, Eve. I was actually wondering if you knew of any job openings in the design field? That's my area of interest and expertise.\"\n",
|
|
"Eve said \"Sure, Tommie! I can definitely keep an eye out for any job openings in the design field and let you know if I come across anything. Have you updated your resume and portfolio recently? I could take a look and provide some feedback if you'd like.\"\n",
|
|
"Tommie said \"Thank you so much, Eve! That would be incredibly helpful. And yes, I have updated my resume and portfolio recently, but I would love any feedback you can provide. I really appreciate your support and willingness to help.\"\n",
|
|
"Eve said \"Great, Tommie! I'll take a look at your resume and portfolio and provide you with some feedback. Do you have any specific companies or types of design jobs that you're interested in? I might be able to provide more targeted advice and resources based on that information.\"\n",
|
|
"Tommie said \"Thank you, Eve! I'm primarily interested in graphic design and UI/UX design positions, but I'm open to other opportunities as well. I appreciate any advice or resources you can provide. Thanks again for your help.\"\n",
|
|
"Eve said \"Great, Tommie! Those are both really interesting fields. I'll definitely keep an eye out for any job openings or resources related to graphic design and UI/UX design. In the meantime, I can take a look at your resume and portfolio and provide you with some feedback. Would you like me to email you my feedback or would you prefer to discuss it in person?\"\n",
|
|
"Tommie said \"Thank you, Eve! I would really appreciate discussing your feedback in person. When would be a good time for us to meet?\"\n",
|
|
"Eve said \"Sure, Tommie! How about we schedule a time to meet next week? I have some availability on Tuesday or Thursday afternoon. What works best for you?\"\n",
|
|
"Tommie said \"Thank you, Eve! Tuesday afternoon works better for me. How about 2 PM?\"\n",
|
|
"Eve said \"Great, Tommie! Tuesday at 2 PM works for me as well. Let's plan to meet then. In the meantime, if you have any questions or if there's anything else I can help with, please don't hesitate to reach out. Good luck with your job search!\"\n",
|
|
"Tommie said \"Thank you so much, Eve! I really appreciate your support and willingness to help. I'll see you next Tuesday at 2 PM. Have a great day!\"\n",
|
|
"Eve said \"You're welcome, Tommie! Looking forward to meeting with you on Tuesday. Have a great day and good luck with your job search!\"\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"agents = [tommie, eve]\n",
|
|
"run_conversation(agents, \"Tommie said: Hi, Eve. Thanks for agreeing to share your story with me and give me advice. I have a bunch of questions.\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "1b28fe80-03dc-4399-961d-6e9ee1980216",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"source": [
|
|
"## Let's interview our agents after their conversation\n",
|
|
"\n",
|
|
"Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memoreis."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 28,
|
|
"id": "c4d252f3-fcc1-474c-846e-a7605a6b4ce7",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Name: Tommie (age: 25)\n",
|
|
"Innate traits: anxious, likes design\n",
|
|
"Tommie is a determined person who is actively searching for job opportunities. He feels both hopeful and anxious about his job search, and remains persistent despite facing disappointment and discouragement. He seeks support from friends and takes breaks to recharge. He tries to stay positive and continues to work on improving his resume and cover letter. He also values the importance of self-care and takes breaks to rest and enjoy nature.\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"# We can see a current \"Summary\" of a character based on their own perception of self\n",
|
|
"# has changed\n",
|
|
"print(tommie.get_summary(force_refresh=True))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 29,
|
|
"id": "c04db9a4",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Name: Eve (age: 34)\n",
|
|
"Innate traits: curious, helpful\n",
|
|
"Eve is a helpful and proactive coworker who values relationships and communication. She is attentive to her colleagues' needs and willing to offer support and assistance. She is also curious and interested in learning more about her work and the people around her. Overall, Eve demonstrates a strong sense of empathy and collaboration in her interactions with others.\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"print(eve.get_summary(force_refresh=True))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 30,
|
|
"id": "71762558-8fb6-44d7-8483-f5b47fb2a862",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"It was really helpful! Eve offered to provide feedback on my resume and portfolio, and she\\'s going to keep an eye out for job openings in the design field. We\\'re planning to meet next Tuesday to discuss her feedback. Thanks for asking!\"'"
|
|
]
|
|
},
|
|
"execution_count": 30,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"How was your conversation with Eve?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 31,
|
|
"id": "085af3d8-ac21-41ea-8f8b-055c56976a67",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Eve said \"It was really productive! Tommie is interested in graphic design and UI/UX design positions, so I\\'m going to keep an eye out for any job openings or resources related to those fields. I\\'m also going to provide him with some feedback on his resume and portfolio. We\\'re scheduled to meet next Tuesday at 2 PM to discuss everything in person. Is there anything else you would like me to ask him or anything else I can do to help?\".'"
|
|
]
|
|
},
|
|
"execution_count": 31,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(eve, \"How was your conversation with Tommie?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 32,
|
|
"id": "5b439f3c-7849-4432-a697-2bcc85b89dae",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Eve said \"I feel like I covered everything I wanted to with Tommie, but thank you for asking! If there\\'s anything else that comes up or if you have any further questions, please let me know.\"'"
|
|
]
|
|
},
|
|
"execution_count": 32,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(eve, \"What do you wish you would have said to Tommie?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 33,
|
|
"id": "526e8863-8b32-4216-8e61-2dfe82e3fb47",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'Tommie said \"Oh, I actually forgot to buy coffee filters yesterday, so I couldn\\'t make coffee this morning. But I\\'m planning to grab some later today. Thanks for asking!\"'"
|
|
]
|
|
},
|
|
"execution_count": 33,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"interview_agent(tommie, \"What happened with your coffee this morning?\")"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "a17ff5bc-5ad9-4184-8f80-33643e06c589",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3 (ipykernel)",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.11.2"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|