Add Marathon Notebook (#3163)

Add an example using AutoGPT to get the Boston Marathon winning times

Add a web browser + summarization tool in the notebook
Zander Chase 1 year ago committed by GitHub
parent 0b542661b4
commit 89c63cf8a6

@@ -0,0 +1,678 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "14f8b67b",
"metadata": {},
"source": [
"## AutoGPT example finding Winning Marathon Times\n",
"\n",
"* Implementation of https://github.com/Significant-Gravitas/Auto-GPT \n",
"* With LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ef972313-c05a-4c49-8fd1-03e599e21033",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install bs4\n",
"# !pip install nest_asyncio"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "1cff42fd",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# General \n",
"import pandas as pd\n",
"from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT\n",
"from langchain.chat_models import ChatOpenAI\n",
"\n",
"from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n",
"from langchain.docstore.document import Document\n",
"from langchain.chains import RetrievalQA\n",
"import asyncio\n",
"import nest_asyncio\n",
"\n",
"\n",
"# Needed synce jupyter runs an async eventloop\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "01283ac7-1da0-41ba-8011-bd455d21dd82",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=1.0)"
]
},
{
"cell_type": "markdown",
"id": "192496a7",
"metadata": {},
"source": [
"### Set up tools\n",
"\n",
"* We'll set up an AutoGPT with a `search` tool, and `write-file` tool, and a `read-file` tool, and a web browsing tool"
]
},
{
"cell_type": "markdown",
"id": "708a426f",
"metadata": {},
"source": [
"Define any other `tools` you want to use here"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "cef4c150-0ef1-4a33-836b-01062fec134e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Tools\n",
"from typing import Optional\n",
"from langchain.agents import tool\n",
"from langchain.tools.file_management.read import ReadFileTool\n",
"from langchain.tools.file_management.write import WriteFileTool\n",
"\n",
"@tool\n",
"def process_csv(csv_file_path: str, instructions: str, output_path: Optional[str] = None) -> str:\n",
" \"\"\"Process a CSV by with pandas in a limited REPL. Only use this after writing data to disk as a csv file. Any figures must be saved to disk to be viewed by the human. Instructions should be written in natural language, not code. Assume the dataframe is already loaded.\"\"\"\n",
" try:\n",
" df = pd.read_csv(csv_file_path)\n",
" except Exception as e:\n",
" return f\"Error: {e}\"\n",
" agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)\n",
" if output_path is not None:\n",
" instructions += f\" Save output to disk at {output_path}\"\n",
" try:\n",
" return agent.run(instructions)\n",
" except Exception as e:\n",
" return f\"Error: {e}\"\n"
]
},
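{
"cell_type": "markdown",
"id": "custom-tool-note",
"metadata": {},
"source": [
"For reference, a custom tool can be as small as a typed, docstringed function under the `@tool` decorator. The sketch below is illustrative only; the `word_count` tool is not added to the agent's tool list later."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "custom-tool-sketch",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Illustrative only: a minimal custom tool, not used by the agent below.\n",
"# The @tool decorator turns the function into a LangChain Tool; the function\n",
"# name becomes the tool name and the docstring becomes its description.\n",
"from langchain.agents import tool\n",
"\n",
"@tool\n",
"def word_count(text: str) -> str:\n",
"    \"\"\"Count the number of words in a piece of text.\"\"\"\n",
"    return str(len(text.split()))"
]
},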
{
"cell_type": "markdown",
"id": "51c07298-00e0-42d6-8aff-bd2e6bbd35a3",
"metadata": {},
"source": [
"**Web Search Tool**\n",
"\n",
"No need for API Tokens to use this tool, but it will require an optional dependency"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4afdedb2-f295-4ab8-9397-3640f5eeeed3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install duckduckgo_search"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "45f143de-e49e-4e27-88eb-ee44a4fdf933",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import json\n",
"from duckduckgo_search import ddg"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e2e799f4-86fb-4190-a298-4ae5c7b7a540",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"@tool\n",
"def web_search(query: str, num_results: int = 8) -> str:\n",
" \"\"\"Useful for general internet search queries.\"\"\"\n",
" search_results = []\n",
" if not query:\n",
" return json.dumps(search_results)\n",
"\n",
" results = ddg(query, max_results=num_results)\n",
" if not results:\n",
" return json.dumps(search_results)\n",
"\n",
" for j in results:\n",
" search_results.append(j)\n",
"\n",
" return json.dumps(search_results, ensure_ascii=False, indent=4)"
]
},
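{
"cell_type": "markdown",
"id": "web-search-check-note",
"metadata": {},
"source": [
"You can sanity-check a `@tool`-decorated function outside the agent by calling `.run()` on it directly. The query string below is just an example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "web-search-check",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Optional sanity check, not part of the agent flow; the query string is illustrative.\n",
"# A @tool-decorated function is a LangChain Tool, so it can be invoked directly with .run().\n",
"print(web_search.run(\"Boston Marathon winning times past 5 years\"))"
]
},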
{
"cell_type": "markdown",
"id": "69975008-654a-4cbb-bdf6-63c8bae07eaa",
"metadata": {
"tags": []
},
"source": [
"**Browse a web page with PlayWright**"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "6bb5e47b-0f54-4faa-ae42-49a28fa5497b",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install playwright\n",
"# !playwright install"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "26b497d7-8e52-4c7f-8e7e-da0a48820a3c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"async def async_load_playwright(url: str) -> str:\n",
" \"\"\"Load the specified URLs using Playwright and parse using BeautifulSoup.\"\"\"\n",
" from bs4 import BeautifulSoup\n",
" from playwright.async_api import async_playwright\n",
"\n",
" results = \"\"\n",
" async with async_playwright() as p:\n",
" browser = await p.chromium.launch(headless=True)\n",
" try:\n",
" page = await browser.new_page()\n",
" await page.goto(url)\n",
"\n",
" page_source = await page.content()\n",
" soup = BeautifulSoup(page_source, \"html.parser\")\n",
"\n",
" for script in soup([\"script\", \"style\"]):\n",
" script.extract()\n",
"\n",
" text = soup.get_text()\n",
" lines = (line.strip() for line in text.splitlines())\n",
" chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n",
" results = \"\\n\".join(chunk for chunk in chunks if chunk)\n",
" except Exception as e:\n",
" results = f\"Error: {e}\"\n",
" await browser.close()\n",
" return results\n",
"\n",
"def run_async(coro):\n",
" event_loop = asyncio.get_event_loop()\n",
" return event_loop.run_until_complete(coro)\n",
"\n",
"@tool\n",
"def browse_web_page(url: str) -> str:\n",
" \"\"\"Verbose way to scrape a whole webpage. Likely to cause issues parsing.\"\"\"\n",
" return run_async(async_load_playwright(url))"
]
},
{
"cell_type": "markdown",
"id": "5ea71762-67ca-4e75-8c4d-00563064be71",
"metadata": {},
"source": [
"**Q&A Over a webpage**\n",
"\n",
"Help the model ask more directed questions of web pages to avoid cluttering its memory"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1842929d-f18d-4edc-9fdd-82c929181141",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.tools.base import BaseTool\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"\n",
"from langchain.document_loaders import WebBaseLoader\n",
"from pydantic import Field\n",
"from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain\n",
"\n",
"def _get_text_splitter():\n",
" return RecursiveCharacterTextSplitter(\n",
" # Set a really small chunk size, just to show.\n",
" chunk_size = 500,\n",
" chunk_overlap = 20,\n",
" length_function = len,\n",
" )\n",
"\n",
"\n",
"class WebpageQATool(BaseTool):\n",
" name = \"query_webpage\"\n",
" description = \"Browse a webpage and retrieve the information relevant to the question.\"\n",
" text_splitter: RecursiveCharacterTextSplitter = Field(default_factory=_get_text_splitter)\n",
" qa_chain: BaseCombineDocumentsChain\n",
" \n",
" def _run(self, url: str, question: str) -> str:\n",
" \"\"\"Useful for browsing websites and scraping the text information.\"\"\"\n",
" result = browse_web_page.run(url)\n",
" docs = [Document(page_content=result, metadata={\"source\": url})]\n",
" web_docs = self.text_splitter.split_documents(docs)\n",
" results = []\n",
" # TODO: Handle this with a MapReduceChain\n",
" for i in range(0, len(web_docs), 4):\n",
" input_docs = web_docs[i:i+4]\n",
" window_result = self.qa_chain({\"input_documents\": input_docs, \"question\": question}, return_only_outputs=True)\n",
" results.append(f\"Response from window {i} - {window_result}\")\n",
" results_docs = [Document(page_content=\"\\n\".join(results), metadata={\"source\": url})]\n",
" return self.qa_chain({\"input_documents\": results_docs, \"question\": question}, return_only_outputs=True)\n",
" \n",
" async def _arun(self, url: str, question: str) -> str:\n",
" raise NotImplementedError\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "e6f72bd0",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))"
]
},
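{
"cell_type": "markdown",
"id": "query-webpage-check-note",
"metadata": {},
"source": [
"As with the other tools, `query_webpage` can be exercised directly before handing it to the agent. The call below is illustrative (the URL and question are examples) and mirrors how the AutoGPT agent invokes multi-argument tools, passing the arguments as a dict to `run`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "query-webpage-check",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Illustrative direct call; the URL and question are examples, not part of the agent run.\n",
"# Multi-argument tools receive their arguments as a dict, as the AutoGPT agent does internally.\n",
"query_website_tool.run(\n",
"    {\n",
"        \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\",\n",
"        \"question\": \"What were the winning times for the past 5 years?\",\n",
"    }\n",
")"
]
},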
{
"cell_type": "markdown",
"id": "8e39ee28",
"metadata": {},
"source": [
"### Set up memory\n",
"\n",
"* The memory here is used for the agents intermediate steps"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1df7b724",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Memory\n",
"import faiss\n",
"from langchain.vectorstores import FAISS\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.tools.human.tool import HumanInputRun\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
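{
"cell_type": "markdown",
"id": "memory-check-note",
"metadata": {},
"source": [
"Optionally, you can sanity-check the vector store before handing it to the agent; `add_texts` and `get_relevant_documents` are standard LangChain vector-store / retriever methods, and the sample text below is illustrative. AutoGPT will write its intermediate steps into this store and read them back through the retriever."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "memory-check",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Optional sanity check of the memory store (adds one illustrative document to it).\n",
"vectorstore.add_texts([\"AutoGPT stores its intermediate steps here.\"])\n",
"retriever = vectorstore.as_retriever(search_kwargs={\"k\": 2})\n",
"retriever.get_relevant_documents(\"Where are intermediate steps stored?\")"
]
},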
{
"cell_type": "markdown",
"id": "e40fd657",
"metadata": {},
"source": [
"### Setup model and AutoGPT\n",
"\n",
"`Model set-up`"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "88c8b184-67d7-4c35-84ae-9b14bef8c4e3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"tools = [\n",
" web_search,\n",
" WriteFileTool(),\n",
" ReadFileTool(),\n",
" process_csv,\n",
" query_website_tool,\n",
" # HumanInputRun(), # Activate if you want the permit asking for help from the human\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "709c08c2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent = AutoGPT.from_llm_and_tools(\n",
" ai_name=\"Tom\",\n",
" ai_role=\"Assistant\",\n",
" tools=tools,\n",
" llm=llm,\n",
" memory=vectorstore.as_retriever(search_kwargs={\"k\": 8}),\n",
" # human_in_the_loop=True, # Set to True if you want to add feedback at each step.\n",
")\n",
"# agent.chain.verbose = True"
]
},
{
"cell_type": "markdown",
"id": "fc9b51ba",
"metadata": {},
"source": [
"### AutoGPT as a research / data munger \n",
"\n",
"#### `inflation` and `college tuition`\n",
" \n",
"Let's use AutoGPT as researcher and data munger / cleaner.\n",
" \n",
"I spent a lot of time over the years crawling data sources and cleaning data. \n",
"\n",
"Let's see if AutoGPT can do all of this for us!\n",
"\n",
"Here is the prompt comparing `inflation` and `college tuition`."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "64455d70-a134-4d11-826a-33e34c2ce287",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I will start by using the web_search command to find the Boston Marathon winning times for the past 5 years. Since it is a simple query, I expect to find relevant information in the first few search results.\",\n",
" \"reasoning\": \"I have decided on this course of action to efficiently retrieve the information I need while keeping it simple to avoid legal complications.\",\n",
" \"plan\": \"- Use web_search command to search for 'Boston Marathon winning times past 5 years'\\n- Look through top results for the information requested\\n- If necessary, repeat search with slightly different query to get better results\",\n",
" \"criticism\": \"I should make sure to phrase my search query concisely to prevent too many irrelevant search results.\",\n",
" \"speak\": \"I will use the web_search command to find the Boston Marathon winning times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"web_search\",\n",
" \"args\": {\n",
" \"query\": \"Boston Marathon winning times past 5 years\",\n",
" \"num_results\": 8\n",
" }\n",
" }\n",
"}\n",
"\u001b[32mLast Observation: [\n",
" {\n",
" \"title\": \"List of winners of the Boston Marathon - Wikipedia\",\n",
" \"href\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\",\n",
" \"body\": \"Louise Sauvage won the women's wheelchair division in three consecutive Boston Marathons, between 1997 and 1999. Edith Hunkeler won the race twice, in 2002 and 2006. Wakako Tsuchida won the race in five consecutive years from 2007 to 2011. Tatyana McFadden won the race five times between 2013 and 2018.\"\n",
" },\n",
" \u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"The search results indicate that the winning times for the past 5 years can be found on the Wikipedia page for the Boston Marathon winners. I will retrieve this information using the query_webpage command and the URL of the relevant Wikipedia page. \",\n",
" \"reasoning\": \"This method will allow me to avoid any legal complications while retrieving the exact information that I need.\",\n",
" \"plan\": \"- Use query_webpage command to browse through the Wikipedia page for the Boston Marathon winners \\\\n- Locate the relevant information on the page \\\\n- Return the information to the user\",\n",
" \"criticism\": \"I should be sure to locate and use only reliable sources to avoid providing the user with inaccurate information.\",\n",
" \"speak\": \"I will use the query_webpage command to browse through the Wikipedia page for the Boston Marathon winners and retrieve the relevant information.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"query_webpage\",\n",
" \"args\": {\n",
" \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\", \n",
" \"question\": \"What are the winning times of the Boston Marathon for the past 5 years? \"\n",
" }\n",
" }\n",
"}\n",
"\u001b[32mLast Observation: {'output_text': \"The winning times of the Boston Marathon for the past 5 years are: \\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"}\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I could write this information to a file using the write_file command so that I can refer back to it later. But first, I should process the information to extract the relevant data and convert it into a useful format using the process_csv command.\",\n",
" \"reasoning\": \"Storing the information in a CSV file will allow me to use pandas to manipulate the data in the future with ease, while converting the data into a useful format will make it easier for other people to use the data.\",\n",
" \"plan\": \"- Use process_csv command to extract and format the relevant data from the Wikipedia page\\\\n- Write the data to a CSV file using the write_file command\\\\n- Inform the user that the data has been saved to file\",\n",
" \"criticism\": \"I should thoroughly check the CSV file for accuracy before saving the data.\",\n",
" \"speak\": \"I will use the process_csv command to extract and format the relevant data from the Wikipedia page, and save it to a CSV file using the write_file command.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"process_csv\",\n",
" \"args\": {\n",
" \"csv_file_path\": \"boston_marathon_winners.csv\",\n",
" \"instructions\": \"1. Extract data from Wikipedia page\\\\n2. Convert winning times into datetime format\\\\n3. Save data as CSV file with columns: year, men\\'s time, women\\'s time\",\n",
" \"output_path\": None\n",
" }\n",
" }\n",
"}\n",
"\u001b[32mLast Observation: {'output_text': \"The winning times of the Boston Marathon for the past 5 years are: \\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"}\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I will now use the write_file command to save the winning times for the past 5 years as a text file for later reference.\",\n",
" \"reasoning\": \"This will allow me to easily access the winning times in the future without needing to search for the information again.\",\n",
" \"plan\": \"- Use the write_file command to save the winning times for the past 5 years to a text file.\\\\n- Name the file appropriately, so it is easy to identify in the future.\",\n",
" \"criticism\": \"I should review the contents of the file after writing it to make sure that the information has been saved correctly and can be read later without errors.\",\n",
" \"speak\": \"I will use the write_file command to save the winning times for the past 5 years as a text file for easy retrieval in the future.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"BostonMarathonWinningTimes.txt\",\n",
" \"text\": \"The winning times of the Boston Marathon for the past 5 years are: \\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"\n",
" }\n",
" }\n",
"}\n",
"\u001b[32mLast Observation: File written to successfully.\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"I will use the command \"query_webpage\" to browse through the Wikipedia page for the Boston Marathon winners and retrieve the winning times for the past 5 years. This is the method that will allow me to retrieve the exact information I need while avoiding any legal issues. \n",
"\n",
"```\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"The search results indicate that the winning times for the past 5 years can be found on the Wikipedia page for the Boston Marathon winners. I will retrieve this information using the query_webpage command and the URL of the relevant Wikipedia page. \",\n",
" \"reasoning\": \"This method will allow me to avoid any legal complications while retrieving the exact information that I need.\",\n",
" \"plan\": \"- Use query_webpage command to browse through the Wikipedia page for the Boston Marathon winners \\\\n- Locate the relevant information on the page \\\\n- Return the information to the user\",\n",
" \"criticism\": \"I should be sure to locate and use only reliable sources to avoid providing the user with inaccurate information.\",\n",
" \"speak\": \"I will use the query_webpage command to browse through the Wikipedia page for the Boston Marathon winners and retrieve the winning times for the past 5 years. \"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"query_webpage\",\n",
" \"args\": {\n",
" \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\", \n",
" \"question\": \"What are the winning times of the Boston Marathon for the past 5 years? \"\n",
" }\n",
" }\n",
"}\n",
"```\n",
"\u001b[32mLast Observation: File written to successfully.\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"Now that I have the winning times for the past 5 years, I will save them to a file to ensure that I don't forget them. I will use the write_file command to save this data to disk.\",\n",
" \"reasoning\": \"By saving the data to disk, I can easily retrieve it in the future and avoid having to perform the same search again.\",\n",
" \"plan\": \"- Use the write_file command to save the data to a text file.\",\n",
" \"criticism\": \"I should ensure that the file path and file name are clear and descriptive to make it easy to locate the relevant information in the future.\",\n",
" \"speak\": \"I will save the winning times for the past 5 years to a text file using the write_file command.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"winning_times.txt\",\n",
" \"text\": \"The winning times of the Boston Marathon for the past 5 years are:\\n- 2017: Men's - 2:09:37, Women's - 2:21:52\\n- 2018: Men's - 2:15:58, Women's - 2:39:54\\n- 2019: Men's - 2:07:57, Women's - 2:23:31\\n- 2020: The Boston Marathon was canceled due to the COVID-19 pandemic.\\n- 2021: Men's - 2:09:28, Women's - 2:18:10\\nSOURCES: https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\"\n",
" }\n",
" }\n",
"}\n",
"\u001b[32mLast Observation: File written to successfully.\u001b[0m\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"WARNING:root:Failed to persist run: Object of type 'FAISS' is not JSON serializable\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I will now shut down my system, as I have completed all of my objectives.\",\n",
" \"reasoning\": \"Since I have completed all of my objectives, there is no need to keep running my system.\",\n",
" \"plan\": \"- Use the finish command to signal that I have completed all of my objectives.\\\\\\\\n- Shut down the system.\",\n",
" \"criticism\": \"I should always double-check to ensure that all of my objectives have been completed before shutting down my system.\",\n",
" \"speak\": \"I have completed all of my objectives, and I will now shut down my system.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"finish\",\n",
" \"args\": {\n",
" \"response\": \"All objectives have been completed.\"\n",
" }\n",
" }\n",
"}\n"
]
},
{
"data": {
"text/plain": [
"'All objectives have been completed.'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run([\"What were the winning boston marathon times for the past 5 years\"])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}