{
"cells": [
{
"cell_type": "markdown",
"id": "14f8b67b",
"metadata": {},
"source": [
"## AutoGPT example finding Winning Marathon Times\n",
"\n",
"* Implementation of https://github.com/Significant-Gravitas/Auto-GPT \n",
"* With LangChain primitives (LLMs, PromptTemplates, VectorStores, Embeddings, Tools)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ef972313-c05a-4c49-8fd1-03e599e21033",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install bs4\n",
"# !pip install nest_asyncio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1cff42fd",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# General\n",
"import asyncio\n",
"import os\n",
"\n",
"import nest_asyncio\n",
"import pandas as pd\n",
"from langchain.docstore.document import Document\n",
"from langchain_experimental.agents.agent_toolkits.pandas.base import (\n",
"    create_pandas_dataframe_agent,\n",
")\n",
"from langchain_experimental.autonomous_agents import AutoGPT\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Needed since Jupyter runs an async event loop\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "01283ac7-1da0-41ba-8011-bd455d21dd82",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
]
},
{
"cell_type": "markdown",
"id": "192496a7",
"metadata": {},
"source": [
"### Set up tools\n",
"\n",
"* We'll set up AutoGPT with a `search` tool, a `write-file` tool, a `read-file` tool, a web-browsing tool, and a tool to interact with a CSV file via a Python REPL"
]
},
{
"cell_type": "markdown",
"id": "708a426f",
"metadata": {},
"source": [
"Define any other `tools` you want to use below:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cef4c150-0ef1-4a33-836b-01062fec134e",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Tools\n",
"import os\n",
"from contextlib import contextmanager\n",
"from typing import Optional\n",
"\n",
"from langchain.agents import tool\n",
"from langchain_community.tools.file_management.read import ReadFileTool\n",
"from langchain_community.tools.file_management.write import WriteFileTool\n",
"\n",
"ROOT_DIR = \"./data/\"\n",
"\n",
"\n",
"@contextmanager\n",
"def pushd(new_dir):\n",
"    \"\"\"Context manager for changing the current working directory.\"\"\"\n",
"    prev_dir = os.getcwd()\n",
"    os.chdir(new_dir)\n",
"    try:\n",
"        yield\n",
"    finally:\n",
"        os.chdir(prev_dir)\n",
"\n",
"\n",
"@tool\n",
"def process_csv(\n",
"    csv_file_path: str, instructions: str, output_path: Optional[str] = None\n",
") -> str:\n",
"    \"\"\"Process a CSV with pandas in a limited REPL.\\\n",
"    Only use this after writing data to disk as a csv file.\\\n",
"    Any figures must be saved to disk to be viewed by the human.\\\n",
"    Instructions should be written in natural language, not code. Assume the dataframe is already loaded.\"\"\"\n",
"    with pushd(ROOT_DIR):\n",
"        try:\n",
"            df = pd.read_csv(csv_file_path)\n",
"        except Exception as e:\n",
"            return f\"Error: {e}\"\n",
"        agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)\n",
"        if output_path is not None:\n",
"            instructions += f\" Save output to disk at {output_path}\"\n",
"        try:\n",
"            result = agent.run(instructions)\n",
"            return result\n",
"        except Exception as e:\n",
"            return f\"Error: {e}\""
]
},
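{
"cell_type": "markdown",
"id": "process-csv-usage-note",
"metadata": {},
"source": [
"Before handing `process_csv` to the agent, you can sanity-check it directly. A minimal sketch (left commented out); `sample.csv` is a hypothetical file under `./data/`:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "process-csv-usage-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A sketch, assuming a hypothetical ./data/sample.csv exists:\n",
"# process_csv.run(\n",
"#     {\"csv_file_path\": \"sample.csv\", \"instructions\": \"Describe the columns and row count.\"}\n",
"# )"
]
},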
{
"cell_type": "markdown",
"id": "69975008-654a-4cbb-bdf6-63c8bae07eaa",
"metadata": {
"tags": []
},
"source": [
"**Browse a web page with PlayWright**"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "6bb5e47b-0f54-4faa-ae42-49a28fa5497b",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# !pip install playwright\n",
"# !playwright install"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "26b497d7-8e52-4c7f-8e7e-da0a48820a3c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"async def async_load_playwright(url: str) -> str:\n",
" \"\"\"Load the specified URLs using Playwright and parse using BeautifulSoup.\"\"\"\n",
" from bs4 import BeautifulSoup\n",
" from playwright.async_api import async_playwright\n",
"\n",
" results = \"\"\n",
" async with async_playwright() as p:\n",
" browser = await p.chromium.launch(headless=True)\n",
" try:\n",
" page = await browser.new_page()\n",
" await page.goto(url)\n",
"\n",
" page_source = await page.content()\n",
" soup = BeautifulSoup(page_source, \"html.parser\")\n",
"\n",
" for script in soup([\"script\", \"style\"]):\n",
" script.extract()\n",
"\n",
" text = soup.get_text()\n",
" lines = (line.strip() for line in text.splitlines())\n",
" chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n",
" results = \"\\n\".join(chunk for chunk in chunks if chunk)\n",
" except Exception as e:\n",
" results = f\"Error: {e}\"\n",
" await browser.close()\n",
" return results\n",
"\n",
2023-06-16 18:52:56 +00:00
"\n",
2023-04-19 18:23:08 +00:00
"def run_async(coro):\n",
" event_loop = asyncio.get_event_loop()\n",
" return event_loop.run_until_complete(coro)\n",
"\n",
2023-06-16 18:52:56 +00:00
"\n",
2023-04-19 18:23:08 +00:00
"@tool\n",
"def browse_web_page(url: str) -> str:\n",
" \"\"\"Verbose way to scrape a whole webpage. Likely to cause issues parsing.\"\"\"\n",
" return run_async(async_load_playwright(url))"
]
},
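{
"cell_type": "markdown",
"id": "browse-web-page-usage-note",
"metadata": {},
"source": [
"The scraping tool can be exercised on its own as well. A minimal sketch (left commented out); the URL is just a placeholder:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "browse-web-page-usage-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A sketch; the URL is a placeholder:\n",
"# browse_web_page.run(\"https://example.com\")"
]
},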
{
"cell_type": "markdown",
"id": "5ea71762-67ca-4e75-8c4d-00563064be71",
"metadata": {},
"source": [
"**Q&A Over a webpage**\n",
"\n",
"Help the model ask more directed questions of web pages to avoid cluttering its memory"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "1842929d-f18d-4edc-9fdd-82c929181141",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.chains.qa_with_sources.loading import (\n",
"    BaseCombineDocumentsChain,\n",
"    load_qa_with_sources_chain,\n",
")\n",
"from langchain.tools import BaseTool, DuckDuckGoSearchRun\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"from pydantic import Field\n",
"\n",
"\n",
"def _get_text_splitter():\n",
" return RecursiveCharacterTextSplitter(\n",
" # Set a really small chunk size, just to show.\n",
2023-06-16 18:52:56 +00:00
" chunk_size=500,\n",
" chunk_overlap=20,\n",
" length_function=len,\n",
2023-04-19 18:23:08 +00:00
" )\n",
"\n",
"\n",
"class WebpageQATool(BaseTool):\n",
" name = \"query_webpage\"\n",
2023-06-16 18:52:56 +00:00
" description = (\n",
" \"Browse a webpage and retrieve the information relevant to the question.\"\n",
" )\n",
" text_splitter: RecursiveCharacterTextSplitter = Field(\n",
" default_factory=_get_text_splitter\n",
" )\n",
2023-04-19 18:23:08 +00:00
" qa_chain: BaseCombineDocumentsChain\n",
2023-06-16 18:52:56 +00:00
"\n",
2023-04-19 18:23:08 +00:00
" def _run(self, url: str, question: str) -> str:\n",
" \"\"\"Useful for browsing websites and scraping the text information.\"\"\"\n",
" result = browse_web_page.run(url)\n",
" docs = [Document(page_content=result, metadata={\"source\": url})]\n",
" web_docs = self.text_splitter.split_documents(docs)\n",
" results = []\n",
" # TODO: Handle this with a MapReduceChain\n",
" for i in range(0, len(web_docs), 4):\n",
2023-06-16 18:52:56 +00:00
" input_docs = web_docs[i : i + 4]\n",
" window_result = self.qa_chain(\n",
" {\"input_documents\": input_docs, \"question\": question},\n",
" return_only_outputs=True,\n",
" )\n",
2023-04-19 18:23:08 +00:00
" results.append(f\"Response from window {i} - {window_result}\")\n",
2023-06-16 18:52:56 +00:00
" results_docs = [\n",
" Document(page_content=\"\\n\".join(results), metadata={\"source\": url})\n",
" ]\n",
" return self.qa_chain(\n",
" {\"input_documents\": results_docs, \"question\": question},\n",
" return_only_outputs=True,\n",
" )\n",
"\n",
2023-04-19 18:23:08 +00:00
" async def _arun(self, url: str, question: str) -> str:\n",
2023-06-16 18:52:56 +00:00
" raise NotImplementedError"
2023-04-19 18:23:08 +00:00
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "e6f72bd0",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))"
]
},
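{
"cell_type": "markdown",
"id": "query-webpage-usage-note",
"metadata": {},
"source": [
"You can also try the Q&A tool stand-alone. A minimal sketch (left commented out); the URL and question are placeholders:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "query-webpage-usage-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A sketch; the URL and question are placeholders:\n",
"# query_website_tool.run(\n",
"#     {\"url\": \"https://en.wikipedia.org/wiki/Boston_Marathon\", \"question\": \"When is the race held?\"}\n",
"# )"
]
},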
{
"cell_type": "markdown",
"id": "8e39ee28",
"metadata": {},
"source": [
"### Set up memory\n",
"\n",
"* The memory here is used for the agents intermediate steps"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "1df7b724",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Memory\n",
"import faiss\n",
"from langchain.docstore import InMemoryDocstore\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embeddings_model = OpenAIEmbeddings()\n",
"embedding_size = 1536\n",
"index = faiss.IndexFlatL2(embedding_size)\n",
"vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})"
]
},
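{
"cell_type": "markdown",
"id": "retriever-memory-note",
"metadata": {},
"source": [
"A minimal sketch (left commented out) of the retriever round-trip: texts added to the vector store come back via similarity search."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "retriever-memory-sketch",
"metadata": {},
"outputs": [],
"source": [
"# A sketch of the retriever round-trip:\n",
"# vectorstore.add_texts([\"AutoGPT stored this intermediate step\"])\n",
"# vectorstore.as_retriever(search_kwargs={\"k\": 8}).get_relevant_documents(\"intermediate step\")"
]
},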
{
"cell_type": "markdown",
"id": "e40fd657",
"metadata": {},
"source": [
"### Setup model and AutoGPT\n",
"\n",
"`Model set-up`"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "1233caf3-fbc9-4acb-9faa-01008200633d",
"metadata": {},
"outputs": [],
"source": [
"# !pip install duckduckgo_search\n",
"web_search = DuckDuckGoSearchRun()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "88c8b184-67d7-4c35-84ae-9b14bef8c4e3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"tools = [\n",
" web_search,\n",
"    WriteFileTool(root_dir=\"./data\"),\n",
"    ReadFileTool(root_dir=\"./data\"),\n",
" process_csv,\n",
" query_website_tool,\n",
" # HumanInputRun(), # Activate if you want the permit asking for help from the human\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "709c08c2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"agent = AutoGPT.from_llm_and_tools(\n",
" ai_name=\"Tom\",\n",
" ai_role=\"Assistant\",\n",
" tools=tools,\n",
" llm=llm,\n",
" memory=vectorstore.as_retriever(search_kwargs={\"k\": 8}),\n",
" # human_in_the_loop=True, # Set to True if you want to add feedback at each step.\n",
")\n",
"# agent.chain.verbose = True"
]
},
{
"cell_type": "markdown",
"id": "fc9b51ba",
"metadata": {},
"source": [
"### AutoGPT for Querying the Web\n",
"\n",
"I've spent a lot of time over the years crawling data sources and cleaning data. Let's see if AutoGPT can help with this!\n",
"\n",
"Here is the prompt for looking up recent Boston Marathon times and converting them to tabular form."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "64455d70-a134-4d11-826a-33e34c2ce287",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I need to find the winning Boston Marathon times for the past 5 years. I can use the DuckDuckGo Search command to search for this information.\",\n",
" \"reasoning\": \"Using DuckDuckGo Search will help me gather information on the winning times without complications.\",\n",
" \"plan\": \"- Use DuckDuckGo Search to find the winning Boston Marathon times\\n- Generate a table with the year, name, country of origin, and times\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will use the DuckDuckGo Search command to find the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"DuckDuckGo Search\",\n",
" \"args\": {\n",
" \"query\": \"winning Boston Marathon times for the past 5 years ending in 2022\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"The DuckDuckGo Search command did not provide the specific information I need. I must switch my approach and use query_webpage command to browse a webpage containing the Boston Marathon winning times for the past 5 years.\",\n",
" \"reasoning\": \"The query_webpage command may give me more accurate and comprehensive results compared to the search command.\",\n",
" \"plan\": \"- Use query_webpage command to find the winning Boston Marathon times\\n- Generate a table with the year, name, country of origin, and times\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"I may face difficulty in finding the right webpage with the desired information.\",\n",
" \"speak\": \"I will use the query_webpage command to find the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"DuckDuckGo Search\",\n",
" \"args\": {\n",
" \"query\": \"site with winning Boston Marathon times for the past 5 years ending in 2022\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I need to use the query_webpage command to find the information about the winning Boston Marathon times for the past 5 years.\",\n",
" \"reasoning\": \"The previous DuckDuckGo Search command did not provide specific enough results. The query_webpage command might give more accurate and comprehensive results.\",\n",
" \"plan\": \"- Use query_webpage command to find the winning Boston Marathon times\\\\n- Generate a table with the year, name, country of origin, and times\\\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"I may face difficulty in finding the right webpage with the desired information.\",\n",
" \"speak\": \"I will use the query_webpage command to find the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"query_webpage\",\n",
" \"args\": {\n",
" \"url\": \"https://en.wikipedia.org/wiki/List_of_winners_of_the_Boston_Marathon\",\n",
" \"question\": \"What were the winning Boston Marathon times for the past 5 years ending in 2022?\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have already found the winning Boston Marathon times for the past 5 years. Now, I need to generate a table with the information.\",\n",
" \"reasoning\": \"Using the information I already have, I can create a table containing year, name, country of origin, and times.\",\n",
" \"plan\": \"- Write the marathon data to a CSV file\\n- Process the CSV file to display the table\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will generate a table with the year, name, country of origin, and times for the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"boston_marathon_winners.csv\",\n",
" \"text\": \"Year,Name,Country,Time\\n2022,Evans Chebet,KEN,2:06:51\\n2021,Benson Kipruto,KEN,2:09:51\\n2019,Lawrence Cherono,KEN,2:07:57\\n2018,Yuki Kawauchi,JPN,2:15:58\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have retrieved the winning Boston Marathon times for the past 5 years. Now, I need to generate a table with the year, name, country of origin, and times.\",\n",
" \"reasoning\": \"Creating a table will help organize the data in a clear and accessible format.\",\n",
" \"plan\": \"- Write the data to a CSV file\\n- Process the CSV file to generate the table\\n- Complete the task\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will generate a table with the year, name, country of origin, and winning times using the recently retrieved data.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"winning_boston_marathon_data.csv\",\n",
" \"text\": \"Year,Name,Country,Time\\n2022,Evans Chebet,KEN,2:06:51\\n2021,Benson Kipruto,KEN,2:09:51\\n2019,Lawrence Cherono,KEN,2:07:57\\n2018,Yuki Kawauchi,JPN,2:15:58\\n\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have found the winning Boston Marathon times for the past five years ending in 2022. Next, I need to create a table with the year, name, country of origin, and times.\",\n",
" \"reasoning\": \"Generating a table will help organize the information in a structured format.\",\n",
" \"plan\": \"- Create a table with the year, name, country of origin, and times\\n- Ensure there are no legal complications\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will generate a table with the winning Boston Marathon times for the past 5 years ending in 2022.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"write_file\",\n",
" \"args\": {\n",
" \"file_path\": \"winning_times.csv\",\n",
" \"text\": \"Year,Name,Country,Time\\n2022,Evans Chebet,Kenya,2:06:51\\n2021,Benson Kipruto,Kenya,2:09:51\\n2020,Canceled due to COVID-19 pandemic,,\\n2019,Lawrence Cherono,Kenya,2:07:57\\n2018,Yuki Kawauchi,Japan,2:15:58\"\n",
" }\n",
" }\n",
"}\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I need to process the CSV file to generate the table with the year, name, country of origin, and winning times.\",\n",
" \"reasoning\": \"I have already written the data to a file named 'winning_times.csv'. Now, I need to process this CSV file to properly display the data as a table.\",\n",
" \"plan\": \"- Use the process_csv command to read the 'winning_times.csv' file and generate the table\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will process the 'winning_times.csv' file to display the table with the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"process_csv\",\n",
" \"args\": {\n",
" \"csv_file_path\": \"winning_times.csv\",\n",
" \"instructions\": \"Read the CSV file and display the data as a table\"\n",
" }\n",
" }\n",
"}\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: The CSV file has already been read and saved into a pandas dataframe called `df`. Hence, I can simply display the data by printing the whole dataframe. Since `df.head()` returns the first 5 rows, I can use that to showcase the contents.\n",
"\n",
"Action: python_repl_ast\n",
"Action Input: print(df.head())\u001b[0m Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\n",
"\n",
"Observation: \u001b[36;1m\u001b[1;3mNone\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI used the wrong tool to perform the action. I should have used the given data and not interacted with the Python shell. I can now provide the displayed data as the answer since the information in the printed dataframe would look like a table when typed as text.\n",
"\n",
"Final Answer: \n",
" Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I already have the winning Boston Marathon times for the past 5 years saved in the file 'winning_times.csv'. Now, I need to process the CSV and display the table.\",\n",
" \"reasoning\": \"I am choosing the process_csv command because I already have the required data saved as a CSV file, and I can use this command to read and display the data as a table.\",\n",
" \"plan\": \"- Use the process_csv command to read the 'winning_times.csv' file and generate the table\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I will process the 'winning_times.csv' file to display the table with the winning Boston Marathon times for the past 5 years.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"process_csv\",\n",
" \"args\": {\n",
" \"csv_file_path\": \"winning_times.csv\",\n",
" \"instructions\": \"Read the CSV file and display the data as a table\"\n",
" }\n",
" }\n",
"}\n",
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: Since the data is already loaded in a pandas dataframe, I just need to display the top rows of the dataframe.\n",
"Action: python_repl_ast\n",
"Action Input: df.head()\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3mI now know the final answer.\n",
"Final Answer: \n",
" Year Name Country Time\n",
"0 2022 Evans Chebet Kenya 2:06:51\n",
"1 2021 Benson Kipruto Kenya 2:09:51\n",
"2 2020 Canceled due to COVID-19 pandemic NaN NaN\n",
"3 2019 Lawrence Cherono Kenya 2:07:57\n",
"4 2018 Yuki Kawauchi Japan 2:15:58\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"{\n",
" \"thoughts\": {\n",
" \"text\": \"I have already generated a table with the winning Boston Marathon times for the past 5 years. Now, I can finish the task.\",\n",
" \"reasoning\": \"I have completed the required actions and obtained the desired data. The task is complete.\",\n",
" \"plan\": \"- Use the finish command\",\n",
" \"criticism\": \"None\",\n",
" \"speak\": \"I have generated the table with the winning Boston Marathon times for the past 5 years. Task complete.\"\n",
" },\n",
" \"command\": {\n",
" \"name\": \"finish\",\n",
" \"args\": {\n",
" \"response\": \"I have generated the table with the winning Boston Marathon times for the past 5 years. Task complete.\"\n",
" }\n",
" }\n",
"}\n"
]
},
{
"data": {
"text/plain": [
"'I have generated the table with the winning Boston Marathon times for the past 5 years. Task complete.'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent.run(\n",
"    [\n",
"        \"What were the winning Boston Marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times.\"\n",
"    ]\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}