{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "625868e8-46cb-4232-99de-e95aee53c3a3",
"metadata": {},
"outputs": [],
"source": [
"! pip install langchain-chroma langchain_community tiktoken langchain-openai langchainhub langchain langgraph"
]
},
{
"cell_type": "markdown",
"id": "425fb020-e864-40ce-a31f-8da40c73d14b",
"metadata": {},
"source": [
"# LangGraph Retrieval Agent\n",
"\n",
"We can implement [Retrieval Agents](https://python.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents) in [LangGraph](https://python.langchain.com/docs/langgraph).\n",
"\n",
"## Retriever"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "e50c9efe-4abe-42fa-b35a-05eeeede9ec6",
"metadata": {},
"outputs": [],
"source": [
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import WebBaseLoader\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"urls = [\n",
" \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n",
" \"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/\",\n",
" \"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/\",\n",
"]\n",
"\n",
"docs = [WebBaseLoader(url).load() for url in urls]\n",
"docs_list = [item for sublist in docs for item in sublist]\n",
"\n",
"text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(\n",
" chunk_size=100, chunk_overlap=50\n",
")\n",
"doc_splits = text_splitter.split_documents(docs_list)\n",
"\n",
"# Add to vectorDB\n",
"vectorstore = Chroma.from_documents(\n",
" documents=doc_splits,\n",
" collection_name=\"rag-chroma\",\n",
" embedding=OpenAIEmbeddings(),\n",
")\n",
"retriever = vectorstore.as_retriever()"
]
},
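{
"cell_type": "markdown",
"id": "1a2b3c4d-1111-4aaa-9bbb-111111111111",
"metadata": {},
"source": [
"As an optional sanity check, we can query the retriever directly before wiring it into the graph; retrievers are runnables, so `invoke` returns a list of documents. The query below is just an example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b3c4d5e-2222-4bbb-9ccc-222222222222",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: query the retriever directly. The query is just an example.\n",
"retrieved_docs = retriever.invoke(\"What are the types of agent memory?\")\n",
"print(len(retrieved_docs))\n",
"print(retrieved_docs[0].page_content[:200])"
]
},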
{
"cell_type": "code",
"execution_count": 7,
"id": "0b97bdd8-d7e3-444d-ac96-5ef4725f9048",
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools.retriever import create_retriever_tool\n",
"\n",
"tool = create_retriever_tool(\n",
" retriever,\n",
" \"retrieve_blog_posts\",\n",
" \"Search and return information about Lilian Weng blog posts.\",\n",
")\n",
"\n",
"tools = [tool]\n",
"\n",
"from langgraph.prebuilt import ToolExecutor\n",
"\n",
"tool_executor = ToolExecutor(tools)"
]
},
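{
"cell_type": "markdown",
"id": "3c4d5e6f-3333-4ccc-9ddd-333333333333",
"metadata": {},
"source": [
"Optionally, we can exercise the executor by hand with a `ToolInvocation`, the same object the graph builds later in `call_tool`. The query string is just an example."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d5e6f7a-4444-4ddd-9eee-444444444444",
"metadata": {},
"outputs": [],
"source": [
"# Optional: invoke the tool executor by hand, mirroring what the graph does later.\n",
"# The query string is just an example.\n",
"from langgraph.prebuilt import ToolInvocation\n",
"\n",
"result = tool_executor.invoke(\n",
" ToolInvocation(tool=\"retrieve_blog_posts\", tool_input={\"query\": \"agent memory\"})\n",
")\n",
"print(str(result)[:300])"
]
},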
{
"cell_type": "markdown",
"id": "fe6e8f78-1ef7-42ad-b2bf-835ed5850553",
"metadata": {},
"source": [
"## Agent state\n",
" \n",
"We will defined a graph.\n",
"\n",
"A `state` object that it passes around to each node.\n",
"\n",
"Our state will be a list of `messages`.\n",
"\n",
"Each node in our graph will append to it."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "0e378706-47d5-425a-8ba0-57b9acffbd0c",
"metadata": {},
"outputs": [],
"source": [
"import operator\n",
"from typing import Annotated, Sequence, TypedDict\n",
"\n",
"from langchain_core.messages import BaseMessage\n",
"\n",
"\n",
"class AgentState(TypedDict):\n",
" messages: Annotated[Sequence[BaseMessage], operator.add]"
]
},
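{
"cell_type": "markdown",
"id": "5e6f7a8b-5555-4eee-9fff-555555555555",
"metadata": {},
"source": [
"Because `messages` is annotated with `operator.add`, LangGraph concatenates each node's returned list onto the existing one rather than overwriting it. A tiny illustration of that reducer:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6f7a8b9c-6666-4fff-9aaa-666666666666",
"metadata": {},
"outputs": [],
"source": [
"# Illustration only: operator.add on lists is concatenation, which is how a node's\n",
"# {\"messages\": [...]} return value gets appended to the existing state.\n",
"import operator\n",
"\n",
"from langchain_core.messages import AIMessage, HumanMessage\n",
"\n",
"existing = [HumanMessage(content=\"hi\")]\n",
"update = [AIMessage(content=\"hello\")]\n",
"print(operator.add(existing, update))"
]
},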
{
"attachments": {
"f886806c-0aec-4c2a-8027-67339530cb60.png": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA7EAAAHICAYAAACYkW15AAAMP2lDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBCCSAgJfQmCEgJICWEFkB6EWyEJEAoMQaCiB1dVHDtYgEbuiqi2AGxI3YWwd4XRRSUdbFgV96kgK77yvfO9829//3nzH/OnDu3DADqp7hicQ6qAUCuKF8SGxLAGJucwiB1AwTggAYIgMDl5YlZ0dERANrg+e/27ib0hnbNQab1z/7/app8QR4PACQa4jR+Hi8X4kMA4JU8sSQfAKKMN5+aL5Zh2IC2BCYI8UIZzlDgShlOU+B9cp/4WDbEzQCoqHG5kgwAaG2QZxTwMqAGrQ9iJxFfKAJAnQGxb27uZD7EqRDbQB8xxDJ9ZtoPOhl/00wb0uRyM4awYi5yUwkU5olzuNP+z3L8b8vNkQ7GsIJNLVMSGiubM6zb7ezJ4TKsBnGvKC0yCmItiD8I+XJ/iFFKpjQ0QeGPGvLy2LBmQBdiJz43MBxiQ4iDRTmREUo+LV0YzIEYrhC0UJjPiYdYD+KFgrygOKXPZsnkWGUstC5dwmYp+QtciTyuLNZDaXYCS6n/OlPAUepjtKLM+CSIKRBbFAgTIyGmQeyYlx0XrvQZXZTJjhz0kUhjZflbQBwrEIUEKPSxgnRJcKzSvzQ3b3C+2OZMISdSiQ/kZ8aHKuqDNfO48vzhXLA2gYiVMKgjyBsbMTgXviAwSDF3rFsgSohT6nwQ5wfEKsbiFHFOtNIfNxPkhMh4M4hd8wrilGPxxHy4IBX6eLo4PzpekSdelMUNi1bkgy8DEYANAgEDSGFLA5NBFhC29tb3witFTzDgAgnIAALgoGQGRyTJe0TwGAeKwJ8QCUDe0LgAea8AFED+6xCrODqAdHlvgXxENngKcS4IBznwWiofJRqKlgieQEb4j+hc2Hgw3xzYZP3/nh9kvzMsyEQoGelgRIb6oCcxiBhIDCUGE21xA9wX98Yj4NEfNheciXsOzuO7P+EpoZ3wmHCD0EG4M0lYLPkpyzGgA+oHK2uR9mMtcCuo6YYH4D5QHSrjurgBcMBdYRwW7gcju0GWrcxbVhXGT9p/m8EPd0PpR3Yio+RhZH+yzc8jaXY0tyEVWa1/rI8i17SherOHen6Oz/6h+nx4Dv/ZE1uIHcTOY6exi9gxrB4wsJNYA9aCHZfhodX1RL66BqPFyvPJhjrCf8QbvLOySuY51Tj1OH1R9OULCmXvaMCeLJ4mEWZk5jNY8IsgYHBEPMcRDBcnF1cAZN8XxevrTYz8u4Hotnzn5v0BgM/JgYGBo9+5sJMA7PeAj/+R75wNE346VAG4cIQnlRQoOFx2IMC3hDp80vSBMTAHNnA+LsAdeAN/EATCQBSIB8lgIsw+E65zCZgKZoC5oASUgWVgNVgPNoGtYCfYAw6AenAMnAbnwGXQBm6Ae3D1dIEXoA+8A58RBCEhVISO6CMmiCVij7ggTMQXCUIikFgkGUlFMhARIkVmIPOQMmQFsh7ZglQj+5EjyGnkItKO3EEeIT3Ia+QTiqFqqDZqhFqhI1EmykLD0Xh0ApqBTkGL0PnoEnQtWoXuRuvQ0+hl9Abagb5A+zGAqWK6mCnmgDExNhaFpWDpmASbhZVi5VgVVos1wvt8DevAerGPOBGn4wzcAa7gUDwB5+FT8Fn4Ynw9vhOvw5vxa/gjvA//RqASDAn2BC8ChzCWkEGYSighlBO2Ew4TzsJnqYvwjkgk6hKtiR7wWUwmZhGnExcTNxD3Ek8R24mdxH4SiaRPsif5kKJIXFI+qYS0jrSbdJJ0ldRF+qCiqmKi4qISrJKiIlIpVilX2aVyQuWqyjOVz2QNsiXZixxF5pOnkZeSt5EbyVfIXeTPFE2KNcWHEk/JosylrKXUUs5S7lPeqKqqmql6qsaoClXnqK5V3ad6QfWR6kc1LTU7NbbaeDWp2hK1HWqn1O6ovaFSqVZUf2oKNZ+6hFpNPUN9SP1Ao9McaRwanzabVkGro12lvVQnq1uqs9Qnqhepl6sfVL+i3qtB1rDSYGtwNWZpVGgc0bil0a9J13TWjNLM1VysuUvzoma3FknLSitIi681X2ur1hmtTjpGN6ez6Tz6PPo2+ll6lzZR21qbo52lXaa9R7tVu09HS8dVJ1GnUKdC57hOhy6ma6XL0c3RXap7QPem7qdhRsNYwwTDFg2rHXZ12Hu94Xr+egK9Ur29ejf0Pukz9IP0s/WX69frPzDADewMYgymGmw0OGvQO1x7uPdw3vDS4QeG3zVEDe0MYw2nG241bDHsNzI2CjESG60zOmPUa6xr7G+cZbzK+IRxjwndxNdEaLLK5KTJc4YOg8XIYaxlNDP6TA1NQ02lpltMW00/m1mbJZgVm+01e2BOMWeap5uvMm8y77MwsRhjMcOixuKuJdmSaZlpucbyvOV7K2urJKsFVvVW3dZ61hzrIusa6/s2VBs/myk2VTbXbYm2TNts2w22bXaonZtdpl2F3RV71N7dXmi/wb59BGGE5wjRiKoRtxzUHFgOBQ41Do8cdR0jHIsd6x1fjrQYmTJy+cjzI785uTnlOG1zuues5RzmXOzc6Pzaxc6F51Lhcn0UdVTwqNmjGka9crV3FbhudL3tRncb47bArcntq7uHu8S91r3Hw8Ij1aPS4xZTmxnNXMy84EnwDPCc7XnM86OXu1e+1wGvv7wdvLO9d3l3j7YeLRi9bXSnj5kP12eLT4cvwzfVd7Nvh5+pH9evyu+xv7k/33+7/zOWLSuLtZv1MsApQBJwOOA924s9k30qEAsMCSwNbA3SCkoIWh/0MNgsOCO4JrgvxC1kesipUEJoeOjy0FscIw6PU83pC/MImxnWHK4WHhe+PvxxhF2EJKJxDDombMzKMfcjLSNFkfVRIIoTtTLqQbR19JToozHEmOiYipinsc6xM2LPx9HjJsXtinsXHxC/NP5egk2CNKEpUT1xfGJ14vukwKQVSR1jR46dOfZyskGyMLkhhZSSmLI9pX9c0LjV47rGu40vGX9zgvWEwgkXJxpMzJl4fJL6JO6kg6mE1KTUXalfuFHcKm5/GietMq2Px+at4b3g+/NX8XsEPoIVgmfpPukr0rszfDJWZvRk+mWWZ/YK2cL1wldZoVmbst5nR2XvyB7IScrZm6uSm5p7RKQlyhY1TzaeXDi5XWwvLhF3TPGasnpKnyRcsj0PyZuQ15CvDX/kW6Q20l+kjwp8CyoKPkxNnHqwULNQVNgyzW7aomnPioKLfpuOT+dNb5phOmPujEczWTO3zEJmpc1qmm0+e/7srjkhc3bOpczNnvt7sVPxiuK385LmNc43mj9nfucvIb/UlNBKJCW3Fngv2LQQXyhc2Lpo1KJ1i76V8ksvlTmVlZd9WcxbfOlX51/X/jqwJH1J61L3pRuXEZeJlt1c7rd85wrNFUUrOleOWVm3irGqdNXb1ZNWXyx3Ld+0hrJGuqZjbcTahnUW65at+7I+c/2NioCKvZWGlYsq32/gb7i60X9j7SajTWWbP
m0Wbr69JWRLXZVVVflW4taCrU+3JW47/xvzt+rtBtvLtn/dIdrRsTN2Z3O1R3X1LsNdS2vQGmlNz+7xu9v2BO5pqHWo3bJXd2/ZPrBPuu/5/tT9Nw+EH2g6yDxYe8jyUOVh+uHSOqRuWl1ffWZ9R0NyQ/uRsCNNjd6Nh486Ht1xzPRYxXGd40tPUE7MPzFwsuhk/ynxqd7TGac7myY13Tsz9sz15pjm1rPhZy+cCz535jzr/MkLPheOXfS6eOQS81L9ZffLdS1uLYd/d/v9cKt7a90VjysNbZ5tje2j209c9bt6+lrgtXPXOdcv34i80X4z4ebtW+Nvddzm3+6+k3Pn1d2Cu5/vzblPuF/6QONB+UPDh1V/2P6xt8O94/ijwEctj+Me3+vkdb54kvfkS9f8p9Sn5c9MnlV3u3Qf6wnuaXs+7nnXC/GLz70lf2r+WfnS5uWhv/z/aukb29f1SvJq4PXiN/pvdrx1fdvUH93/8F3uu8/vSz/of9j5kfnx/KekT88+T/1C+rL2q+3Xxm/h
}
},
"cell_type": "markdown",
"id": "dc949d42-8a34-4231-bff0-b8198975e2ce",
"metadata": {},
"source": [
"## Nodes and Edges\n",
"\n",
"Each node will - \n",
"\n",
"1/ Either be a function or a runnable.\n",
"\n",
"2/ Modify the `state`.\n",
"\n",
"The edges choose which node to call next.\n",
"\n",
"We can lay out an agentic RAG graph like this:\n",
"\n",
"![Screenshot 2024-02-02 at 1.36.50 PM.png](attachment:f886806c-0aec-4c2a-8027-67339530cb60.png)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "278d1d83-dda6-4de4-bf8b-be9965c227fa",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import operator\n",
"from typing import Annotated, Sequence, TypedDict\n",
"\n",
"from langchain.output_parsers import PydanticOutputParser\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.tools.render import format_tool_to_openai_function\n",
"from langchain_core.messages import BaseMessage, FunctionMessage\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"from langchain_openai import ChatOpenAI\n",
"from langgraph.prebuilt import ToolInvocation\n",
"\n",
"### Edges\n",
"\n",
"\n",
"def should_retrieve(state):\n",
" \"\"\"\n",
" Decides whether the agent should retrieve more information or end the process.\n",
"\n",
" This function checks the last message in the state for a function call. If a function call is\n",
" present, the process continues to retrieve information. Otherwise, it ends the process.\n",
"\n",
" Args:\n",
" state (messages): The current state of the agent, including all messages.\n",
"\n",
" Returns:\n",
" str: A decision to either \"continue\" the retrieval process or \"end\" it.\n",
" \"\"\"\n",
" print(\"---DECIDE TO RETRIEVE---\")\n",
" messages = state[\"messages\"]\n",
" last_message = messages[-1]\n",
" # If there is no function call, then we finish\n",
" if \"function_call\" not in last_message.additional_kwargs:\n",
" print(\"---DECISION: DO NOT RETRIEVE / DONE---\")\n",
" return \"end\"\n",
" # Otherwise there is a function call, so we continue\n",
" else:\n",
" print(\"---DECISION: RETRIEVE---\")\n",
" return \"continue\"\n",
"\n",
"\n",
"def check_relevance(state):\n",
" \"\"\"\n",
" Determines whether the Agent should continue based on the relevance of retrieved documents.\n",
"\n",
" This function checks if the last message in the conversation is of type FunctionMessage, indicating\n",
" that document retrieval has been performed. It then evaluates the relevance of these documents to the user's\n",
" initial question using a predefined model and output parser. If the documents are relevant, the conversation\n",
" is considered complete. Otherwise, the retrieval process is continued.\n",
"\n",
" Args:\n",
" state messages: The current state of the conversation, including all messages.\n",
"\n",
" Returns:\n",
" str: A directive to either \"end\" the conversation if relevant documents are found, or \"continue\" the retrieval process.\n",
" \"\"\"\n",
"\n",
" print(\"---CHECK RELEVANCE---\")\n",
"\n",
" # Output\n",
" class FunctionOutput(BaseModel):\n",
" binary_score: str = Field(description=\"Relevance score 'yes' or 'no'\")\n",
"\n",
" # Create an instance of the PydanticOutputParser\n",
" parser = PydanticOutputParser(pydantic_object=FunctionOutput)\n",
"\n",
" # Get the format instructions from the output parser\n",
" format_instructions = parser.get_format_instructions()\n",
"\n",
" # Create a prompt template with format instructions and the query\n",
" prompt = PromptTemplate(\n",
" template=\"\"\"You are a grader assessing relevance of retrieved docs to a user question. \\n \n",
" Here are the retrieved docs:\n",
" \\n ------- \\n\n",
" {context} \n",
" \\n ------- \\n\n",
" Here is the user question: {question}\n",
" If the docs contain keyword(s) in the user question, then score them as relevant. \\n\n",
" Give a binary score 'yes' or 'no' score to indicate whether the docs are relevant to the question. \\n \n",
" Output format instructions: \\n {format_instructions}\"\"\",\n",
" input_variables=[\"question\"],\n",
" partial_variables={\"format_instructions\": format_instructions},\n",
" )\n",
"\n",
" model = ChatOpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
"\n",
" chain = prompt | model | parser\n",
"\n",
" messages = state[\"messages\"]\n",
" last_message = messages[-1]\n",
" score = chain.invoke(\n",
" {\"question\": messages[0].content, \"context\": last_message.content}\n",
" )\n",
"\n",
" # If relevant\n",
" if score.binary_score == \"yes\":\n",
" print(\"---DECISION: DOCS RELEVANT---\")\n",
" return \"yes\"\n",
"\n",
" else:\n",
" print(\"---DECISION: DOCS NOT RELEVANT---\")\n",
" print(score.binary_score)\n",
" return \"no\"\n",
"\n",
"\n",
"### Nodes\n",
"\n",
"\n",
"# Define the function that calls the model\n",
"def call_model(state):\n",
" \"\"\"\n",
" Invokes the agent model to generate a response based on the current state.\n",
"\n",
" This function calls the agent model to generate a response to the current conversation state.\n",
" The response is added to the state's messages.\n",
"\n",
" Args:\n",
" state (messages): The current state of the agent, including all messages.\n",
"\n",
" Returns:\n",
" dict: The updated state with the new message added to the list of messages.\n",
" \"\"\"\n",
" print(\"---CALL AGENT---\")\n",
" messages = state[\"messages\"]\n",
" model = ChatOpenAI(temperature=0, streaming=True, model=\"gpt-4-0125-preview\")\n",
" functions = [format_tool_to_openai_function(t) for t in tools]\n",
" model = model.bind_functions(functions)\n",
" response = model.invoke(messages)\n",
" # We return a list, because this will get added to the existing list\n",
" return {\"messages\": [response]}\n",
"\n",
"\n",
"# Define the function to execute tools\n",
"def call_tool(state):\n",
" \"\"\"\n",
" Executes a tool based on the last message's function call.\n",
"\n",
" This function is responsible for executing a tool invocation based on the function call\n",
" specified in the last message. The result from the tool execution is added to the conversation\n",
" state as a new message.\n",
"\n",
" Args:\n",
" state (messages): The current state of the agent, including all messages.\n",
"\n",
" Returns:\n",
" dict: The updated state with the new function message added to the list of messages.\n",
" \"\"\"\n",
" print(\"---EXECUTE RETRIEVAL---\")\n",
" messages = state[\"messages\"]\n",
" # Based on the continue condition\n",
" # we know the last message involves a function call\n",
" last_message = messages[-1]\n",
" # We construct an ToolInvocation from the function_call\n",
" action = ToolInvocation(\n",
" tool=last_message.additional_kwargs[\"function_call\"][\"name\"],\n",
" tool_input=json.loads(\n",
" last_message.additional_kwargs[\"function_call\"][\"arguments\"]\n",
" ),\n",
" )\n",
" # We call the tool_executor and get back a response\n",
" response = tool_executor.invoke(action)\n",
" # print(type(response))\n",
" # We use the response to create a FunctionMessage\n",
" function_message = FunctionMessage(content=str(response), name=action.tool)\n",
"\n",
" # We return a list, because this will get added to the existing list\n",
" return {\"messages\": [function_message]}"
]
},
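{
"cell_type": "markdown",
"id": "7a8b9c0d-7777-4aaa-9bbb-777777777777",
"metadata": {},
"source": [
"The edge functions only inspect the message list, so we can optionally smoke-test the routing on hand-built states before assembling the graph. The messages below are fabricated for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b9c0d1e-8888-4bbb-9ccc-888888888888",
"metadata": {},
"outputs": [],
"source": [
"# Optional smoke test of the routing logic; these messages are fabricated for illustration.\n",
"from langchain_core.messages import AIMessage\n",
"\n",
"call_msg = AIMessage(\n",
" content=\"\",\n",
" additional_kwargs={\"function_call\": {\"name\": \"retrieve_blog_posts\", \"arguments\": \"{}\"}},\n",
")\n",
"plain_msg = AIMessage(content=\"All done.\")\n",
"\n",
"assert should_retrieve({\"messages\": [call_msg]}) == \"continue\"\n",
"assert should_retrieve({\"messages\": [plain_msg]}) == \"end\""
]
},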
{
"cell_type": "markdown",
"id": "955882ef-7467-48db-ae51-de441f2fc3a7",
"metadata": {},
"source": [
"## Graph\n",
"\n",
"* Start with an agent, `call_model`\n",
"* Agent make a decision to call a function\n",
"* If so, then `action` to call tool (retriever)\n",
"* Then call agent with the tool output added to messages (`state`)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "8718a37f-83c2-4f16-9850-e61e0f49c3d4",
"metadata": {},
"outputs": [],
"source": [
"from langgraph.graph import END, StateGraph\n",
"\n",
"# Define a new graph\n",
"workflow = StateGraph(AgentState)\n",
"\n",
"# Define the nodes we will cycle between\n",
"workflow.add_node(\"agent\", call_model) # agent\n",
"workflow.add_node(\"action\", call_tool) # retrieval"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "b2158218-b21f-491b-853c-876c1afe9ba6",
"metadata": {},
"outputs": [],
"source": [
"# Call agent node to decide to retrieve or not\n",
"workflow.set_entry_point(\"agent\")\n",
"\n",
"# Decide whether to retrieve\n",
"workflow.add_conditional_edges(\n",
" \"agent\",\n",
" # Assess agent decision\n",
" should_retrieve,\n",
" {\n",
" # Call tool node\n",
" \"continue\": \"action\",\n",
" \"end\": END,\n",
" },\n",
")\n",
"\n",
"# Edges taken after the `action` node is called.\n",
"workflow.add_conditional_edges(\n",
" \"action\",\n",
" # Assess agent decision\n",
" check_relevance,\n",
" {\n",
" # Call agent node\n",
" \"yes\": \"agent\",\n",
" \"no\": END, # placeholder\n",
" },\n",
")\n",
"\n",
"# Compile\n",
"app = workflow.compile()"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "7649f05a-cb67-490d-b24a-74d41895139a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"---CALL AGENT---\n",
"\"Output from node 'agent':\"\n",
"'---'\n",
"{ 'messages': [ AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\"query\":\"types of agent memory Lilian Weng\"}', 'name': 'retrieve_blog_posts'}})]}\n",
"'\\n---\\n'\n",
"---DECIDE TO RETRIEVE---\n",
"---DECISION: RETRIEVE---\n",
"---EXECUTE RETRIEVAL---\n",
"\"Output from node 'action':\"\n",
"'---'\n",
"{ 'messages': [ FunctionMessage(content='Citation#\\nCited as:\\n\\nWeng, Lilian. (Jun 2023). LLM-powered Autonomous Agents\". LilLog. https://lilianweng.github.io/posts/2023-06-23-agent/.\\n\\nLLM Powered Autonomous Agents\\n \\nDate: June 23, 2023 | Estimated Reading Time: 31 min | Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\\n\\nThe design of generative agents combines LLM with memory, planning and reflection mechanisms to enable agents to behave conditioned on past experience, as well as to interact with other agents.\\n\\nWeng, Lilian. (Mar 2023). Prompt Engineering. LilLog. https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/.', name='retrieve_blog_posts')]}\n",
"'\\n---\\n'\n",
"---CHECK RELEVANCE---\n",
"---DECISION: DOCS RELEVANT---\n",
"---CALL AGENT---\n",
"\"Output from node 'agent':\"\n",
"'---'\n",
"{ 'messages': [ AIMessage(content='Lilian Weng\\'s blog post titled \"LLM-powered Autonomous Agents\" discusses the concept of agent memory but does not provide a detailed list of the types of agent memory directly in the provided excerpt. For more detailed information on the types of agent memory, it would be necessary to refer directly to the blog post itself. You can find the post [here](https://lilianweng.github.io/posts/2023-06-23-agent/).')]}\n",
"'\\n---\\n'\n",
"---DECIDE TO RETRIEVE---\n",
"---DECISION: DO NOT RETRIEVE / DONE---\n",
"\"Output from node '__end__':\"\n",
"'---'\n",
"{ 'messages': [ HumanMessage(content=\"What are the types of agent memory based on Lilian Weng's blog post?\"),\n",
" AIMessage(content='', additional_kwargs={'function_call': {'arguments': '{\"query\":\"types of agent memory Lilian Weng\"}', 'name': 'retrieve_blog_posts'}}),\n",
" FunctionMessage(content='Citation#\\nCited as:\\n\\nWeng, Lilian. (Jun 2023). LLM-powered Autonomous Agents\". LilLog. https://lilianweng.github.io/posts/2023-06-23-agent/.\\n\\nLLM Powered Autonomous Agents\\n \\nDate: June 23, 2023 | Estimated Reading Time: 31 min | Author: Lilian Weng\\n\\n\\n \\n\\n\\nTable of Contents\\n\\n\\n\\nAgent System Overview\\n\\nComponent One: Planning\\n\\nTask Decomposition\\n\\nSelf-Reflection\\n\\n\\nComponent Two: Memory\\n\\nTypes of Memory\\n\\nMaximum Inner Product Search (MIPS)\\n\\nThe design of generative agents combines LLM with memory, planning and reflection mechanisms to enable agents to behave conditioned on past experience, as well as to interact with other agents.\\n\\nWeng, Lilian. (Mar 2023). Prompt Engineering. LilLog. https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/.', name='retrieve_blog_posts'),\n",
" AIMessage(content='Lilian Weng\\'s blog post titled \"LLM-powered Autonomous Agents\" discusses the concept of agent memory but does not provide a detailed list of the types of agent memory directly in the provided excerpt. For more detailed information on the types of agent memory, it would be necessary to refer directly to the blog post itself. You can find the post [here](https://lilianweng.github.io/posts/2023-06-23-agent/).')]}\n",
"'\\n---\\n'\n"
]
}
],
"source": [
"import pprint\n",
"\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"inputs = {\n",
" \"messages\": [\n",
" HumanMessage(\n",
" content=\"What are the types of agent memory based on Lilian Weng's blog post?\"\n",
" )\n",
" ]\n",
"}\n",
"for output in app.stream(inputs):\n",
" for key, value in output.items():\n",
" pprint.pprint(f\"Output from node '{key}':\")\n",
" pprint.pprint(\"---\")\n",
" pprint.pprint(value, indent=2, width=80, depth=None)\n",
" pprint.pprint(\"\\n---\\n\")"
]
},
{
"cell_type": "markdown",
"id": "93781e8c-dd25-4754-9c26-e5faac57e715",
"metadata": {},
"source": [
"Trace:\n",
"\n",
"https://smith.langchain.com/public/6f45c61b-69a0-4b35-bab9-679a8840a2d6/r"
]
},
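{
"cell_type": "markdown",
"id": "9c0d1e2f-9999-4ccc-9ddd-999999999999",
"metadata": {},
"source": [
"`stream` surfaces each node's output as it runs. If only the final answer is needed, `invoke` returns the end state, and the answer is the last message. Note this re-runs the model, so the wording may differ from the trace above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d1e2f3a-0000-4ddd-9eee-000000000000",
"metadata": {},
"outputs": [],
"source": [
"# Run the graph end-to-end and read off the final message.\n",
"# This re-invokes the model, so the wording may vary between runs.\n",
"final_state = app.invoke(inputs)\n",
"print(final_state[\"messages\"][-1].content)"
]
},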
{
"cell_type": "code",
"execution_count": null,
"id": "189333cc-5d34-4869-9f9b-741210e1096f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 5
}