Fixed typo: 'ouput' to 'output' in all documentation (#5272)

# Fixed typo: 'ouput' to 'output' in all documentation

All occurrences of the typo 'ouput' in the documentation were corrected
to 'output'. No dependencies are required for this change.
This commit is contained in:
Moonsik Kang 2023-05-25 19:18:31 -07:00 committed by GitHub
parent 7047a2c1af
commit a0281f5acb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 24 additions and 24 deletions

View File

@ -30,7 +30,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationBufferMemory()\n", "memory = ConversationBufferMemory()\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})" "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
] ]
}, },
{ {
@ -70,7 +70,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationBufferMemory(return_messages=True)\n", "memory = ConversationBufferMemory(return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})" "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
] ]
}, },
{ {

View File

@ -30,8 +30,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationBufferWindowMemory( k=1)\n", "memory = ConversationBufferWindowMemory( k=1)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n", "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})" "memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
] ]
}, },
{ {
@ -71,8 +71,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationBufferWindowMemory( k=1, return_messages=True)\n", "memory = ConversationBufferWindowMemory( k=1, return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n", "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})" "memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
] ]
}, },
{ {

View File

@ -35,7 +35,7 @@
"memory.load_memory_variables(_input)\n", "memory.load_memory_variables(_input)\n",
"memory.save_context(\n", "memory.save_context(\n",
" _input,\n", " _input,\n",
" {\"ouput\": \" That sounds like a great project! What kind of project are they working on?\"}\n", " {\"output\": \" That sounds like a great project! What kind of project are they working on?\"}\n",
")" ")"
] ]
}, },
@ -73,7 +73,7 @@
"memory.load_memory_variables(_input)\n", "memory.load_memory_variables(_input)\n",
"memory.save_context(\n", "memory.save_context(\n",
" _input,\n", " _input,\n",
" {\"ouput\": \" That sounds like a great project! What kind of project are they working on?\"}\n", " {\"output\": \" That sounds like a great project! What kind of project are they working on?\"}\n",
")" ")"
] ]
}, },

View File

@ -32,8 +32,8 @@
"source": [ "source": [
"llm = OpenAI(temperature=0)\n", "llm = OpenAI(temperature=0)\n",
"memory = ConversationKGMemory(llm=llm)\n", "memory = ConversationKGMemory(llm=llm)\n",
"memory.save_context({\"input\": \"say hi to sam\"}, {\"ouput\": \"who is sam\"})\n", "memory.save_context({\"input\": \"say hi to sam\"}, {\"output\": \"who is sam\"})\n",
"memory.save_context({\"input\": \"sam is a friend\"}, {\"ouput\": \"okay\"})" "memory.save_context({\"input\": \"sam is a friend\"}, {\"output\": \"okay\"})"
] ]
}, },
{ {
@ -73,8 +73,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationKGMemory(llm=llm, return_messages=True)\n", "memory = ConversationKGMemory(llm=llm, return_messages=True)\n",
"memory.save_context({\"input\": \"say hi to sam\"}, {\"ouput\": \"who is sam\"})\n", "memory.save_context({\"input\": \"say hi to sam\"}, {\"output\": \"who is sam\"})\n",
"memory.save_context({\"input\": \"sam is a friend\"}, {\"ouput\": \"okay\"})" "memory.save_context({\"input\": \"sam is a friend\"}, {\"output\": \"okay\"})"
] ]
}, },
{ {

View File

@ -30,7 +30,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))\n", "memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})" "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
] ]
}, },
{ {
@ -70,7 +70,7 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationSummaryMemory(llm=OpenAI(temperature=0), return_messages=True)\n", "memory = ConversationSummaryMemory(llm=OpenAI(temperature=0), return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})" "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
] ]
}, },
{ {

View File

@ -32,8 +32,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n", "memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n", "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})" "memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
] ]
}, },
{ {
@ -73,8 +73,8 @@
"outputs": [], "outputs": [],
"source": [ "source": [
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10, return_messages=True)\n", "memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10, return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n", "memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})" "memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
] ]
}, },
{ {

View File

@ -125,7 +125,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def show_ouput(output):\n", "def show_output(output):\n",
" \"\"\"Display the multi-modal output from the agent.\"\"\"\n", " \"\"\"Display the multi-modal output from the agent.\"\"\"\n",
" UUID_PATTERN = re.compile(\n", " UUID_PATTERN = re.compile(\n",
" r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n", " r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n",
@ -168,7 +168,7 @@
} }
], ],
"source": [ "source": [
"show_ouput(output)" "show_output(output)"
] ]
}, },
{ {
@ -266,7 +266,7 @@
} }
], ],
"source": [ "source": [
"show_ouput(output)" "show_output(output)"
] ]
}, },
{ {

View File

@ -125,7 +125,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"def show_ouput(output):\n", "def show_output(output):\n",
" \"\"\"Display the multi-modal output from the agent.\"\"\"\n", " \"\"\"Display the multi-modal output from the agent.\"\"\"\n",
" UUID_PATTERN = re.compile(\n", " UUID_PATTERN = re.compile(\n",
" r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n", " r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n",
@ -168,7 +168,7 @@
} }
], ],
"source": [ "source": [
"show_ouput(output)" "show_output(output)"
] ]
}, },
{ {
@ -221,7 +221,7 @@
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"show_ouput(output)" "show_output(output)"
] ]
}, },
{ {