Fixed typo: 'ouput' to 'output' in all documentation (#5272)

# Fixed typo: 'ouput' to 'output' in all documentation

This change corrects the typo 'ouput' to 'output' in every occurrence
within the documentation. It has no dependencies and does not affect
runtime behavior.
searx_updates
Moonsik Kang 12 months ago committed by GitHub
parent 7047a2c1af
commit a0281f5acb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -30,7 +30,7 @@
"outputs": [],
"source": [
"memory = ConversationBufferMemory()\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
]
},
{
@ -70,7 +70,7 @@
"outputs": [],
"source": [
"memory = ConversationBufferMemory(return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
]
},
{

@ -30,8 +30,8 @@
"outputs": [],
"source": [
"memory = ConversationBufferWindowMemory( k=1)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
]
},
{
@ -71,8 +71,8 @@
"outputs": [],
"source": [
"memory = ConversationBufferWindowMemory( k=1, return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
]
},
{

@ -35,7 +35,7 @@
"memory.load_memory_variables(_input)\n",
"memory.save_context(\n",
" _input,\n",
" {\"ouput\": \" That sounds like a great project! What kind of project are they working on?\"}\n",
" {\"output\": \" That sounds like a great project! What kind of project are they working on?\"}\n",
")"
]
},
@ -73,7 +73,7 @@
"memory.load_memory_variables(_input)\n",
"memory.save_context(\n",
" _input,\n",
" {\"ouput\": \" That sounds like a great project! What kind of project are they working on?\"}\n",
" {\"output\": \" That sounds like a great project! What kind of project are they working on?\"}\n",
")"
]
},

@ -32,8 +32,8 @@
"source": [
"llm = OpenAI(temperature=0)\n",
"memory = ConversationKGMemory(llm=llm)\n",
"memory.save_context({\"input\": \"say hi to sam\"}, {\"ouput\": \"who is sam\"})\n",
"memory.save_context({\"input\": \"sam is a friend\"}, {\"ouput\": \"okay\"})"
"memory.save_context({\"input\": \"say hi to sam\"}, {\"output\": \"who is sam\"})\n",
"memory.save_context({\"input\": \"sam is a friend\"}, {\"output\": \"okay\"})"
]
},
{
@ -73,8 +73,8 @@
"outputs": [],
"source": [
"memory = ConversationKGMemory(llm=llm, return_messages=True)\n",
"memory.save_context({\"input\": \"say hi to sam\"}, {\"ouput\": \"who is sam\"})\n",
"memory.save_context({\"input\": \"sam is a friend\"}, {\"ouput\": \"okay\"})"
"memory.save_context({\"input\": \"say hi to sam\"}, {\"output\": \"who is sam\"})\n",
"memory.save_context({\"input\": \"sam is a friend\"}, {\"output\": \"okay\"})"
]
},
{

@ -30,7 +30,7 @@
"outputs": [],
"source": [
"memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
]
},
{
@ -70,7 +70,7 @@
"outputs": [],
"source": [
"memory = ConversationSummaryMemory(llm=OpenAI(temperature=0), return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})"
]
},
{

@ -32,8 +32,8 @@
"outputs": [],
"source": [
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
]
},
{
@ -73,8 +73,8 @@
"outputs": [],
"source": [
"memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10, return_messages=True)\n",
"memory.save_context({\"input\": \"hi\"}, {\"ouput\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"ouput\": \"not much\"})"
"memory.save_context({\"input\": \"hi\"}, {\"output\": \"whats up\"})\n",
"memory.save_context({\"input\": \"not much you\"}, {\"output\": \"not much\"})"
]
},
{

@ -125,7 +125,7 @@
"metadata": {},
"outputs": [],
"source": [
"def show_ouput(output):\n",
"def show_output(output):\n",
" \"\"\"Display the multi-modal output from the agent.\"\"\"\n",
" UUID_PATTERN = re.compile(\n",
" r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n",
@ -168,7 +168,7 @@
}
],
"source": [
"show_ouput(output)"
"show_output(output)"
]
},
{
@ -266,7 +266,7 @@
}
],
"source": [
"show_ouput(output)"
"show_output(output)"
]
},
{

@ -125,7 +125,7 @@
"metadata": {},
"outputs": [],
"source": [
"def show_ouput(output):\n",
"def show_output(output):\n",
" \"\"\"Display the multi-modal output from the agent.\"\"\"\n",
" UUID_PATTERN = re.compile(\n",
" r\"([0-9A-Za-z]{8}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{4}-[0-9A-Za-z]{12})\"\n",
@ -168,7 +168,7 @@
}
],
"source": [
"show_ouput(output)"
"show_output(output)"
]
},
{
@ -221,7 +221,7 @@
"metadata": {},
"outputs": [],
"source": [
"show_ouput(output)"
"show_output(output)"
]
},
{

Loading…
Cancel
Save