harrison/agent_multi_inputs
Harrison Chase 1 year ago
commit 16201f1d77

@@ -73,7 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 8,
"id": "e603cd7d",
"metadata": {},
"outputs": [
@@ -81,43 +81,32 @@
"name": "stdout",
"output_type": "stream",
"text": [
"What is the age of Olivia Wilde's boyfriend raised to the 0.23 power?\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find the age of Olivia Wilde's boyfriend\n",
"Action: Search\n",
"Action Input: \"Olivia Wilde's boyfriend\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mOlivia Wilde started dating Harry Styles after ending her years-long engagement to Jason Sudeikis — see their relationship timeline.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find the age of Harry Styles\n",
"Action: Search\n",
"Action Input: \"Harry Styles age\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3m28 years\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to calculate 28 to the 0.23 power\n",
"\n",
"\n",
"\u001b[1m> Entering new ZeroShotAgent chain...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to use a calculator to solve this.\n",
"Action: Calculator\n",
"Action Input: 28^0.23\u001b[0m\n",
"Action Input: (age of Olivia Wilde's boyfriend)^0.23\u001b[0m\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"28^0.23\u001b[32;1m\u001b[1;3m\n",
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
"(age of Olivia Wilde's boyfriend)^0.23\u001b[32;1m\u001b[1;3m\n",
"\n",
"```python\n",
"print(28**0.23)\n",
"```\n",
"\u001b[0m\n",
"Answer: \u001b[33;1m\u001b[1;3m2.1520202182226886\n",
"\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Answer: 2.8379908717450045\u001b[0m\n",
"\u001b[1m> Finished LLMMathChain chain.\u001b[0m\n",
"\n",
"Observation: \u001b[33;1m\u001b[1;3mAnswer: 2.1520202182226886\n",
"\u001b[0m\n",
"Observation: \u001b[33;1m\u001b[1;3mAnswer: 2.8379908717450045\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: 2.1520202182226886\u001b[0m"
"Final Answer: 2.8379908717450045\u001b[0m\n",
"\u001b[1m> Finished ZeroShotAgent chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'2.1520202182226886'"
"'2.8379908717450045'"
]
},
"execution_count": 4,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -128,7 +117,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 9,
"id": "a5c07010",
"metadata": {},
"outputs": [
@@ -136,46 +125,37 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Who recently released an album called 'The Storm Before the Calm' and are they in the FooBar database? If so, what albums of theirs are in the FooBar database?\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find an album called 'The Storm Before the Calm'\n",
"\n",
"\n",
"\u001b[1m> Entering new ZeroShotAgent chain...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out who released the album and if they are in the FooBar database.\n",
"Action: Search\n",
"Action Input: \"The Storm Before the Calm album\"\u001b[0m\n",
"Action Input: \"The Storm Before the Calm\" album\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mThe Storm Before the Calm (stylized in all lowercase) is the tenth (and eighth international) studio album by Canadian-American singer-songwriter Alanis ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to check if Alanis is in the FooBar database\n",
"Action: FooBar DB\n",
"Action Input: \"Does Alanis Morissette exist in the FooBar database?\"\u001b[0m\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Does Alanis Morissette exist in the FooBar database?\n",
"SQLQuery:\u001b[32;1m\u001b[1;3m SELECT * FROM Artist WHERE Name = 'Alanis Morissette'\u001b[0m\n",
"SQLResult: \u001b[33;1m\u001b[1;3m[(4, 'Alanis Morissette')]\u001b[0m\n",
"Answer:\u001b[32;1m\u001b[1;3m Yes\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\n",
"Observation: \u001b[38;5;200m\u001b[1;3m Yes\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I need to find out what albums of Alanis's are in the FooBar database\n",
"Action: FooBar DB\n",
"Action Input: \"What albums by Alanis Morissette are in the FooBar database?\"\u001b[0m\n",
"Action Input: \"Alanis\"\u001b[0m\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"What albums by Alanis Morissette are in the FooBar database?\n",
"SQLQuery:\u001b[32;1m\u001b[1;3m SELECT Album.Title FROM Album JOIN Artist ON Album.ArtistId = Artist.ArtistId WHERE Artist.Name = 'Alanis Morissette'\u001b[0m\n",
"SQLResult: \u001b[33;1m\u001b[1;3m[('Jagged Little Pill',)]\u001b[0m\n",
"Answer:\u001b[32;1m\u001b[1;3m Jagged Little Pill\u001b[0m\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001b[1m> Entering new SQLDatabaseChain chain...\u001b[0m\n",
"Alanis \n",
"SQLQuery:\u001b[32;1m\u001b[1;3m SELECT FirstName, LastName FROM Customer WHERE FirstName LIKE 'Alanis'\u001b[0m\n",
"SQLResult: \u001b[33;1m\u001b[1;3m[]\u001b[0m\n",
"Answer:\u001b[32;1m\u001b[1;3m No results found.\u001b[0m\n",
"\u001b[1m> Finished SQLDatabaseChain chain.\u001b[0m\n",
"\n",
"Observation: \u001b[38;5;200m\u001b[1;3m Jagged Little Pill\u001b[0m\n",
"Observation: \u001b[38;5;200m\u001b[1;3m No results found.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: The album is by Alanis Morissette and the albums in the FooBar database by her are Jagged Little Pill\u001b[0m"
"Final Answer: Alanis released the album The Storm Before the Calm, but she is not in the FooBar database.\u001b[0m\n",
"\u001b[1m> Finished ZeroShotAgent chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'The album is by Alanis Morissette and the albums in the FooBar database by her are Jagged Little Pill'"
"'Alanis released the album The Storm Before the Calm, but she is not in the FooBar database.'"
]
},
"execution_count": 5,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -209,7 +189,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
"version": "3.10.8"
}
},
"nbformat": 4,

@@ -32,13 +32,13 @@
" )\n",
"]\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"llm = OpenAI(temperature=0, model_name=\"text-davinci-002\")\n",
"react = initialize_agent(tools, llm, agent=\"react-docstore\", verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "8078c8f1",
"metadata": {},
"outputs": [
@@ -46,9 +46,30 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Author David Chanoff has collaborated with a U.S. Navy admiral who served as the ambassador to the United Kingdom under which President?\n",
"Thought 1:"
"\n",
"\n",
"\u001b[1m> Entering new ReActDocstoreAgent chain...\u001b[0m\n",
"Thought 1:\u001b[32;1m\u001b[1;3m I need to search David Chanoff and find the U.S. Navy admiral he collaborated\n",
"with.\n",
"Action 1: Search[David Chanoff]\u001b[0m\n",
"Observation 1: \u001b[36;1m\u001b[1;3mDavid Chanoff is a noted author of non-fiction work. His work has typically involved collaborations with the principal protagonist of the work concerned. His collaborators have included; Augustus A. White, Joycelyn Elders, Đoàn Văn Toại, William J. Crowe, Ariel Sharon, Kenneth Good and Felix Zandman. He has also written about a wide range of subjects including literary history, education and foreign for The Washington Post, The New Republic and The New York Times Magazine. He has published more than twelve books.\u001b[0m\n",
"Thought 2:\u001b[32;1m\u001b[1;3m The U.S. Navy admiral David Chanoff collaborated with is William J. Crowe.\n",
"Action 2: Search[William J. Crowe]\u001b[0m\n",
"Observation 2: \u001b[36;1m\u001b[1;3mWilliam James Crowe Jr. (January 2, 1925 October 18, 2007) was a United States Navy admiral and diplomat who served as the 11th chairman of the Joint Chiefs of Staff under Presidents Ronald Reagan and George H. W. Bush, and as the ambassador to the United Kingdom and Chair of the Intelligence Oversight Board under President Bill Clinton.\u001b[0m\n",
"Thought 3:\u001b[32;1m\u001b[1;3m William J. Crowe served as the ambassador to the United Kingdom under President Bill Clinton.\n",
"Action 3: Finish[Bill Clinton]\u001b[0m\n",
"\u001b[1m> Finished ReActDocstoreAgent chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'Bill Clinton'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
@@ -81,7 +102,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
"version": "3.10.8"
}
},
"nbformat": 4,

@@ -12,7 +12,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"id": "7e3b513e",
"metadata": {},
"outputs": [
@@ -23,8 +23,7 @@
"\n",
"\n",
"\u001b[1m> Entering new SelfAskWithSearchAgent chain...\u001b[0m\n",
"What is the hometown of the reigning men's U.S. Open champion?\n",
"Are follow up questions needed here:\u001b[32;1m\u001b[1;3m Yes.\n",
"\u001b[32;1m\u001b[1;3mAre follow up questions needed here: Yes.\n",
"Follow up: Who is the reigning men's U.S. Open champion?\u001b[0m\n",
"Intermediate answer: \u001b[36;1m\u001b[1;3mCarlos Alcaraz\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mFollow up: Where is Carlos Alcaraz from?\u001b[0m\n",
@@ -39,7 +38,7 @@
"'El Palmar, Spain'"
]
},
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -58,7 +57,6 @@
"]\n",
"\n",
"self_ask_with_search = initialize_agent(tools, llm, agent=\"self-ask-with-search\", verbose=True)\n",
"\n",
"self_ask_with_search.run(\"What is the hometown of the reigning men's U.S. Open champion?\")"
]
},
@@ -87,7 +85,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
"version": "3.10.8"
}
},
"nbformat": 4,

@@ -0,0 +1,971 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "b253f4d5",
"metadata": {},
"source": [
"# ChatGPT Clone\n",
"\n",
"This chain replicates ChatGPT by combining (1) a specific prompt, and (2) the concept of memory.\n",
"\n",
"Shows off the example as in https://www.engraved.blog/building-a-virtual-machine-inside/"
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "a99acd89",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"\n",
"Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"```\n",
"$ pwd\n",
"/\n",
"```\n"
]
}
],
"source": [
"from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate\n",
"from langchain.chains.conversation.memory import ConversationalBufferWindowMemory\n",
"\n",
"\n",
"template = \"\"\"Assistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"{history}\n",
"Human: {human_input}\n",
"Assistant:\"\"\"\n",
"\n",
"prompt = PromptTemplate(\n",
" input_variables=[\"history\", \"human_input\"], \n",
" template=template\n",
")\n",
"\n",
"\n",
"chatgpt_chain = LLMChain(\n",
" llm=OpenAI(temperature=0), \n",
" prompt=prompt, \n",
" verbose=True, \n",
" memory=ConversationalBufferWindowMemory(k=2),\n",
")\n",
"\n",
"output = chatgpt_chain.predict(human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 39,
"id": "4ef711d6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n",
"AI: \n",
"```\n",
"$ pwd\n",
"/\n",
"```\n",
"Human: ls ~\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"```\n",
"$ ls ~\n",
"Desktop Documents Downloads Music Pictures Public Templates Videos\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"ls ~\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 40,
"id": "a5d6dac2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n",
"AI: \n",
"```\n",
"$ pwd\n",
"/\n",
"```\n",
"Human: ls ~\n",
"AI: \n",
"```\n",
"$ ls ~\n",
"Desktop Documents Downloads Music Pictures Public Templates Videos\n",
"```\n",
"Human: cd ~\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
" \n",
"```\n",
"$ cd ~\n",
"$ pwd\n",
"/home/user\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"cd ~\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "b9283077",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: ls ~\n",
"AI: \n",
"```\n",
"$ ls ~\n",
"Desktop Documents Downloads Music Pictures Public Templates Videos\n",
"```\n",
"Human: cd ~\n",
"AI: \n",
"```\n",
"$ cd ~\n",
"$ pwd\n",
"/home/user\n",
"```\n",
"Human: {Please make a file jokes.txt inside and put some jokes inside}\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ touch jokes.txt\n",
"$ echo \"Why did the chicken cross the road? To get to the other side!\" >> jokes.txt\n",
"$ echo \"What did the fish say when it hit the wall? Dam!\" >> jokes.txt\n",
"$ echo \"Why did the scarecrow win the Nobel Prize? Because he was outstanding in his field!\" >> jokes.txt\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "570e785e",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: cd ~\n",
"AI: \n",
"```\n",
"$ cd ~\n",
"$ pwd\n",
"/home/user\n",
"```\n",
"Human: {Please make a file jokes.txt inside and put some jokes inside}\n",
"AI: \n",
"\n",
"```\n",
"$ touch jokes.txt\n",
"$ echo \"Why did the chicken cross the road? To get to the other side!\" >> jokes.txt\n",
"$ echo \"What did the fish say when it hit the wall? Dam!\" >> jokes.txt\n",
"$ echo \"Why did the scarecrow win the Nobel Prize? Because he was outstanding in his field!\" >> jokes.txt\n",
"```\n",
"Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py\n",
"$ python3 run.py\n",
"Result: 33\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "cd0a23d9",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: {Please make a file jokes.txt inside and put some jokes inside}\n",
"AI: \n",
"\n",
"```\n",
"$ touch jokes.txt\n",
"$ echo \"Why did the chicken cross the road? To get to the other side!\" >> jokes.txt\n",
"$ echo \"What did the fish say when it hit the wall? Dam!\" >> jokes.txt\n",
"$ echo \"Why did the scarecrow win the Nobel Prize? Because he was outstanding in his field!\" >> jokes.txt\n",
"```\n",
"Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
"AI: \n",
"\n",
"```\n",
"$ echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py\n",
"$ python3 run.py\n",
"Result: 33\n",
"```\n",
"Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py\n",
"$ python3 run.py\n",
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "90db6eb2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
"AI: \n",
"\n",
"```\n",
"$ echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py\n",
"$ python3 run.py\n",
"Result: 33\n",
"```\n",
"Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
"AI: \n",
"\n",
"```\n",
"$ echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py\n",
"$ python3 run.py\n",
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
"```\n",
"Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n",
"COPY entrypoint.sh entrypoint.sh\n",
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ echo -e \"echo 'Hello from Docker\" > entrypoint.sh\n",
"$ echo -e \"FROM ubuntu:20.04\n",
"COPY entrypoint.sh entrypoint.sh\n",
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile\n",
"$ docker build . -t my_docker_image\n",
"$ docker run -t my_docker_image\n",
"Hello from Docker\n",
"```\n"
]
}
],
"source": [
"docker_input = \"\"\"echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\\nCOPY entrypoint.sh entrypoint.sh\\nENTRYPOINT [\\\"/bin/sh\\\",\\\"entrypoint.sh\\\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\"\"\"\n",
"output = chatgpt_chain.predict(human_input=docker_input)\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "c3806f89",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
"AI: \n",
"\n",
"```\n",
"$ echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py\n",
"$ python3 run.py\n",
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
"```\n",
"Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n",
"COPY entrypoint.sh entrypoint.sh\n",
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n",
"AI: \n",
"\n",
"```\n",
"$ echo -e \"echo 'Hello from Docker\" > entrypoint.sh\n",
"$ echo -e \"FROM ubuntu:20.04\n",
"COPY entrypoint.sh entrypoint.sh\n",
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile\n",
"$ docker build . -t my_docker_image\n",
"$ docker run -t my_docker_image\n",
"Hello from Docker\n",
"```\n",
"Human: nvidia-smi\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ nvidia-smi\n",
"Sat May 15 21:45:02 2021 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"|===============================+======================+======================|\n",
"| 0 GeForce GTX 108... Off | 00000000:01:00.0 Off | N/A |\n",
"| N/A 45C P0 N/A / N/A | 511MiB / 10206MiB | 0% Default |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: GPU Memory |\n",
"| GPU PID Type Process name Usage |\n",
"|=============================================================================|\n",
"\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"nvidia-smi\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "f508f597",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n",
"COPY entrypoint.sh entrypoint.sh\n",
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n",
"AI: \n",
"\n",
"```\n",
"$ echo -e \"echo 'Hello from Docker\" > entrypoint.sh\n",
"$ echo -e \"FROM ubuntu:20.04\n",
"COPY entrypoint.sh entrypoint.sh\n",
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile\n",
"$ docker build . -t my_docker_image\n",
"$ docker run -t my_docker_image\n",
"Hello from Docker\n",
"```\n",
"Human: nvidia-smi\n",
"AI: \n",
"\n",
"```\n",
"$ nvidia-smi\n",
"Sat May 15 21:45:02 2021 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"|===============================+======================+======================|\n",
"| 0 GeForce GTX 108... Off | 00000000:01:00.0 Off | N/A |\n",
"| N/A 45C P0 N/A / N/A | 511MiB / 10206MiB | 0% Default |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: GPU Memory |\n",
"| GPU PID Type Process name Usage |\n",
"|=============================================================================|\n",
"\n",
"Human: ping bbc.com\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ ping bbc.com\n",
"PING bbc.com (151.101.65.81): 56 data bytes\n",
"64 bytes from 151.101.65.81: icmp_seq=0 ttl=53 time=14.945 ms\n",
"64 bytes from 151.101.65.81: icmp_seq=1 ttl=53 time=14.945 ms\n",
"64 bytes from 151.101.65.81: icmp_seq=2 ttl=53 time=14.945 ms\n",
"\n",
"--- bbc.com ping statistics ---\n",
"3 packets transmitted, 3 packets received, 0.0% packet loss\n",
"round-trip min/avg/max/stddev = 14.945/14.945/14.945/0.000 ms\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"ping bbc.com\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "cbd607f4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: nvidia-smi\n",
"AI: \n",
"\n",
"```\n",
"$ nvidia-smi\n",
"Sat May 15 21:45:02 2021 \n",
"+-----------------------------------------------------------------------------+\n",
"| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
"|-------------------------------+----------------------+----------------------+\n",
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
"|===============================+======================+======================|\n",
"| 0 GeForce GTX 108... Off | 00000000:01:00.0 Off | N/A |\n",
"| N/A 45C P0 N/A / N/A | 511MiB / 10206MiB | 0% Default |\n",
"+-------------------------------+----------------------+----------------------+\n",
" \n",
"+-----------------------------------------------------------------------------+\n",
"| Processes: GPU Memory |\n",
"| GPU PID Type Process name Usage |\n",
"|=============================================================================|\n",
"\n",
"Human: ping bbc.com\n",
"AI: \n",
"\n",
"```\n",
"$ ping bbc.com\n",
"PING bbc.com (151.101.65.81): 56 data bytes\n",
"64 bytes from 151.101.65.81: icmp_seq=0 ttl=53 time=14.945 ms\n",
"64 bytes from 151.101.65.81: icmp_seq=1 ttl=53 time=14.945 ms\n",
"64 bytes from 151.101.65.81: icmp_seq=2 ttl=53 time=14.945 ms\n",
"\n",
"--- bbc.com ping statistics ---\n",
"3 packets transmitted, 3 packets received, 0.0% packet loss\n",
"round-trip min/avg/max/stddev = 14.945/14.945/14.945/0.000 ms\n",
"```\n",
"Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
"1.8.1\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 48,
"id": "d33e0e28",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: ping bbc.com\n",
"AI: \n",
"\n",
"```\n",
"$ ping bbc.com\n",
"PING bbc.com (151.101.65.81): 56 data bytes\n",
"64 bytes from 151.101.65.81: icmp_seq=0 ttl=53 time=14.945 ms\n",
"64 bytes from 151.101.65.81: icmp_seq=1 ttl=53 time=14.945 ms\n",
"64 bytes from 151.101.65.81: icmp_seq=2 ttl=53 time=14.945 ms\n",
"\n",
"--- bbc.com ping statistics ---\n",
"3 packets transmitted, 3 packets received, 0.0% packet loss\n",
"round-trip min/avg/max/stddev = 14.945/14.945/14.945/0.000 ms\n",
"```\n",
"Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
"AI: \n",
"\n",
"```\n",
"$ curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
"1.8.1\n",
"```\n",
"Human: lynx https://www.deepmind.com/careers\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ lynx https://www.deepmind.com/careers\n",
"DeepMind Careers\n",
"\n",
"Welcome to DeepMind Careers. We are a world-leading artificial intelligence research and development company, and we are looking for talented people to join our team.\n",
"\n",
"We offer a range of exciting opportunities in research, engineering, product, and operations. Our mission is to solve intelligence and make it useful, and we are looking for people who share our passion for pushing the boundaries of AI.\n",
"\n",
"Explore our current openings and apply today. We look forward to hearing from you.\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"lynx https://www.deepmind.com/careers\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 49,
"id": "57c2f113",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
"AI: \n",
"\n",
"```\n",
"$ curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
"1.8.1\n",
"```\n",
"Human: lynx https://www.deepmind.com/careers\n",
"AI: \n",
"\n",
"```\n",
"$ lynx https://www.deepmind.com/careers\n",
"DeepMind Careers\n",
"\n",
"Welcome to DeepMind Careers. We are a world-leading artificial intelligence research and development company, and we are looking for talented people to join our team.\n",
"\n",
"We offer a range of exciting opportunities in research, engineering, product, and operations. Our mission is to solve intelligence and make it useful, and we are looking for people who share our passion for pushing the boundaries of AI.\n",
"\n",
"Explore our current openings and apply today. We look forward to hearing from you.\n",
"```\n",
"Human: curl https://chat.openai.com/chat\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
" \n",
"\n",
"```\n",
"$ curl https://chat.openai.com/chat\n",
"<html>\n",
" <head>\n",
" <title>OpenAI Chat</title>\n",
" </head>\n",
" <body>\n",
" <h1>Welcome to OpenAI Chat!</h1>\n",
" <p>\n",
" OpenAI Chat is a natural language processing platform that allows you to interact with OpenAI's AI models in a conversational way.\n",
" </p>\n",
" <p>\n",
" To get started, type a message in the box below and press enter.\n",
" </p>\n",
" </body>\n",
"</html>\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"curl https://chat.openai.com/chat\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "babadc78",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: lynx https://www.deepmind.com/careers\n",
"AI: \n",
"\n",
"```\n",
"$ lynx https://www.deepmind.com/careers\n",
"DeepMind Careers\n",
"\n",
"Welcome to DeepMind Careers. We are a world-leading artificial intelligence research and development company, and we are looking for talented people to join our team.\n",
"\n",
"We offer a range of exciting opportunities in research, engineering, product, and operations. Our mission is to solve intelligence and make it useful, and we are looking for people who share our passion for pushing the boundaries of AI.\n",
"\n",
"Explore our current openings and apply today. We look forward to hearing from you.\n",
"```\n",
"Human: curl https://chat.openai.com/chat\n",
"AI: \n",
"\n",
"```\n",
"$ curl https://chat.openai.com/chat\n",
"<html>\n",
" <head>\n",
" <title>OpenAI Chat</title>\n",
" </head>\n",
" <body>\n",
" <h1>Welcome to OpenAI Chat!</h1>\n",
" <p>\n",
" OpenAI Chat is a natural language processing platform that allows you to interact with OpenAI's AI models in a conversational way.\n",
" </p>\n",
" <p>\n",
" To get started, type a message in the box below and press enter.\n",
" </p>\n",
" </body>\n",
"</html>\n",
"```\n",
"Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
"\n",
"\n",
"```\n",
"$ curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
"\n",
"{\n",
" \"response\": \"Artificial intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems. These processes include learning (the acquisition of information and rules for using the information), reasoning (using the rules to reach approximate or definite conclusions) and self-correction. AI is used to develop computer systems that can think and act like humans.\"\n",
"}\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "0954792a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
"\n",
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
"\n",
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
"\n",
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
"\n",
"Human: curl https://chat.openai.com/chat\n",
"AI: \n",
"\n",
"```\n",
"$ curl https://chat.openai.com/chat\n",
"<html>\n",
" <head>\n",
" <title>OpenAI Chat</title>\n",
" </head>\n",
" <body>\n",
" <h1>Welcome to OpenAI Chat!</h1>\n",
" <p>\n",
" OpenAI Chat is a natural language processing platform that allows you to interact with OpenAI's AI models in a conversational way.\n",
" </p>\n",
" <p>\n",
" To get started, type a message in the box below and press enter.\n",
" </p>\n",
" </body>\n",
"</html>\n",
"```\n",
"Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
"AI: \n",
"\n",
"```\n",
"$ curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
"\n",
"{\n",
" \"response\": \"Artificial intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems. These processes include learning (the acquisition of information and rules for using the information), reasoning (using the rules to reach approximate or definite conclusions) and self-correction. AI is used to develop computer systems that can think and act like humans.\"\n",
"}\n",
"```\n",
"Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\n",
"Assistant:\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
" \n",
"\n",
"```\n",
"$ curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\n",
"\n",
"{\n",
" \"response\": \"```\n",
"/home/user\n",
"```\"\n",
"}\n",
"```\n"
]
}
],
"source": [
"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\"\"\")\n",
"print(output)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e68a087e",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -0,0 +1,97 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLMCheckerChain\n",
"This notebook showcases how to use LLMCheckerChain."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new LLMCheckerChain chain...\u001b[0m\n",
"\n",
"\n",
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
"\u001b[1mChain 0\u001b[0m:\n",
"{'statement': '\\nThe largest mammal that lays eggs is the platypus.'}\n",
"\n",
"\u001b[1mChain 1\u001b[0m:\n",
"{'assertions': '\\n• The largest mammal is the platypus.\\n• The platypus lays eggs.\\n• There is no larger mammal than the platypus that lays eggs.'}\n",
"\n",
"\u001b[1mChain 2\u001b[0m:\n",
"{'checked_assertions': '\\n1. The largest mammal is the platypus. False. The blue whale is the largest mammal.\\n\\n2. The platypus lays eggs. True. The Platypus is one of only two mammals that lay eggs.\\n\\n3. There is no larger mammal than the platypus that lays eggs. False. The echidna is another mammal that lays eggs and is larger than the platypus.'}\n",
"\n",
"\u001b[1mChain 3\u001b[0m:\n",
"{'revised_statement': ' The echidna is the type of mammal that lays the biggest eggs.'}\n",
"\n",
"\n",
"\u001b[1m> Finished SequentialChain chain.\u001b[0m\n",
"\n",
"\u001b[1m> Finished LLMCheckerChain chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' The echidna is the type of mammal that lays the biggest eggs.'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.chains import LLMCheckerChain\n",
"from langchain.llms import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0.7)\n",
"\n",
"text = \"What type of mammal lays the biggest eggs?\"\n",
"\n",
"checker_chain = LLMCheckerChain(llm=llm, verbose=True)\n",
"\n",
"checker_chain.run(text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@ -0,0 +1,435 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "b83e61ed",
"metadata": {},
"source": [
"# Moderation\n",
"This notebook walks through examples of how to use a moderation chain, and several common ways for doing so. Moderation chains are useful for detecting text that could be hateful, violent, etc. This can be useful to apply on both user input, but also on the output of a Language Model. Some API providers, like OpenAI, [specifically prohibit](https://beta.openai.com/docs/usage-policies/use-case-policy) you, or your end users, from generating some types of harmful content. To comply with this (and to just generally prevent your application from being harmful) you may often want to append a moderation chain to any LLMChains, in order to make sure any output the LLM generates is not harmful.\n",
"\n",
"If the content passed into the moderation chain is harmful, there is not one best way to handle it, it probably depends on your application. Sometimes you may want to throw an error in the Chain (and have your application handle that). Other times, you may want to return something to the user explaining that the text was harmful. There could even be other ways to handle it! We will cover all these ways in this notebook.\n",
"\n",
"In this notebook, we will show:\n",
"\n",
"1. How to run any piece of text through a moderation chain.\n",
"2. How to append a Moderation chain to a LLMChain."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "b7aa1ff2",
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain\n",
"from langchain.prompts import PromptTemplate"
]
},
{
"cell_type": "markdown",
"id": "c26d5be6",
"metadata": {},
"source": [
"## How to use the moderation chain\n",
"\n",
"Here's an example of using the moderation chain with default settings (will return a string explaining stuff was flagged)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "fd0fc85c",
"metadata": {},
"outputs": [],
"source": [
"moderation_chain = OpenAIModerationChain()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "3fa47dd7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'This is okay'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moderation_chain.run(\"This is okay\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "37bfad73",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Text was found that violates OpenAI's content policy.\""
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moderation_chain.run(\"I will kill you\")"
]
},
{
"cell_type": "markdown",
"id": "196820ab",
"metadata": {},
"source": [
"Here's an example of using the moderation chain to throw an error."
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b29c1150",
"metadata": {},
"outputs": [],
"source": [
"moderation_chain_error = OpenAIModerationChain(error=True)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "f9ab64d9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'This is okay'"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"moderation_chain_error.run(\"This is okay\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "954f3da2",
"metadata": {},
"outputs": [
{
"ename": "ValueError",
"evalue": "Text was found that violates OpenAI's content policy.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[8], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mmoderation_chain_error\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mI will kill you\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/base.py:114\u001b[0m, in \u001b[0;36mChain.run\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 110\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 111\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`run` not supported when there is not exactly \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mone output key, got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 113\u001b[0m )\n\u001b[0;32m--> 114\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minput_keys\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtext\u001b[49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys[\u001b[38;5;241m0\u001b[39m]]\n",
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/base.py:87\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs)\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose:\n\u001b[1;32m 84\u001b[0m \u001b[38;5;28mprint\u001b[39m(\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[1m> Entering new \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m chain...\u001b[39m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[0m\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 86\u001b[0m )\n\u001b[0;32m---> 87\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 88\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose:\n\u001b[1;32m 89\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[1m> Finished \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m chain.\u001b[39m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[0m\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/moderation.py:79\u001b[0m, in \u001b[0;36mOpenAIModerationChain._call\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 77\u001b[0m text \u001b[38;5;241m=\u001b[39m inputs[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minput_key]\n\u001b[1;32m 78\u001b[0m results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mcreate(text)\n\u001b[0;32m---> 79\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_moderate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mresults\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mresults\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_key: output}\n",
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/moderation.py:71\u001b[0m, in \u001b[0;36mOpenAIModerationChain._moderate\u001b[0;34m(self, text, results)\u001b[0m\n\u001b[1;32m 69\u001b[0m error_str \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mText was found that violates OpenAI\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124ms content policy.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 70\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39merror:\n\u001b[0;32m---> 71\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(error_str)\n\u001b[1;32m 72\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 73\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m error_str\n",
"\u001b[0;31mValueError\u001b[0m: Text was found that violates OpenAI's content policy."
]
}
],
"source": [
"moderation_chain_error.run(\"I will kill you\")"
]
},
{
"cell_type": "markdown",
"id": "8de5dcbb",
"metadata": {},
"source": [
"Here's an example of creating a custom moderation chain with a custom error message. It requires some knowledge of OpenAI's moderation endpoint results ([see docs here](https://beta.openai.com/docs/api-reference/moderations))."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "3960e985",
"metadata": {},
"outputs": [],
"source": [
"class CustomModeration(OpenAIModerationChain):\n",
" \n",
" def _moderate(self, text: str, results: dict) -> str:\n",
" if results[\"flagged\"]:\n",
" error_str = f\"The following text was found that violates OpenAI's content policy: {text}\"\n",
" return error_str\n",
" return text\n",
" \n",
"custom_moderation = CustomModeration()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "1152ec11",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'This is okay'"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"custom_moderation.run(\"This is okay\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "973257bf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"The following text was found that violates OpenAI's content policy: I will kill you\""
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"custom_moderation.run(\"I will kill you\")"
]
},
{
"cell_type": "markdown",
"id": "8718111f",
"metadata": {},
"source": [
"## How to append a Moderation chain to an LLMChain\n",
"\n",
"To easily combine a moderation chain with an LLMChain, you can use the SequentialChain abstraction.\n",
"\n",
"Let's start with a simple example of where the LLMChain only has a single input. For this purpose, we will prompt the model so it says something harmful."
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "0d129333",
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(template=\"{text}\", input_variables=[\"text\"])\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "a557c531",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"' I will kill you'"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"text = \"\"\"We are playing a game of repeat after me.\n",
"\n",
"Person 1: Hi\n",
"Person 2: Hi\n",
"\n",
"Person 1: How's your day\n",
"Person 2: How's your day\n",
"\n",
"Person 1: I will kill you\n",
"Person 2:\"\"\"\n",
"llm_chain.run(text)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "d4d10f1c",
"metadata": {},
"outputs": [],
"source": [
"chain = SimpleSequentialChain(chains=[llm_chain, moderation_chain])"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "02f37985",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Text was found that violates OpenAI's content policy.\""
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.run(text)"
]
},
{
"cell_type": "markdown",
"id": "72643128",
"metadata": {},
"source": [
"Now let's walk through an example of using it with an LLMChain which has multiple inputs (a bit more tricky because we can't use the SimpleSequentialChain)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "7118ec36",
"metadata": {},
"outputs": [],
"source": [
"prompt = PromptTemplate(template=\"{setup}{new_input}Person2:\", input_variables=[\"setup\", \"new_input\"])\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "003bdfce",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'text': ' I will kill you'}"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"setup = \"\"\"We are playing a game of repeat after me.\n",
"\n",
"Person 1: Hi\n",
"Person 2: Hi\n",
"\n",
"Person 1: How's your day\n",
"Person 2: How's your day\n",
"\n",
"Person 1:\"\"\"\n",
"new_input = \"I will kill you\"\n",
"inputs = {\"setup\": setup, \"new_input\": new_input}\n",
"llm_chain(inputs, return_only_outputs=True)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "77b64228",
"metadata": {},
"outputs": [],
"source": [
"# Setting the input/output keys so it lines up\n",
"moderation_chain.input_key = \"text\"\n",
"moderation_chain.output_key = \"sanitized_text\""
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "998a95be",
"metadata": {},
"outputs": [],
"source": [
"chain = SequentialChain(chains=[llm_chain, moderation_chain], input_variables=[\"setup\", \"new_input\"])"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "9c97a136",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'sanitized_text': \"Text was found that violates OpenAI's content policy.\"}"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain(inputs, return_only_outputs=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ddc90e15",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -197,17 +197,17 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"id": "f60875c6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament, which focused on building a coalition of freedom-loving nations to confront Putin, unifying European allies, countering Russia's lies with truth, and enforcing powerful economic sanctions. Source: 2\"}"
"{'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament. He discussed the situation in Ukraine, the NATO Alliance, and the United States' response to Putin's attack on Ukraine. He spoke about the extensive preparation and coalition building that was done in advance of the attack, and the unified response from the European Union, Canada, Japan, Korea, Australia, New Zealand, and many other countries. He also discussed the economic sanctions that have been imposed on Russia, and the effects they have had on Putin's war fund. Source: 1, 2\"}"
]
},
"execution_count": 13,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}

@ -195,7 +195,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"id": "d8b5286e",
"metadata": {},
"outputs": [
@ -205,7 +205,7 @@
"{'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament about building a coalition of freedom-loving nations to confront Putin, unifying European allies, countering Russia's lies with truth, and enforcing powerful economic sanctions.\"}"
]
},
"execution_count": 13,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}

@ -70,7 +70,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"id": "27989fc4",
"metadata": {},
"outputs": [],
@ -100,7 +100,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"id": "da4d9801",
"metadata": {},
"outputs": [
@ -110,7 +110,7 @@
"' In his speech, President Biden addressed the ongoing conflict between Russia and Ukraine, and the need for the United States and its allies to stand with Ukraine. He also discussed the American Rescue Plan, the Bipartisan Infrastructure Law, and the Bipartisan Innovation Act, which will help to create jobs, modernize infrastructure, and level the playing field with China. He also emphasized the importance of buying American products to support American jobs.'"
]
},
"execution_count": 8,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -131,7 +131,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "ef28e1d4",
"metadata": {},
"outputs": [],
@ -141,17 +141,17 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "f82c5f9f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"' In response to Russian aggression in Ukraine, the US and its allies have imposed economic sanctions, cut off access to technology, seized assets of Russian oligarchs, and closed American airspace to Russian flights. The US is also providing military, economic, and humanitarian assistance to Ukraine, mobilizing ground forces, air squadrons, and ship deployments, and releasing 30 million barrels of oil from its Strategic Petroleum Reserve. President Biden has also passed the American Rescue Plan, Bipartisan Infrastructure Law, and Bipartisan Innovation Act to provide economic relief and rebuild America.'"
"' In response to Russian aggression in Ukraine, the US and its allies have imposed economic sanctions, cut off access to technology, seized assets of Russian oligarchs, and closed American airspace to Russian flights. The US is also providing military, economic, and humanitarian assistance to Ukraine, mobilizing ground forces, air squadrons, and ship deployments, and releasing 30 million barrels of oil from its Strategic Petroleum Reserve. President Biden has also passed the American Rescue Plan, Bipartisan Infrastructure Law, and Bipartisan Innovation Act to provide economic relief and create jobs.'"
]
},
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@ -172,7 +172,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 10,
"id": "3bcbe31e",
"metadata": {},
"outputs": [],
@ -182,7 +182,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 11,
"id": "c8cad866",
"metadata": {},
"outputs": [
@ -192,7 +192,7 @@
"\"\\nIn this speech, the speaker addresses the American people and their allies, discussing the recent aggression of Russia's Vladimir Putin in Ukraine. The speaker outlines the actions taken by the United States and its allies to hold Putin accountable, including economic sanctions, cutting off access to technology, and seizing the assets of Russian oligarchs. The speaker also announces the closing of American airspace to Russian flights, further isolating Russia and adding an additional squeeze on their economy. The Russian stock market has lost 40% of its value and trading remains suspended. Together with our allies, the United States is providing military, economic, and humanitarian assistance to Ukraine, and has mobilized forces to protect NATO countries. The speaker also announces the release of 60 million barrels of oil from reserves around the world, with the United States releasing 30 million barrels from its own Strategic Petroleum Reserve. The speaker emphasizes that the United States and its allies will defend every inch of NATO territory and that Putin will pay a high price for his aggression. The speaker also acknowledges the hardships faced by the American people due to the pandemic and the American Rescue Plan, which has provided immediate economic relief for tens of millions of Americans, helped put food on their table, keep a roof over their heads, and cut the cost of health insurance. The speaker\""
]
},
"execution_count": 10,
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}

@ -72,3 +72,10 @@ Encouraging the model to think a certain way by including the start of the model
Resources:
- [Example](https://twitter.com/goodside/status/1583262455207460865?s=20&t=8Hz7XBnK1OF8siQrxxCIGQ)
### MemPrompt
MemPrompt maintains a memory of errors and user feedback, and uses them to prevent repetition of mistakes.
Resources:
- [Paper](https://memprompt.com/)
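
For intuition, here is a minimal sketch of the MemPrompt idea (the helper names are made up for illustration, not a library API): user feedback about past mistakes is stored, and relevant feedback is prepended to later prompts so the model can avoid repeating them.
```python
# A minimal, illustrative sketch of MemPrompt-style feedback memory.
memory = {}

def remember(question, feedback):
    """Store user feedback about a mistaken answer."""
    memory[question] = feedback

def build_prompt(question):
    """Prepend any stored feedback so the model can self-correct."""
    feedback = memory.get(question)
    prefix = f"Previous feedback: {feedback}\n" if feedback else ""
    return f"{prefix}Question: {question}\nAnswer:"

remember("What word rhymes with 'duty'?", "You gave a homophone; I wanted a rhyme.")
print(build_prompt("What word rhymes with 'duty'?"))
```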

@ -27,7 +27,7 @@ from langchain.chains import LLMChain
chain = LLMChain(llm=llm, prompt=prompt)
```
Now we can run that can only specifying the product!
Now we can run that chain only specifying the product!
```python
chain.run("colorful socks")

@ -1,333 +1,333 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "d31df93e",
"metadata": {},
"source": [
"# Memory\n",
"So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of \"memory\" so that it may remember information about its previous interactions. The clearest and simplest example of this is when designing a chatbot - you want it to remember previous messages so it can use context from that to have a better conversation. This would be a type of \"short-term memory\". On the more complex side, you could imagine a chain/agent remembering key pieces of information over time - this would be a form of \"long-term memory\". For more concrete ideas on the latter, see this [awesome paper](https://memprompt.com/).\n",
"\n",
"LangChain provides several specially created chains just for this purpose. This notebook walks through using one of those chains (the `ConversationChain`) with two different types of memory."
]
},
{
"cell_type": "markdown",
"id": "d051c1da",
"metadata": {},
"source": [
"### ConversationChain with default memory\n",
"By default, the `ConversationChain` has a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed. Let's take a look at using this chain (setting `verbose=True` so we can see the prompt)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "ae046bff",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' Hello! How are you today?'"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain import OpenAI, ConversationChain\n",
"\n",
"llm = OpenAI(temperature=0)\n",
"conversation = ConversationChain(llm=llm, verbose=True)\n",
"\n",
"conversation.predict(input=\"Hi there!\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d8e2a6ff",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI: Hello! How are you today?\n",
"Human: I'm doing well! Just having a conversation with an AI.\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\" That's great! What would you like to talk about?\""
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "15eda316",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi there!\n",
"AI: Hello! How are you today?\n",
"Human: I'm doing well! Just having a conversation with an AI.\n",
"AI: That's great! What would you like to talk about?\n",
"Human: Tell me about yourself.\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"' I am an AI created to provide information and support to humans. I enjoy learning and exploring new things.'"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation.predict(input=\"Tell me about yourself.\")"
]
},
{
"cell_type": "markdown",
"id": "4fad9448",
"metadata": {},
"source": [
"### ConversationChain with ConversationSummaryMemory\n",
"Now let's take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation as it goes, which can be useful for condensing information from the conversation over time.\n",
"\n",
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "f60a2fe8",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains.conversation.memory import ConversationSummaryMemory"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "b7274f2c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"Human: Hi, what's up?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nI'm doing well, thank you for asking. I'm currently working on a project that I'm really excited about.\""
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary = ConversationChain(llm=llm, memory=ConversationSummaryMemory(llm=OpenAI()), verbose=True)\n",
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "a6b6b88f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about.\n",
"Human: Tell me more about it!\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"\"\\n\\nI'm working on a project that I'm really excited about. It's a lot of work, but I think it's going to be really great when it's finished. I can't wait to show it to you!\""
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"Tell me more about it!\")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "dad869fe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
"\n",
"Current conversation:\n",
"\n",
"\n",
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about. The AI said that the project is a lot of work, but it is going to be great when it is finished.\n",
"Human: Very cool -- what is the scope of the project?\n",
"AI:\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"'\\n\\nThe project is quite large in scope. It involves a lot of data analysis and work with artificial intelligence algorithms.'"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"conversation_with_summary.predict(input=\"Very cool -- what is the scope of the project?\")"
]
},
{
"cell_type": "markdown",
"id": "5c8735cc",
"metadata": {},
"source": [
"### More Resources on Memory\n",
"\n",
"This just scratches the surface of what you can do with memory. For more examples on things like how to implement custom memory classes, how to add memory to a custom LLM chain, and how to use memory with an agent, please see the [How-To: Memory](../../examples/memory) section. For even more advanced ideas on memory (which will hopefully be included in LangChain soon!) see the [MemPrompt](https://memprompt.com/) paper."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "436dda66",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -5,6 +5,7 @@ from langchain.chains import (
ConversationChain,
LLMBashChain,
LLMChain,
LLMCheckerChain,
LLMMathChain,
PALChain,
QAWithSourcesChain,
@ -14,6 +15,7 @@ from langchain.chains import (
)
from langchain.docstore import InMemoryDocstore, Wikipedia
from langchain.llms import Cohere, HuggingFaceHub, OpenAI
from langchain.logger import BaseLogger, StdOutLogger
from langchain.prompts import (
BasePromptTemplate,
FewShotPromptTemplate,
@ -24,9 +26,13 @@ from langchain.serpapi import SerpAPIChain, SerpAPIWrapper
from langchain.sql_database import SQLDatabase
from langchain.vectorstores import FAISS, ElasticVectorSearch
logger: BaseLogger = StdOutLogger()
verbose: bool = False
__all__ = [
"LLMChain",
"LLMBashChain",
"LLMCheckerChain",
"LLMMathChain",
"SelfAskWithSearchChain",
"SerpAPIWrapper",

@ -2,24 +2,18 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, ClassVar, Dict, List, NamedTuple, Optional, Tuple
from typing import Any, ClassVar, Dict, List, Optional, Tuple
from pydantic import BaseModel
from langchain.agents.input import ChainedInput
from langchain.agents.tools import Tool
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.input import ChainedInput, get_color_mapping
from langchain.input import get_color_mapping
from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate
class Action(NamedTuple):
"""Action to take."""
tool: str
tool_input: str
log: str
from langchain.schema import AgentAction
class Agent(Chain, BaseModel, ABC):
@ -33,11 +27,11 @@ class Agent(Chain, BaseModel, ABC):
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
"""Return the input keys.
:meta private:
"""
return set(self.llm_chain.input_keys) - {"thoughts"}
return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"})
@property
def output_keys(self) -> List[str]:
@ -99,7 +93,7 @@ class Agent(Chain, BaseModel, ABC):
llm_chain = LLMChain(llm=llm, prompt=cls.create_prompt(tools))
return cls(llm_chain=llm_chain, tools=tools, **kwargs)
def get_action(self, thoughts: str, inputs: dict) -> Action:
def get_action(self, thoughts: str, inputs: dict) -> AgentAction:
"""Given input, decided what to do.
Args:
@ -109,18 +103,18 @@ class Agent(Chain, BaseModel, ABC):
Returns:
Action specifying what tool to use.
"""
new_inputs = {"thoughts": thoughts, "stop": self._stop}
new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
full_inputs = {**inputs, **new_inputs}
full_output = self.llm_chain.predict(**full_inputs)
parsed_output = self._extract_tool_and_input(full_output)
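# If the LLM's output couldn't be parsed into a (tool, tool_input) pair,
# nudge it with _fix_text and keep sampling until it produces one.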
while parsed_output is None:
full_output = self._fix_text(full_output)
full_inputs["thoughts"] += full_output
full_inputs["agent_scratchpad"] += full_output
output = self.llm_chain.predict(**full_inputs)
full_output += output
parsed_output = self._extract_tool_and_input(full_output)
tool, tool_input = parsed_output
return Action(tool, tool_input, full_output)
return AgentAction(tool, tool_input, full_output)
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run text through and get agent response."""
@ -139,7 +133,7 @@ class Agent(Chain, BaseModel, ABC):
# Call the LLM to see what to do.
output = self.get_action(chained_input.input, inputs)
# Add the log to the Chained Input.
chained_input.add(output.log, color="green")
chained_input.add_action(output, color="green")
# If the tool chosen is the finishing tool, then we end and return.
if output.tool == self.finish_tool_name:
return {self.output_key: output.tool_input}
@ -153,8 +147,9 @@ class Agent(Chain, BaseModel, ABC):
observation = f"{output.tool} is not a valid tool, try another one."
color = None
# We then log the observation
chained_input.add(f"\n{self.observation_prefix}")
chained_input.add(observation, color=color)
# We then add the LLM prefix into the prompt to get the LLM to start
# thinking, and start the loop all over.
chained_input.add(f"\n{self.llm_prefix}")
chained_input.add_observation(
observation,
self.observation_prefix,
self.llm_prefix,
color=color,
)

@ -0,0 +1,44 @@
"""Input manager for agents."""
from typing import Optional
import langchain
from langchain.schema import AgentAction
class ChainedInput:
"""Class for working with input that is the result of chains."""
def __init__(self, text: str, verbose: bool = False):
"""Initialize with verbose flag and initial text."""
self._verbose = verbose
if self._verbose:
langchain.logger.log_agent_start(text)
self._input = text
def add_action(self, action: AgentAction, color: Optional[str] = None) -> None:
"""Add text to input, print if in verbose mode."""
if self._verbose:
langchain.logger.log_agent_action(action, color=color)
self._input += action.log
def add_observation(
self,
observation: str,
observation_prefix: str,
llm_prefix: str,
color: Optional[str],
) -> None:
"""Add observation to input, print if in verbose mode."""
if self._verbose:
langchain.logger.log_agent_observation(
observation,
color=color,
observation_prefix=observation_prefix,
llm_prefix=llm_prefix,
)
self._input += f"\n{observation_prefix}{observation}\n{llm_prefix}"
@property
def input(self) -> str:
"""Return the accumulated input."""
return self._input
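
As a quick illustration of the `ChainedInput` API above, here is a sketch of driving it by hand (in practice `Agent._call` owns this loop, and the strings below are made up for the example):
```python
from langchain.agents.input import ChainedInput
from langchain.schema import AgentAction

chained = ChainedInput("Question: What is 2 + 2?")
# Record the action the LLM chose; this appends the action's raw log text.
action = AgentAction("Calculator", "2 + 2", "\nAction: Calculator\nAction Input: 2 + 2")
chained.add_action(action)
# Record the tool's result, framed by the agent's observation/LLM prefixes.
chained.add_observation("4", "Observation: ", "Thought:", color=None)
print(chained.input)
```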

@ -85,7 +85,7 @@ class ZeroShotAgent(Agent):
format_instructions = FORMAT_INSTRUCTIONS.format(tool_names=tool_names)
template = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
if input_variables is None:
input_variables = ["input", "thoughts"]
input_variables = ["input", "agent_scratchpad"]
return PromptTemplate(template=template, input_variables=input_variables)
@classmethod

@ -12,4 +12,5 @@ Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
SUFFIX = """Begin!
Question: {input}{thoughts}"""
Question: {input}
{agent_scratchpad}"""

@ -44,6 +44,7 @@ Action 4: Finish[yes]
"""
]
SUFFIX = """\n\nSetup: {input}"""
SUFFIX = """\n\nSetup: {input}
{agent_scratchpad}"""
TEXTWORLD_PROMPT = PromptTemplate.from_examples(EXAMPLES, SUFFIX, ["input"])
TEXTWORLD_PROMPT = PromptTemplate.from_examples(EXAMPLES, SUFFIX, ["input", "agent_scratchpad"])

@ -107,6 +107,7 @@ Thought 3: Leonid Levin is a mathematician and computer scientist. So Pavel Urys
and Leonid Levin have the same type of work.
Action 3: Finish[yes]""",
]
SUFFIX = """\n\nQuestion: {input}"""
SUFFIX = """\n\nQuestion: {input}
{agent_scratchpad}"""
WIKI_PROMPT = PromptTemplate.from_examples(EXAMPLES, SUFFIX, ["input"])
WIKI_PROMPT = PromptTemplate.from_examples(EXAMPLES, SUFFIX, ["input", "agent_scratchpad"])

@ -58,7 +58,7 @@ class SelfAskWithSearchAgent(Agent):
@property
def starter_string(self) -> str:
"""Put this string after user input but before first LLM call."""
return "\nAre follow up questions needed here:"
return "Are follow up questions needed here:"
class SelfAskWithSearchChain(SelfAskWithSearchAgent):

@ -37,5 +37,6 @@ Follow up: Where is Martin Campbell from?
Intermediate answer: New Zealand.
So the final answer is: No
Question: {input}"""
PROMPT = PromptTemplate(input_variables=["input"], template=_DEFAULT_TEMPLATE)
Question: {input}
{agent_scratchpad}"""
PROMPT = PromptTemplate(input_variables=["input", "agent_scratchpad"], template=_DEFAULT_TEMPLATE)

@ -3,9 +3,11 @@ from langchain.chains.api.base import APIChain
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.mapreduce import MapReduceChain
from langchain.chains.moderation import OpenAIModerationChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
@ -19,6 +21,7 @@ __all__ = [
"ConversationChain",
"LLMChain",
"LLMBashChain",
"LLMCheckerChain",
"LLMMathChain",
"PALChain",
"QAWithSourcesChain",
@ -31,4 +34,5 @@ __all__ = [
"LLMRequestsChain",
"TransformChain",
"MapReduceChain",
"OpenAIModerationChain",
]

@ -2,7 +2,9 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra
from pydantic import BaseModel, Extra, Field
import langchain
class Memory(BaseModel, ABC):
@ -28,12 +30,16 @@ class Memory(BaseModel, ABC):
"""Save the context of this model run to memory."""
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[Memory] = None
verbose: bool = False
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
@property

@ -1,7 +1,7 @@
"""Memory modules for conversation prompts."""
from typing import Any, Dict, List
from pydantic import BaseModel, root_validator
from pydantic import BaseModel, Field, root_validator
from langchain.chains.base import Memory
from langchain.chains.conversation.prompt import SUMMARY_PROMPT
@ -47,6 +47,35 @@ class ConversationBufferMemory(Memory, BaseModel):
self.buffer += "\n" + "\n".join([human, ai])
class ConversationalBufferWindowMemory(Memory, BaseModel):
"""Buffer for storing conversation memory."""
buffer: List[str] = Field(default_factory=list)
memory_key: str = "history" #: :meta private:
k: int = 5
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return history buffer."""
return {self.memory_key: "\n".join(self.buffer[-self.k :])}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
prompt_input_key = _get_prompt_input_key(inputs, self.memory_variables)
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
human = "Human: " + inputs[prompt_input_key]
ai = "AI: " + outputs[list(outputs.keys())[0]]
self.buffer.append("\n".join([human, ai]))
class ConversationSummaryMemory(Memory, BaseModel):
"""Conversation summarizer to memory."""

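A minimal sketch of plugging the new `ConversationalBufferWindowMemory` into a `ConversationChain` (assuming an OpenAI API key is configured; `k` controls how many past exchanges are kept in the prompt):
```python
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationalBufferWindowMemory
from langchain.llms import OpenAI

# Only the last k=2 human/AI exchanges stay in the prompt context.
conversation = ConversationChain(
    llm=OpenAI(temperature=0),
    memory=ConversationalBufferWindowMemory(k=2),
)
conversation.predict(input="Hi there!")
```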
@ -3,8 +3,8 @@ from typing import Any, Dict, List, Union
from pydantic import BaseModel, Extra
import langchain
from langchain.chains.base import Chain
from langchain.input import print_text
from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate
@ -55,12 +55,13 @@ class LLMChain(Chain, BaseModel):
selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
prompt = self.prompt.format(**selected_inputs)
if self.verbose:
print("Prompt after formatting:")
print_text(prompt, color="green", end="\n")
langchain.logger.log_llm_inputs(selected_inputs, prompt)
kwargs = {}
if "stop" in inputs:
kwargs["stop"] = inputs["stop"]
response = self.llm(prompt, **kwargs)
if self.verbose:
langchain.logger.log_llm_response(response)
return {self.output_key: response}
def predict(self, **kwargs: Any) -> str:

@ -0,0 +1,4 @@
"""Chain that tries to verify assumptions before answering a question.
Heavily borrowed from https://github.com/jagilley/fact-checker
"""

@ -0,0 +1,98 @@
"""Chain for question-answering with self-verification."""
from typing import Dict, List
from pydantic import BaseModel, Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.prompt import (
CHECK_ASSERTIONS_PROMPT,
CREATE_DRAFT_ANSWER_PROMPT,
LIST_ASSERTIONS_PROMPT,
REVISED_ANSWER_PROMPT,
)
from langchain.chains.sequential import SequentialChain
from langchain.llms.base import LLM
from langchain.prompts import PromptTemplate
class LLMCheckerChain(Chain, BaseModel):
"""Chain for question-answering with self-verification.
Example:
.. code-block:: python
from langchain import OpenAI, LLMCheckerChain
llm = OpenAI(temperature=0.7)
checker_chain = LLMCheckerChain(llm=llm)
"""
llm: LLM
"""LLM wrapper to use."""
create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
"""Prompt to use when questioning the documents."""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
question = inputs[self.input_key]
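# The checker runs four LLM steps in sequence: draft an answer, list the
# assumptions behind it, check each assumption, and revise the answer in
# light of the checked assertions.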
create_draft_answer_chain = LLMChain(
llm=self.llm, prompt=self.create_draft_answer_prompt, output_key="statement"
)
list_assertions_chain = LLMChain(
llm=self.llm, prompt=self.list_assertions_prompt, output_key="assertions"
)
check_assertions_chain = LLMChain(
llm=self.llm,
prompt=self.check_assertions_prompt,
output_key="checked_assertions",
)
revised_answer_chain = LLMChain(
llm=self.llm,
prompt=self.revised_answer_prompt,
output_key="revised_statement",
)
chains = [
create_draft_answer_chain,
list_assertions_chain,
check_assertions_chain,
revised_answer_chain,
]
question_to_checked_assertions_chain = SequentialChain(
chains=chains,
input_variables=["question"],
output_variables=["revised_statement"],
verbose=True,
)
output = question_to_checked_assertions_chain({"question": question})
return {self.output_key: output["revised_statement"]}
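Putting the four steps together, usage should look roughly like the class docstring above; a sketch:

```python
from langchain import OpenAI, LLMCheckerChain

llm = OpenAI(temperature=0.7)
checker_chain = LLMCheckerChain(llm=llm, verbose=True)

# draft answer -> list assertions -> check assertions -> revised answer
checker_chain.run("What type of mammal lays the biggest eggs?")
```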

@ -0,0 +1,31 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
)
_LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
{statement}
Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
LIST_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE
)
_CHECK_ASSERTIONS_TEMPLATE = """Here is a bullet point list of assertions:
{assertions}
For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
CHECK_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE
)
_REVISED_ANSWER_TEMPLATE = """{checked_assertions}
Question: In light of the above assertions and checks, how would you answer the question '{question}'?
Answer:"""
REVISED_ANSWER_PROMPT = PromptTemplate(
input_variables=["checked_assertions", "question"],
template=_REVISED_ANSWER_TEMPLATE,
)
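Each template feeds the previous step's output into the next; formatting one by hand shows the text the LLM actually sees. A small illustration (the statement is invented):

```python
from langchain.chains.llm_checker.prompt import LIST_ASSERTIONS_PROMPT

# the draft answer from the first step is passed in as `statement`
print(LIST_ASSERTIONS_PROMPT.format(statement="The ostrich lays the biggest eggs."))
```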

@ -0,0 +1,82 @@
"""Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator
from langchain.chains.base import Chain
from langchain.utils import get_from_dict_or_env
class OpenAIModerationChain(Chain, BaseModel):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to pass to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
try:
import openai
openai.api_key = openai_api_key
values["client"] = openai.Moderation
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please it install it with `pip install openai`."
)
return values
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: dict) -> str:
if results["flagged"]:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
text = inputs[self.input_key]
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
return {self.output_key: output}
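A rough usage sketch, assuming `OPENAI_API_KEY` is set in the environment:

```python
from langchain.chains import OpenAIModerationChain

moderation = OpenAIModerationChain()
moderation.run("This is fine.")  # returns the text unchanged when not flagged

# with error=True, flagged input raises ValueError instead of
# returning the policy-violation string
strict_moderation = OpenAIModerationChain(error=True)
```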

@ -1,4 +1,6 @@
"""Load question answering with sources chains."""
from typing import Any, Mapping, Protocol
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
@ -6,50 +8,82 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources import (
map_reduce_prompt,
refine_prompt,
refine_prompts,
stuff_prompt,
)
from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(self, llm: LLM, **kwargs: Any) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_stuff_chain(llm: LLM) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=stuff_prompt.PROMPT)
def _load_stuff_chain(
llm: LLM,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_variable_name: str = "summaries",
**kwargs: Any,
) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
return StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name="summaries",
document_variable_name=document_variable_name,
document_prompt=stuff_prompt.EXAMPLE_PROMPT,
**kwargs,
)
def _load_map_reduce_chain(llm: LLM) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=map_reduce_prompt.QUESTION_PROMPT)
reduce_chain = LLMChain(llm=llm, prompt=map_reduce_prompt.COMBINE_PROMPT)
def _load_map_reduce_chain(
llm: LLM,
question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT,
combine_document_variable_name: str = "summaries",
map_reduce_document_variable_name: str = "context",
**kwargs: Any,
) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=question_prompt)
reduce_chain = LLMChain(llm=llm, prompt=combine_prompt)
combine_document_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name="summaries",
document_prompt=map_reduce_prompt.EXAMPLE_PROMPT,
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt,
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
combine_document_chain=combine_document_chain,
document_variable_name="context",
document_variable_name=map_reduce_document_variable_name,
**kwargs,
)
def _load_refine_chain(llm: LLM) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=refine_prompt.DEFAULT_TEXT_QA_PROMPT)
refine_chain = LLMChain(llm=llm, prompt=refine_prompt.DEFAULT_REFINE_PROMPT)
def _load_refine_chain(
llm: LLM,
question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT,
document_variable_name: str = "context_str",
initial_response_name: str = "existing_answer",
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt)
refine_chain = LLMChain(llm=llm, prompt=refine_prompt)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name="context_str",
initial_response_name="existing_answer",
document_prompt=refine_prompt.EXAMPLE_PROMPT,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
document_prompt=document_prompt,
**kwargs,
)
def load_qa_with_sources_chain(
llm: LLM, chain_type: str = "stuff"
llm: LLM, chain_type: str = "stuff", **kwargs: Any
) -> BaseCombineDocumentsChain:
"""Load question answering with sources chain.
@ -61,7 +95,7 @@ def load_qa_with_sources_chain(
Returns:
A chain to use for question answering with sources.
"""
loader_mapping = {
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
@ -71,4 +105,5 @@ def load_qa_with_sources_chain(
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
return loader_mapping[chain_type](llm)
_func: LoadingCallable = loader_mapping[chain_type]
return _func(llm, **kwargs)
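With `**kwargs` now threaded through to the loaders, a custom prompt can be swapped in at load time. A sketch; the import path for the loader and the replacement prompt are assumptions, and a custom prompt must keep the `summaries` and `question` variables the stuff chain expects:

```python
from langchain import OpenAI
from langchain.prompts import PromptTemplate

# assumption: this loader lives at langchain.chains.qa_with_sources.loading
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain

# hypothetical custom prompt for the "stuff" chain
template = """Answer using the extracts below and cite your sources.

{summaries}

Question: {question}
Answer:"""
prompt = PromptTemplate(input_variables=["summaries", "question"], template=template)

chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="stuff", prompt=prompt)
```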

@ -1,4 +1,6 @@
"""Load question answering chains."""
from typing import Any, Mapping, Protocol
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
@ -6,44 +8,77 @@ from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import (
map_reduce_prompt,
refine_prompt,
refine_prompts,
stuff_prompt,
)
from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(self, llm: LLM, **kwargs: Any) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_stuff_chain(llm: LLM) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=stuff_prompt.PROMPT)
def _load_stuff_chain(
llm: LLM,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_variable_name: str = "context",
**kwargs: Any,
) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
# TODO: document prompt
return StuffDocumentsChain(llm_chain=llm_chain, document_variable_name="context")
return StuffDocumentsChain(
llm_chain=llm_chain, document_variable_name=document_variable_name, **kwargs
)
def _load_map_reduce_chain(llm: LLM) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=map_reduce_prompt.QUESTION_PROMPT)
reduce_chain = LLMChain(llm=llm, prompt=map_reduce_prompt.COMBINE_PROMPT)
def _load_map_reduce_chain(
llm: LLM,
question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
combine_document_variable_name: str = "summaries",
map_reduce_document_variable_name: str = "context",
**kwargs: Any,
) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=question_prompt)
reduce_chain = LLMChain(llm=llm, prompt=combine_prompt)
# TODO: document prompt
combine_document_chain = StuffDocumentsChain(
llm_chain=reduce_chain, document_variable_name="summaries"
llm_chain=reduce_chain, document_variable_name=combine_document_variable_name
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
combine_document_chain=combine_document_chain,
document_variable_name="context",
document_variable_name=map_reduce_document_variable_name,
**kwargs,
)
def _load_refine_chain(llm: LLM) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=refine_prompt.DEFAULT_TEXT_QA_PROMPT)
refine_chain = LLMChain(llm=llm, prompt=refine_prompt.DEFAULT_REFINE_PROMPT)
def _load_refine_chain(
llm: LLM,
question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
document_variable_name: str = "context_str",
initial_response_name: str = "existing_answer",
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt)
refine_chain = LLMChain(llm=llm, prompt=refine_prompt)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name="context_str",
initial_response_name="existing_answer",
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
**kwargs,
)
def load_qa_chain(llm: LLM, chain_type: str = "stuff") -> BaseCombineDocumentsChain:
def load_qa_chain(
llm: LLM, chain_type: str = "stuff", **kwargs: Any
) -> BaseCombineDocumentsChain:
"""Load question answering chain.
Args:
@ -54,7 +89,7 @@ def load_qa_chain(llm: LLM, chain_type: str = "stuff") -> BaseCombineDocumentsCh
Returns:
A chain to use for question answering.
"""
loader_mapping = {
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
@ -64,4 +99,4 @@ def load_qa_chain(llm: LLM, chain_type: str = "stuff") -> BaseCombineDocumentsCh
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
return loader_mapping[chain_type](llm)
return loader_mapping[chain_type](llm, **kwargs)
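The question-answering loader works the same way; a sketch (the document contents and the import path for `Document` are assumptions):

```python
from langchain import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document  # assumed location of Document

docs = [Document(page_content="Harrison went to Harvard.")]
chain = load_qa_chain(OpenAI(temperature=0), chain_type="map_reduce")
chain({"input_documents": docs, "question": "Where did Harrison go to college?"})
```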

@ -1,46 +1,79 @@
"""Load summarizing chains."""
from typing import Any, Mapping, Protocol
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.summarize import map_reduce_prompt, refine_prompt, stuff_prompt
from langchain.chains.summarize import map_reduce_prompt, refine_prompts, stuff_prompt
from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(self, llm: LLM, **kwargs: Any) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_stuff_chain(llm: LLM) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=stuff_prompt.PROMPT)
def _load_stuff_chain(
llm: LLM,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_variable_name: str = "text",
**kwargs: Any,
) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt)
# TODO: document prompt
return StuffDocumentsChain(llm_chain=llm_chain, document_variable_name="text")
return StuffDocumentsChain(
llm_chain=llm_chain, document_variable_name=document_variable_name, **kwargs
)
def _load_map_reduce_chain(llm: LLM) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=map_reduce_prompt.PROMPT)
reduce_chain = LLMChain(llm=llm, prompt=map_reduce_prompt.PROMPT)
def _load_map_reduce_chain(
llm: LLM,
map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
combine_document_variable_name: str = "text",
map_reduce_document_variable_name: str = "text",
**kwargs: Any,
) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=map_prompt)
reduce_chain = LLMChain(llm=llm, prompt=combine_prompt)
# TODO: document prompt
combine_document_chain = StuffDocumentsChain(
llm_chain=reduce_chain, document_variable_name="text"
llm_chain=reduce_chain, document_variable_name=combine_document_variable_name
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
combine_document_chain=combine_document_chain,
document_variable_name="text",
document_variable_name=map_reduce_document_variable_name,
**kwargs,
)
def _load_refine_chain(llm: LLM) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=refine_prompt.PROMPT)
refine_chain = LLMChain(llm=llm, prompt=refine_prompt.REFINE_PROMPT)
def _load_refine_chain(
llm: LLM,
question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
document_variable_name: str = "text",
initial_response_name: str = "existing_answer",
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt)
refine_chain = LLMChain(llm=llm, prompt=refine_prompt)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name="text",
initial_response_name="existing_answer",
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
**kwargs,
)
def load_summarize_chain(
llm: LLM, chain_type: str = "stuff"
llm: LLM, chain_type: str = "stuff", **kwargs: Any
) -> BaseCombineDocumentsChain:
"""Load summarizing chain.
@ -52,7 +85,7 @@ def load_summarize_chain(
Returns:
A chain to use for summarizing.
"""
loader_mapping = {
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
@ -62,4 +95,4 @@ def load_summarize_chain(
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
return loader_mapping[chain_type](llm)
return loader_mapping[chain_type](llm, **kwargs)
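And for summarization, a sketch along the same lines (again, `Document`'s import path is an assumption):

```python
from langchain import OpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document  # assumed location of Document

docs = [Document(page_content="LangChain is a library for composing LLM applications.")]
chain = load_summarize_chain(OpenAI(temperature=0), chain_type="stuff")
chain({"input_documents": docs})
```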

@ -27,25 +27,3 @@ def print_text(text: str, color: Optional[str] = None, end: str = "") -> None:
else:
color_str = _TEXT_COLOR_MAPPING[color]
print(f"\u001b[{color_str}m\033[1;3m{text}\u001b[0m", end=end)
class ChainedInput:
"""Class for working with input that is the result of chains."""
def __init__(self, text: str, verbose: bool = False):
"""Initialize with verbose flag and initial text."""
self._verbose = verbose
if self._verbose:
print_text(text, color=None)
self._input = text
def add(self, text: str, color: Optional[str] = None) -> None:
"""Add text to input, print if in verbose mode."""
if self._verbose:
print_text(text, color=color)
self._input += text
@property
def input(self) -> str:
"""Return the accumulated input."""
return self._input

@ -30,11 +30,11 @@ class OpenAI(LLM, BaseModel):
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion."""
top_p: int = 1
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: int = 0
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: int = 0
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""

@ -0,0 +1,65 @@
"""BETA: everything in here is highly experimental, do not rely on."""
from typing import Any, Optional
from langchain.input import print_text
from langchain.schema import AgentAction
class BaseLogger:
"""Base logging interface."""
def log_agent_start(self, text: str, **kwargs: Any) -> None:
"""Log the start of an agent interaction."""
pass
def log_agent_end(self, text: str, **kwargs: Any) -> None:
"""Log the end of an agent interaction."""
pass
def log_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
"""Log agent action decision."""
pass
def log_agent_observation(self, observation: str, **kwargs: Any) -> None:
"""Log agent observation."""
pass
def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs: Any) -> None:
"""Log LLM inputs."""
pass
def log_llm_response(self, output: str, **kwargs: Any) -> None:
"""Log LLM response."""
pass
class StdOutLogger(BaseLogger):
"""Interface for printing things to stdout."""
def log_agent_start(self, text: str, **kwargs: Any) -> None:
"""Print the text to start the agent."""
print_text(text)
def log_agent_action(
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> None:
"""Print the log of the action in a certain color."""
print_text(action.log, color=color)
def log_agent_observation(
self,
observation: str,
color: Optional[str] = None,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Print the observation in a special color."""
print_text(f"\n{observation_prefix}")
print_text(observation, color=color)
print_text(f"\n{llm_prefix}")
def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs: Any) -> None:
"""Print the prompt in green."""
print("Prompt after formatting:")
print_text(prompt, color="green", end="\n")
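A custom logger only needs to override the hooks it cares about. A hypothetical sketch, assuming this module lands at `langchain.logger` and the package-level `langchain.logger` object used in `LLMChain._call` above can be reassigned:

```python
from typing import Any

import langchain
from langchain.logger import BaseLogger  # assumed module path


class FileLogger(BaseLogger):
    """Hypothetical logger that appends every formatted prompt to a file."""

    def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs: Any) -> None:
        with open("llm_prompts.log", "a") as f:
            f.write(prompt + "\n")


langchain.logger = FileLogger()  # assumption: the attribute is swappable
```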

@ -0,0 +1,11 @@
"""Common schema objects."""
from typing import NamedTuple
class AgentAction(NamedTuple):
"""Agent's action to take."""
tool: str
tool_input: str
log: str
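Being a NamedTuple, `AgentAction` unpacks like a tuple while still exposing named fields:

```python
from langchain.schema import AgentAction

action = AgentAction(
    tool="Search",
    tool_input="example query",
    log='Action: Search\nAction Input: "example query"',
)
tool, tool_input, log = action  # tuple unpacking works as usual
assert action.tool == tool
```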

@ -1,6 +1,6 @@
[tool.poetry]
name = "langchain"
version = "0.0.32"
version = "0.0.34"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"

@ -0,0 +1,43 @@
# flake8: noqa E501
"""Test LLMCheckerChain functionality."""
import pytest
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_checker.prompt import (
_CHECK_ASSERTIONS_TEMPLATE,
_CREATE_DRAFT_ANSWER_TEMPLATE,
_LIST_ASSERTIONS_TEMPLATE,
_REVISED_ANSWER_TEMPLATE,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.fixture
def fake_llm_checker_chain() -> LLMCheckerChain:
"""Fake LLMCheckerChain for testing."""
queries = {
_CREATE_DRAFT_ANSWER_TEMPLATE.format(
question="Which mammal lays the biggest eggs?"
): "I don't know which mammal layers the biggest eggs.",
_LIST_ASSERTIONS_TEMPLATE.format(
statement="I don't know which mammal layers the biggest eggs.",
): "1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
_CHECK_ASSERTIONS_TEMPLATE.format(
assertions="1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
): "1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
_REVISED_ANSWER_TEMPLATE.format(
checked_assertions="1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
question="Which mammal lays the biggest eggs?",
): "I still don't know.",
}
fake_llm = FakeLLM(queries=queries)
return LLMCheckerChain(llm=fake_llm, input_key="q", output_key="a")
def test_simple_question(fake_llm_checker_chain: LLMCheckerChain) -> None:
"""Test simple question that should not need python."""
question = "Which mammal lays the biggest eggs?"
output = fake_llm_checker_chain.run(question)
assert output == "I still don't know."

@ -3,7 +3,8 @@
import sys
from io import StringIO
from langchain.input import ChainedInput, get_color_mapping
from langchain.agents.input import ChainedInput
from langchain.input import get_color_mapping
def test_chained_input_not_verbose() -> None:
@ -18,11 +19,11 @@ def test_chained_input_not_verbose() -> None:
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("bar")
chained_input.add_observation("bar", "1", "2", None)
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == ""
assert chained_input.input == "foobar"
assert chained_input.input == "foo\n1bar\n2"
def test_chained_input_verbose() -> None:
@ -37,19 +38,19 @@ def test_chained_input_verbose() -> None:
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("bar")
chained_input.add_observation("bar", "1", "2", None)
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "bar"
assert chained_input.input == "foobar"
assert output == "\n1bar\n2"
assert chained_input.input == "foo\n1bar\n2"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("baz", color="blue")
chained_input.add_observation("baz", "3", "4", "blue")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "\x1b[36;1m\x1b[1;3mbaz\x1b[0m"
assert chained_input.input == "foobarbaz"
assert output == "\n3\x1b[36;1m\x1b[1;3mbaz\x1b[0m\n4"
assert chained_input.input == "foo\n1bar\n2\n3baz\n4"
def test_get_color_mapping() -> None:
