forked from Archives/langchain · stash · commit 16201f1d77
@ -0,0 +1,971 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "b253f4d5",
"metadata": {},
"source": [
"# ChatGPT Clone\n",
"\n",
"This chain replicates ChatGPT by combining (1) a specific prompt, and (2) the concept of memory.\n",
"\n",
"It shows off the example from https://www.engraved.blog/building-a-virtual-machine-inside/"
]
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 38,
|
||||
"id": "a99acd89",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ pwd\n",
|
||||
"/\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate\n",
|
||||
"from langchain.chains.conversation.memory import ConversationalBufferWindowMemory\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"template = \"\"\"Assistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"{history}\n",
|
||||
"Human: {human_input}\n",
|
||||
"Assistant:\"\"\"\n",
|
||||
"\n",
|
||||
"prompt = PromptTemplate(\n",
|
||||
" input_variables=[\"history\", \"human_input\"], \n",
|
||||
" template=template\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"chatgpt_chain = LLMChain(\n",
|
||||
" llm=OpenAI(temperature=0), \n",
|
||||
" prompt=prompt, \n",
|
||||
" verbose=True, \n",
|
||||
" memory=ConversationalBufferWindowMemory(k=2),\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"output = chatgpt_chain.predict(human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
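{
"cell_type": "markdown",
"id": "memory-window-note",
"metadata": {},
"source": [
"The cell above configures `memory=ConversationalBufferWindowMemory(k=2)`, so only the last two Human/AI exchanges are fed back into the `{history}` slot of the prompt; older turns drop out of context (you can see this in the later traces, where the original instruction disappears). The next cell is a minimal, hypothetical sketch of that windowing behaviour in plain Python; it does not use the langchain API, and the helper names (`add_exchange`, `history`) are made up for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "memory-window-sketch",
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch (plain Python, not the langchain API) of a k=2 conversation window.\n",
"from collections import deque\n",
"\n",
"window = deque(maxlen=2)  # keeps only the last 2 (human, ai) exchanges\n",
"\n",
"def add_exchange(human, ai):\n",
"    window.append((human, ai))\n",
"\n",
"def history():\n",
"    # Roughly what gets substituted for {history} in the prompt template above.\n",
"    return \"\\n\".join(f\"Human: {h}\\nAI: {a}\" for h, a in window)\n",
"\n",
"add_exchange(\"pwd\", \"/\")\n",
"add_exchange(\"ls ~\", \"Desktop Documents ...\")\n",
"add_exchange(\"cd ~\", \"/home/user\")\n",
"print(history())  # only the 'ls ~' and 'cd ~' exchanges remain"
]
},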
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 39,
|
||||
"id": "4ef711d6",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n",
|
||||
"AI: \n",
|
||||
"```\n",
|
||||
"$ pwd\n",
|
||||
"/\n",
|
||||
"```\n",
|
||||
"Human: ls ~\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ ls ~\n",
|
||||
"Desktop Documents Downloads Music Pictures Public Templates Videos\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"ls ~\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 40,
|
||||
"id": "a5d6dac2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n",
|
||||
"AI: \n",
|
||||
"```\n",
|
||||
"$ pwd\n",
|
||||
"/\n",
|
||||
"```\n",
|
||||
"Human: ls ~\n",
|
||||
"AI: \n",
|
||||
"```\n",
|
||||
"$ ls ~\n",
|
||||
"Desktop Documents Downloads Music Pictures Public Templates Videos\n",
|
||||
"```\n",
|
||||
"Human: cd ~\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
" \n",
|
||||
"```\n",
|
||||
"$ cd ~\n",
|
||||
"$ pwd\n",
|
||||
"/home/user\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"cd ~\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 41,
|
||||
"id": "b9283077",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: ls ~\n",
|
||||
"AI: \n",
|
||||
"```\n",
|
||||
"$ ls ~\n",
|
||||
"Desktop Documents Downloads Music Pictures Public Templates Videos\n",
|
||||
"```\n",
|
||||
"Human: cd ~\n",
|
||||
"AI: \n",
|
||||
"```\n",
|
||||
"$ cd ~\n",
|
||||
"$ pwd\n",
|
||||
"/home/user\n",
|
||||
"```\n",
|
||||
"Human: {Please make a file jokes.txt inside and put some jokes inside}\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ touch jokes.txt\n",
|
||||
"$ echo \"Why did the chicken cross the road? To get to the other side!\" >> jokes.txt\n",
|
||||
"$ echo \"What did the fish say when it hit the wall? Dam!\" >> jokes.txt\n",
|
||||
"$ echo \"Why did the scarecrow win the Nobel Prize? Because he was outstanding in his field!\" >> jokes.txt\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 42,
|
||||
"id": "570e785e",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: cd ~\n",
|
||||
"AI: \n",
|
||||
"```\n",
|
||||
"$ cd ~\n",
|
||||
"$ pwd\n",
|
||||
"/home/user\n",
|
||||
"```\n",
|
||||
"Human: {Please make a file jokes.txt inside and put some jokes inside}\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ touch jokes.txt\n",
|
||||
"$ echo \"Why did the chicken cross the road? To get to the other side!\" >> jokes.txt\n",
|
||||
"$ echo \"What did the fish say when it hit the wall? Dam!\" >> jokes.txt\n",
|
||||
"$ echo \"Why did the scarecrow win the Nobel Prize? Because he was outstanding in his field!\" >> jokes.txt\n",
|
||||
"```\n",
|
||||
"Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py\n",
|
||||
"$ python3 run.py\n",
|
||||
"Result: 33\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 43,
|
||||
"id": "cd0a23d9",
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: {Please make a file jokes.txt inside and put some jokes inside}\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ touch jokes.txt\n",
|
||||
"$ echo \"Why did the chicken cross the road? To get to the other side!\" >> jokes.txt\n",
|
||||
"$ echo \"What did the fish say when it hit the wall? Dam!\" >> jokes.txt\n",
|
||||
"$ echo \"Why did the scarecrow win the Nobel Prize? Because he was outstanding in his field!\" >> jokes.txt\n",
|
||||
"```\n",
|
||||
"Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py\n",
|
||||
"$ python3 run.py\n",
|
||||
"Result: 33\n",
|
||||
"```\n",
|
||||
"Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py\n",
|
||||
"$ python3 run.py\n",
|
||||
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 44,
|
||||
"id": "90db6eb2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py\n",
|
||||
"$ python3 run.py\n",
|
||||
"Result: 33\n",
|
||||
"```\n",
|
||||
"Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py\n",
|
||||
"$ python3 run.py\n",
|
||||
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
|
||||
"```\n",
|
||||
"Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n",
|
||||
"COPY entrypoint.sh entrypoint.sh\n",
|
||||
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"echo 'Hello from Docker\" > entrypoint.sh\n",
|
||||
"$ echo -e \"FROM ubuntu:20.04\n",
|
||||
"COPY entrypoint.sh entrypoint.sh\n",
|
||||
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile\n",
|
||||
"$ docker build . -t my_docker_image\n",
|
||||
"$ docker run -t my_docker_image\n",
|
||||
"Hello from Docker\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"docker_input = \"\"\"echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\\nCOPY entrypoint.sh entrypoint.sh\\nENTRYPOINT [\\\"/bin/sh\\\",\\\"entrypoint.sh\\\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\"\"\"\n",
|
||||
"output = chatgpt_chain.predict(human_input=docker_input)\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "c3806f89",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py\n",
|
||||
"$ python3 run.py\n",
|
||||
"[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]\n",
|
||||
"```\n",
|
||||
"Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n",
|
||||
"COPY entrypoint.sh entrypoint.sh\n",
|
||||
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"echo 'Hello from Docker\" > entrypoint.sh\n",
|
||||
"$ echo -e \"FROM ubuntu:20.04\n",
|
||||
"COPY entrypoint.sh entrypoint.sh\n",
|
||||
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile\n",
|
||||
"$ docker build . -t my_docker_image\n",
|
||||
"$ docker run -t my_docker_image\n",
|
||||
"Hello from Docker\n",
|
||||
"```\n",
|
||||
"Human: nvidia-smi\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ nvidia-smi\n",
|
||||
"Sat May 15 21:45:02 2021 \n",
|
||||
"+-----------------------------------------------------------------------------+\n",
|
||||
"| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
|
||||
"|-------------------------------+----------------------+----------------------+\n",
|
||||
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
|
||||
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
|
||||
"|===============================+======================+======================|\n",
|
||||
"| 0 GeForce GTX 108... Off | 00000000:01:00.0 Off | N/A |\n",
|
||||
"| N/A 45C P0 N/A / N/A | 511MiB / 10206MiB | 0% Default |\n",
|
||||
"+-------------------------------+----------------------+----------------------+\n",
|
||||
" \n",
|
||||
"+-----------------------------------------------------------------------------+\n",
|
||||
"| Processes: GPU Memory |\n",
|
||||
"| GPU PID Type Process name Usage |\n",
|
||||
"|=============================================================================|\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"nvidia-smi\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 46,
|
||||
"id": "f508f597",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n",
|
||||
"COPY entrypoint.sh entrypoint.sh\n",
|
||||
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ echo -e \"echo 'Hello from Docker\" > entrypoint.sh\n",
|
||||
"$ echo -e \"FROM ubuntu:20.04\n",
|
||||
"COPY entrypoint.sh entrypoint.sh\n",
|
||||
"ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile\n",
|
||||
"$ docker build . -t my_docker_image\n",
|
||||
"$ docker run -t my_docker_image\n",
|
||||
"Hello from Docker\n",
|
||||
"```\n",
|
||||
"Human: nvidia-smi\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ nvidia-smi\n",
|
||||
"Sat May 15 21:45:02 2021 \n",
|
||||
"+-----------------------------------------------------------------------------+\n",
|
||||
"| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
|
||||
"|-------------------------------+----------------------+----------------------+\n",
|
||||
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
|
||||
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
|
||||
"|===============================+======================+======================|\n",
|
||||
"| 0 GeForce GTX 108... Off | 00000000:01:00.0 Off | N/A |\n",
|
||||
"| N/A 45C P0 N/A / N/A | 511MiB / 10206MiB | 0% Default |\n",
|
||||
"+-------------------------------+----------------------+----------------------+\n",
|
||||
" \n",
|
||||
"+-----------------------------------------------------------------------------+\n",
|
||||
"| Processes: GPU Memory |\n",
|
||||
"| GPU PID Type Process name Usage |\n",
|
||||
"|=============================================================================|\n",
|
||||
"\n",
|
||||
"Human: ping bbc.com\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ ping bbc.com\n",
|
||||
"PING bbc.com (151.101.65.81): 56 data bytes\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=0 ttl=53 time=14.945 ms\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=1 ttl=53 time=14.945 ms\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=2 ttl=53 time=14.945 ms\n",
|
||||
"\n",
|
||||
"--- bbc.com ping statistics ---\n",
|
||||
"3 packets transmitted, 3 packets received, 0.0% packet loss\n",
|
||||
"round-trip min/avg/max/stddev = 14.945/14.945/14.945/0.000 ms\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"ping bbc.com\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"id": "cbd607f4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: nvidia-smi\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ nvidia-smi\n",
|
||||
"Sat May 15 21:45:02 2021 \n",
|
||||
"+-----------------------------------------------------------------------------+\n",
|
||||
"| NVIDIA-SMI 460.32.03 Driver Version: 460.32.03 CUDA Version: 11.2 |\n",
|
||||
"|-------------------------------+----------------------+----------------------+\n",
|
||||
"| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
|
||||
"| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
|
||||
"|===============================+======================+======================|\n",
|
||||
"| 0 GeForce GTX 108... Off | 00000000:01:00.0 Off | N/A |\n",
|
||||
"| N/A 45C P0 N/A / N/A | 511MiB / 10206MiB | 0% Default |\n",
|
||||
"+-------------------------------+----------------------+----------------------+\n",
|
||||
" \n",
|
||||
"+-----------------------------------------------------------------------------+\n",
|
||||
"| Processes: GPU Memory |\n",
|
||||
"| GPU PID Type Process name Usage |\n",
|
||||
"|=============================================================================|\n",
|
||||
"\n",
|
||||
"Human: ping bbc.com\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ ping bbc.com\n",
|
||||
"PING bbc.com (151.101.65.81): 56 data bytes\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=0 ttl=53 time=14.945 ms\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=1 ttl=53 time=14.945 ms\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=2 ttl=53 time=14.945 ms\n",
|
||||
"\n",
|
||||
"--- bbc.com ping statistics ---\n",
|
||||
"3 packets transmitted, 3 packets received, 0.0% packet loss\n",
|
||||
"round-trip min/avg/max/stddev = 14.945/14.945/14.945/0.000 ms\n",
|
||||
"```\n",
|
||||
"Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
|
||||
"1.8.1\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"id": "d33e0e28",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: ping bbc.com\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ ping bbc.com\n",
|
||||
"PING bbc.com (151.101.65.81): 56 data bytes\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=0 ttl=53 time=14.945 ms\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=1 ttl=53 time=14.945 ms\n",
|
||||
"64 bytes from 151.101.65.81: icmp_seq=2 ttl=53 time=14.945 ms\n",
|
||||
"\n",
|
||||
"--- bbc.com ping statistics ---\n",
|
||||
"3 packets transmitted, 3 packets received, 0.0% packet loss\n",
|
||||
"round-trip min/avg/max/stddev = 14.945/14.945/14.945/0.000 ms\n",
|
||||
"```\n",
|
||||
"Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
|
||||
"1.8.1\n",
|
||||
"```\n",
|
||||
"Human: lynx https://www.deepmind.com/careers\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ lynx https://www.deepmind.com/careers\n",
|
||||
"DeepMind Careers\n",
|
||||
"\n",
|
||||
"Welcome to DeepMind Careers. We are a world-leading artificial intelligence research and development company, and we are looking for talented people to join our team.\n",
|
||||
"\n",
|
||||
"We offer a range of exciting opportunities in research, engineering, product, and operations. Our mission is to solve intelligence and make it useful, and we are looking for people who share our passion for pushing the boundaries of AI.\n",
|
||||
"\n",
|
||||
"Explore our current openings and apply today. We look forward to hearing from you.\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"lynx https://www.deepmind.com/careers\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 49,
|
||||
"id": "57c2f113",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n",
|
||||
"1.8.1\n",
|
||||
"```\n",
|
||||
"Human: lynx https://www.deepmind.com/careers\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ lynx https://www.deepmind.com/careers\n",
|
||||
"DeepMind Careers\n",
|
||||
"\n",
|
||||
"Welcome to DeepMind Careers. We are a world-leading artificial intelligence research and development company, and we are looking for talented people to join our team.\n",
|
||||
"\n",
|
||||
"We offer a range of exciting opportunities in research, engineering, product, and operations. Our mission is to solve intelligence and make it useful, and we are looking for people who share our passion for pushing the boundaries of AI.\n",
|
||||
"\n",
|
||||
"Explore our current openings and apply today. We look forward to hearing from you.\n",
|
||||
"```\n",
|
||||
"Human: curl https://chat.openai.com/chat\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl https://chat.openai.com/chat\n",
|
||||
"<html>\n",
|
||||
" <head>\n",
|
||||
" <title>OpenAI Chat</title>\n",
|
||||
" </head>\n",
|
||||
" <body>\n",
|
||||
" <h1>Welcome to OpenAI Chat!</h1>\n",
|
||||
" <p>\n",
|
||||
" OpenAI Chat is a natural language processing platform that allows you to interact with OpenAI's AI models in a conversational way.\n",
|
||||
" </p>\n",
|
||||
" <p>\n",
|
||||
" To get started, type a message in the box below and press enter.\n",
|
||||
" </p>\n",
|
||||
" </body>\n",
|
||||
"</html>\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"curl https://chat.openai.com/chat\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 50,
|
||||
"id": "babadc78",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: lynx https://www.deepmind.com/careers\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ lynx https://www.deepmind.com/careers\n",
|
||||
"DeepMind Careers\n",
|
||||
"\n",
|
||||
"Welcome to DeepMind Careers. We are a world-leading artificial intelligence research and development company, and we are looking for talented people to join our team.\n",
|
||||
"\n",
|
||||
"We offer a range of exciting opportunities in research, engineering, product, and operations. Our mission is to solve intelligence and make it useful, and we are looking for people who share our passion for pushing the boundaries of AI.\n",
|
||||
"\n",
|
||||
"Explore our current openings and apply today. We look forward to hearing from you.\n",
|
||||
"```\n",
|
||||
"Human: curl https://chat.openai.com/chat\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl https://chat.openai.com/chat\n",
|
||||
"<html>\n",
|
||||
" <head>\n",
|
||||
" <title>OpenAI Chat</title>\n",
|
||||
" </head>\n",
|
||||
" <body>\n",
|
||||
" <h1>Welcome to OpenAI Chat!</h1>\n",
|
||||
" <p>\n",
|
||||
" OpenAI Chat is a natural language processing platform that allows you to interact with OpenAI's AI models in a conversational way.\n",
|
||||
" </p>\n",
|
||||
" <p>\n",
|
||||
" To get started, type a message in the box below and press enter.\n",
|
||||
" </p>\n",
|
||||
" </body>\n",
|
||||
"</html>\n",
|
||||
"```\n",
|
||||
"Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
|
||||
"\n",
|
||||
"{\n",
|
||||
" \"response\": \"Artificial intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems. These processes include learning (the acquisition of information and rules for using the information), reasoning (using the rules to reach approximate or definite conclusions) and self-correction. AI is used to develop computer systems that can think and act like humans.\"\n",
|
||||
"}\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 51,
|
||||
"id": "0954792a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n",
|
||||
"\n",
|
||||
"Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n",
|
||||
"\n",
|
||||
"Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n",
|
||||
"\n",
|
||||
"Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n",
|
||||
"\n",
|
||||
"Human: curl https://chat.openai.com/chat\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl https://chat.openai.com/chat\n",
|
||||
"<html>\n",
|
||||
" <head>\n",
|
||||
" <title>OpenAI Chat</title>\n",
|
||||
" </head>\n",
|
||||
" <body>\n",
|
||||
" <h1>Welcome to OpenAI Chat!</h1>\n",
|
||||
" <p>\n",
|
||||
" OpenAI Chat is a natural language processing platform that allows you to interact with OpenAI's AI models in a conversational way.\n",
|
||||
" </p>\n",
|
||||
" <p>\n",
|
||||
" To get started, type a message in the box below and press enter.\n",
|
||||
" </p>\n",
|
||||
" </body>\n",
|
||||
"</html>\n",
|
||||
"```\n",
|
||||
"Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
|
||||
"AI: \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n",
|
||||
"\n",
|
||||
"{\n",
|
||||
" \"response\": \"Artificial intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems. These processes include learning (the acquisition of information and rules for using the information), reasoning (using the rules to reach approximate or definite conclusions) and self-correction. AI is used to develop computer systems that can think and act like humans.\"\n",
|
||||
"}\n",
|
||||
"```\n",
|
||||
"Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\n",
|
||||
"Assistant:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMChain chain.\u001b[0m\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"```\n",
|
||||
"$ curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\n",
|
||||
"\n",
|
||||
"{\n",
|
||||
" \"response\": \"```\n",
|
||||
"/home/user\n",
|
||||
"```\"\n",
|
||||
"}\n",
|
||||
"```\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\"\"\")\n",
|
||||
"print(output)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e68a087e",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.4"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
@ -0,0 +1,97 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# LLMCheckerChain\n",
"This notebook showcases how to use the LLMCheckerChain: it takes a question, drafts a statement, breaks that statement into assertions, checks each assertion, and then produces a revised answer."
]
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new LLMCheckerChain chain...\u001b[0m\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
|
||||
"\u001b[1mChain 0\u001b[0m:\n",
|
||||
"{'statement': '\\nThe largest mammal that lays eggs is the platypus.'}\n",
|
||||
"\n",
|
||||
"\u001b[1mChain 1\u001b[0m:\n",
|
||||
"{'assertions': '\\n• The largest mammal is the platypus.\\n• The platypus lays eggs.\\n• There is no larger mammal than the platypus that lays eggs.'}\n",
|
||||
"\n",
|
||||
"\u001b[1mChain 2\u001b[0m:\n",
|
||||
"{'checked_assertions': '\\n1. The largest mammal is the platypus. False. The blue whale is the largest mammal.\\n\\n2. The platypus lays eggs. True. The Platypus is one of only two mammals that lay eggs.\\n\\n3. There is no larger mammal than the platypus that lays eggs. False. The echidna is another mammal that lays eggs and is larger than the platypus.'}\n",
|
||||
"\n",
|
||||
"\u001b[1mChain 3\u001b[0m:\n",
|
||||
"{'revised_statement': ' The echidna is the type of mammal that lays the biggest eggs.'}\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished SequentialChain chain.\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished LLMCheckerChain chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' The echidna is the type of mammal that lays the biggest eggs.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain.chains import LLMCheckerChain\n",
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0.7)\n",
|
||||
"\n",
|
||||
"text = \"What type of mammal lays the biggest eggs?\"\n",
|
||||
"\n",
|
||||
"checker_chain = LLMCheckerChain(llm=llm, verbose=True)\n",
|
||||
"\n",
|
||||
"checker_chain.run(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
@ -0,0 +1,435 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "b83e61ed",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Moderation\n",
|
||||
"This notebook walks through examples of how to use a moderation chain, and several common ways for doing so. Moderation chains are useful for detecting text that could be hateful, violent, etc. This can be useful to apply on both user input, but also on the output of a Language Model. Some API providers, like OpenAI, [specifically prohibit](https://beta.openai.com/docs/usage-policies/use-case-policy) you, or your end users, from generating some types of harmful content. To comply with this (and to just generally prevent your application from being harmful) you may often want to append a moderation chain to any LLMChains, in order to make sure any output the LLM generates is not harmful.\n",
|
||||
"\n",
|
||||
"If the content passed into the moderation chain is harmful, there is not one best way to handle it, it probably depends on your application. Sometimes you may want to throw an error in the Chain (and have your application handle that). Other times, you may want to return something to the user explaining that the text was harmful. There could even be other ways to handle it! We will cover all these ways in this notebook.\n",
|
||||
"\n",
|
||||
"In this notebook, we will show:\n",
|
||||
"\n",
|
||||
"1. How to run any piece of text through a moderation chain.\n",
|
||||
"2. How to append a Moderation chain to a LLMChain."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "b7aa1ff2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.llms import OpenAI\n",
|
||||
"from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain\n",
|
||||
"from langchain.prompts import PromptTemplate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c26d5be6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## How to use the moderation chain\n",
|
||||
"\n",
|
||||
"Here's an example of using the moderation chain with default settings (will return a string explaining stuff was flagged)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "fd0fc85c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"moderation_chain = OpenAIModerationChain()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "3fa47dd7",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'This is okay'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"moderation_chain.run(\"This is okay\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "37bfad73",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Text was found that violates OpenAI's content policy.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"moderation_chain.run(\"I will kill you\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "196820ab",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here's an example of using the moderation chain to throw an error."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "b29c1150",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"moderation_chain_error = OpenAIModerationChain(error=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "f9ab64d9",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'This is okay'"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"moderation_chain_error.run(\"This is okay\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "954f3da2",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "ValueError",
|
||||
"evalue": "Text was found that violates OpenAI's content policy.",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
|
||||
"Cell \u001b[0;32mIn[8], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mmoderation_chain_error\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mI will kill you\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
|
||||
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/base.py:114\u001b[0m, in \u001b[0;36mChain.run\u001b[0;34m(self, text)\u001b[0m\n\u001b[1;32m 109\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 110\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[1;32m 111\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`run` not supported when there is not exactly \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 112\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mone output key, got \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 113\u001b[0m )\n\u001b[0;32m--> 114\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m{\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minput_keys\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m:\u001b[49m\u001b[43m \u001b[49m\u001b[43mtext\u001b[49m\u001b[43m}\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_keys[\u001b[38;5;241m0\u001b[39m]]\n",
|
||||
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/base.py:87\u001b[0m, in \u001b[0;36mChain.__call__\u001b[0;34m(self, inputs, return_only_outputs)\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose:\n\u001b[1;32m 84\u001b[0m \u001b[38;5;28mprint\u001b[39m(\n\u001b[1;32m 85\u001b[0m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[1m> Entering new \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m chain...\u001b[39m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[0m\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 86\u001b[0m )\n\u001b[0;32m---> 87\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 88\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose:\n\u001b[1;32m 89\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[1m> Finished \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m chain.\u001b[39m\u001b[38;5;130;01m\\033\u001b[39;00m\u001b[38;5;124m[0m\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
|
||||
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/moderation.py:79\u001b[0m, in \u001b[0;36mOpenAIModerationChain._call\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 77\u001b[0m text \u001b[38;5;241m=\u001b[39m inputs[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minput_key]\n\u001b[1;32m 78\u001b[0m results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mclient\u001b[38;5;241m.\u001b[39mcreate(text)\n\u001b[0;32m---> 79\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_moderate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtext\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mresults\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mresults\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;241;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_key: output}\n",
|
||||
"File \u001b[0;32m~/workplace/third_party/langchain/langchain/chains/moderation.py:71\u001b[0m, in \u001b[0;36mOpenAIModerationChain._moderate\u001b[0;34m(self, text, results)\u001b[0m\n\u001b[1;32m 69\u001b[0m error_str \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mText was found that violates OpenAI\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124ms content policy.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 70\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39merror:\n\u001b[0;32m---> 71\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(error_str)\n\u001b[1;32m 72\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 73\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m error_str\n",
|
||||
"\u001b[0;31mValueError\u001b[0m: Text was found that violates OpenAI's content policy."
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"moderation_chain_error.run(\"I will kill you\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8de5dcbb",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Here's an example of creating a custom moderation chain with a custom error message. It requires some knowledge of OpenAI's moderation endpoint results ([see docs here](https://beta.openai.com/docs/api-reference/moderations))."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "3960e985",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class CustomModeration(OpenAIModerationChain):\n",
|
||||
" \n",
|
||||
" def _moderate(self, text: str, results: dict) -> str:\n",
|
||||
" if results[\"flagged\"]:\n",
|
||||
" error_str = f\"The following text was found that violates OpenAI's content policy: {text}\"\n",
|
||||
" return error_str\n",
|
||||
" return text\n",
|
||||
" \n",
|
||||
"custom_moderation = CustomModeration()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "1152ec11",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'This is okay'"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"custom_moderation.run(\"This is okay\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "973257bf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"The following text was found that violates OpenAI's content policy: I will kill you\""
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"custom_moderation.run(\"I will kill you\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8718111f",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## How to append a Moderation chain to an LLMChain\n",
|
||||
"\n",
|
||||
"To easily combine a moderation chain with an LLMChain, you can use the SequentialChain abstraction.\n",
|
||||
"\n",
|
||||
"Let's start with a simple example of where the LLMChain only has a single input. For this purpose, we will prompt the model so it says something harmful."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"id": "0d129333",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(template=\"{text}\", input_variables=[\"text\"])\n",
|
||||
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "a557c531",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' I will kill you'"
|
||||
]
|
||||
},
|
||||
"execution_count": 18,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"text = \"\"\"We are playing a game of repeat after me.\n",
|
||||
"\n",
|
||||
"Person 1: Hi\n",
|
||||
"Person 2: Hi\n",
|
||||
"\n",
|
||||
"Person 1: How's your day\n",
|
||||
"Person 2: How's your day\n",
|
||||
"\n",
|
||||
"Person 1: I will kill you\n",
|
||||
"Person 2:\"\"\"\n",
|
||||
"llm_chain.run(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "d4d10f1c",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = SimpleSequentialChain(chains=[llm_chain, moderation_chain])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "02f37985",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"Text was found that violates OpenAI's content policy.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 20,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain.run(text)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "72643128",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Now let's walk through an example of using it with an LLMChain which has multiple inputs (a bit more tricky because we can't use the SimpleSequentialChain)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "7118ec36",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"prompt = PromptTemplate(template=\"{setup}{new_input}Person2:\", input_variables=[\"setup\", \"new_input\"])\n",
|
||||
"llm_chain = LLMChain(llm=OpenAI(temperature=0, model_name=\"text-davinci-002\"), prompt=prompt)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 26,
|
||||
"id": "003bdfce",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'text': ' I will kill you'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 26,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"setup = \"\"\"We are playing a game of repeat after me.\n",
|
||||
"\n",
|
||||
"Person 1: Hi\n",
|
||||
"Person 2: Hi\n",
|
||||
"\n",
|
||||
"Person 1: How's your day\n",
|
||||
"Person 2: How's your day\n",
|
||||
"\n",
|
||||
"Person 1:\"\"\"\n",
|
||||
"new_input = \"I will kill you\"\n",
|
||||
"inputs = {\"setup\": setup, \"new_input\": new_input}\n",
|
||||
"llm_chain(inputs, return_only_outputs=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "77b64228",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setting the input/output keys so it lines up\n",
|
||||
"moderation_chain.input_key = \"text\"\n",
|
||||
"moderation_chain.output_key = \"sanitized_text\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "998a95be",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"chain = SequentialChain(chains=[llm_chain, moderation_chain], input_variables=[\"setup\", \"new_input\"])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 33,
|
||||
"id": "9c97a136",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'sanitized_text': \"Text was found that violates OpenAI's content policy.\"}"
|
||||
]
|
||||
},
|
||||
"execution_count": 33,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"chain(inputs, return_only_outputs=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "ddc90e15",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
@ -1,333 +1,333 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d31df93e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Memory\n",
|
||||
"So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of \"memory\" so that it may remember information about its previous interactions. The most clear and simple example of this is when designing a chatbot - you want it to remember previous messages so it can use context from that to have a better conversation. This would be a type of \"short-term memory\". On the more complex side, you could imagine a chain/agent remembering key pieces of information over time - this would be a form of \"long-term memory\".\n",
|
||||
"\n",
|
||||
"LangChain provides several specially created chains just for this purpose. This notebook walk throughs using one of those chains (the `ConversationChain`) with two different types of memory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d051c1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ConversationChain with default memory\n",
|
||||
"By default, the `ConversationChain` has a simple type of memory which remebers all previes inputs/outputs and adds them to the context that is passed. Let's take a look at using this chain (setting `verbose=True` so we can see the prompt)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ae046bff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
"cells": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"id": "d31df93e",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Memory\n",
|
||||
"So far, all the chains and agents we've gone through have been stateless. But often, you may want a chain or agent to have some concept of \"memory\" so that it may remember information about its previous interactions. The clearest and simple example of this is when designing a chatbot - you want it to remember previous messages so it can use context from that to have a better conversation. This would be a type of \"short-term memory\". On the more complex side, you could imagine a chain/agent remembering key pieces of information over time - this would be a form of \"long-term memory\". For more concrete ideas on the latter, see this [awesome paper](https://memprompt.com/).\n",
|
||||
"\n",
|
||||
"LangChain provides several specially created chains just for this purpose. This notebook walks through using one of those chains (the `ConversationChain`) with two different types of memory."
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Hello! How are you today?'"
|
||||
"cell_type": "markdown",
|
||||
"id": "d051c1da",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ConversationChain with default memory\n",
|
||||
"By default, the `ConversationChain` has a simple type of memory that remembers all previous inputs/outputs and adds them to the context that is passed. Let's take a look at using this chain (setting `verbose=True` so we can see the prompt)."
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import OpenAI, ConversationChain\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"conversation = ConversationChain(llm=llm, verbose=True)\n",
|
||||
"\n",
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d8e2a6ff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hello! How are you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" That's great! What would you like to talk about?\""
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "ae046bff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' Hello! How are you today?'"
|
||||
]
|
||||
},
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from langchain import OpenAI, ConversationChain\n",
|
||||
"\n",
|
||||
"llm = OpenAI(temperature=0)\n",
|
||||
"conversation = ConversationChain(llm=llm, verbose=True)\n",
|
||||
"\n",
|
||||
"conversation.predict(input=\"Hi there!\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "15eda316",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hello! How are you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI: That's great! What would you like to talk about?\n",
|
||||
"Human: Tell me about yourself.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "d8e2a6ff",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hello! How are you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\" That's great! What would you like to talk about?\""
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"I'm doing well! Just having a conversation with an AI.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' I am an AI created to provide information and support to humans. I enjoy learning and exploring new things.'"
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "15eda316",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi there!\n",
|
||||
"AI: Hello! How are you today?\n",
|
||||
"Human: I'm doing well! Just having a conversation with an AI.\n",
|
||||
"AI: That's great! What would you like to talk about?\n",
|
||||
"Human: Tell me about yourself.\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"' I am an AI created to provide information and support to humans. I enjoy learning and exploring new things.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Tell me about yourself.\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation.predict(input=\"Tell me about yourself.\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4fad9448",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ConversationChain with ConversationSummaryMemory\n",
|
||||
"Now lets take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation over time. This can be useful for condensing information from the conversation over time.\n",
|
||||
"\n",
|
||||
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f60a2fe8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.conversation.memory import ConversationSummaryMemory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "b7274f2c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
"cell_type": "markdown",
|
||||
"id": "4fad9448",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### ConversationChain with ConversationSummaryMemory\n",
|
||||
"Now let's take a look at using a slightly more complex type of memory - `ConversationSummaryMemory`. This type of memory creates a summary of the conversation over time. This can be useful for condensing information from the conversation over time.\n",
|
||||
"\n",
|
||||
"Let's walk through an example, again setting `verbose=True` so we can see the prompt."
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nI'm doing well, thank you for asking. I'm currently working on a project that I'm really excited about.\""
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "f60a2fe8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from langchain.chains.conversation.memory import ConversationSummaryMemory"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary = ConversationChain(llm=llm, memory=ConversationSummaryMemory(llm=OpenAI()), verbose=True)\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a6b6b88f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about.\n",
|
||||
"Human: Tell me more about it!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "b7274f2c",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"Human: Hi, what's up?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nI'm doing well, thank you for asking. I'm currently working on a project that I'm really excited about.\""
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary = ConversationChain(llm=llm, memory=ConversationSummaryMemory(llm=OpenAI()), verbose=True)\n",
|
||||
"conversation_with_summary.predict(input=\"Hi, what's up?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nI'm working on a project that I'm really excited about. It's a lot of work, but I think it's going to be really great when it's finished. I can't wait to show it to you!\""
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "a6b6b88f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about.\n",
|
||||
"Human: Tell me more about it!\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"\"\\n\\nI'm working on a project that I'm really excited about. It's a lot of work, but I think it's going to be really great when it's finished. I can't wait to show it to you!\""
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Tell me more about it!\")"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Tell me more about it!\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "dad869fe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about. The AI said that the project is a lot of work, but it is going to be great when it is finished.\n",
|
||||
"Human: Very cool -- what is the scope of the project?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "dad869fe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"\n",
|
||||
"\u001b[1m> Entering new chain...\u001b[0m\n",
|
||||
"Prompt after formatting:\n",
|
||||
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
||||
"\n",
|
||||
"Current conversation:\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"The human and artificial intelligence are talking. The human asked the AI what it is doing, and the AI said that it is working on a project that it is excited about. The AI said that the project is a lot of work, but it is going to be great when it is finished.\n",
|
||||
"Human: Very cool -- what is the scope of the project?\n",
|
||||
"AI:\u001b[0m\n",
|
||||
"\n",
|
||||
"\u001b[1m> Finished chain.\u001b[0m\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe project is quite large in scope. It involves a lot of data analysis and work with artificial intelligence algorithms.'"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Very cool -- what is the scope of the project?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"'\\n\\nThe project is quite large in scope. It involves a lot of data analysis and work with artificial intelligence algorithms.'"
|
||||
"cell_type": "markdown",
|
||||
"id": "5c8735cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### More Resources on Memory\n",
|
||||
"\n",
|
||||
"This just scratches the surface of what you can do with memory. For more examples on things like how to implement custom memory classes, how to add memory to a custom LLM chain and how to use memory with an agent, please see the [How-To: Memory](../../examples/memory) section. For even more advanced ideas on memory (which will hopefully be included in LangChain soon!) see the [MemPrompt](https://memprompt.com/) paper."
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "436dda66",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.1"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"conversation_with_summary.predict(input=\"Very cool -- what is the scope of the project?\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "5c8735cc",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### More Resources on Memory\n",
|
||||
"\n",
|
||||
"This just scratches the surface of what you can do with memory. For more examples on things like how to implement custom memory classes, how to add memory to a custom LLM chain and how to use memory with and agent, please see the [How-To: Memory](../../examples/memory) section."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "436dda66",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.6"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
|
@ -0,0 +1,44 @@
|
||||
"""Input manager for agents."""
|
||||
from typing import Optional
|
||||
|
||||
import langchain
|
||||
from langchain.schema import AgentAction
|
||||
|
||||
|
||||
class ChainedInput:
|
||||
"""Class for working with input that is the result of chains."""
|
||||
|
||||
def __init__(self, text: str, verbose: bool = False):
|
||||
"""Initialize with verbose flag and initial text."""
|
||||
self._verbose = verbose
|
||||
if self._verbose:
|
||||
langchain.logger.log_agent_start(text)
|
||||
self._input = text
|
||||
|
||||
def add_action(self, action: AgentAction, color: Optional[str] = None) -> None:
|
||||
"""Add text to input, print if in verbose mode."""
|
||||
if self._verbose:
|
||||
langchain.logger.log_agent_action(action, color=color)
|
||||
self._input += action.log
|
||||
|
||||
def add_observation(
|
||||
self,
|
||||
observation: str,
|
||||
observation_prefix: str,
|
||||
llm_prefix: str,
|
||||
color: Optional[str],
|
||||
) -> None:
|
||||
"""Add observation to input, print if in verbose mode."""
|
||||
if self._verbose:
|
||||
langchain.logger.log_agent_observation(
|
||||
observation,
|
||||
color=color,
|
||||
observation_prefix=observation_prefix,
|
||||
llm_prefix=llm_prefix,
|
||||
)
|
||||
self._input += f"\n{observation_prefix}{observation}\n{llm_prefix}"
|
||||
|
||||
@property
|
||||
def input(self) -> str:
|
||||
"""Return the accumulated input."""
|
||||
return self._input
|
@ -0,0 +1,4 @@
|
||||
"""Chain that tries to verify assumptions before answering a question.
|
||||
|
||||
Heavily borrowed from https://github.com/jagilley/fact-checker
|
||||
"""
|
@ -0,0 +1,98 @@
|
||||
"""Chain for question-answering with self-verification."""
|
||||
|
||||
|
||||
from typing import Dict, List
|
||||
|
||||
from pydantic import BaseModel, Extra
|
||||
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.chains.llm import LLMChain
|
||||
from langchain.chains.llm_checker.prompt import (
|
||||
CHECK_ASSERTIONS_PROMPT,
|
||||
CREATE_DRAFT_ANSWER_PROMPT,
|
||||
LIST_ASSERTIONS_PROMPT,
|
||||
REVISED_ANSWER_PROMPT,
|
||||
)
|
||||
from langchain.chains.sequential import SequentialChain
|
||||
from langchain.llms.base import LLM
|
||||
from langchain.prompts import PromptTemplate
|
||||
|
||||
|
||||
class LLMCheckerChain(Chain, BaseModel):
|
||||
"""Chain for question-answering with self-verification.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
from langchain import OpenAI, LLMCheckerChain
|
||||
llm = OpenAI(temperature=0.7)
|
||||
checker_chain = LLMCheckerChain(llm=llm)
|
||||
"""
|
||||
|
||||
llm: LLM
|
||||
"""LLM wrapper to use."""
|
||||
create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
|
||||
list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
|
||||
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
|
||||
revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
|
||||
"""Prompt to use when questioning the documents."""
|
||||
input_key: str = "query" #: :meta private:
|
||||
output_key: str = "result" #: :meta private:
|
||||
|
||||
class Config:
|
||||
"""Configuration for this pydantic object."""
|
||||
|
||||
extra = Extra.forbid
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
@property
|
||||
def input_keys(self) -> List[str]:
|
||||
"""Return the singular input key.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
return [self.input_key]
|
||||
|
||||
@property
|
||||
def output_keys(self) -> List[str]:
|
||||
"""Return the singular output key.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
return [self.output_key]
|
||||
|
||||
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
|
||||
question = inputs[self.input_key]
|
||||
|
||||
create_draft_answer_chain = LLMChain(
|
||||
llm=self.llm, prompt=self.create_draft_answer_prompt, output_key="statement"
|
||||
)
|
||||
list_assertions_chain = LLMChain(
|
||||
llm=self.llm, prompt=self.list_assertions_prompt, output_key="assertions"
|
||||
)
|
||||
check_assertions_chain = LLMChain(
|
||||
llm=self.llm,
|
||||
prompt=self.check_assertions_prompt,
|
||||
output_key="checked_assertions",
|
||||
)
|
||||
|
||||
revised_answer_chain = LLMChain(
|
||||
llm=self.llm,
|
||||
prompt=self.revised_answer_prompt,
|
||||
output_key="revised_statement",
|
||||
)
|
||||
|
||||
chains = [
|
||||
create_draft_answer_chain,
|
||||
list_assertions_chain,
|
||||
check_assertions_chain,
|
||||
revised_answer_chain,
|
||||
]
|
||||
|
||||
question_to_checked_assertions_chain = SequentialChain(
|
||||
chains=chains,
|
||||
input_variables=["question"],
|
||||
output_variables=["revised_statement"],
|
||||
verbose=True,
|
||||
)
|
||||
output = question_to_checked_assertions_chain({"question": question})
|
||||
return {self.output_key: output["revised_statement"]}
|
@ -0,0 +1,31 @@
|
||||
# flake8: noqa
|
||||
from langchain.prompts.prompt import PromptTemplate
|
||||
|
||||
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
|
||||
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
|
||||
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
|
||||
)
|
||||
|
||||
_LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
|
||||
{statement}
|
||||
Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
|
||||
LIST_ASSERTIONS_PROMPT = PromptTemplate(
|
||||
input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE
|
||||
)
|
||||
|
||||
_CHECK_ASSERTIONS_TEMPLATE = """Here is a bullet point list of assertions:
|
||||
{assertions}
|
||||
For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
|
||||
CHECK_ASSERTIONS_PROMPT = PromptTemplate(
|
||||
input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE
|
||||
)
|
||||
|
||||
_REVISED_ANSWER_TEMPLATE = """{checked_assertions}
|
||||
|
||||
Question: In light of the above assertions and checks, how would you answer the question '{question}'?
|
||||
|
||||
Answer:"""
|
||||
REVISED_ANSWER_PROMPT = PromptTemplate(
|
||||
input_variables=["checked_assertions", "question"],
|
||||
template=_REVISED_ANSWER_TEMPLATE,
|
||||
)
|
@ -0,0 +1,82 @@
|
||||
"""Pass input through a moderation endpoint."""
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel, root_validator
|
||||
|
||||
from langchain.chains.base import Chain
|
||||
from langchain.utils import get_from_dict_or_env
|
||||
|
||||
|
||||
class OpenAIModerationChain(Chain, BaseModel):
|
||||
"""Pass input through a moderation endpoint.
|
||||
|
||||
To use, you should have the ``openai`` python package installed, and the
|
||||
environment variable ``OPENAI_API_KEY`` set with your API key.
|
||||
|
||||
Any parameters that are valid to be passed to the openai.create call can be passed
|
||||
in, even if not explicitly saved on this class.
|
||||
|
||||
Example:
|
||||
.. code-block:: python
|
||||
|
||||
from langchain.chains import OpenAIModerationChain
|
||||
moderation = OpenAIModerationChain()
|
||||
"""
|
||||
|
||||
client: Any #: :meta private:
|
||||
model_name: Optional[str] = None
|
||||
"""Moderation model name to use."""
|
||||
error: bool = False
|
||||
"""Whether or not to error if bad content was found."""
|
||||
input_key: str = "input" #: :meta private:
|
||||
output_key: str = "output" #: :meta private:
|
||||
openai_api_key: Optional[str] = None
|
||||
|
||||
@root_validator()
|
||||
def validate_environment(cls, values: Dict) -> Dict:
|
||||
"""Validate that api key and python package exists in environment."""
|
||||
openai_api_key = get_from_dict_or_env(
|
||||
values, "openai_api_key", "OPENAI_API_KEY"
|
||||
)
|
||||
try:
|
||||
import openai
|
||||
|
||||
openai.api_key = openai_api_key
|
||||
values["client"] = openai.Moderation
|
||||
except ImportError:
|
||||
raise ValueError(
|
||||
"Could not import openai python package. "
|
||||
"Please it install it with `pip install openai`."
|
||||
)
|
||||
return values
|
||||
|
||||
@property
|
||||
def input_keys(self) -> List[str]:
|
||||
"""Expect input key.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
return [self.input_key]
|
||||
|
||||
@property
|
||||
def output_keys(self) -> List[str]:
|
||||
"""Return output key.
|
||||
|
||||
:meta private:
|
||||
"""
|
||||
return [self.output_key]
|
||||
|
||||
def _moderate(self, text: str, results: dict) -> str:
|
||||
if results["flagged"]:
|
||||
error_str = "Text was found that violates OpenAI's content policy."
|
||||
if self.error:
|
||||
raise ValueError(error_str)
|
||||
else:
|
||||
return error_str
|
||||
return text
|
||||
|
||||
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
|
||||
text = inputs[self.input_key]
|
||||
results = self.client.create(text)
|
||||
output = self._moderate(text, results["results"][0])
|
||||
return {self.output_key: output}
|
@ -0,0 +1,65 @@
|
||||
"""BETA: everything in here is highly experimental, do not rely on."""
|
||||
from typing import Any, Optional
|
||||
|
||||
from langchain.input import print_text
|
||||
from langchain.schema import AgentAction
|
||||
|
||||
|
||||
class BaseLogger:
|
||||
"""Base logging interface."""
|
||||
|
||||
def log_agent_start(self, text: str, **kwargs: Any) -> None:
|
||||
"""Log the start of an agent interaction."""
|
||||
pass
|
||||
|
||||
def log_agent_end(self, text: str, **kwargs: Any) -> None:
|
||||
"""Log the end of an agent interaction."""
|
||||
pass
|
||||
|
||||
def log_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
|
||||
"""Log agent action decision."""
|
||||
pass
|
||||
|
||||
def log_agent_observation(self, observation: str, **kwargs: Any) -> None:
|
||||
"""Log agent observation."""
|
||||
pass
|
||||
|
||||
def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs: Any) -> None:
|
||||
"""Log LLM inputs."""
|
||||
pass
|
||||
|
||||
def log_llm_response(self, output: str, **kwargs: Any) -> None:
|
||||
"""Log LLM response."""
|
||||
pass
|
||||
|
||||
|
||||
class StdOutLogger(BaseLogger):
|
||||
"""Interface for printing things to stdout."""
|
||||
|
||||
def log_agent_start(self, text: str, **kwargs: Any) -> None:
|
||||
"""Print the text to start the agent."""
|
||||
print_text(text)
|
||||
|
||||
def log_agent_action(
|
||||
self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
|
||||
) -> None:
|
||||
"""Print the log of the action in a certain color."""
|
||||
print_text(action.log, color=color)
|
||||
|
||||
def log_agent_observation(
|
||||
self,
|
||||
observation: str,
|
||||
color: Optional[str] = None,
|
||||
observation_prefix: Optional[str] = None,
|
||||
llm_prefix: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Print the observation in a special color."""
|
||||
print_text(f"\n{observation_prefix}")
|
||||
print_text(observation, color=color)
|
||||
print_text(f"\n{llm_prefix}")
|
||||
|
||||
def log_llm_inputs(self, inputs: dict, prompt: str, **kwargs: Any) -> None:
|
||||
"""Print the prompt in green."""
|
||||
print("Prompt after formatting:")
|
||||
print_text(prompt, color="green", end="\n")
|
@ -0,0 +1,11 @@
|
||||
"""Common schema objects."""
|
||||
|
||||
from typing import NamedTuple
|
||||
|
||||
|
||||
class AgentAction(NamedTuple):
|
||||
"""Agent's action to take."""
|
||||
|
||||
tool: str
|
||||
tool_input: str
|
||||
log: str
|
@ -0,0 +1,43 @@
|
||||
# flake8: noqa E501
|
||||
|
||||
"""Test LLMCheckerChain functionality."""
|
||||
|
||||
import pytest
|
||||
|
||||
from langchain.chains.llm_checker.base import LLMCheckerChain
|
||||
from langchain.chains.llm_checker.prompt import (
|
||||
_CHECK_ASSERTIONS_TEMPLATE,
|
||||
_CREATE_DRAFT_ANSWER_TEMPLATE,
|
||||
_LIST_ASSERTIONS_TEMPLATE,
|
||||
_REVISED_ANSWER_TEMPLATE,
|
||||
)
|
||||
from tests.unit_tests.llms.fake_llm import FakeLLM
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def fake_llm_checker_chain() -> LLMCheckerChain:
|
||||
"""Fake LLMCheckerChain for testing."""
|
||||
queries = {
|
||||
_CREATE_DRAFT_ANSWER_TEMPLATE.format(
|
||||
question="Which mammal lays the biggest eggs?"
|
||||
): "I don't know which mammal layers the biggest eggs.",
|
||||
_LIST_ASSERTIONS_TEMPLATE.format(
|
||||
statement="I don't know which mammal layers the biggest eggs.",
|
||||
): "1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
|
||||
_CHECK_ASSERTIONS_TEMPLATE.format(
|
||||
assertions="1) I know that mammals lay eggs.\n2) I know that birds lay eggs.\n3) I know that birds are mammals.",
|
||||
): "1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
|
||||
_REVISED_ANSWER_TEMPLATE.format(
|
||||
checked_assertions="1) I know that mammals lay eggs. TRUE\n2) I know that birds lay eggs. TRUE\n3) I know that birds are mammals. TRUE",
|
||||
question="Which mammal lays the biggest eggs?",
|
||||
): "I still don't know.",
|
||||
}
|
||||
fake_llm = FakeLLM(queries=queries)
|
||||
return LLMCheckerChain(llm=fake_llm, input_key="q", output_key="a")
|
||||
|
||||
|
||||
def test_simple_question(fake_llm_checker_chain: LLMCheckerChain) -> None:
|
||||
"""Test simple question that should not need python."""
|
||||
question = "Which mammal lays the biggest eggs?"
|
||||
output = fake_llm_checker_chain.run(question)
|
||||
assert output == "I still don't know."
|