diff --git a/examples/langchain_chatgpt.ipynb b/examples/langchain_chatgpt.ipynb new file mode 100644 index 0000000..05d1604 --- /dev/null +++ b/examples/langchain_chatgpt.ipynb @@ -0,0 +1,691 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "b253f4d5", + "metadata": {}, + "source": [ + "# ChatGPT Clone using TOMA GPT-JT-6B\n", + "(adopted from ChatGPT Clone [notebook](https://github.com/hwchase17/langchain/blob/master/docs/examples/chains/chatgpt_clone.ipynb))" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "b0302886", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "env: TOMA_URL=https://staging.together.xyz/api\n" + ] + } + ], + "source": [ + "%env TOMA_URL=https://staging.together.xyz/api" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "93a18ea6", + "metadata": {}, + "source": [ + "Make sure you have langchain installed and manifest. For the most recent versions, run\n", + "```\n", + "pip install git+https://github.com/hwchase17/langchain.git\n", + "pip install git+https://github.com/HazyResearch/manifest.git\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a99acd89", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. 
As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "\n", + "Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. 
My first command is pwd.\n" + ] + } + ], + "source": [ + "from manifest import Manifest\n", + "from langchain.llms.manifest import ManifestWrapper\n", + "from langchain import ConversationChain, LLMChain, PromptTemplate\n", + "from langchain.chains.conversation.memory import ConversationalBufferWindowMemory\n", + "\n", + "\n", + "template = \"\"\"Assistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "{history}\n", + "Human: {human_input}\n", + "Assistant:\"\"\"\n", + "\n", + "prompt = PromptTemplate(\n", + " input_variables=[\"history\", \"human_input\"], \n", + " template=template\n", + ")\n", + "\n", + "manifest = Manifest(\n", + " client_name=\"toma\",\n", + " engine=\"Together-gpt-JT-6B-v1\",\n", + " max_tokens=150,\n", + " top_p=0.9,\n", + " top_k=40,\n", + " stop_sequences=[\"Human:\"],\n", + ")\n", + "\n", + "chatgpt_chain = LLMChain(\n", + " llm=ManifestWrapper(client=manifest), \n", + " prompt=prompt, \n", + " verbose=True, \n", + " memory=ConversationalBufferWindowMemory(k=2),\n", + ")\n", + "\n", + "output = chatgpt_chain.predict(human_input=\"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "4ef711d6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. 
As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "AI: Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. 
My first command is pwd.\n", + "Human: ls ~\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"ls ~\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a5d6dac2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. 
Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "AI: Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "Human: ls ~\n", + "AI: Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. 
My first command is pwd.\n", + "Human: cd ~\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"cd ~\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "b9283077", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. 
Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: ls ~\n", + "AI: Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "Human: cd ~\n", + "AI: Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "Human: {Please make a file jokes.txt inside and put some jokes inside}\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. 
Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"{Please make a file jokes.txt inside and put some jokes inside}\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "570e785e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: cd ~\n", + "AI: Hi, I am Assistant. I am here to help you with your Linux terminal. I will type commands and you will reply with what the terminal should show. 
I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\n", + "Human: {Please make a file jokes.txt inside and put some jokes inside}\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "cd0a23d9", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. 
It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: {Please make a file jokes.txt inside and put some jokes inside}\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. 
Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "90db6eb2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\n", + "AI: I am sorry, I do not understand the question. 
Could you please rephrase it?\n", + "Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n", + "COPY entrypoint.sh entrypoint.sh\n", + "ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "Hello from Docker\n" + ] + } + ], + "source": [ + "docker_input = \"\"\"echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\\nCOPY entrypoint.sh entrypoint.sh\\nENTRYPOINT [\\\"/bin/sh\\\",\\\"entrypoint.sh\\\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\"\"\"\n", + "output = chatgpt_chain.predict(human_input=docker_input)\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "c3806f89", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. 
It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n", + "COPY entrypoint.sh entrypoint.sh\n", + "ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\n", + "AI: Hello from Docker\n", + "Human: nvidia-smi\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. 
Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"nvidia-smi\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f508f597", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\n", + "COPY entrypoint.sh entrypoint.sh\n", + "ENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]\">Dockerfile && docker build . 
-t my_docker_image && docker run -t my_docker_image\n", + "AI: Hello from Docker\n", + "Human: nvidia-smi\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: ping bbc.com\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"ping bbc.com\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "cbd607f4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. 
Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: nvidia-smi\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: ping bbc.com\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"\"\"curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\"\"\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "d33e0e28", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. 
Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: ping bbc.com\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: lynx https://www.deepmind.com/careers\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"lynx https://www.deepmind.com/careers\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "57c2f113", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. 
As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: curl -fsSL \"https://api.github.com/repos/pytorch/pytorch/releases/latest\" | jq -r '.tag_name' | sed 's/[^0-9\\.\\-]*//g'\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: lynx https://www.deepmind.com/careers\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: curl https://chat.openai.com/chat\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. 
Could you please rephrase it?\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"curl https://chat.openai.com/chat\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "babadc78", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: lynx https://www.deepmind.com/careers\n", + "AI: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Human: curl https://chat.openai.com/chat\n", + "AI: I am sorry, I do not understand the question. 
Could you please rephrase it?\n", + "Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\"\"\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "0954792a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n", + "Prompt after formatting:\n", + "\u001b[32;1m\u001b[1;3mAssistant is a large language model trained by OpenAI.\n", + "\n", + "Assistant is designed to be able to assist with a wide range of tasks, from answering simple 
questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n", + "\n", + "Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n", + "\n", + "Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n", + "\n", + "Human: curl https://chat.openai.com/chat\n", + "AI: I am sorry, I do not understand the question. 
Could you please rephrase it?\n", + "Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"What is artificial intelligence?\"}' https://chat.openai.com/chat\n", + "AI: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Assistant: Artificial intelligence is a computer program that is designed to mimic human intelligence.\n", + "Human: curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.\"}' https://chat.openai.com/chat\n", + "Assistant:\u001b[0m\n", + "\n", + "\u001b[1m> Finished LLMChain chain.\u001b[0m\n", + "I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Assistant: I am sorry, I do not understand the question. 
Could you please rephrase it?\n", + "Assistant: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Assistant: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Assistant: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Assistant: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Assistant: I am sorry, I do not understand the question. Could you please rephrase it?\n", + "Assistant: I am sorry\n" + ] + } + ], + "source": [ + "output = chatgpt_chain.predict(human_input=\"\"\"curl --header \"Content-Type:application/json\" --request POST --data '{\"message\": \"I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply wiht the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. 
My first command is pwd.\"}' https://chat.openai.com/chat\"\"\")\n", + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e68a087e", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "manifest", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.15" + }, + "vscode": { + "interpreter": { + "hash": "0d67557b0e03f6eb64c46b70fb42ce7c6498a7305f9f3922c351822f3fc8e363" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/manifest/clients/ai21.py b/manifest/clients/ai21.py index 0ca1dce..8fe327b 100644 --- a/manifest/clients/ai21.py +++ b/manifest/clients/ai21.py @@ -49,7 +49,7 @@ class AI21Client(Client): if self.api_key is None: raise ValueError( "AI21 API key not set. Set AI21_API_KEY environment " - "variable or pass through `connection_str`." + "variable or pass through `client_connection`." ) for key in self.PARAMS: diff --git a/manifest/clients/cohere.py b/manifest/clients/cohere.py index 9ee5580..d932bb6 100644 --- a/manifest/clients/cohere.py +++ b/manifest/clients/cohere.py @@ -46,7 +46,7 @@ class CohereClient(Client): if self.api_key is None: raise ValueError( "Cohere API key not set. Set COHERE_API_KEY environment " - "variable or pass through `connection_str`." + "variable or pass through `client_connection`." 
) self.host = "https://api.cohere.ai" for key in self.PARAMS: diff --git a/manifest/clients/openai.py b/manifest/clients/openai.py index 2f89337..fcfada9 100644 --- a/manifest/clients/openai.py +++ b/manifest/clients/openai.py @@ -8,6 +8,7 @@ from manifest.clients.client import Client logger = logging.getLogger(__name__) OPENAI_ENGINES = { + "text-davinci-003", "text-davinci-002", "text-davinci-001", "davinci", @@ -27,7 +28,7 @@ class OpenAIClient(Client): # User param -> (client param, default value) PARAMS = { - "engine": ("model", "text-davinci-002"), + "engine": ("model", "text-davinci-003"), "temperature": ("temperature", 1.0), "max_tokens": ("max_tokens", 10), "n": ("n", 1), @@ -57,7 +58,7 @@ class OpenAIClient(Client): if self.api_key is None: raise ValueError( "OpenAI API key not set. Set OPENAI_API_KEY environment " - "variable or pass through `connection_str`." + "variable or pass through `client_connection`." ) self.host = "https://api.openai.com/v1" for key in self.PARAMS: diff --git a/manifest/clients/toma.py b/manifest/clients/toma.py index 4025eb8..bf501f2 100644 --- a/manifest/clients/toma.py +++ b/manifest/clients/toma.py @@ -1,39 +1,20 @@ """TOMA client.""" -import json import logging import os -import time from datetime import datetime -from typing import Any, Callable, Dict, List, Optional, Tuple +from typing import Any, Dict, Optional import requests from manifest.clients.client import Client -from manifest.request import Request logger = logging.getLogger(__name__) # Engines are dynamically instantiated from API # but a few example engines are listed below. 
-# TOMA_ENGINES = {
-#     "bloom",
-#     "glm-int8",
-#     "gpt-neox-20b",
-#     "opt-66b",
-#     "opt-175b",
-#     "glm",
-#     "stable_diffusion",
-#     "t0pp",
-#     "gpt-j-6b",
-#     "t5-11b",
-#     "glm-int4",
-#     "ul2",
-# }
-
-# Engine -> request type
-# Default is language-model-inference
-TOMA_ENGINE_REQUEST_TYPE = {
-    "stable_diffusion": "image-model-inference",
+TOMA_ENGINES = {
+    "StableDiffusion",
+    "Together-gpt-JT-6B-v1",
 }
 
 
@@ -43,11 +24,12 @@ class TOMAClient(Client):
     # User param -> (client param, default value)
     PARAMS = {
         "engine": ("model", "gpt-j-6b"),
-        "temperature": ("temperature", 1.0),
-        "max_tokens": ("max_tokens", 10),
-        "n": ("n", 1),
-        "top_p": ("top_p", 1.0),
-        "top_k": ("best_of", 1),
+        "temperature": ("temperature", 0.1),
+        "max_tokens": ("max_tokens", 32),
+        # n is deprecated with new API but will come back online soon
+        # "n": ("n", 1),
+        "top_p": ("top_p", 0.9),
+        "top_k": ("top_k", 40),
         "stop_sequences": ("stop", []),
         "client_timeout": ("client_timeout", 120),  # seconds
     }
@@ -71,11 +53,14 @@ class TOMAClient(Client):
         # if self.api_key is None:
         #     raise ValueError(
         #         "TOMA API key not set. Set TOMA_API_KEY environment "
-        #         "variable or pass through `connection_str`."
+        #         "variable or pass through `client_connection`."
         #     )
 
         for key in self.PARAMS:
             setattr(self, key, client_args.pop(key, self.PARAMS[key][1]))
+
+        # Not functioning yet in new TOMA API. Will come back online soon.
+        """
         model_heartbeats = self.get_model_heartbeats()
         if getattr(self, "engine") not in model_heartbeats.keys():
             raise ValueError(
@@ -102,8 +87,7 @@ class TOMAClient(Client):
                 "seconds to respond. Increase client_timeout "
                 "to avoid timeout."
) - self.pending_jobs: List = [] - self.completed_jobs: List = [] + """ def close(self) -> None: """Close the client.""" @@ -111,7 +95,7 @@ class TOMAClient(Client): def get_generation_url(self) -> str: """Get generation URL.""" - return self.host + "/jobs" + return self.host + "/inference" def get_generation_header(self) -> Dict[str, str]: """ @@ -138,17 +122,6 @@ class TOMAClient(Client): """ return {"model_name": "toma", "engine": getattr(self, "engine")} - def get_last_job_id(self) -> Optional[str]: - """ - Get last job id. - - Returns: - last job id. - """ - if len(self.completed_jobs) > 0: - return self.completed_jobs[-1] - return None - def get_model_heartbeats(self) -> Dict[str, Dict]: """ Get TOMA models and their last ping time. @@ -186,85 +159,6 @@ class TOMAClient(Client): "text": item["text"], # "logprobs": [], } - for item in response["inference_result"][0]["choices"] + for item in response["output"]["choices"] ], } - - def get_response(self, job_id: str, retry_timeout: int) -> Dict[str, Any]: - """ - Get response from job id. - - Will try up to `client_timeout` seconds to get response. - - Args: - job_id: job id - retry_timeout: retry timeout - - Returns: - response as dict - """ - final_res = None - attempts = 0 - while True: - ret = requests.get(f"{self.host}/job/{job_id}", json={"id": job_id}).json() - attempts += 1 - if ret["status"] == "finished" or ret["status"] == "failed": - final_res = ret["returned_payload"] - break - if attempts > retry_timeout: - break - time.sleep(1) - if not final_res: - raise RuntimeError( - f"TOMA request timed out after {retry_timeout}s with {ret['status']}." - ) - if "result" in final_res: - return self.format_response(final_res["result"]) - else: - raise RuntimeError(f"TOMA request failed with {final_res['message']}.") - - def get_request(self, request: Request) -> Tuple[Callable[[], Dict], Dict]: - """ - Get request string function. - - Args: - request: request. - - Returns: - request function that takes no input. 
- request parameters as dict. - """ - if isinstance(request.prompt, list): - raise ValueError("TOMA does not support batch requests.") - request_params = request.to_dict(self.PARAMS) - request_params["request_type"] = TOMA_ENGINE_REQUEST_TYPE.get( - getattr(self, "engine"), "language-model-inference" - ) - retry_timeout = request_params.pop("client_timeout") - - # num_returns is for image-model-inference - if request_params["request_type"] == "image-model-inference": - request_params["num_returns"] = request_params["n"] - - def _run_completion() -> Dict: - post_str = self.host + "/jobs" - res = requests.post( - post_str, - # headers={"Authorization": f"Bearer {self.api_key}"}, - json={ - "type": "general", - "payload": request_params, - "returned_payload": {}, - "status": "submitted", - "source": "dalle", - }, - ).json() - job_id = res["id"] - # TODO: ideally just submit the jobs and then fetch results in parallel - self.pending_jobs.append(job_id) - job_id = self.pending_jobs.pop() - final_res = self.get_response(job_id, retry_timeout) - self.completed_jobs.append(job_id) - return final_res - - return _run_completion, request_params diff --git a/pyproject.toml b/pyproject.toml index 7f4708a..3d9426c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,7 +15,8 @@ module = [ "accelerate", "accelerate.utils.modeling", "transformers", - "cohere", + "flask", + "torch" ] [tool.isort]