{ "cells": [ { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Retrieval Augmentation for GPT-4 using Pinecone\n", "\n", "#### Fixing LLMs that Hallucinate\n", "\n", "In this notebook we will learn how to query relevant contexts to our queries from Pinecone, and pass these to a GPT-4 model to generate an answer backed by real data sources.\n", "\n", "GPT-4 is a big step up from previous OpenAI completion models. It also exclusively uses the `ChatCompletion` endpoint, so we must use it in a slightly different way to usual. However, the power of the model makes the change worthwhile, particularly when augmented with an external knowledge base like the Pinecone vector database.\n", "\n", "Required installs for this notebook are:" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "_HDKlQO5svqI", "outputId": "4a57df82-5e46-4b60-f0c7-c408e3d0f5b0" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/1.7 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[91m╸\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m71.4 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m41.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m70.1/70.1 KB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m396.0/396.0 KB\u001b[0m \u001b[31m28.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m177.2/177.2 KB\u001b[0m \u001b[31m12.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 KB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 KB\u001b[0m \u001b[31m8.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m43.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m77.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m158.8/158.8 KB\u001b[0m \u001b[31m19.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m199.2/199.2 KB\u001b[0m \u001b[31m26.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m264.6/264.6 KB\u001b[0m \u001b[31m35.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m114.2/114.2 KB\u001b[0m \u001b[31m15.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.1/49.1 KB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m218.0/218.0 KB\u001b[0m \u001b[31m27.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m218.0/218.0 KB\u001b[0m \u001b[31m28.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.7/211.7 KB\u001b[0m \u001b[31m12.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25h\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", "google-cloud-translate 3.8.4 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\n", "google-cloud-language 2.6.1 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\n", "google-cloud-firestore 2.7.3 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\n", "google-cloud-datastore 2.11.1 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\n", "google-cloud-bigquery 3.4.2 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\n", "google-cloud-bigquery-storage 2.19.0 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\n", "google-api-core 2.11.0 requires protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5, but you have protobuf 3.19.3 which is incompatible.\u001b[0m\u001b[31m\n", "\u001b[0m" ] } ], "source": [ "!pip install -qU bs4 tiktoken openai langchain pinecone-client[grpc]" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Preparing the Data" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "In this example, we will download the LangChain docs from [langchain.readthedocs.io/](https://langchain.readthedocs.io/latest/en/). We get all `.html` files located on the site like so:" ] }, { "cell_type": "code", "execution_count": 56, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "xo9gYhGPr_DQ", "outputId": "f1b00acf-b7f0-48e3-abf6-8b0d8a86ffd2" }, "outputs": [ { "data": { "text/plain": [ "" ] }, "execution_count": 56, "metadata": {}, "output_type": "execute_result" } ], "source": [ "!wget -r -A.html -P rtdocs https://python.langchain.com/en/latest/" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "This downloads all HTML into the `rtdocs` directory. Now we can use LangChain itself to process these docs. 
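{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "The download can take a few minutes. Once it finishes, we can quickly count how many `.html` files we grabbed (the exact number will vary as the docs are updated):" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "!find rtdocs -type f -name \"*.html\" | wc -l" ] },
{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Now we can use LangChain itself to process these docs. 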
We do this using the `ReadTheDocsLoader` like so:" ] }, { "cell_type": "code", "execution_count": 57, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "n40_0MtlsKgM", "outputId": "e0978f3f-2b1a-4d95-c8c6-5e73a5f35f56" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ ".rst .pdf Welcome to LangChain Contents Getting Started Modules Use Cases Reference Docs LangChain Ecosystem Additional Resources Welcome to LangChain# Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. But using these LLMs in isolation is often not enough to create a truly powerful app - the real power comes when you are able to combine them with other sources of computation or knowledge. This library is aimed at assisting in the development of those types of applications. Common examples of these types of applications include: ❓ Question Answering over specific documents Documentation End-to-end Example: Question Answering over Notion Database 💬 Chatbots Documentation End-to-end Example: Chat-LangChain 🤖 Agents Documentation End-to-end Example: GPT+WolframAlpha Getting Started# Checkout the below guide for a walkthrough of how to get started using LangChain to create an Language Model application. Getting Started Documentation Modules# There are several main modules that LangChain provides support for. For each module we provide some examples to get started, how-to guides, reference docs, and conceptual guides. These modules are, in increasing order of complexity: Prompts: This includes prompt management, prompt optimization, and prompt serialization. LLMs: This includes a generic interface for all LLMs, and common utilities for working with LLMs. Document Loaders: This includes a standard interface for loading documents, as well as specific integrations to all types of text data sources. Utils: Language models are often more powerful when interacting with other sources of knowledge or computation. This can include Python REPLs, embeddings, search engines, and more. LangChain provides a large collection of common utils to use in your application. Chains: Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications. Indexes: Language models are often more powerful when combined with your own text data - this module covers best practices for doing exactly that. Agents: Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents. Memory: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory. Chat: Chat models are a variation on Language Models that expose a different API - rather than working with raw text, they work with messages. LangChain provides a standard interface for working with them and doing all the same things as above. Use Cases# The above modules can be used in a variety of ways. LangChain also provides guidance and assistance in this. Below are some of the common use cases LangChain supports. 
Agents: Agents are systems that use a language model to interact with other tools. These can be used to do more grounded question/answering, interact with APIs, or even take actions. Chatbots: Since language models are good at producing text, that makes them ideal for creating chatbots. Data Augmented Generation: Data Augmented Generation involves specific types of chains that first interact with an external datasource to fetch data to use in the generation step. Examples of this include summarization of long pieces of text and question/answering over specific data sources. Question Answering: Answering questions over specific documents, only utilizing the information in those documents to construct an answer. A type of Data Augmented Generation. Summarization: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation. Evaluation: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this. Generate similar examples: Generating similar examples to a given input. This is a common use case for many applications, and LangChain provides some prompts/chains for assisting in this. Compare models: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so. Reference Docs# All of LangChain’s reference documentation, in one place. Full documentation on all methods, classes, installation methods, and integration setups for LangChain. Reference Documentation LangChain Ecosystem# Guides for how other companies/products can be used with LangChain LangChain Ecosystem Additional Resources# Additional collection of resources we think may be useful as you develop your application! LangChainHub: The LangChainHub is a place to share and explore other prompts, chains, and agents. Glossary: A glossary of all related terms, papers, methods, etc. Whether implemented in LangChain or not! Gallery: A collection of our favorite projects that use LangChain. Useful for finding inspiration or seeing how things were done in other applications. Deployments: A collection of instructions, code snippets, and template repositories for deploying LangChain apps. Discord: Join us on our Discord to discuss all things LangChain! Tracing: A guide on using tracing in LangChain to visualize the execution of chains and agents. Production Support: As you move your LangChains into production, we’d love to offer more comprehensive support. Please fill out this form and we’ll set up a dedicated support Slack channel. next Quickstart Guide Contents Getting Started Modules Use Cases Reference Docs LangChain Ecosystem Additional Resources By Harrison Chase © Copyright 2022, Harrison Chase. Last updated on Mar 15, 2023.\n" ] } ], "source": [ "from langchain.document_loaders import ReadTheDocsLoader\n", "\n", "loader = ReadTheDocsLoader('rtdocs')\n", "docs = loader.load()\n", "len(docs)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "This leaves us with hundreds of processed doc pages. 
Let's take a look at the format each one contains:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "docs[0]" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "We access the plaintext page content like so:" ] }, { "cell_type": "code", "execution_count": 58, "metadata": { "id": "OcIkny_6xiZJ" }, "outputs": [], "source": [ "print(docs[0].page_content)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(docs[5].page_content)" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "We can also find the source of each document:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "docs[5].metadata['source'].replace('rtdocs/', 'https://')" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "We can use these to create our `data` list:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "data = []\n", "\n", "for doc in docs:\n", " data.append({\n", " 'url': doc.metadata['source'].replace('rtdocs/', 'https://'),\n", " 'text': doc.page_content\n", " })" ] }, { "cell_type": "code", "execution_count": 60, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "F0tUQRxtzqF0", "outputId": "a7a9b799-98cb-41a2-a696-fc9f00579773" }, "outputs": [ { "data": { "text/plain": [ "{'url': 'https://langchain.readthedocs.io/en/latest/modules/memory/types/entity_summary_memory.html',\n", " 'text': '.ipynb .pdf Entity Memory Contents Using in a chain Inspecting the memory store Entity Memory# This notebook shows how to work with a memory module that remembers things about specific entities. It extracts information on entities (using LLMs) and builds up its knowledge about that entity over time (also using LLMs). Let’s first walk through using this functionality. from langchain.llms import OpenAI from langchain.memory import ConversationEntityMemory llm = OpenAI(temperature=0) memory = ConversationEntityMemory(llm=llm) _input = {\"input\": \"Deven & Sam are working on a hackathon project\"} memory.load_memory_variables(_input) memory.save_context( _input, {\"ouput\": \" That sounds like a great project! What kind of project are they working on?\"} ) memory.load_memory_variables({\"input\": \\'who is Sam\\'}) {\\'history\\': \\'Human: Deven & Sam are working on a hackathon project\\\\nAI: That sounds like a great project! What kind of project are they working on?\\', \\'entities\\': {\\'Sam\\': \\'Sam is working on a hackathon project with Deven.\\'}} memory = ConversationEntityMemory(llm=llm, return_messages=True) _input = {\"input\": \"Deven & Sam are working on a hackathon project\"} memory.load_memory_variables(_input) memory.save_context( _input, {\"ouput\": \" That sounds like a great project! What kind of project are they working on?\"} ) memory.load_memory_variables({\"input\": \\'who is Sam\\'}) {\\'history\\': [HumanMessage(content=\\'Deven & Sam are working on a hackathon project\\', additional_kwargs={}), AIMessage(content=\\' That sounds like a great project! What kind of project are they working on?\\', additional_kwargs={})], \\'entities\\': {\\'Sam\\': \\'Sam is working on a hackathon project with Deven.\\'}} Using in a chain# Let’s now use it in a chain! 
from langchain.chains import ConversationChain from langchain.memory import ConversationEntityMemory from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE from pydantic import BaseModel from typing import List, Dict, Any conversation = ConversationChain( llm=llm, verbose=True, prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE, memory=ConversationEntityMemory(llm=llm) ) conversation.predict(input=\"Deven & Sam are working on a hackathon project\") > Entering new ConversationChain chain... Prompt after formatting: You are an assistant to a human, powered by a large language model trained by OpenAI. You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {\\'Deven\\': \\'\\', \\'Sam\\': \\'\\'} Current conversation: Last line: Human: Deven & Sam are working on a hackathon project You: > Finished chain. \\' That sounds like a great project! What kind of project are they working on?\\' conversation.memory.store {\\'Deven\\': \\'Deven is working on a hackathon project with Sam.\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven.\\'} conversation.predict(input=\"They are trying to add more complex memory structures to Langchain\") > Entering new ConversationChain chain... Prompt after formatting: You are an assistant to a human, powered by a large language model trained by OpenAI. You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. 
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {\\'Deven\\': \\'Deven is working on a hackathon project with Sam.\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven.\\', \\'Langchain\\': \\'\\'} Current conversation: Human: Deven & Sam are working on a hackathon project AI: That sounds like a great project! What kind of project are they working on? Last line: Human: They are trying to add more complex memory structures to Langchain You: > Finished chain. \\' That sounds like an interesting project! What kind of memory structures are they trying to add?\\' conversation.predict(input=\"They are adding in a key-value store for entities mentioned so far in the conversation.\") > Entering new ConversationChain chain... Prompt after formatting: You are an assistant to a human, powered by a large language model trained by OpenAI. You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {\\'Deven\\': \\'Deven is working on a hackathon project with Sam, attempting to add more complex memory structures to Langchain.\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven, trying to add more complex memory structures to Langchain.\\', \\'Langchain\\': \\'Langchain is a project that is trying to add more complex memory structures.\\', \\'Key-Value Store\\': \\'\\'} Current conversation: Human: Deven & Sam are working on a hackathon project AI: That sounds like a great project! What kind of project are they working on? Human: They are trying to add more complex memory structures to Langchain AI: That sounds like an interesting project! What kind of memory structures are they trying to add? Last line: Human: They are adding in a key-value store for entities mentioned so far in the conversation. You: > Finished chain. \\' That sounds like a great idea! How will the key-value store work?\\' conversation.predict(input=\"What do you know about Deven & Sam?\") > Entering new ConversationChain chain... Prompt after formatting: You are an assistant to a human, powered by a large language model trained by OpenAI. 
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {\\'Deven\\': \\'Deven is working on a hackathon project with Sam, attempting to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation.\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation.\\'} Current conversation: Human: Deven & Sam are working on a hackathon project AI: That sounds like a great project! What kind of project are they working on? Human: They are trying to add more complex memory structures to Langchain AI: That sounds like an interesting project! What kind of memory structures are they trying to add? Human: They are adding in a key-value store for entities mentioned so far in the conversation. AI: That sounds like a great idea! How will the key-value store work? Last line: Human: What do you know about Deven & Sam? You: > Finished chain. \\' Deven and Sam are working on a hackathon project together, attempting to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation.\\' Inspecting the memory store# We can also inspect the memory store directly. In the following examaples, we look at it directly, and then go through some examples of adding information and watch how it changes. 
from pprint import pprint pprint(conversation.memory.store) {\\'Deven\\': \\'Deven is working on a hackathon project with Sam, attempting to add \\' \\'more complex memory structures to Langchain, including a key-value \\' \\'store for entities mentioned so far in the conversation.\\', \\'Key-Value Store\\': \\'A key-value store that stores entities mentioned in the \\' \\'conversation.\\', \\'Langchain\\': \\'Langchain is a project that is trying to add more complex \\' \\'memory structures, including a key-value store for entities \\' \\'mentioned so far in the conversation.\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven, attempting to add \\' \\'more complex memory structures to Langchain, including a key-value \\' \\'store for entities mentioned so far in the conversation.\\'} conversation.predict(input=\"Sam is the founder of a company called Daimon.\") > Entering new ConversationChain chain... Prompt after formatting: You are an assistant to a human, powered by a large language model trained by OpenAI. You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {\\'Daimon\\': \\'\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation.\\'} Current conversation: Human: They are trying to add more complex memory structures to Langchain AI: That sounds like an interesting project! What kind of memory structures are they trying to add? Human: They are adding in a key-value store for entities mentioned so far in the conversation. AI: That sounds like a great idea! How will the key-value store work? Human: What do you know about Deven & Sam? AI: Deven and Sam are working on a hackathon project to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to be very motivated and passionate about their project, and are working hard to make it a success. Last line: Human: Sam is the founder of a company called Daimon. You: > Finished chain. \"\\\\nThat\\'s impressive! It sounds like Sam is a very successful entrepreneur. 
What kind of company is Daimon?\" from pprint import pprint pprint(conversation.memory.store) {\\'Daimon\\': \\'Daimon is a company founded by Sam.\\', \\'Deven\\': \\'Deven is working on a hackathon project with Sam to add more \\' \\'complex memory structures to Langchain, including a key-value store \\' \\'for entities mentioned so far in the conversation.\\', \\'Key-Value Store\\': \\'Key-Value Store: A data structure that stores values \\' \\'associated with a unique key, allowing for efficient \\' \\'retrieval of values. Deven and Sam are adding a key-value \\' \\'store for entities mentioned so far in the conversation.\\', \\'Langchain\\': \\'Langchain is a project that seeks to add more complex memory \\' \\'structures, including a key-value store for entities mentioned \\' \\'so far in the conversation.\\', \\'Sam\\': \\'Sam is working on a hackathon project with Deven to add more complex \\' \\'memory structures to Langchain, including a key-value store for \\' \\'entities mentioned so far in the conversation. He is also the founder \\' \\'of a company called Daimon.\\'} conversation.predict(input=\"What do you know about Sam?\") > Entering new ConversationChain chain... Prompt after formatting: You are an assistant to a human, powered by a large language model trained by OpenAI. You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist. Context: {\\'Sam\\': \\'Sam is working on a hackathon project with Deven to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. He is also the founder of a company called Daimon.\\', \\'Daimon\\': \\'Daimon is a company founded by Sam.\\'} Current conversation: Human: They are adding in a key-value store for entities mentioned so far in the conversation. AI: That sounds like a great idea! How will the key-value store work? Human: What do you know about Deven & Sam? AI: Deven and Sam are working on a hackathon project to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to be very motivated and passionate about their project, and are working hard to make it a success. Human: Sam is the founder of a company called Daimon. AI: That\\'s impressive! It sounds like Sam is a very successful entrepreneur. What kind of company is Daimon? 
Last line: Human: What do you know about Sam? You: > Finished chain. \\' Sam is the founder of a company called Daimon. He is also working on a hackathon project with Deven to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. He seems to be very motivated and passionate about his project, and is working hard to make it a success.\\' previous ConversationBufferWindowMemory next Conversation Knowledge Graph Memory Contents Using in a chain Inspecting the memory store By Harrison Chase © Copyright 2022, Harrison Chase. Last updated on Mar 15, 2023.'}" ] }, "execution_count": 60, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data[3]" ] },
{ "attachments": {}, "cell_type": "markdown", "metadata": { "id": "ouY4rcx7z2oa" }, "source": [ "It's pretty ugly, but it's good enough for now. Let's see how we can process all of these pages. We will split everything into chunks of ~400 tokens, which we can do easily with `langchain` and `tiktoken`:" ] },
{ "cell_type": "code", "execution_count": 61, "metadata": { "id": "Rb7KxUqYzsuV" }, "outputs": [], "source": [ "import tiktoken\n", "\n", "tokenizer = tiktoken.get_encoding('p50k_base')\n", "\n", "# create a length function that counts tokens rather than characters\n", "def tiktoken_len(text):\n", "    tokens = tokenizer.encode(\n", "        text,\n", "        disallowed_special=()\n", "    )\n", "    return len(tokens)" ] },
{ "cell_type": "code", "execution_count": 62, "metadata": { "id": "OKO8e3Dp0dQS" }, "outputs": [], "source": [ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", "\n", "text_splitter = RecursiveCharacterTextSplitter(\n", "    chunk_size=400,\n", "    chunk_overlap=20,\n", "    length_function=tiktoken_len,\n", "    separators=[\"\\n\\n\", \"\\n\", \" \", \"\"]\n", ")" ] },
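{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Now we process the `data` into chunks using this splitter. A minimal sketch of that loop, using `uuid4` IDs (one reasonable choice, not the only one) and keeping the source `url` alongside every chunk:" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from uuid import uuid4\n", "from tqdm.auto import tqdm\n", "\n", "chunks = []\n", "\n", "for record in tqdm(data):\n", "    # split each page into ~400-token chunks\n", "    texts = text_splitter.split_text(record['text'])\n", "    # keep the source URL with every chunk so we can cite it later\n", "    chunks.extend([{\n", "        'id': str(uuid4()),\n", "        'text': texts[i],\n", "        'chunk': i,\n", "        'url': record['url']\n", "    } for i in range(len(texts))])\n", "\n", "len(chunks)" ] },
{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "With the chunks ready, we embed them with OpenAI's `text-embedding-ada-002` model and upsert everything into a Pinecone index. The sketch below assumes your `OPENAI_API_KEY` and `PINECONE_API_KEY` are set as environment variables; the `us-east1-gcp` environment and `gpt-4-langchain-docs` index name are example values to swap for your own:" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "import openai\n", "import pinecone\n", "\n", "openai.api_key = os.environ['OPENAI_API_KEY']\n", "\n", "embed_model = \"text-embedding-ada-002\"\n", "\n", "pinecone.init(\n", "    api_key=os.environ['PINECONE_API_KEY'],  # from app.pinecone.io\n", "    environment=\"us-east1-gcp\"  # find next to your API key\n", ")\n", "\n", "index_name = 'gpt-4-langchain-docs'\n", "\n", "# create the index if it doesn't exist (ada-002 vectors are 1536-dimensional)\n", "if index_name not in pinecone.list_indexes():\n", "    pinecone.create_index(index_name, dimension=1536, metric='cosine')\n", "\n", "index = pinecone.GRPCIndex(index_name)\n", "\n", "# embed and upsert in batches of 100 chunks\n", "batch_size = 100\n", "for i in tqdm(range(0, len(chunks), batch_size)):\n", "    batch = chunks[i:i+batch_size]\n", "    texts = [c['text'] for c in batch]\n", "    res = openai.Embedding.create(input=texts, engine=embed_model)\n", "    embeds = [record['embedding'] for record in res['data']]\n", "    metadata = [{'text': c['text'], 'url': c['url']} for c in batch]\n", "    index.upsert(vectors=zip([c['id'] for c in batch], embeds, metadata))" ] },
{ "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Now we ask a question about LangChain's `LLMChain`, retrieve the most relevant chunks, and prepend them to the question before passing everything to GPT-4. A sketch with an example question follows; the `primer` system message tells the model to answer only from the provided information, and to truthfully say `\"I don't know\"` otherwise:" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "query = \"how do I use the LLMChain in LangChain?\"\n", "\n", "# embed the query and retrieve the five most relevant chunks\n", "res = openai.Embedding.create(input=[query], engine=embed_model)\n", "xq = res['data'][0]['embedding']\n", "matches = index.query(xq, top_k=5, include_metadata=True)\n", "\n", "# place the retrieved contexts above the question\n", "contexts = [m['metadata']['text'] for m in matches['matches']]\n", "augmented_query = \"\\n\\n---\\n\\n\".join(contexts) + \"\\n\\n-----\\n\\n\" + query\n", "\n", "# system message priming the model to stick to the provided information\n", "primer = \"\"\"You are Q&A bot. A highly intelligent system that answers\n", "user questions based on the information provided by the user above\n", "each question. If the information can not be found in the information\n", "provided by the user you truthfully say \"I don't know\".\n", "\"\"\"\n", "\n", "res = openai.ChatCompletion.create(\n", "    model=\"gpt-4\",\n", "    messages=[\n", "        {\"role\": \"system\", \"content\": primer},\n", "        {\"role\": \"user\", \"content\": augmented_query}\n", "    ]\n", ")" ] },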
], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "res = openai.ChatCompletion.create(\n", " model=\"gpt-4\",\n", " messages=[\n", " {\"role\": \"system\", \"content\": primer},\n", " {\"role\": \"user\", \"content\": query}\n", " ]\n", ")\n", "display(Markdown(res['choices'][0]['message']['content']))" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": { "id": "5CSsA-dW0m_P" }, "source": [ "If we drop the `\"I don't know\"` part of the `primer`?" ] }, { "cell_type": "code", "execution_count": 91, "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 163 }, "id": "Z3svdTCZ0iJ2", "outputId": "e6a8c0bf-e575-454c-bd20-e3b1db3e47b0" }, "outputs": [ { "data": { "text/markdown": [ "LangChain hasn't provided any public documentation on LLMChain, nor is there a known technology called LLMChain in their library. To better assist you, please provide more information or context about LLMChain and LangChain.\n", "\n", "Meanwhile, if you are referring to LangChain, a blockchain-based decentralized AI language model, you can start by visiting their official website (if they have one), exploring their available resources, such as documentation and tutorials, and following any instructions on setting up their technology.\n", "\n", "If you are looking for help with a specific language chain or model in natural language processing, consider rephrasing your question to provide more accurate information or visit relevant resources like GPT-3 or other NLP-related documentation." ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "res = openai.ChatCompletion.create(\n", " model=\"gpt-4\",\n", " messages=[\n", " {\"role\": \"system\", \"content\": \"You are Q&A bot. 
], "metadata": { "colab": { "provenance": [] }, "kernelspec": { "display_name": "Python 3", "name": "python3" }, "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 0 }