{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mongodb-developer/GenAI-Showcase/blob/main/notebooks/agents/agent_fireworks_ai_langchain_mongodb.ipynb)\n",
"\n",
"[![View Article](https://img.shields.io/badge/View%20Article-blue)](https://www.mongodb.com/developer/products/atlas/agent-fireworksai-mongodb-langchain/)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "3kMALXaMv-MS"
},
"source": [
"## Install Libraries"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "cxTXczeTghzU",
"outputId": "ae3a81b2-cba6-42fc-f593-8646bff77b14"
},
"outputs": [],
"source": [
"!pip install langchain langchain_openai langchain-fireworks langchain-mongodb arxiv pymupdf datasets pymongo"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RM8rg08YhqZe"
},
"source": [
"## Set Evironment Variables"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "oXLWCWEghuOX"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"FIREWORKS_API_KEY\"] = \"\"\n",
"os.environ[\"MONGO_URI\"] = \"\"\n",
"\n",
"FIREWORKS_API_KEY = os.environ.get(\"FIREWORKS_API_KEY\")\n",
"OPENAI_API_KEY = os.environ.get(\"OPENAI_API_KEY\")\n",
"MONGO_URI = os.environ.get(\"MONGO_URI\")"
]
},
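{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you prefer not to hardcode secrets in the notebook, the cell below is an optional sketch that reads any unset variable interactively with Python's standard `getpass` module."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: prompt for any secrets that were not set above (sketch using stdlib getpass).\n",
"import getpass\n",
"import os\n",
"\n",
"for var in (\"OPENAI_API_KEY\", \"FIREWORKS_API_KEY\", \"MONGO_URI\"):\n",
"    if not os.environ.get(var):\n",
"        os.environ[var] = getpass.getpass(f\"Enter {var}: \")"
]
},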
{
"cell_type": "markdown",
"metadata": {
"id": "UUf3jtFzO4-V"
},
"source": [
"## Data Ingestion into MongoDB Vector Database\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"referenced_widgets": [
"cebfba144ba6418092df949783f93455",
"09dcf4ce88064f11980bbefaad1ebc75",
"f2bd7bda4d0c4d93b88e53aeb4e1b62d",
"278513c5a8b04a24b1823d38107f1e50",
"d3941c633788427abb858b21e285088f",
"39563df9477648398456675ec51075aa",
"f4353368efbd4c3891f805ddc3d05e1b",
"30fe0bcd02cb47f3ba23bb480e2eaaea",
"d17d8c8f45ee44cd87dcd787c05dbdc3",
"62e196b6d30746578e137c50b661f946",
"ced7f9d61e06442a960dcda95852048e",
"7dbfebff68ff45628da832fac5233c93",
"164d16df28d24ab796b7c9cf85174800",
"e70e0d317f1e4e73bd95349ed1510cce",
"41056c822b9d44559147d2b21416b956",
"b1929fb112174c0abcd8004f6be0f880",
"95e4af5b420242b7a6b74a18cad98961",
"dff65b579f0746ffae8739ecb0aa5a41",
"f73ae771c24645c79fd41409a8fc7b34",
"20d693a09c534414a5c4c0dd58cf94ed",
"a43c349d171e469c8cc94d48060f775b",
"373ed3b6307741859ab297c270cf42c8"
]
},
"id": "pq4SA6r7O30i",
"outputId": "904f4112-79fb-45cc-954b-d2b818cb2748"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/richmondalake/miniconda3/envs/langchain_workarea/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n",
"Downloading readme: 100%|██████████| 701/701 [00:00<00:00, 2.04MB/s]\n",
"Repo card metadata block was not found. Setting CardData to empty.\n",
"Downloading data: 100%|██████████| 102M/102M [00:15<00:00, 6.41MB/s] \n",
"Generating train split: 50000 examples [00:01, 38699.64 examples/s]\n"
]
}
],
"source": [
"import pandas as pd\n",
"from datasets import load_dataset\n",
"\n",
"data = load_dataset(\"MongoDB/subset_arxiv_papers_with_emebeddings\")\n",
"dataset_df = pd.DataFrame(data[\"train\"])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "jsuj3jOgFimi",
"outputId": "5e92750a-4053-46d8-c3b3-9bba5b1180ba"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"50000\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>id</th>\n",
" <th>submitter</th>\n",
" <th>authors</th>\n",
" <th>title</th>\n",
" <th>comments</th>\n",
" <th>journal-ref</th>\n",
" <th>doi</th>\n",
" <th>report-no</th>\n",
" <th>categories</th>\n",
" <th>license</th>\n",
" <th>abstract</th>\n",
" <th>versions</th>\n",
" <th>update_date</th>\n",
" <th>authors_parsed</th>\n",
" <th>embedding</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>704.0001</td>\n",
" <td>Pavel Nadolsky</td>\n",
" <td>C. Bal\\'azs, E. L. Berger, P. M. Nadolsky, C.-...</td>\n",
" <td>Calculation of prompt diphoton production cros...</td>\n",
" <td>37 pages, 15 figures; published version</td>\n",
" <td>Phys.Rev.D76:013009,2007</td>\n",
" <td>10.1103/PhysRevD.76.013009</td>\n",
" <td>ANL-HEP-PR-07-12</td>\n",
" <td>hep-ph</td>\n",
" <td>None</td>\n",
" <td>A fully differential calculation in perturba...</td>\n",
" <td>[{'version': 'v1', 'created': 'Mon, 2 Apr 2007...</td>\n",
" <td>2008-11-26</td>\n",
" <td>[[Balázs, C., ], [Berger, E. L., ], [Nadolsky,...</td>\n",
" <td>[0.0594153292, -0.0440569334, -0.0487333685, -...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>704.0002</td>\n",
" <td>Louis Theran</td>\n",
" <td>Ileana Streinu and Louis Theran</td>\n",
" <td>Sparsity-certifying Graph Decompositions</td>\n",
" <td>To appear in Graphs and Combinatorics</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>math.CO cs.CG</td>\n",
" <td>http://arxiv.org/licenses/nonexclusive-distrib...</td>\n",
" <td>We describe a new algorithm, the $(k,\\ell)$-...</td>\n",
" <td>[{'version': 'v1', 'created': 'Sat, 31 Mar 200...</td>\n",
" <td>2008-12-13</td>\n",
" <td>[[Streinu, Ileana, ], [Theran, Louis, ]]</td>\n",
" <td>[0.0247399714, -0.065658465, 0.0201423876, -0....</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>704.0003</td>\n",
" <td>Hongjun Pan</td>\n",
" <td>Hongjun Pan</td>\n",
" <td>The evolution of the Earth-Moon system based o...</td>\n",
" <td>23 pages, 3 figures</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>physics.gen-ph</td>\n",
" <td>None</td>\n",
" <td>The evolution of Earth-Moon system is descri...</td>\n",
" <td>[{'version': 'v1', 'created': 'Sun, 1 Apr 2007...</td>\n",
" <td>2008-01-13</td>\n",
" <td>[[Pan, Hongjun, ]]</td>\n",
" <td>[0.0491479263, 0.0728017688, 0.0604138002, 0.0...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>704.0004</td>\n",
" <td>David Callan</td>\n",
" <td>David Callan</td>\n",
" <td>A determinant of Stirling cycle numbers counts...</td>\n",
" <td>11 pages</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>math.CO</td>\n",
" <td>None</td>\n",
" <td>We show that a determinant of Stirling cycle...</td>\n",
" <td>[{'version': 'v1', 'created': 'Sat, 31 Mar 200...</td>\n",
" <td>2007-05-23</td>\n",
" <td>[[Callan, David, ]]</td>\n",
" <td>[0.0389556214, -0.0410280302, 0.0410280302, -0...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>704.0005</td>\n",
" <td>Alberto Torchinsky</td>\n",
" <td>Wael Abu-Shammala and Alberto Torchinsky</td>\n",
" <td>From dyadic $\\Lambda_{\\alpha}$ to $\\Lambda_{\\a...</td>\n",
" <td>None</td>\n",
" <td>Illinois J. Math. 52 (2008) no.2, 681-689</td>\n",
" <td>None</td>\n",
" <td>None</td>\n",
" <td>math.CA math.FA</td>\n",
" <td>None</td>\n",
" <td>In this paper we show how to compute the $\\L...</td>\n",
" <td>[{'version': 'v1', 'created': 'Mon, 2 Apr 2007...</td>\n",
" <td>2013-10-15</td>\n",
" <td>[[Abu-Shammala, Wael, ], [Torchinsky, Alberto, ]]</td>\n",
" <td>[0.118412666, -0.0127423415, 0.1185125113, 0.0...</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" id submitter \\\n",
"0 704.0001 Pavel Nadolsky \n",
"1 704.0002 Louis Theran \n",
"2 704.0003 Hongjun Pan \n",
"3 704.0004 David Callan \n",
"4 704.0005 Alberto Torchinsky \n",
"\n",
" authors \\\n",
"0 C. Bal\\'azs, E. L. Berger, P. M. Nadolsky, C.-... \n",
"1 Ileana Streinu and Louis Theran \n",
"2 Hongjun Pan \n",
"3 David Callan \n",
"4 Wael Abu-Shammala and Alberto Torchinsky \n",
"\n",
" title \\\n",
"0 Calculation of prompt diphoton production cros... \n",
"1 Sparsity-certifying Graph Decompositions \n",
"2 The evolution of the Earth-Moon system based o... \n",
"3 A determinant of Stirling cycle numbers counts... \n",
"4 From dyadic $\\Lambda_{\\alpha}$ to $\\Lambda_{\\a... \n",
"\n",
" comments \\\n",
"0 37 pages, 15 figures; published version \n",
"1 To appear in Graphs and Combinatorics \n",
"2 23 pages, 3 figures \n",
"3 11 pages \n",
"4 None \n",
"\n",
" journal-ref doi \\\n",
"0 Phys.Rev.D76:013009,2007 10.1103/PhysRevD.76.013009 \n",
"1 None None \n",
"2 None None \n",
"3 None None \n",
"4 Illinois J. Math. 52 (2008) no.2, 681-689 None \n",
"\n",
" report-no categories \\\n",
"0 ANL-HEP-PR-07-12 hep-ph \n",
"1 None math.CO cs.CG \n",
"2 None physics.gen-ph \n",
"3 None math.CO \n",
"4 None math.CA math.FA \n",
"\n",
" license \\\n",
"0 None \n",
"1 http://arxiv.org/licenses/nonexclusive-distrib... \n",
"2 None \n",
"3 None \n",
"4 None \n",
"\n",
" abstract \\\n",
"0 A fully differential calculation in perturba... \n",
"1 We describe a new algorithm, the $(k,\\ell)$-... \n",
"2 The evolution of Earth-Moon system is descri... \n",
"3 We show that a determinant of Stirling cycle... \n",
"4 In this paper we show how to compute the $\\L... \n",
"\n",
" versions update_date \\\n",
"0 [{'version': 'v1', 'created': 'Mon, 2 Apr 2007... 2008-11-26 \n",
"1 [{'version': 'v1', 'created': 'Sat, 31 Mar 200... 2008-12-13 \n",
"2 [{'version': 'v1', 'created': 'Sun, 1 Apr 2007... 2008-01-13 \n",
"3 [{'version': 'v1', 'created': 'Sat, 31 Mar 200... 2007-05-23 \n",
"4 [{'version': 'v1', 'created': 'Mon, 2 Apr 2007... 2013-10-15 \n",
"\n",
" authors_parsed \\\n",
"0 [[Balázs, C., ], [Berger, E. L., ], [Nadolsky,... \n",
"1 [[Streinu, Ileana, ], [Theran, Louis, ]] \n",
"2 [[Pan, Hongjun, ]] \n",
"3 [[Callan, David, ]] \n",
"4 [[Abu-Shammala, Wael, ], [Torchinsky, Alberto, ]] \n",
"\n",
" embedding \n",
"0 [0.0594153292, -0.0440569334, -0.0487333685, -... \n",
"1 [0.0247399714, -0.065658465, 0.0201423876, -0.... \n",
"2 [0.0491479263, 0.0728017688, 0.0604138002, 0.0... \n",
"3 [0.0389556214, -0.0410280302, 0.0410280302, -0... \n",
"4 [0.118412666, -0.0127423415, 0.1185125113, 0.0... "
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"print(len(dataset_df))\n",
"dataset_df.head()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "o2gHwRjMfJlO"
},
"outputs": [],
"source": [
"from pymongo import MongoClient\n",
"\n",
"# Initialize MongoDB python client\n",
"client = MongoClient(MONGO_URI, appname=\"devrel.content.ai_agent_firechain.python\")\n",
"\n",
"DB_NAME = \"agent_demo\"\n",
"COLLECTION_NAME = \"knowledge\"\n",
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"vector_index\"\n",
"collection = client[DB_NAME][COLLECTION_NAME]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "zJkyy9UbffZT",
"outputId": "c6f78ea3-fc93-4d57-95eb-98cea5bf15d3"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Data ingestion into MongoDB completed\n"
]
}
],
"source": [
"# Delete any existing records in the collection\n",
"collection.delete_many({})\n",
"\n",
"# Data Ingestion\n",
"records = dataset_df.to_dict(\"records\")\n",
"collection.insert_many(records)\n",
"\n",
"print(\"Data ingestion into MongoDB completed\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6S1Cz9dtGPwL"
},
"source": [
"## Create Vector Search Index Defintion\n",
"\n",
"```\n",
"{\n",
" \"fields\": [\n",
" {\n",
" \"type\": \"vector\",\n",
" \"path\": \"embedding\",\n",
" \"numDimensions\": 256,\n",
" \"similarity\": \"cosine\"\n",
" }\n",
" ]\n",
"}\n",
"```"
]
},
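{
"cell_type": "markdown",
"metadata": {},
"source": [
"The definition above can be created through the Atlas UI, or programmatically from the driver. The cell below is a minimal sketch of the programmatic route; it assumes pymongo 4.7+ and an Atlas cluster tier that permits creating search indexes from the driver, and it reuses `collection` and `ATLAS_VECTOR_SEARCH_INDEX_NAME` from the ingestion cells above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: create the vector search index from the driver (assumes pymongo 4.7+).\n",
"from pymongo.operations import SearchIndexModel\n",
"\n",
"vector_index_model = SearchIndexModel(\n",
"    definition={\n",
"        \"fields\": [\n",
"            {\n",
"                \"type\": \"vector\",\n",
"                \"path\": \"embedding\",\n",
"                \"numDimensions\": 256,\n",
"                \"similarity\": \"cosine\",\n",
"            }\n",
"        ]\n",
"    },\n",
"    name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
"    type=\"vectorSearch\",\n",
")\n",
"collection.create_search_index(model=vector_index_model)"
]
},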
{
"cell_type": "markdown",
"metadata": {
"id": "1a-0n9PpfqDj"
},
"source": [
"## Create LangChain Retriever (MongoDB)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "HAxeTPimfxM-"
},
"outputs": [],
"source": [
"from langchain_mongodb import MongoDBAtlasVectorSearch\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"embedding_model = OpenAIEmbeddings(model=\"text-embedding-3-small\", dimensions=256)\n",
"\n",
"# Vector Store Creation\n",
"vector_store = MongoDBAtlasVectorSearch.from_connection_string(\n",
" connection_string=MONGO_URI,\n",
" namespace=DB_NAME + \".\" + COLLECTION_NAME,\n",
" embedding=embedding_model,\n",
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
" text_key=\"abstract\",\n",
")\n",
"\n",
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 5})"
]
},
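{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check: once the vector index is active, the retriever should return the five most similar abstracts. The query string below is purely illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check: fetch the five nearest abstracts for an example query.\n",
"retriever.invoke(\"Sparsity-certifying graph decompositions\")"
]
},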
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Optional: Creating a retrevier with compression capabilities using LLMLingua\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain_community llmlingua"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"from langchain.retrievers import ContextualCompressionRetriever\n",
"from langchain_community.document_compressors import LLMLinguaCompressor"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/richmondalake/miniconda3/envs/langchain_workarea/lib/python3.12/site-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
" warnings.warn(\n"
]
}
],
"source": [
"compressor = LLMLinguaCompressor(model_name=\"openai-community/gpt2\", device_map=\"cpu\")\n",
"compression_retriever = ContextualCompressionRetriever(\n",
" base_compressor=compressor, base_retriever=retriever\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Sm5QZdshwJLN"
},
"source": [
"## Configure LLM Using Fireworks AI"
]
},
{
"cell_type": "code",
"execution_count": 61,
"metadata": {
"id": "V4ztCMCtgme_"
},
"outputs": [],
"source": [
"from langchain_fireworks import ChatFireworks\n",
"\n",
"llm = ChatFireworks(model=\"accounts/fireworks/models/firefunction-v1\", max_tokens=256)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "pZfheX5FiIhU"
},
"source": [
"## Agent Tools Creation"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {
"id": "3eufR9H8gopU"
},
"outputs": [],
"source": [
"from langchain.agents import tool\n",
"from langchain.tools.retriever import create_retriever_tool\n",
"from langchain_community.document_loaders import ArxivLoader\n",
"\n",
"\n",
"# Custom Tool Definiton\n",
"@tool\n",
"def get_metadata_information_from_arxiv(word: str) -> list:\n",
" \"\"\"\n",
" Fetches and returns metadata for a maximum of ten documents from arXiv matching the given query word.\n",
"\n",
" Args:\n",
" word (str): The search query to find relevant documents on arXiv.\n",
"\n",
" Returns:\n",
" list: Metadata about the documents matching the query.\n",
" \"\"\"\n",
" docs = ArxivLoader(query=word, load_max_docs=10).load()\n",
" # Extract just the metadata from each document\n",
" metadata_list = [doc.metadata for doc in docs]\n",
" return metadata_list\n",
"\n",
"\n",
"@tool\n",
"def get_information_from_arxiv(word: str) -> list:\n",
" \"\"\"\n",
" Fetches and returns metadata for a single research paper from arXiv matching the given query word, which is the ID of the paper, for example: 704.0001.\n",
"\n",
" Args:\n",
" word (str): The search query to find the relevant paper on arXiv using the ID.\n",
"\n",
" Returns:\n",
" list: Data about the paper matching the query.\n",
" \"\"\"\n",
" doc = ArxivLoader(query=word, load_max_docs=1).load()\n",
" return doc\n",
"\n",
"\n",
"# If you created a retriever with compression capaitilies in the optional cell in an earlier cell, you can replace 'retriever' with 'compression_retriever'\n",
"# Otherwise you can also create a compression procedure as a tool for the agent as shown in the `compress_prompt_using_llmlingua` tool definition function\n",
"retriever_tool = create_retriever_tool(\n",
" retriever=retriever,\n",
" name=\"knowledge_base\",\n",
" description=\"This serves as the base knowledge source of the agent and contains some records of research papers from Arxiv. This tool is used as the first step for exploration and reseach efforts.\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_compressors import LLMLinguaCompressor\n",
"\n",
"compressor = LLMLinguaCompressor(model_name=\"openai-community/gpt2\", device_map=\"cpu\")\n",
"\n",
"\n",
"@tool\n",
"def compress_prompt_using_llmlingua(prompt: str, compression_rate: float = 0.5) -> str:\n",
" \"\"\"\n",
" Compresses a long data or prompt using the LLMLinguaCompressor.\n",
"\n",
" Args:\n",
" data (str): The data or prompt to be compressed.\n",
" compression_rate (float): The rate at which to compress the data (default is 0.5).\n",
"\n",
" Returns:\n",
" str: The compressed data or prompt.\n",
" \"\"\"\n",
" compressed_data = compressor.compress_prompt(\n",
" prompt,\n",
" rate=compression_rate,\n",
" force_tokens=[\"!\", \".\", \"?\", \"\\n\"],\n",
" drop_consecutive=True,\n",
" )\n",
" return compressed_data"
]
},
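{
"cell_type": "markdown",
"metadata": {},
"source": [
"The agent calls this tool on its own, but it can also be invoked directly. The snippet below is an illustrative direct call with a made-up repetitive prompt; LangChain tools are runnables, so `.invoke` accepts the tool's arguments as a dictionary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative direct call to the compression tool (the example prompt is synthetic).\n",
"compress_prompt_using_llmlingua.invoke(\n",
"    {\"prompt\": \"LLMLingua compresses long prompts. \" * 20, \"compression_rate\": 0.3}\n",
")"
]
},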
{
"cell_type": "code",
"execution_count": 53,
"metadata": {
"id": "AS8QmaKVjhbR"
},
"outputs": [],
"source": [
"tools = [\n",
" retriever_tool,\n",
" get_metadata_information_from_arxiv,\n",
" get_information_from_arxiv,\n",
" compress_prompt_using_llmlingua,\n",
"]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ueEn73nlliNr"
},
"source": [
"## Agent Prompt Creation"
]
},
{
"cell_type": "code",
"execution_count": 89,
"metadata": {
"id": "RY13DrVXFDrm"
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
"\n",
"agent_purpose = \"\"\"\n",
"You are a helpful research assistant equipped with various tools to assist with your tasks efficiently. \n",
"You have access to conversational history stored in your inpout as chat_history.\n",
"You are cost-effective and utilize the compress_prompt_using_llmlingua tool whenever you determine that a prompt or conversational history is too long. \n",
"Below are instructions on when and how to use each tool in your operations.\n",
"\n",
"1. get_metadata_information_from_arxiv\n",
"\n",
"Purpose: To fetch and return metadata for up to ten documents from arXiv that match a given query word.\n",
"When to Use: Use this tool when you need to gather metadata about multiple research papers related to a specific topic.\n",
"Example: If you are asked to provide an overview of recent papers on \"machine learning,\" use this tool to fetch metadata for relevant documents.\n",
"\n",
"2. get_information_from_arxiv\n",
"\n",
"Purpose: To fetch and return metadata for a single research paper from arXiv using the paper's ID.\n",
"When to Use: Use this tool when you need detailed information about a specific research paper identified by its arXiv ID.\n",
"Example: If you are asked to retrieve detailed information about the paper with the ID \"704.0001,\" use this tool.\n",
"\n",
"3. retriever_tool\n",
"\n",
"Purpose: To serve as your base knowledge, containing records of research papers from arXiv.\n",
"When to Use: Use this tool as the first step for exploration and research efforts when dealing with topics covered by the documents in the knowledge base.\n",
"Example: When beginning research on a new topic that is well-documented in the arXiv repository, use this tool to access the relevant papers.\n",
"\n",
"4. compress_prompt_using_llmlingua\n",
"\n",
"Purpose: To compress long prompts or conversational histories using the LLMLinguaCompressor.\n",
"When to Use: Use this tool whenever you determine that a prompt or conversational history is too long to be efficiently processed.\n",
"Example: If you receive a very lengthy query or conversation context that exceeds the typical token limits, compress it using this tool before proceeding with further processing.\n",
"\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", agent_purpose),\n",
" (\"human\", \"{input}\"),\n",
" MessagesPlaceholder(\"agent_scratchpad\"),\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "z4NU4ZjGl0WC"
},
"source": [
"## Agent Memory Using MongoDB"
]
},
{
"cell_type": "code",
"execution_count": 92,
"metadata": {
"id": "1A-3Fg1cjwyK"
},
"outputs": [],
"source": [
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory\n",
"\n",
"\n",
"def get_session_history(session_id: str) -> MongoDBChatMessageHistory:\n",
" return MongoDBChatMessageHistory(\n",
" MONGO_URI, session_id, database_name=DB_NAME, collection_name=\"history\"\n",
" )\n",
"\n",
"\n",
"memory = ConversationBufferMemory(\n",
" memory_key=\"chat_history\", chat_memory=get_session_history(\"latest_agent_session\")\n",
")"
]
},
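{
"cell_type": "markdown",
"metadata": {},
"source": [
"Because the buffer is backed by MongoDB, the persisted turns for a session can be inspected directly through the history object's `messages` property, as sketched below (empty on a first run)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Inspect the chat messages persisted in MongoDB for this session.\n",
"get_session_history(\"latest_agent_session\").messages"
]
},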
{
"cell_type": "markdown",
"metadata": {
"id": "O9TqMKyvKhvq"
},
"source": [
"## Agent Creation"
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {
"id": "wI4uBAmNF5ll"
},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"\n",
"agent = create_tool_calling_agent(llm, tools, prompt)\n",
"\n",
"agent_executor = AgentExecutor(\n",
" agent=agent,\n",
" tools=tools,\n",
" verbose=True,\n",
" handle_parsing_errors=True,\n",
" memory=memory,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RGB4pWTylmFy"
},
"source": [
"## Agent Exectution"
]
},
{
"cell_type": "code",
"execution_count": 94,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "DM8GtbjgIJXt",
"outputId": "328c36f6-b4a0-4a32-e7d6-b606ca044517"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `get_metadata_information_from_arxiv` with `{'word': 'Prompt Compression in LLM Applications'}`\n",
"\n",
"\n",
"\u001b[0m\u001b[33;1m\u001b[1;3m[{'Published': '2024-05-27', 'Title': 'SelfCP: Compressing Long Prompt to 1/12 Using the Frozen Large Language Model Itself', 'Authors': 'Jun Gao', 'Summary': 'Long prompt leads to huge hardware costs when using Large Language Models\\n(LLMs). Unfortunately, many tasks, such as summarization, inevitably introduce\\nlong task-inputs, and the wide application of in-context learning easily makes\\nthe prompt length explode. Inspired by the language understanding ability of\\nLLMs, this paper proposes SelfCP, which uses the LLM \\\\textbf{itself} to\\n\\\\textbf{C}ompress long \\\\textbf{P}rompt into compact virtual tokens. SelfCP\\napplies a general frozen LLM twice, first as an encoder to compress the prompt\\nand then as a decoder to generate responses. Specifically, given a long prompt,\\nwe place special tokens within the lengthy segment for compression and signal\\nthe LLM to generate $k$ virtual tokens. Afterward, the virtual tokens\\nconcatenate with the uncompressed prompt and are fed into the same LLM to\\ngenerate the response. In general, SelfCP facilitates the unconditional and\\nconditional compression of prompts, fitting both standard tasks and those with\\nspecific objectives. Since the encoder and decoder are frozen, SelfCP only\\ncontains 17M trainable parameters and allows for convenient adaptation across\\nvarious backbones. We implement SelfCP with two LLM backbones and evaluate it\\nin both in- and out-domain tasks. Results show that the compressed virtual\\ntokens can substitute $12 \\\\times$ larger original prompts effectively'}, {'Published': '2024-04-18', 'Title': 'Adapting LLMs for Efficient Context Processing through Soft Prompt Compression', 'Authors': 'Cangqing Wang, Yutian Yang, Ruisi Li, Dan Sun, Ruicong Cai, Yuzhu Zhang, Chengqian Fu, Lillian Floyd', 'Summary': \"The rapid advancement of Large Language Models (LLMs) has inaugurated a\\ntransformative epoch in natural language processing, fostering unprecedented\\nproficiency in text generation, comprehension, and contextual scrutiny.\\nNevertheless, effectively handling extensive contexts, crucial for myriad\\napplications, poses a formidable obstacle owing to the intrinsic constraints of\\nthe models' context window sizes and the computational burdens entailed by\\ntheir operations. This investigation presents an innovative framework that\\nstrategically tailors LLMs for streamlined context processing by harnessing the\\nsynergies among natural language summarization, soft prompt compression, and\\naugmented utility preservation mechanisms. Our methodology, dubbed\\nSoftPromptComp, amalgamates natural language prompts extracted from\\nsummarization methodologies with dynamically generated soft prompts to forge a\\nconcise yet semantically robust depiction of protracted contexts. This\\ndepiction undergoes further refinement via a weighting mechanism optimizing\\ninformation retention and utility for subsequent tasks. We substantiate that\\nour framework markedly diminishes computational overhead and enhances LLMs'\\nefficacy across various benchmarks, while upholding or even augmenting the\\ncaliber of the produced content. By amalgamating soft prompt compression with\\nsophisticated summarization, SoftPromptComp confronts the dual challenges of\\nmanaging lengthy contexts and ensuring model scalability. 
Our findings point\\ntowards a propitious trajectory for augmenting LLMs' applicability and\\nefficiency, rendering them more versatile and pragmatic for real-world\\napplications. This research enriches the ongoing discourse on optimizing\\nlanguage models, providing insights into the potency of soft prompts and\\nsummarization techniques as pivotal instruments for the forthcoming generation\\nof NLP solutions.\"}, {'Published': '2023-12-06', 'Title': 'LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models', 'Authors': 'Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, Lili Qiu', 'Summary': 'Large language models (LLMs) have been applied in various applications due to\
"\n",
"1. \"SelfCP: Compressing Long Prompt to 1/12 Using the Frozen Large Language Model Itself\" by Jun Gao\n",
"2. \"Adapting LLMs for Efficient Context Processing through Soft Prompt Compression\" by Cangqing Wang, Yutian Yang, Ruisi Li, Dan Sun, Ruicong Cai, Yuzhu Zhang, Chengqian Fu, Lillian Floyd\n",
"3. \"LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models\" by Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, Lili Qiu\n",
"4. \"Learning to Compress Prompt in Natural Language Formats\" by Yu-Neng Chuang, Tianwei Xing, Chia-Yuan Chang, Zirui Liu, Xun Chen, Xia Hu\n",
"5. \"PROMPT-SAW: Leveraging Relation-Aware Graphs for Textual Prompt Compression\"\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': 'Get me a list of research papers on the topic Prompt Compression in LLM Applications.',\n",
" 'chat_history': '',\n",
" 'output': 'Here are some research papers on the topic Prompt Compression in LLM Applications:\\n\\n1. \"SelfCP: Compressing Long Prompt to 1/12 Using the Frozen Large Language Model Itself\" by Jun Gao\\n2. \"Adapting LLMs for Efficient Context Processing through Soft Prompt Compression\" by Cangqing Wang, Yutian Yang, Ruisi Li, Dan Sun, Ruicong Cai, Yuzhu Zhang, Chengqian Fu, Lillian Floyd\\n3. \"LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models\" by Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, Lili Qiu\\n4. \"Learning to Compress Prompt in Natural Language Formats\" by Yu-Neng Chuang, Tianwei Xing, Chia-Yuan Chang, Zirui Liu, Xun Chen, Xia Hu\\n5. \"PROMPT-SAW: Leveraging Relation-Aware Graphs for Textual Prompt Compression\"'}"
]
},
"execution_count": 94,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke(\n",
" {\n",
" \"input\": \"Get me a list of research papers on the topic Prompt Compression in LLM Applications.\"\n",
" }\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 95,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "oBvTS8S0JUPb",
"outputId": "13fbb430-eb49-4b91-dd04-33bcc33ecc00"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3m\n",
"Invoking: `get_metadata_information_from_arxiv` with `{'word': 'chat history'}`\n",
"responded: I need to access the chat history to answer this question. \n",
"\n",
"\u001b[0m\u001b[33;1m\u001b[1;3m[{'Published': '2023-10-20', 'Title': 'Towards Detecting Contextual Real-Time Toxicity for In-Game Chat', 'Authors': 'Zachary Yang, Nicolas Grenan-Godbout, Reihaneh Rabbany', 'Summary': \"Real-time toxicity detection in online environments poses a significant\\nchallenge, due to the increasing prevalence of social media and gaming\\nplatforms. We introduce ToxBuster, a simple and scalable model that reliably\\ndetects toxic content in real-time for a line of chat by including chat history\\nand metadata. ToxBuster consistently outperforms conventional toxicity models\\nacross popular multiplayer games, including Rainbow Six Siege, For Honor, and\\nDOTA 2. We conduct an ablation study to assess the importance of each model\\ncomponent and explore ToxBuster's transferability across the datasets.\\nFurthermore, we showcase ToxBuster's efficacy in post-game moderation,\\nsuccessfully flagging 82.1% of chat-reported players at a precision level of\\n90.0%. Additionally, we show how an additional 6% of unreported toxic players\\ncan be proactively moderated.\"}, {'Published': '2021-07-13', 'Title': \"A First Look at Developers' Live Chat on Gitter\", 'Authors': 'Lin Shi, Xiao Chen, Ye Yang, Hanzhi Jiang, Ziyou Jiang, Nan Niu, Qing Wang', 'Summary': \"Modern communication platforms such as Gitter and Slack play an increasingly\\ncritical role in supporting software teamwork, especially in open source\\ndevelopment.Conversations on such platforms often contain intensive, valuable\\ninformation that may be used for better understanding OSS developer\\ncommunication and collaboration. However, little work has been done in this\\nregard. To bridge the gap, this paper reports a first comprehensive empirical\\nstudy on developers' live chat, investigating when they interact, what\\ncommunity structures look like, which topics are discussed, and how they\\ninteract. We manually analyze 749 dialogs in the first phase, followed by an\\nautomated analysis of over 173K dialogs in the second phase. We find that\\ndevelopers tend to converse more often on weekdays, especially on Wednesdays\\nand Thursdays (UTC), that there are three common community structures observed,\\nthat developers tend to discuss topics such as API usages and errors, and that\\nsix dialog interaction patterns are identified in the live chat communities.\\nBased on the findings, we provide recommendations for individual developers and\\nOSS communities, highlight desired features for platform vendors, and shed\\nlight on future research directions. We believe that the findings and insights\\nwill enable a better understanding of developers' live chat, pave the way for\\nother researchers, as well as a better utilization and mining of knowledge\\nembedded in the massive chat history.\"}, {'Published': '2022-02-28', 'Title': 'MSCTD: A Multimodal Sentiment Chat Translation Dataset', 'Authors': 'Yunlong Liang, Fandong Meng, Jinan Xu, Yufeng Chen, Jie Zhou', 'Summary': 'Multimodal machine translation and textual chat translation have received\\nconsiderable attention in recent years. Although the conversation in its\\nnatural form is usually multimodal, there still lacks work on multimodal\\nmachine translation in conversations. In this work, we introduce a new task\\nnamed Multimodal Chat Translation (MCT), aiming to generate more accurate\\ntranslations with the help of the associated dialogue history and visual\\ncontext. 
To this end, we firstly construct a Multimodal Sentiment Chat\\nTranslation Dataset (MSCTD) containing 142,871 English-Chinese utterance pairs\\nin 14,762 bilingual dialogues and 30,370 English-German utterance pairs in\\n3,079 bilingual dialogues. Each utterance pair, corresponding to the visual\\ncontext that reflects the current conversational scene, is annotated with a\\nsentiment label. Then, we benchmark the task by establishing multiple baseline\\nsystems that incorporate multimodal and sentiment features for MCT. Preliminary\\nexperiments on four language directions (English-Chinese and English-German)\\n
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'input': 'What paper did we speak about from our chat history?',\n",
" 'chat_history': 'Human: Get me a list of research papers on the topic Prompt Compression in LLM Applications.\\nAI: Here are some research papers on the topic Prompt Compression in LLM Applications:\\n\\n1. \"SelfCP: Compressing Long Prompt to 1/12 Using the Frozen Large Language Model Itself\" by Jun Gao\\n2. \"Adapting LLMs for Efficient Context Processing through Soft Prompt Compression\" by Cangqing Wang, Yutian Yang, Ruisi Li, Dan Sun, Ruicong Cai, Yuzhu Zhang, Chengqian Fu, Lillian Floyd\\n3. \"LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models\" by Huiqiang Jiang, Qianhui Wu, Chin-Yew Lin, Yuqing Yang, Lili Qiu\\n4. \"Learning to Compress Prompt in Natural Language Formats\" by Yu-Neng Chuang, Tianwei Xing, Chia-Yuan Chang, Zirui Liu, Xun Chen, Xia Hu\\n5. \"PROMPT-SAW: Leveraging Relation-Aware Graphs for Textual Prompt Compression\"',\n",
" 'output': 'The paper we spoke about from our chat history is \"ToxBuster: In-game Chat Toxicity Buster with BERT\" by Zachary Yang, Yasmine Maricar, MohammadReza Davari, Nicolas Grenon-Godbout, and Reihaneh Rabbany.'}"
]
},
"execution_count": 95,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"agent_executor.invoke({\"input\": \"What paper did we speak about from our chat history?\"})"
]
}
],
"metadata": {
"colab": {
"collapsed_sections": [
"RM8rg08YhqZe",
"UUf3jtFzO4-V",
"Sm5QZdshwJLN"
],
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.12.2"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"09dcf4ce88064f11980bbefaad1ebc75": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_39563df9477648398456675ec51075aa",
"placeholder": "",
"style": "IPY_MODEL_f4353368efbd4c3891f805ddc3d05e1b",
"value": "Downloadingdata:100%"
}
},
"164d16df28d24ab796b7c9cf85174800": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_95e4af5b420242b7a6b74a18cad98961",
"placeholder": "",
"style": "IPY_MODEL_dff65b579f0746ffae8739ecb0aa5a41",
"value": "Generatingtrainsplit:"
}
},
"20d693a09c534414a5c4c0dd58cf94ed": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"278513c5a8b04a24b1823d38107f1e50": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_62e196b6d30746578e137c50b661f946",
"placeholder": "",
"style": "IPY_MODEL_ced7f9d61e06442a960dcda95852048e",
"value": "102M/102M[00:06&lt;00:00,20.6MB/s]"
}
},
"30fe0bcd02cb47f3ba23bb480e2eaaea": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"373ed3b6307741859ab297c270cf42c8": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"39563df9477648398456675ec51075aa": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"41056c822b9d44559147d2b21416b956": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_a43c349d171e469c8cc94d48060f775b",
"placeholder": "",
"style": "IPY_MODEL_373ed3b6307741859ab297c270cf42c8",
"value": "50000/0[00:04&lt;00:00,12390.43examples/s]"
}
},
"62e196b6d30746578e137c50b661f946": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"7dbfebff68ff45628da832fac5233c93": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_164d16df28d24ab796b7c9cf85174800",
"IPY_MODEL_e70e0d317f1e4e73bd95349ed1510cce",
"IPY_MODEL_41056c822b9d44559147d2b21416b956"
],
"layout": "IPY_MODEL_b1929fb112174c0abcd8004f6be0f880"
}
},
"95e4af5b420242b7a6b74a18cad98961": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a43c349d171e469c8cc94d48060f775b": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b1929fb112174c0abcd8004f6be0f880": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"cebfba144ba6418092df949783f93455": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_09dcf4ce88064f11980bbefaad1ebc75",
"IPY_MODEL_f2bd7bda4d0c4d93b88e53aeb4e1b62d",
"IPY_MODEL_278513c5a8b04a24b1823d38107f1e50"
],
"layout": "IPY_MODEL_d3941c633788427abb858b21e285088f"
}
},
"ced7f9d61e06442a960dcda95852048e": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"d17d8c8f45ee44cd87dcd787c05dbdc3": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"d3941c633788427abb858b21e285088f": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"dff65b579f0746ffae8739ecb0aa5a41": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"e70e0d317f1e4e73bd95349ed1510cce": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_f73ae771c24645c79fd41409a8fc7b34",
"max": 1,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_20d693a09c534414a5c4c0dd58cf94ed",
"value": 1
}
},
"f2bd7bda4d0c4d93b88e53aeb4e1b62d": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_30fe0bcd02cb47f3ba23bb480e2eaaea",
"max": 102202622,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_d17d8c8f45ee44cd87dcd787c05dbdc3",
"value": 102202622
}
},
"f4353368efbd4c3891f805ddc3d05e1b": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"f73ae771c24645c79fd41409a8fc7b34": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": "20px"
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}