From 6de1ca4251e38107eea688de570525e36e04c84a Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 02:02:01 -0400 Subject: [PATCH 01/65] Imported changes from repo VowpalWabbit/rl_chain into rl_chain directory --- .../rl_chain/.github/workflows/unit_tests.yml | 23 + .../langchain/chains/rl_chain/.gitignore | 6 + .../langchain/chains/rl_chain/LICENSE | 21 + .../langchain/chains/rl_chain/README.md | 25 + .../chains/rl_chain/prompt_selection.ipynb | 364 ++++++++++++ .../chains/rl_chain/requirements.txt | 7 + .../langchain/chains/rl_chain/rl_chain.ipynb | 403 +++++++++++++ .../chains/rl_chain/rl_chain/__init__.py | 33 ++ .../chains/rl_chain/rl_chain/metrics.py | 27 + .../rl_chain/rl_chain/model_repository.py | 53 ++ .../rl_chain/rl_chain/pick_best_chain.py | 284 +++++++++ .../chains/rl_chain/rl_chain/rl_chain_base.py | 551 ++++++++++++++++++ .../chains/rl_chain/rl_chain/slates_chain.py | 275 +++++++++ .../chains/rl_chain/rl_chain/vw_logger.py | 18 + .../tests/test_pick_best_chain_call.py | 289 +++++++++ .../tests/test_pick_best_text_embedder.py | 334 +++++++++++ .../tests/test_rl_chain_base_embedder.py | 410 +++++++++++++ .../tests/test_slates_text_embedder.py | 124 ++++ .../chains/rl_chain/tests/test_utils.py | 14 + 19 files changed, 3261 insertions(+) create mode 100644 libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml create mode 100644 libs/langchain/langchain/chains/rl_chain/.gitignore create mode 100644 libs/langchain/langchain/chains/rl_chain/LICENSE create mode 100644 libs/langchain/langchain/chains/rl_chain/README.md create mode 100644 libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb create mode 100644 libs/langchain/langchain/chains/rl_chain/requirements.txt create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/__init__.py create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/metrics.py create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/model_repository.py create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/pick_best_chain.py create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/rl_chain_base.py create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py create mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/vw_logger.py create mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py create mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py create mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py create mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py create mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_utils.py diff --git a/libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml b/libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml new file mode 100644 index 0000000000..029646a11e --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml @@ -0,0 +1,23 @@ +name: Unit Tests + +on: + push: + branches: + - main + pull_request: + branches: + - '*' + +jobs: + python-unit-test: + container: + image: python:3.8 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Run Tests + shell: bash + run: | + pip install -r requirements.txt + pip install pytest + python -m pytest tests/ \ No newline at 
end of file diff --git a/libs/langchain/langchain/chains/rl_chain/.gitignore b/libs/langchain/langchain/chains/rl_chain/.gitignore new file mode 100644 index 0000000000..0845b27a84 --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/.gitignore @@ -0,0 +1,6 @@ +**/__pycache__/** +models/* +logs/* +**/*.vw +.venv + diff --git a/libs/langchain/langchain/chains/rl_chain/LICENSE b/libs/langchain/langchain/chains/rl_chain/LICENSE new file mode 100644 index 0000000000..a1c616b972 --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Vowpal Wabbit + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/libs/langchain/langchain/chains/rl_chain/README.md b/libs/langchain/langchain/chains/rl_chain/README.md new file mode 100644 index 0000000000..76028002ad --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/README.md @@ -0,0 +1,25 @@ +# VW in a langchain chain + +Install `requirements.txt` + +[VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) + +There is an example notebook (rl_chain.ipynb) with basic usage of the chain. 
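+
+A minimal usage sketch (illustrative; assumes `llm` and a prompt with an `{action}` input variable are already defined, as in the notebooks):
+
+```python
+import rl_chain
+
+chain = rl_chain.PickBest.from_llm(llm=llm, prompt=prompt)
+response = chain.run(
+    action=rl_chain.ToSelectFrom(["an action", "another action"]),
+    user=rl_chain.BasedOn("user context"),
+)
+print(response["response"])
+```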
+
+TLDR:
+
+- The chain is initialized and creates a Vowpal Wabbit instance - only Contextual Bandits and Slates are supported for now
+- You can change the arguments at chain creation time
+- There is a default prompt but it can be changed
+- There is a default reward function that scores the response and triggers learning automatically
+  - This can be turned off and the score can be specified explicitly
+
+Flow:
+
+- Developer: creates chain
+- Developer: sets actions
+- Developer: calls chain with context and other prompt inputs
+- Chain: calls VW with the context and selects an action
+- Chain: action (and other vars) are passed to the LLM with the prompt
+- Chain: if the default reward is set, the LLM is called to judge the response and give it a reward score based on the context
+- Chain: VW learn is triggered with that score
diff --git a/libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb b/libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb
new file mode 100644
index 0000000000..0caea8b446
--- /dev/null
+++ b/libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb
@@ -0,0 +1,364 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Prepare core llm chain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import langchain\n",
+    "langchain.debug = False # set to True if you want to see what the LLM is doing\n",
+    "\n",
+    "from langchain.chat_models import AzureChatOpenAI\n",
+    "\n",
+    "import dotenv\n",
+    "dotenv.load_dotenv()\n",
+    "\n",
+    "llm = AzureChatOpenAI(\n",
+    "    deployment_name=\"gpt-35-turbo\",\n",
+    "    temperature=0,\n",
+    "    request_timeout=20,\n",
+    "    max_retries=1,\n",
+    "    client=None,\n",
+    ")\n",
+    "\n",
+    "llm.predict('Are you ready?')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Vanilla LLMChain"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.chains.llm import LLMChain\n",
+    "from langchain.prompts.prompt import PromptTemplate\n",
+    "\n",
+    "llm_chain = LLMChain(\n",
+    "    llm = llm,\n",
+    "    prompt = PromptTemplate(\n",
+    "        input_variables=[\"adjective\", \"content\", \"topic\"],\n",
+    "        template=\"Hi, please create {adjective} {content} about {topic}.\",\n",
+    "    ))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm_chain.run(\n",
+    "    adjective = \"funny\",\n",
+    "    content = \"poem\",\n",
+    "    topic = \"machine learning\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Variable selection"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import rl_chain\n",
+    "from langchain.prompts.prompt import PromptTemplate\n",
+    "\n",
+    "llm_chain = rl_chain.SlatesPersonalizerChain.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt = PromptTemplate(\n",
+    "        input_variables=[\"adjective\", \"content\", \"topic\"],\n",
+    "        template=\"Hi, please create {adjective} {content} about {topic}\",\n",
+    "    ))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "r = llm_chain.run(\n",
+    "    adjective = rl_chain.ToSelectFrom([\"funny\"]),\n",
+    "    content = rl_chain.ToSelectFrom([\"poem\"]),\n",
+    "    topic = rl_chain.ToSelectFrom([\"machine learning\"]))\n",
+    "\n",
+    "print(r[\"response\"])\n",
+    "print(r[\"selection_metadata\"].to_select_from)\n",
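+    "# based_on and the eventual score are also recorded on the same event object\n",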
"print(r[\"selection_metadata\"].based_on)\n", + "print(r[\"selection_metadata\"].selected.score)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm_chain.update_with_delayed_score(score=1, event=r[\"selection_metadata\"], force_score=True)\n", + "print(r[\"selection_metadata\"].selected.score)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It is ok to be uncertain about certain variable values" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm_chain.run(\n", + " adjective = rl_chain.ToSelectFrom([\"funny\", \"scary\"]),\n", + " content = rl_chain.ToSelectFrom([\"poem\"]),\n", + " topic = rl_chain.ToSelectFrom([\"machine learning\", \"cats\"]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Full loop" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import rl_chain\n", + "from langchain.prompts.prompt import PromptTemplate\n", + "from langchain.prompts import (\n", + " ChatPromptTemplate,\n", + " HumanMessagePromptTemplate,\n", + ")\n", + "\n", + "template = \"\"\"\n", + "using style {style}\n", + "\n", + "{prefix}\n", + "{goal}: {context}.\n", + "{suffix}\n", + "\"\"\"\n", + "prompt = PromptTemplate(\n", + " input_variables=[\"prefix\", \"goal\", \"context\", \"suffix\", \"style\"],\n", + " template=template,\n", + ")\n", + "chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", + " llm=llm,\n", + " vw_logs = 'logs/stories.txt',\n", + " model_save_dir=\"./models\", # where to save the model checkpoints\n", + " prompt = prompt,\n", + " selection_scorer = rl_chain.AutoSelectionScorer(\n", + " llm=llm,\n", + " scoring_criteria_template_str = '''Given the task:\n", + " {goal}: {context}\n", + " rank how good or bad this response is:\n", + " {llm_response}.''',\n", + " ),\n", + " metrics_step=1\n", + ")\n", + "\n", + "chain.run(\n", + " prefix = rl_chain.ToSelectFrom([f'ALWAYS DO EXACTLY WHAT I ASK YOU!', 'Please do your best to help me.']),\n", + " goal = rl_chain.ToSelectFrom(['Write a funny story about']),\n", + " context = rl_chain.ToSelectFrom(['Friends series']),\n", + " suffix = rl_chain.ToSelectFrom(['Please try to be as funny as possible.', '']),\n", + " style = \"Shakespeare\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import rl_chain\n", + "from langchain.prompts.prompt import PromptTemplate\n", + "\n", + "template = \"\"\"\n", + "{prefix}\n", + "{goal}: {context}.\n", + "{suffix}\n", + "\"\"\"\n", + "prompt = PromptTemplate(\n", + " input_variables=[\"prefix\", \"goal\", \"context\", \"suffix\"],\n", + " template=template,\n", + ")\n", + "chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", + " llm=llm,\n", + " vw_logs = 'logs/stories.txt',\n", + " model_save_dir=\"./models\", # where to save the model checkpoints\n", + " prompt = prompt,\n", + " selection_scorer = rl_chain.AutoSelectionScorer(\n", + " llm=llm,\n", + " scoring_criteria_template_str = '''Given the task:\n", + " {goal}: {context}\n", + " rank how good or bad this response is:\n", + " {llm_response}.'''\n", + " ),\n", + " metrics_step=1\n", + ")\n", + "chain.run(\n", + " prefix = rl_chain.ToSelectFrom(rl_chain.Embed([f'ALWAYS DO EXACTLY WHAT I ASK YOU!', 'Please do your best to help me.'])),\n", + " goal = rl_chain.ToSelectFrom([rl_chain.Embed('Write a funny story about')]),\n", + " 
context = rl_chain.ToSelectFrom(['Friends series']),\n", + " suffix = rl_chain.ToSelectFrom(['Please try to be as funny as possible.', '']))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Experiment with mock llm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import List\n", + "from tests.test_utils import MockScorer\n", + "\n", + "class MockLLMChain:\n", + " outcomes: List[List[float]] = None\n", + " \n", + " def __init__(self, outcomes, prompt):\n", + " self.outcomes = outcomes\n", + " self.prompt = prompt\n", + "\n", + " def run(self, prefix, suffix, **kwargs):\n", + " return str(self.outcomes[int(prefix)][int(suffix)])\n", + "\n", + "import rl_chain\n", + "from langchain.prompts.prompt import PromptTemplate\n", + "\n", + "template = \"\"\"\n", + "{prefix}\n", + "{context}\n", + "{suffix}\n", + "\"\"\"\n", + "prompt = PromptTemplate(\n", + " input_variables=[\"prefix\", \"context\", \"suffix\"],\n", + " template=template,\n", + ")\n", + "chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", + " llm=llm,\n", + " vw_logs = 'logs/mock.txt',\n", + " model_save_dir=\"./models\", # where to save the model checkpoints\n", + " prompt = prompt,\n", + " selection_scorer = MockScorer(),\n", + " metrics_step=1\n", + ")\n", + "chain.llm_chain = MockLLMChain([\n", + " [0, 0.3],\n", + " [0.6, 0.9]], prompt = prompt)\n", + "chain.run(\n", + " prefix = rl_chain.ToSelectFrom(['0', '1']),\n", + " context = rl_chain.ToSelectFrom(['bla']),\n", + " suffix = rl_chain.ToSelectFrom(['0', '1']))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import rl_chain\n", + "from matplotlib import pyplot as plt\n", + "\n", + "vw_chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", + " llm=llm,\n", + " vw_logs = 'logs/mock.txt',\n", + " model_save_dir=\"./models\", # where to save the model checkpoints\n", + " prompt = prompt,\n", + " policy = rl_chain.VwPolicy,\n", + " selection_scorer = MockScorer(),\n", + " auto_embed=False,\n", + " metrics_step=1\n", + ")\n", + "vw_chain.llm_chain = MockLLMChain([\n", + " [0, 0.3],\n", + " [0.6, 0.9]], prompt = prompt)\n", + "\n", + "rnd_chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", + " llm=llm,\n", + " vw_logs = 'logs/mock.txt',\n", + " model_save_dir=\"./models\", # where to save the model checkpoints\n", + " prompt = prompt,\n", + " policy = rl_chain.SlatesRandomPolicy,\n", + " selection_scorer = MockScorer(),\n", + " auto_embed=False,\n", + " metrics_step=1\n", + ")\n", + "rnd_chain.llm_chain = MockLLMChain([\n", + " [0, 0.3],\n", + " [0.6, 0.9]], prompt = prompt)\n", + "\n", + "for i in range(1000):\n", + " vw_chain.run(\n", + " prefix = rl_chain.ToSelectFrom(['0', '1']),\n", + " context = rl_chain.ToSelectFrom(['bla']),\n", + " suffix = rl_chain.ToSelectFrom(['0']))\n", + " rnd_chain.run(\n", + " prefix = rl_chain.ToSelectFrom(['0', '1']),\n", + " context = rl_chain.ToSelectFrom(['bla']),\n", + " suffix = rl_chain.ToSelectFrom(['0']))\n", + "\n", + "vw_chain.metrics.to_pandas()['score'].plot(label=\"vw\")\n", + "rnd_chain.metrics.to_pandas()['score'].plot(label=\"slates\")\n", + "plt.legend()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/langchain/langchain/chains/rl_chain/requirements.txt b/libs/langchain/langchain/chains/rl_chain/requirements.txt new file mode 100644 index 0000000000..faf213caed --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/requirements.txt @@ -0,0 +1,7 @@ +vowpal-wabbit-next +langchain +openai +sentence_transformers +pandas +numpy +matplotlib diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb b/libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb new file mode 100644 index 0000000000..c39661d0cb --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb @@ -0,0 +1,403 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class MealPlanner:\n", + " def __init__(self, name: str, desc: str, difficulty: str, tags: str):\n", + " try:\n", + " self.name = name\n", + " self.desc = desc\n", + " self.diff = difficulty\n", + " self.tags = tags\n", + " except:\n", + " print(name)\n", + " raise ValueError" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "## Actions\n", + "## examples copied from hello fresh website\n", + "actions = [\n", + " MealPlanner(name=\"One-Pan Beef Enchiladas Verdes with Mexican Cheese Blend & Hot Sauce Crema\", difficulty=\"Easy\", tags=\"Spicy, Easy Cleanup, Easy Prep\", desc=\"When it comes to Mexican-style cuisine, burritos typically get all the glory. In our humble opinion, enchiladas are an unsung dinner hero. They’re technically easier-to-assemble burritos that get smothered in a delicious sauce, but they’re really so much more than that! Ours start with spiced beef and charred green pepper that get rolled up in warm tortillas. This winning combo gets topped with tangy salsa verde and cheese, then baked until bubbly and melty. Hear that? That’s the sound of the dinner bell!\"),\n", + " MealPlanner(name=\"Chicken & Mushroom Flatbreads with Gouda Cream Sauce & Parmesan\", difficulty=\"Easy\", tags=\"\", desc=\"Yes we love our simple cheese pizza with red sauce but tonight, move over, marinara—there’s a new sauce in town. In this recipe, crispy flatbreads are slathered with a rich, creamy gouda-mustard sauce we just can’t get enough of. We top that off with a pile of caramelized onion and earthy cremini mushrooms. Shower with Parmesan, and that’s it. Simple, satisfying, and all in 30 minutes–a dinner idea you can’t pass up!\"),\n", + " MealPlanner(name=\"Sweet Potato & Pepper Quesadillas with Southwest Crema & Tomato Salsa\", difficulty=\"Easy\", tags=\"Veggie\", desc=\"This quesadilla is jam-packed with flavorful roasted sweet potato and green pepper, plus two types of gooey, melty cheese (how could we choose just one?!). Of course, we’d never forget the toppings—there’s a fresh tomato salsa and dollops of spiced lime crema. Now for the fun part: piling on a little bit of everything to construct the perfect bite!\"),\n", + " MealPlanner(name=\"One-Pan Trattoria Tortelloni Bake with a Crispy Parmesan Panko Topping\", difficulty=\"Easy\", tags=\"Veggie, Easy Cleanup, Easy Prep\", desc=\"Think a cheesy stuffed pasta can’t get any better? What about baking it in a creamy sauce with a crispy topping? In this recipe, we toss cheese-stuffed tortelloni in an herby tomato cream sauce, then top with Parmesan and panko breadcrumbs. 
Once broiled, it turns into a showstopping topping that’ll earn you plenty of oohs and aahs from your lucky fellow diners.\"),\n",
+    "]\n",
+    "\n",
+    "meals = [f'title={action.name.replace(\":\", \"\").replace(\"|\", \"\")}' for action in actions]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.chat_models import AzureChatOpenAI\n",
+    "import langchain\n",
+    "langchain.debug = False\n",
+    "# assuming LLM api keys have been set in the environment\n",
+    "# you can use whatever LLM you want here, it doesn't have to be AzureChatOpenAI\n",
+    "\n",
+    "llm = AzureChatOpenAI(\n",
+    "    deployment_name=\"gpt-35-turbo\",\n",
+    "    temperature=0,\n",
+    "    request_timeout=10,\n",
+    "    max_retries=1,\n",
+    "    client=None,\n",
+    ")\n",
+    "\n",
+    "llm.predict('Are you ready?')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### default chain, default reward (the LLM is used to judge and rank the response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import rl_chain\n",
+    "from langchain.prompts import PromptTemplate\n",
+    "\n",
+    "import logging\n",
+    "logger = logging.getLogger(\"rl_chain\")\n",
+    "logger.setLevel(logging.INFO)\n",
+    "\n",
+    "_PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: {meal}.\n",
+    "\n",
+    "You have to embed this into the given text where it makes sense. Here is the given text: {text_to_personalize}.\n",
+    "\n",
+    "\"\"\"\n",
+    "\n",
+    "PROMPT = PromptTemplate(\n",
+    "    input_variables=[\"meal\", \"text_to_personalize\"], template=_PROMPT_TEMPLATE\n",
+    ")\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = chain.run(\n",
+    "    meal = rl_chain.ToSelectFrom(meals),\n",
+    "    User = rl_chain.BasedOn(\"Tom Hanks\"),\n",
+    "    preference = rl_chain.BasedOn(\"Vegetarian, regular dairy is ok\"),\n",
+    "    text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    ")\n",
+    "\n",
+    "print(response[\"response\"])\n",
+    "rr = response[\"selection_metadata\"]\n",
+    "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.prompts.prompt import PromptTemplate\n",
+    "\n",
+    "_OTHER_PROMPT_TEMPLATE = \"\"\"You can use the actions that were chosen by VW like so: {action}.\n",
+    "\n",
+    "And use whatever other vars you want to pass into the chain at run: {some_text}. And {some_other_text}\n",
+    "\n",
+    "\"\"\"\n",
+    "\n",
+    "\n",
+    "OTHER_PROMPT = PromptTemplate(\n",
+    "    input_variables=[\"action\", \"some_text\", \"some_other_text\"],\n",
+    "    template=_OTHER_PROMPT_TEMPLATE,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import rl_chain.pick_best_chain\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    model_save_dir=\"./models\", # where to save the model checkpoints\n",
+    "    prompt=OTHER_PROMPT,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = chain.run(\n",
+    "    some_text = \"This is some text\",\n",
+    "    some_other_text = \"This is some other text\",\n",
+    "    action=rl_chain.ToSelectFrom([\"an action\", \"another action\", \"a third action\"]),\n",
+    "    User = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn(\"Vegetarian\")\n",
+    ")\n",
+    "\n",
+    "print(response[\"response\"])\n",
+    "rr = response[\"selection_metadata\"]\n",
+    "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### actions and context with multiple namespaces"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# each action is a dictionary of namespace to action string\n",
+    "# this example here shows that while embedding is recommended for all features, it is not required and can be customized\n",
+    "action_strs_w_ns = [{\"A\":\"an action feature\", \"B\" : rl_chain.Embed(\"another action feature\")}, {\"B\": \"another action\"}, {\"C\":\"a third action\"}]\n",
+    "\n",
+    "inputs = {\n",
+    "    \"some_text\": \"This is some text\",\n",
+    "    \"some_other_text\": \"This is some other text\",\n",
+    "    \"action\" : rl_chain.ToSelectFrom(action_strs_w_ns)\n",
+    "}\n",
+    "\n",
+    "inputs[\"User\"] = rl_chain.BasedOn(\"Tom\")\n",
+    "inputs[\"preference\"] = rl_chain.BasedOn(rl_chain.Embed(\"Vegetarian\"))\n",
+    "response = chain.run(inputs)\n",
+    "print(response[\"response\"])\n",
+    "rr = response[\"selection_metadata\"]\n",
+    "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### chain with default prompt and custom reward prompt (the LLM is used to judge and rank the response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.llms import OpenAI\n",
+    "\n",
+    "llm = OpenAI(engine=\"text-davinci-003\")\n",
+    "\n",
+    "llm('Are you ready?')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import rl_chain\n",
+    "\n",
+    "human_template = \"Given {preference} rank how good or bad this selection is {action}\"\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=OTHER_PROMPT,\n",
+    "    model_save_dir=\"./models\", # where to save the model checkpoints\n",
+    "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=human_template),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "actions = [\"an action\", \"another action\", \"a third action\"]\n",
+    "\n",
+    "response = chain.run(\n",
+    "    some_text = \"Some text\",\n",
+    "    some_other_text = \"Some other text\",\n",
+    "    action=rl_chain.ToSelectFrom(actions),\n",
+    "    User = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn(\"Vegetarian\"),\n",
+    ")\n",
+    "print(response[\"response\"])\n",
+    "rr = response[\"selection_metadata\"]\n",
+    "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.prompts.prompt import PromptTemplate\n",
+    "\n",
+    "_REWARD_PROMPT_TEMPLATE = \"\"\"Given {preference} rank how good or bad this selection is {action}, IMPORTANT: you MUST return a single number between 0 and 1, 0 being bad, 1 being good\"\"\"\n",
+    "\n",
+    "\n",
+    "REWARD_PROMPT = PromptTemplate(\n",
+    "    input_variables=[\"preference\", \"action\"],\n",
+    "    template=_REWARD_PROMPT_TEMPLATE,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import rl_chain\n",
+    "\n",
+    "human_template = \"Given {preference} rank how good or bad this selection is {action}\"\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=OTHER_PROMPT,\n",
+    "    model_save_dir=\"./models\", # where to save the model checkpoints\n",
+    "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "actions = [\"an action\", \"another action\", \"a third action\"]\n",
+    "\n",
+    "response = chain.run(\n",
+    "    some_text = \"Some text\",\n",
+    "    some_other_text = \"Some other text\",\n",
+    "    action=rl_chain.ToSelectFrom(actions),\n",
+    "    User = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn(\"Vegetarian\"),\n",
+    ")\n",
+    "print(response[\"response\"])\n",
+    "rr = response[\"selection_metadata\"]\n",
+    "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### other reward options\n",
+    "\n",
+    "custom reward class"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# a custom reward class/function is defined by inheriting from SelectionScorer and implementing the score_response method\n",
+    "import rl_chain\n",
+    "\n",
+    "class CustomSelectionScorer(rl_chain.SelectionScorer):\n",
+    "    # grade or score the response\n",
+    "    def score_response(\n",
+    "        self, inputs, llm_response: str\n",
+    "    ) -> float:\n",
+    "        # do whatever you want here, use whatever inputs you supplied and return reward\n",
+    "        reward = 1.0\n",
+    "        return reward\n",
+    "    \n",
+    "# set this in the chain during construction (selection_scorer=CustomSelectionScorer()) and it will be auto-called"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Asynchronous user defined reward"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import rl_chain\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=None)\n",
+    "\n",
+    "# whenever you have the reward for the call, send it back to the chain to learn from\n",
+    "\n",
+    "response = chain.run(text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    "                     meal = rl_chain.ToSelectFrom(meals),\n",
+    "                     User = rl_chain.BasedOn(rl_chain.Embed(\"Tom\")),\n",
+    "                     preference = rl_chain.BasedOn(\"Vegetarian\")\n",
+    ")\n",
+    "print(response[\"response\"])\n",
+    "rr = response[\"selection_metadata\"]\n",
+    "# score should be None here because automatic scoring was turned off (selection_scorer=None)\n",
+    "print(f\"score: {rr.selected.score}, action: {rr.selected.index}, probability: {rr.selected.probability}, \")\n",
+    "\n",
+    "# learn delayed score/grade\n",
+    "chain.update_with_delayed_score(score=1.0, event=rr)\n",
+    "\n",
+    "print(f\"score: {rr.selected.score}\")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/__init__.py
new file mode 100644
index 0000000000..1d9c216cad
--- /dev/null
+++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/__init__.py
@@ -0,0 +1,33 @@
+from .pick_best_chain import PickBest
+from .slates_chain import (
+    SlatesPersonalizerChain,
+    SlatesRandomPolicy,
+    SlatesFirstChoicePolicy,
+)
+from .rl_chain_base import (
+    Embed,
+    BasedOn,
+    ToSelectFrom,
+    SelectionScorer,
+    AutoSelectionScorer,
+    Embedder,
+    Policy,
+    VwPolicy,
+)
+
+import logging
+
+
+def configure_logger():
+    logger = logging.getLogger(__name__)
+    logger.setLevel(logging.INFO)
+    ch = logging.StreamHandler()
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+    ch.setFormatter(formatter)
+    ch.setLevel(logging.INFO)
+    logger.addHandler(ch)
+
+
+configure_logger()
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/metrics.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/metrics.py
new file mode 100644
index 0000000000..eefc6bc4de
--- /dev/null
+++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/metrics.py
@@ -0,0 +1,27 @@
+import pandas as pd
+from typing import Optional
+
+
+class MetricsTracker:
+    def __init__(self, step: int):
+        self._history = []
+        self._step = step
+        self._i = 0
+        self._num = 0
+        self._denom = 0
+
+    @property
+    def score(self) -> float:
+        return self._num / self._denom if self._denom > 0 else 0
+
+    def on_decision(self) -> None:
+        self._denom += 1
+
+    def on_feedback(self, score: Optional[float]) -> None:
+        self._num += score or 0
+        self._i += 1
+        if self._step > 0 and self._i % self._step == 0:
+            self._history.append({"step": self._i, "score": self.score})
+
+    def to_pandas(self) -> pd.DataFrame:
+        return pd.DataFrame(self._history)
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/model_repository.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/model_repository.py
new file mode 100644
index 0000000000..3f3f4c1063
--- /dev/null
+++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/model_repository.py
@@ -0,0 +1,53 @@
+from pathlib import Path
+import shutil
+import datetime
+import vowpal_wabbit_next as vw
+from typing import Union, Sequence
+import os
+import glob
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ModelRepository:
+    def __init__(
+        self,
+        folder: Union[str, os.PathLike],
+        with_history: bool = True,
+        reset: bool = False,
+    ):
+        self.folder = Path(folder)
+        self.model_path = self.folder / "latest.vw"
+        self.with_history = with_history
+        if reset and self.has_history():
+            logger.warning(
+                "There is a non-empty checkpoint history; it is recommended to clean it up before resetting"
+            )
+            if self.model_path.exists():
+                os.remove(self.model_path)
+
+        self.folder.mkdir(parents=True, exist_ok=True)
+
+    def get_tag(self) -> str:
+        return datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+
+    def has_history(self) -> bool:
+        return len(glob.glob(str(self.folder / "model-????????-??????.vw"))) > 0
+
+    def save(self, workspace: vw.Workspace) -> None:
+        with open(self.model_path, "wb") as f:
+            logger.info(f"storing rl_chain model in: {self.model_path}")
+            f.write(workspace.serialize())
+        if self.with_history:  # write a timestamped checkpoint as well
+            shutil.copyfile(self.model_path, self.folder / f"model-{self.get_tag()}.vw")
+
+    def load(self, commandline: Sequence[str]) -> vw.Workspace:
+        model_data = None
+        if self.model_path.exists():
+            with open(self.model_path, "rb") as f:
+                model_data = f.read()
+        if model_data:
+            logger.info(f"rl_chain model is loaded from: {self.model_path}")
+            return vw.Workspace(commandline, model_data=model_data)
+        return vw.Workspace(commandline)
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/pick_best_chain.py
new file mode 100644
index 0000000000..6fe8e828a0
--- /dev/null
+++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/pick_best_chain.py
@@ -0,0 +1,284 @@
+from __future__ import annotations
+
+from . import rl_chain_base as base
+
+from langchain.callbacks.manager import CallbackManagerForChainRun
+from langchain.chains.base import Chain
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+from langchain.base_language import BaseLanguageModel
+from langchain.chains.llm import LLMChain
+from sentence_transformers import SentenceTransformer
+from langchain.prompts import BasePromptTemplate
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+# sentinel object used to distinguish between user didn't supply anything or user explicitly supplied None
+SENTINEL = object()
+
+
+class PickBestFeatureEmbedder(base.Embedder):
+    """
+    Contextual Bandit Text Embedder class that embeds the based_on and to_select_from into a format that can be used by VW
+
+    Attributes:
+        model (Any, optional): The model to be used for feature representation. Defaults to the BERT SentenceTransformer.
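+
+    Example (an illustrative sketch; any SentenceTransformer model name can be substituted):
+        from sentence_transformers import SentenceTransformer
+
+        embedder = PickBestFeatureEmbedder(model=SentenceTransformer("bert-base-nli-mean-tokens"))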
+ """ + + def __init__(self, model: Optional[Any] = None, *args, **kwargs): + super().__init__(*args, **kwargs) + + if model is None: + model = SentenceTransformer("bert-base-nli-mean-tokens") + + self.model = model + + def format(self, event: PickBest.Event) -> str: + """ + Converts the based_on and to_select_from into a format that can be used by VW + """ + + cost = None + if event.selected: + chosen_action = event.selected.index + cost = ( + -1.0 * event.selected.score + if event.selected.score is not None + else None + ) + prob = event.selected.probability + + context_emb = base.embed(event.based_on, self.model) if event.based_on else None + to_select_from_var_name, to_select_from = next( + iter(event.to_select_from.items()), (None, None) + ) + action_embs = ( + base.embed(to_select_from, self.model, to_select_from_var_name) + if event.to_select_from + else None + ) + + if not context_emb or not action_embs: + raise ValueError( + "Context and to_select_from must be provided in the inputs dictionary" + ) + + example_string = "" + example_string += f"shared " + for context_item in context_emb: + for ns, based_on in context_item.items(): + example_string += f"|{ns} {' '.join(based_on) if isinstance(based_on, list) else based_on} " + example_string += "\n" + + for i, action in enumerate(action_embs): + if cost is not None and chosen_action == i: + example_string += f"{chosen_action}:{cost}:{prob} " + for ns, action_embedding in action.items(): + example_string += f"|{ns} {' '.join(action_embedding) if isinstance(action_embedding, list) else action_embedding} " + example_string += "\n" + # Strip the last newline + return example_string[:-1] + + +class PickBest(base.RLChain): + """ + PickBest class that utilizes the Vowpal Wabbit (VW) model for personalization. + + The Chain is initialized with a set of potential to_select_from. For each call to the Chain, a specific action will be chosen based on an input based_on. + This chosen action is then passed to the prompt that will be utilized in the subsequent call to the LLM (Language Model). + + The flow of this chain is: + - Chain is initialized + - Chain is called input containing the based_on and the List of potential to_select_from + - Chain chooses an action based on the based_on + - Chain calls the LLM with the chosen action + - LLM returns a response + - If the selection_scorer is specified, the response is checked against the selection_scorer + - The internal model will be updated with the based_on, action, and reward of the response (how good or bad the response was) + - The response is returned + + input dictionary expects: + - at least one variable wrapped in BasedOn which will be the based_on to use for personalization + - one variable of a list wrapped in ToSelectFrom which will be the list of to_select_from for the Vowpal Wabbit model to choose from. + This list can either be a List of str's or a List of Dict's. + - Actions provided as a list of strings e.g. to_select_from = ["action1", "action2", "action3"] + - If to_select_from are provided as a list of dictionaries, each action should be a dictionary where the keys are namespace names and the values are the corresponding action strings e.g. to_select_from = [{"namespace1": "action1", "namespace2": "action2"}, {"namespace1": "action3", "namespace2": "action4"}] + Extends: + RLChain + + Attributes: + feature_embedder: (PickBestFeatureEmbedder, optional) The text embedder to use for embedding the based_on and the to_select_from. If not provided, a default embedder is used. 
+ """ + + class Selected(base.Selected): + index: Optional[int] + probability: Optional[float] + score: Optional[float] + + def __init__( + self, + index: Optional[int] = None, + probability: Optional[float] = None, + score: Optional[float] = None, + ): + self.index = index + self.probability = probability + self.score = score + + class Event(base.Event): + def __init__( + self, + inputs: Dict[str, Any], + to_select_from: Dict[str, Any], + based_on: Dict[str, Any], + selected: Optional[PickBest.Selected] = None, + ): + super().__init__(inputs=inputs, selected=selected) + self.to_select_from = to_select_from + self.based_on = based_on + + def __init__( + self, + feature_embedder: Optional[PickBestFeatureEmbedder] = None, + *args, + **kwargs, + ): + vw_cmd = kwargs.get("vw_cmd", []) + if not vw_cmd: + vw_cmd = [ + "--cb_explore_adf", + "--quiet", + "--interactions=::", + "--coin", + "--epsilon=0.2", + ] + else: + if "--cb_explore_adf" not in vw_cmd: + raise ValueError( + "If vw_cmd is specified, it must include --cb_explore_adf" + ) + + kwargs["vw_cmd"] = vw_cmd + if not feature_embedder: + feature_embedder = PickBestFeatureEmbedder() + + super().__init__(feature_embedder=feature_embedder, *args, **kwargs) + + def _call_before_predict(self, inputs: Dict[str, Any]) -> PickBest.Event: + context, actions = base.get_based_on_and_to_select_from(inputs=inputs) + if not actions: + raise ValueError( + "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." + ) + + if len(list(actions.values())) > 1: + raise ValueError( + "Only one variable using 'ToSelectFrom' can be provided in the inputs for the PickBest chain. Please provide only one variable containing a list to select from." + ) + + if not context: + raise ValueError( + "No variables using 'BasedOn' found in the inputs. Please include at least one variable containing information to base the selected of ToSelectFrom on." 
+            )
+
+        event = PickBest.Event(inputs=inputs, to_select_from=actions, based_on=context)
+        return event
+
+    def _call_after_predict_before_llm(
+        self, inputs: Dict[str, Any], event: Event, prediction: List[Tuple[int, float]]
+    ) -> Tuple[Dict[str, Any], PickBest.Event]:
+        prob_sum = sum(prob for _, prob in prediction)
+        probabilities = [prob / prob_sum for _, prob in prediction]
+        ## sample from the pmf
+        sampled_index = np.random.choice(len(prediction), p=probabilities)
+        sampled_ap = prediction[sampled_index]
+        sampled_action = sampled_ap[0]
+        sampled_prob = sampled_ap[1]
+        selected = PickBest.Selected(index=sampled_action, probability=sampled_prob)
+        event.selected = selected
+
+        # only one key, value pair in event.to_select_from
+        key, value = next(iter(event.to_select_from.items()))
+        next_chain_inputs = inputs.copy()
+        next_chain_inputs.update({key: value[event.selected.index]})
+        return next_chain_inputs, event
+
+    def _call_after_llm_before_scoring(
+        self, llm_response: str, event: PickBest.Event
+    ) -> Tuple[Dict[str, Any], PickBest.Event]:
+        next_chain_inputs = event.inputs.copy()
+        # only one key, value pair in event.to_select_from
+        value = next(iter(event.to_select_from.values()))
+        next_chain_inputs.update(
+            {
+                self.selected_based_on_input_key: str(event.based_on),
+                self.selected_input_key: value[event.selected.index],
+            }
+        )
+        return next_chain_inputs, event
+
+    def _call_after_scoring_before_learning(
+        self, event: PickBest.Event, score: Optional[float]
+    ) -> Event:
+        event.selected.score = score
+        return event
+
+    def _call(
+        self,
+        inputs: Dict[str, Any],
+        run_manager: Optional[CallbackManagerForChainRun] = None,
+    ) -> Dict[str, Any]:
+        """
+        When chain.run() is called with the given inputs, this function is called. It is responsible for calling the VW model to choose an action (ToSelectFrom) based on the (BasedOn) based_on, and then calling the LLM (Language Model) with the chosen action to generate a response.
+
+        Attributes:
+            inputs: (Dict, required) The inputs to the chain. The inputs must contain input variables that are wrapped in BasedOn and ToSelectFrom. BasedOn is the based_on that will be used for selecting a ToSelectFrom action that will be passed to the LLM prompt.
+            run_manager: (CallbackManagerForChainRun, optional) The callback manager to use for this run. If not provided, a default callback manager is used.
+
+        Returns:
+            A dictionary containing:
+                - `response`: The response generated by the LLM (Language Model).
+                - `selection_metadata`: An Event object containing all the information needed to learn the reward for the chosen action at a later point. If an automatic selection_scorer is not provided, then this object can be used at a later point with the `update_with_delayed_score()` function to learn the delayed reward and update the Vowpal Wabbit model.
+                - the `score` in the `selection_metadata` object is set to None if an automatic selection_scorer is not provided or if the selection_scorer failed (e.g. LLM timeout or LLM failed to rank correctly).
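+
+        Example of consuming the return value (an illustrative sketch):
+            result = chain.run(...)
+            event = result["selection_metadata"]
+            print(event.selected.index, event.selected.probability, event.selected.score)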
+ """ + return super()._call(run_manager=run_manager, inputs=inputs) + + @property + def _chain_type(self) -> str: + return "rl_chain_pick_best" + + @classmethod + def from_chain( + cls, + llm_chain: Chain, + prompt: BasePromptTemplate, + selection_scorer=SENTINEL, + **kwargs: Any, + ): + if selection_scorer is SENTINEL: + selection_scorer = base.AutoSelectionScorer(llm=llm_chain.llm) + return PickBest( + llm_chain=llm_chain, + prompt=prompt, + selection_scorer=selection_scorer, + **kwargs, + ) + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: BasePromptTemplate, + selection_scorer=SENTINEL, + **kwargs: Any, + ): + llm_chain = LLMChain(llm=llm, prompt=prompt) + return PickBest.from_chain( + llm_chain=llm_chain, + prompt=prompt, + selection_scorer=selection_scorer, + **kwargs, + ) diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/rl_chain_base.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/rl_chain_base.py new file mode 100644 index 0000000000..a20c78fc78 --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/rl_chain_base.py @@ -0,0 +1,551 @@ +from __future__ import annotations + +import logging +import os +from typing import Any, Dict, List, Optional, Tuple, Union, Sequence +from abc import ABC, abstractmethod + +import vowpal_wabbit_next as vw +from .vw_logger import VwLogger +from .model_repository import ModelRepository +from .metrics import MetricsTracker +from langchain.prompts import BasePromptTemplate + +from langchain.pydantic_v1 import Extra, BaseModel, root_validator + +from langchain.callbacks.manager import CallbackManagerForChainRun +from langchain.chains.base import Chain +from langchain.chains.llm import LLMChain +from langchain.prompts import ( + ChatPromptTemplate, + SystemMessagePromptTemplate, + HumanMessagePromptTemplate, +) + +logger = logging.getLogger(__name__) + + +class _BasedOn: + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + __repr__ = __str__ + + +def BasedOn(anything): + return _BasedOn(anything) + + +class _ToSelectFrom: + def __init__(self, value): + self.value = value + + def __str__(self): + return str(self.value) + + __repr__ = __str__ + + +def ToSelectFrom(anything): + if not isinstance(anything, list): + raise ValueError("ToSelectFrom must be a list to select from") + return _ToSelectFrom(anything) + + +class _Embed: + def __init__(self, value, keep=False): + self.value = value + self.keep = keep + + def __str__(self): + return str(self.value) + + __repr__ = __str__ + + +def Embed(anything, keep=False): + if isinstance(anything, _ToSelectFrom): + return ToSelectFrom(Embed(anything.value, keep=keep)) + elif isinstance(anything, _BasedOn): + return BasedOn(Embed(anything.value, keep=keep)) + if isinstance(anything, list): + return [Embed(v, keep=keep) for v in anything] + elif isinstance(anything, dict): + return {k: Embed(v, keep=keep) for k, v in anything.items()} + elif isinstance(anything, _Embed): + return anything + return _Embed(anything, keep=keep) + + +def EmbedAndKeep(anything): + return Embed(anything, keep=True) + + +# helper functions + + +def parse_lines(parser: vw.TextFormatParser, input_str: str) -> List[vw.Example]: + return [parser.parse_line(line) for line in input_str.split("\n")] + + +def get_based_on_and_to_select_from(inputs: Dict[str, Any]): + to_select_from = { + k: inputs[k].value + for k in inputs.keys() + if isinstance(inputs[k], _ToSelectFrom) + } + + if not to_select_from: + raise ValueError( + "No variables 
using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from."
+        )
+
+    based_on = {
+        k: inputs[k].value if isinstance(inputs[k].value, list) else [inputs[k].value]
+        for k in inputs.keys()
+        if isinstance(inputs[k], _BasedOn)
+    }
+
+    return based_on, to_select_from
+
+
+def prepare_inputs_for_autoembed(inputs: Dict[str, Any]):
+    # go over all the inputs; if something is wrapped in _ToSelectFrom or _BasedOn, and
+    # its inner value is not already _Embed, wrap it in EmbedAndKeep while retaining its _ToSelectFrom or _BasedOn status
+    next_inputs = inputs.copy()
+    for k, v in next_inputs.items():
+        if isinstance(v, _ToSelectFrom) or isinstance(v, _BasedOn):
+            if not isinstance(v.value, _Embed):
+                next_inputs[k].value = EmbedAndKeep(v.value)
+    return next_inputs
+
+
+# end helper functions
+
+
+class Selected(ABC):
+    pass
+
+
+class Event(ABC):
+    inputs: Dict[str, Any]
+    selected: Optional[Selected]
+
+    def __init__(self, inputs: Dict[str, Any], selected: Optional[Selected] = None):
+        self.inputs = inputs
+        self.selected = selected
+
+
+class Policy(ABC):
+    @abstractmethod
+    def predict(self, event: Event) -> Any:
+        pass
+
+    @abstractmethod
+    def learn(self, event: Event):
+        pass
+
+    @abstractmethod
+    def log(self, event: Event):
+        pass
+
+    def save(self):
+        pass
+
+
+class VwPolicy(Policy):
+    def __init__(
+        self,
+        model_repo: ModelRepository,
+        vw_cmd: Sequence[str],
+        feature_embedder: Embedder,
+        vw_logger: VwLogger,
+        *args,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.model_repo = model_repo
+        self.workspace = self.model_repo.load(vw_cmd)
+        self.feature_embedder = feature_embedder
+        self.vw_logger = vw_logger
+
+    def predict(self, event: Event) -> Any:
+        text_parser = vw.TextFormatParser(self.workspace)
+        return self.workspace.predict_one(
+            parse_lines(text_parser, self.feature_embedder.format(event))
+        )
+
+    def learn(self, event: Event):
+        vw_ex = self.feature_embedder.format(event)
+
+        text_parser = vw.TextFormatParser(self.workspace)
+        multi_ex = parse_lines(text_parser, vw_ex)
+        self.workspace.learn_one(multi_ex)
+
+    def log(self, event: Event):
+        if self.vw_logger.logging_enabled():
+            vw_ex = self.feature_embedder.format(event)
+            self.vw_logger.log(vw_ex)
+
+    def save(self):
+        # ModelRepository.save expects the workspace to serialize
+        self.model_repo.save(self.workspace)
+
+
+class Embedder(ABC):
+    @abstractmethod
+    def format(self, event: Event) -> str:
+        pass
+
+
+class SelectionScorer(ABC, BaseModel):
+    """Abstract class to grade the chosen selection or the response of the llm"""
+
+    @abstractmethod
+    def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float:
+        pass
+
+
+class AutoSelectionScorer(SelectionScorer, BaseModel):
+    llm_chain: Union[LLMChain, None] = None
+    prompt: Union[BasePromptTemplate, None] = None
+    scoring_criteria_template_str: Optional[str] = None
+
+    @staticmethod
+    def get_default_system_prompt() -> SystemMessagePromptTemplate:
+        return SystemMessagePromptTemplate.from_template(
+            "PLEASE RESPOND ONLY WITH A SINGLE FLOAT AND NO OTHER TEXT EXPLANATION\n You are a strict judge that is called on to rank a response based on given criteria.\
+            You must respond with your ranking by providing a single float within the range [0, 1], 0 being very bad response and 1 being very good response."
+        )
+
+    @staticmethod
+    def get_default_prompt() -> ChatPromptTemplate:
+        human_template = 'Given this based_on "{rl_chain_selected_based_on}" as the most important attribute, rank how good or bad this text is: "{llm_response}".'
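+        # {rl_chain_selected_based_on} and {llm_response} are filled in by the chain at scoring time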
+        human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+        default_system_prompt = AutoSelectionScorer.get_default_system_prompt()
+        chat_prompt = ChatPromptTemplate.from_messages(
+            [default_system_prompt, human_message_prompt]
+        )
+        return chat_prompt
+
+    @root_validator(pre=True)
+    def set_prompt_and_llm_chain(cls, values):
+        llm = values.get("llm")
+        prompt = values.get("prompt")
+        scoring_criteria_template_str = values.get("scoring_criteria_template_str")
+        if prompt is None and scoring_criteria_template_str is None:
+            prompt = AutoSelectionScorer.get_default_prompt()
+        elif prompt is None and scoring_criteria_template_str is not None:
+            human_message_prompt = HumanMessagePromptTemplate.from_template(
+                scoring_criteria_template_str
+            )
+            default_system_prompt = AutoSelectionScorer.get_default_system_prompt()
+            prompt = ChatPromptTemplate.from_messages(
+                [default_system_prompt, human_message_prompt]
+            )
+        values["prompt"] = prompt
+        values["llm_chain"] = LLMChain(llm=llm, prompt=prompt)
+        return values
+
+    def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float:
+        ranking = self.llm_chain.predict(llm_response=llm_response, **inputs)
+        ranking = ranking.strip()
+        try:
+            resp = float(ranking)
+            return resp
+        except Exception as e:
+            raise RuntimeError(
+                f"The llm did not manage to rank the response as expected, there is always the option to try again or tweak the reward prompt. Error: {e}"
+            )
+
+
+class RLChain(Chain):
+    """
+    RLChain class that utilizes the Vowpal Wabbit (VW) model for personalization.
+
+    Attributes:
+        feature_embedder (Embedder): The embedder used to convert the based_on and to_select_from into VW input format.
+        model_save_dir (str, optional): The directory to save the VW model to. Defaults to the current directory.
+        reset_model (bool, optional): If set to True, the chain will start training from scratch, potentially overwriting an existing saved model. If set to False, it will attempt to load an existing VW model from the latest checkpoint file in the {model_save_dir} directory. Defaults to False.
+        vw_cmd (List[str], optional): Advanced users can set the VW command line to whatever they want, as long as it is compatible with the chain type being used.
+        selection_scorer (SelectionScorer): If set, the chain will check the response using the provided selection_scorer and the VW model will be updated with the result. Defaults to None.
+
+    Notes:
+        The class creates a VW model instance using the provided arguments. Before the chain object is destroyed the save_progress() function can be called. If it is called, the learned VW model is saved to a checkpoint file in the model save directory (see save_progress() for the file naming convention).
+        When making predictions, VW is first called to choose action(s) which are then passed into the prompt under the corresponding input variable name. After action selection, the LLM (Language Model) is called with the prompt populated by the chosen action(s), and the response is returned.
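+
+    Example (a hedged sketch; `chain` is an instance of a concrete subclass such as PickBest):
+        chain.save_progress()  # persist the current VW model to model_save_dir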
+ """ + + llm_chain: Chain + + output_key: str = "result" #: :meta private: + prompt: BasePromptTemplate + selection_scorer: Union[SelectionScorer, None] + policy: Optional[Policy] + auto_embed: bool = True + selected_input_key = "rl_chain_selected" + selected_based_on_input_key = "rl_chain_selected_based_on" + metrics: Optional[MetricsTracker] = None + + def __init__( + self, + feature_embedder: Embedder, + model_save_dir="./", + reset_model=False, + vw_cmd=None, + policy=VwPolicy, + vw_logs: Optional[Union[str, os.PathLike]] = None, + metrics_step=-1, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + if self.selection_scorer is None: + logger.warning( + "No response validator provided, which means that no reinforcement learning will be done in the RL chain unless update_with_delayed_score is called." + ) + self.policy = policy( + model_repo=ModelRepository( + model_save_dir, with_history=True, reset=reset_model + ), + vw_cmd=vw_cmd or [], + feature_embedder=feature_embedder, + vw_logger=VwLogger(vw_logs), + ) + self.metrics = MetricsTracker(step=metrics_step) + + class Config: + """Configuration for this pydantic object.""" + + extra = Extra.forbid + arbitrary_types_allowed = True + + @property + def input_keys(self) -> List[str]: + """Expect input key. + :meta private: + """ + return [] + + @property + def output_keys(self) -> List[str]: + """Expect output key. + + :meta private: + """ + return [self.output_key] + + def _validate_inputs(self, inputs: Dict[str, Any]) -> None: + super()._validate_inputs(inputs) + if ( + self.selected_input_key in inputs.keys() + or self.selected_based_on_input_key in inputs.keys() + ): + raise ValueError( + f"The rl chain does not accept '{self.selected_input_key}' or '{self.selected_based_on_input_key}' as input keys, they are reserved for internal use during auto reward." + ) + + @abstractmethod + def _call_before_predict(self, inputs: Dict[str, Any]) -> Event: + pass + + @abstractmethod + def _call_after_predict_before_llm( + self, inputs: Dict[str, Any], event: Event, prediction: Any + ) -> Tuple[Dict[str, Any], Event]: + pass + + @abstractmethod + def _call_after_llm_before_scoring( + self, llm_response: str, event: Event + ) -> Tuple[Dict[str, Any], Event]: + pass + + @abstractmethod + def _call_after_scoring_before_learning( + self, event: Event, score: Optional[float] + ) -> Event: + pass + + def update_with_delayed_score( + self, score: float, event: Event, force_score=False + ) -> None: + """ + Learn will be called with the score specified and the actions/embeddings/etc stored in event + + Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call + """ + if self.selection_scorer and not force_score: + raise RuntimeError( + "The selection scorer is set, and force_score was not set to True. Please set force_score=True to use this function." + ) + self.metrics.on_feedback(score) + self._call_after_scoring_before_learning(event=event, score=score) + self.policy.learn(event=event) + self.policy.log(event=event) + + def set_auto_embed(self, auto_embed: bool) -> None: + """ + Set whether the chain should auto embed the inputs or not. If set to False, the inputs will not be embedded and the user will need to embed the inputs themselves before calling run. + + Args: + auto_embed (bool): Whether the chain should auto embed the inputs or not. 
+ """ + self.auto_embed = auto_embed + + def _call( + self, + inputs: Dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> Dict[str, str]: + _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() + + if self.auto_embed: + inputs = prepare_inputs_for_autoembed(inputs=inputs) + + event = self._call_before_predict(inputs=inputs) + prediction = self.policy.predict(event=event) + self.metrics.on_decision() + + next_chain_inputs, event = self._call_after_predict_before_llm( + inputs=inputs, event=event, prediction=prediction + ) + + t = self.llm_chain.run(**next_chain_inputs, callbacks=_run_manager.get_child()) + _run_manager.on_text(t, color="green", verbose=self.verbose) + t = t.strip() + + if self.verbose: + _run_manager.on_text("\nCode: ", verbose=self.verbose) + + output = t + _run_manager.on_text("\nAnswer: ", verbose=self.verbose) + _run_manager.on_text(output, color="yellow", verbose=self.verbose) + + next_chain_inputs, event = self._call_after_llm_before_scoring( + llm_response=output, event=event + ) + + score = None + try: + if self.selection_scorer: + score = self.selection_scorer.score_response( + inputs=next_chain_inputs, llm_response=output + ) + except Exception as e: + logger.info( + f"The LLM was not able to rank and the chain was not able to adjust to this response, error: {e}" + ) + self.metrics.on_feedback(score) + event = self._call_after_scoring_before_learning(score=score, event=event) + self.policy.learn(event=event) + self.policy.log(event=event) + + return {self.output_key: {"response": output, "selection_metadata": event}} + + def save_progress(self) -> None: + """ + This function should be called whenever there is a need to save the progress of the VW (Vowpal Wabbit) model within the chain. It saves the current state of the VW model to a file. + + File Naming Convention: + The file will be named using the pattern `model-.vw`, where `` is a monotonically increasing number. The numbering starts from 1, and increments by 1 for each subsequent save. If there are already saved checkpoints, the number used for `` will be the next in the sequence. + + Example: + If there are already two saved checkpoints, `model-1.vw` and `model-2.vw`, the next time this function is called, it will save the model as `model-3.vw`. + + Note: + Be cautious when deleting or renaming checkpoint files manually, as this could cause the function to reuse checkpoint numbers. + """ + self.policy.save() + + @property + def _chain_type(self) -> str: + return "llm_personalizer_chain" + + +def is_stringtype_instance(item: Any) -> bool: + """Helper function to check if an item is a string.""" + return isinstance(item, str) or ( + isinstance(item, _Embed) and isinstance(item.value, str) + ) + + +def embed_string_type( + item: Union[str, _Embed], model: Any, namespace: Optional[str] = None +) -> Dict[str, str]: + """Helper function to embed a string or an _Embed object.""" + join_char = "" + keep_str = "" + if isinstance(item, _Embed): + encoded = model.encode(item.value) + join_char = " " + if item.keep: + keep_str = item.value.replace(" ", "_") + " " + elif isinstance(item, str): + encoded = item.replace(" ", "_") + join_char = "" + else: + raise ValueError(f"Unsupported type {type(item)} for embedding") + + if namespace is None: + raise ValueError( + "The default namespace must be provided when embedding a string or _Embed object." 
+ return {namespace: keep_str + join_char.join(map(str, encoded))}
+
+
+def embed_dict_type(item: Dict, model: Any) -> Dict[str, Union[str, List[str]]]:
+ """Helper function to embed a dictionary item."""
+ inner_dict = {}
+ for ns, embed_item in item.items():
+ if isinstance(embed_item, list):
+ inner_dict[ns] = []
+ for embed_list_item in embed_item:
+ embedded = embed_string_type(embed_list_item, model, ns)
+ inner_dict[ns].append(embedded[ns])
+ else:
+ inner_dict.update(embed_string_type(embed_item, model, ns))
+ return inner_dict
+
+
+def embed_list_type(
+ item: list, model: Any, namespace: Optional[str] = None
+) -> List[Dict[str, Union[str, List[str]]]]:
+ """Helper function to embed a list of items."""
+ ret_list = []
+ for embed_item in item:
+ if isinstance(embed_item, dict):
+ ret_list.append(embed_dict_type(embed_item, model))
+ else:
+ ret_list.append(embed_string_type(embed_item, model, namespace))
+ return ret_list
+
+
+def embed(
+ to_embed: Union[str, _Embed, Dict, List[Union[str, _Embed]], List[Dict]],
+ model: Any,
+ namespace: Optional[str] = None,
+) -> List[Dict[str, Union[str, List[str]]]]:
+ """
+ Embeds the actions or context using the provided model (e.g. a SentenceTransformer).
+
+ Args:
+ to_embed: (Union[str, _Embed, Dict, List[Union[str, _Embed]], List[Dict]], required) The text to be embedded: a string, a dictionary, a list of strings, or a list of dictionaries.
+ namespace: (str, optional) The default namespace to use when a dictionary or a list of dictionaries is not provided.
+ model: (Any, required) The model to use for embedding.
+ Returns:
+ List[Dict[str, str]]: A list of dictionaries where each dictionary has the namespace as the key and the embedded string as the value.
+ """
+ if (isinstance(to_embed, _Embed) and isinstance(to_embed.value, str)) or isinstance(
+ to_embed, str
+ ):
+ return [embed_string_type(to_embed, model, namespace)]
+ elif isinstance(to_embed, dict):
+ return [embed_dict_type(to_embed, model)]
+ elif isinstance(to_embed, list):
+ return embed_list_type(to_embed, model, namespace)
+ else:
+ raise ValueError("Invalid input format for embedding")
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py
new file mode 100644
index 0000000000..62beedaecf
--- /dev/null
+++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py
@@ -0,0 +1,275 @@
+from __future__ import annotations
+
+from . import rl_chain_base as base
+from langchain.prompts.prompt import PromptTemplate
+
+from langchain.callbacks.manager import CallbackManagerForChainRun
+from langchain.chains.base import Chain
+
+from typing import Any, Dict, List, Optional, Tuple, Union
+from itertools import chain
+import random
+
+
+from langchain.base_language import BaseLanguageModel
+from langchain.chains.llm import LLMChain
+from sentence_transformers import SentenceTransformer
+
+# Sentinel object used to distinguish between "the user supplied nothing" and
+# "the user explicitly supplied None".
+SENTINEL = object()
+
+
+class SlatesFeatureEmbedder(base.Embedder):
+ """
+ Slates feature embedder class that embeds the context and actions into a format that can be used by VW.
+
+ Attributes:
+ model (Any, optional): The type of embeddings to be used for feature representation.
Defaults to BERT Sentence Transformer + """ + + def __init__(self, model: Optional[Any] = None, *args, **kwargs): + super().__init__(*args, **kwargs) + + if model is None: + model = SentenceTransformer("bert-base-nli-mean-tokens") + + self.model = model + + def to_action_features(self, actions: Dict[str, Any]): + def _str(embedding): + return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) + + action_features = [] + for slot in actions.values(): + slot_features = [] + for action in slot: + if isinstance(action, base._Embed) and action.keep: + feature = ( + action.value.replace(" ", "_") + + " " + + _str(self.model.encode(action.value)) + ) + elif isinstance(action, base._Embed): + feature = _str(self.model.encode(action.value)) + else: + feature = action.replace(" ", "_") + slot_features.append(feature) + action_features.append(slot_features) + + return action_features + + def format(self, event: SlatesPersonalizerChain.Event) -> str: + action_features = self.to_action_features(event.to_select_from) + + cost = ( + -1.0 * event.selected.score + if event.selected and event.selected.score is not None + else "" + ) + context_str = f"slates shared {cost} " + + if event.based_on: + embedded_context = base.embed(event.based_on, self.model) + for context_item in embedded_context: + for ns, ctx in context_item.items(): + context_str += ( + f"|{ns} {' '.join(ctx) if isinstance(ctx, list) else ctx} " + ) + else: + context_str += "|" # empty context + + actions = chain.from_iterable( + [ + [f"slates action {i} |Action {action}"] + for i, slot in enumerate(action_features) + for action in slot + ] + ) + ps = ( + [f"{a}:{p}" for a, p in event.selected.get_indexes_and_probabilities()] + if event.selected + else [""] * len(action_features) + ) + slots = [f"slates slot {p} |" for p in ps] + return "\n".join(list(chain.from_iterable([[context_str], actions, slots]))) + + +class SlatesRandomPolicy(base.Policy): + def __init__(self, feature_embedder: base.Embedder, *_, **__): + self.feature_embedder = feature_embedder + + def predict(self, event: SlatesPersonalizerChain.Event) -> Any: + return [ + [(random.randint(0, len(slot) - 1), 1.0 / len(slot))] + for _, slot in event.to_select_from.items() + ] + + def learn(self, event: SlatesPersonalizerChain.Event) -> Any: + pass + + def log(self, event: SlatesPersonalizerChain.Event) -> Any: + pass + + +class SlatesFirstChoicePolicy(base.Policy): + def __init__(self, feature_embedder: base.Embedder, *_, **__): + self.feature_embedder = feature_embedder + + def predict(self, event: SlatesPersonalizerChain.Event) -> Any: + return [[(0, 1)] for _ in event.to_select_from] + + def learn(self, event: SlatesPersonalizerChain.Event) -> Any: + pass + + def log(self, event: SlatesPersonalizerChain.Event) -> Any: + pass + + +class SlatesPersonalizerChain(base.RLChain): + class Selected(base.Selected): + indexes: Optional[List[int]] + probabilities: Optional[List[float]] + score: Optional[float] + + def __init__( + self, + indexes: Optional[List[int]] = None, + probabilities: Optional[List[float]] = None, + score: Optional[float] = None, + ): + self.indexes = indexes + self.probabilities = probabilities + self.score = score + + def get_indexes_and_probabilities(self): + return zip(self.indexes, self.probabilities) + + class Event(base.Event): + def __init__( + self, + inputs: Dict[str, Any], + to_select_from: Dict[str, Any], + based_on: Dict[str, Any], + selected: Optional[SlatesPersonalizerChain.Selected] = None, + ): + super().__init__(inputs=inputs, selected=selected) 
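+ # Keep the candidate slots (to_select_from) and the context features
+ # (based_on) on the event so the feature embedder can later format the
+ # complete VW slates example from it.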
+ self.to_select_from = to_select_from + self.based_on = based_on + + def __init__( + self, feature_embedder: Optional[base.Embedder] = None, *args, **kwargs + ): + vw_cmd = kwargs.get("vw_cmd", []) + if not vw_cmd: + vw_cmd = [ + "--slates", + "--quiet", + "--interactions=::", + "--coin", + "--squarecb", + ] + else: + if "--slates" not in vw_cmd: + raise ValueError("If vw_cmd is specified, it must include --slates") + + kwargs["vw_cmd"] = vw_cmd + + if feature_embedder is None: + feature_embedder = SlatesFeatureEmbedder() + + super().__init__(feature_embedder=feature_embedder, *args, **kwargs) + + def _call_before_predict( + self, inputs: Dict[str, Any] + ) -> SlatesPersonalizerChain.Event: + context, actions = base.get_based_on_and_to_select_from(inputs=inputs) + event = SlatesPersonalizerChain.Event( + inputs=inputs, to_select_from=actions, based_on=context + ) + return event + + def _call_after_predict_before_llm( + self, + inputs: Dict[str, Any], + event: SlatesPersonalizerChain.Event, + prediction: List[List[Tuple[int, float]]], + ) -> Tuple[Dict[str, Any], SlatesPersonalizerChain.Event]: + indexes = [p[0][0] for p in prediction] + probabilities = [p[0][1] for p in prediction] + selected = SlatesPersonalizerChain.Selected( + indexes=indexes, probabilities=probabilities + ) + event.selected = selected + + preds = {} + for i, (j, a) in enumerate( + zip(event.selected.indexes, event.to_select_from.values()) + ): + preds[list(event.to_select_from.keys())[i]] = str(a[j]) + + next_chain_inputs = inputs.copy() + next_chain_inputs.update(preds) + + return next_chain_inputs, event + + def _call_after_llm_before_scoring( + self, llm_response: str, event: SlatesPersonalizerChain.Event + ) -> Tuple[Dict[str, Any], SlatesPersonalizerChain.Event]: + next_chain_inputs = event.inputs.copy() + next_chain_inputs.update( + { + self.selected_based_on_input_key: str(event.based_on), + self.selected_input_key: str(event.to_select_from), + } + ) + return next_chain_inputs, event + + def _call_after_scoring_before_learning( + self, event: Event, score: Optional[float] + ) -> SlatesPersonalizerChain.Event: + event.selected.score = score + return event + + def _call( + self, + inputs: Dict[str, Any], + run_manager: Optional[CallbackManagerForChainRun] = None, + ) -> Dict[str, str]: + return super()._call(run_manager=run_manager, inputs=inputs) + + @property + def _chain_type(self) -> str: + return "llm_personalizer_chain" + + @classmethod + def from_chain( + cls, + llm_chain: Chain, + prompt: PromptTemplate, + selection_scorer=SENTINEL, + **kwargs: Any, + ): + if selection_scorer is SENTINEL: + selection_scorer = base.AutoSelectionScorer(llm=llm_chain.llm) + return SlatesPersonalizerChain( + llm_chain=llm_chain, + prompt=prompt, + selection_scorer=selection_scorer, + **kwargs, + ) + + @classmethod + def from_llm( + cls, + llm: BaseLanguageModel, + prompt: PromptTemplate, + selection_scorer=SENTINEL, + **kwargs: Any, + ): + llm_chain = LLMChain(llm=llm, prompt=prompt) + return SlatesPersonalizerChain.from_chain( + llm_chain=llm_chain, + prompt=prompt, + selection_scorer=selection_scorer, + **kwargs, + ) diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/vw_logger.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/vw_logger.py new file mode 100644 index 0000000000..0d4cce2144 --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/rl_chain/vw_logger.py @@ -0,0 +1,18 @@ +from typing import Union, Optional +from pathlib import Path +from os import PathLike + + +class VwLogger: + def 
__init__(self, path: Optional[Union[str, PathLike]]): + self.path = Path(path) if path else None + if self.path: + self.path.parent.mkdir(parents=True, exist_ok=True) + + def log(self, vw_ex: str): + if self.path: + with open(self.path, "a") as f: + f.write(f"{vw_ex}\n\n") + + def logging_enabled(self): + return bool(self.path) diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py b/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py new file mode 100644 index 0000000000..6c8db426d4 --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py @@ -0,0 +1,289 @@ +import sys + +sys.path.append("..") + +import rl_chain.pick_best_chain as pick_best_chain +from test_utils import MockEncoder +import pytest +from langchain.prompts.prompt import PromptTemplate +from langchain.chat_models import FakeListChatModel + +encoded_text = "[ e n c o d e d ] " + + +def setup(): + _PROMPT_TEMPLATE = """This is a dummy prompt that will be ignored by the fake llm""" + PROMPT = PromptTemplate(input_variables=[], template=_PROMPT_TEMPLATE) + + llm = FakeListChatModel(responses=["hey"]) + return llm, PROMPT + + +def test_multiple_ToSelectFrom_throws(): + llm, PROMPT = setup() + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + actions = ["0", "1", "2"] + with pytest.raises(ValueError): + chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(actions), + another_action=pick_best_chain.base.ToSelectFrom(actions), + ) + + +def test_missing_basedOn_from_throws(): + llm, PROMPT = setup() + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + actions = ["0", "1", "2"] + with pytest.raises(ValueError): + chain.run(action=pick_best_chain.base.ToSelectFrom(actions)) + + +def test_ToSelectFrom_not_a_list_throws(): + llm, PROMPT = setup() + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + actions = {"actions": ["0", "1", "2"]} + with pytest.raises(ValueError): + chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + + +def test_update_with_delayed_score_with_auto_validator_throws(): + llm, PROMPT = setup() + # this LLM returns a number so that the auto validator will return that + auto_val_llm = FakeListChatModel(responses=["3"]) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=auto_val_llm), + ) + actions = ["0", "1", "2"] + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == 3.0 + with pytest.raises(RuntimeError): + chain.update_with_delayed_score(event=selection_metadata, score=100) + + +def test_update_with_delayed_score_force(): + llm, PROMPT = setup() + # this LLM returns a number so that the auto validator will return that + auto_val_llm = FakeListChatModel(responses=["3"]) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=auto_val_llm), + ) + actions = ["0", "1", "2"] + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + assert response["response"] == "hey" + selection_metadata = 
response["selection_metadata"] + assert selection_metadata.selected.score == 3.0 + chain.update_with_delayed_score( + event=selection_metadata, score=100, force_score=True + ) + assert selection_metadata.selected.score == 100.0 + + +def test_update_with_delayed_score(): + llm, PROMPT = setup() + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, selection_scorer=None + ) + actions = ["0", "1", "2"] + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == None + chain.update_with_delayed_score(event=selection_metadata, score=100) + assert selection_metadata.selected.score == 100.0 + + +def test_user_defined_scorer(): + llm, PROMPT = setup() + + class CustomSelectionScorer(pick_best_chain.base.SelectionScorer): + def score_response(self, inputs, llm_response: str) -> float: + score = 200 + return score + + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer() + ) + actions = ["0", "1", "2"] + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == 200.0 + + +def test_default_embeddings(): + llm, PROMPT = setup() + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder + ) + + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + ctx_str_2 = "context2" + + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + + expected = f"""shared |User {ctx_str_1 + " " + encoded_ctx_str_1} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {str3 + " " + encoded_str3} """ + + actions = [str1, str2, str3] + + response = chain.run( + User=pick_best_chain.base.BasedOn(ctx_str_1), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + selection_metadata = response["selection_metadata"] + vw_str = feature_embedder.format(selection_metadata) + assert vw_str == expected + + +def test_default_embeddings_off(): + llm, PROMPT = setup() + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False + ) + + str1 = "0" + str2 = "1" + str3 = "2" + ctx_str_1 = "context1" + + expected = f"""shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} """ + + actions = [str1, str2, str3] + + response = chain.run( + User=pick_best_chain.base.BasedOn(ctx_str_1), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + selection_metadata = response["selection_metadata"] + vw_str = feature_embedder.format(selection_metadata) + assert vw_str == expected + + +def test_default_embeddings_mixed_w_explicit_user_embeddings(): + llm, PROMPT = setup() + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + 
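+ # MockEncoder (see test_utils.py) returns "[encoded]" + its input; the
+ # embedder then space-joins the characters of that result, which produces
+ # the "[ e n c o d e d ] ..." strings expected below.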
chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder + ) + + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + ctx_str_2 = "context2" + + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + + expected = f"""shared |User {encoded_ctx_str_1} |User2 {ctx_str_2 + " " + encoded_ctx_str_2} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {encoded_str3} """ + + actions = [str1, str2, pick_best_chain.base.Embed(str3)] + + response = chain.run( + User=pick_best_chain.base.BasedOn(pick_best_chain.base.Embed(ctx_str_1)), + User2=pick_best_chain.base.BasedOn(ctx_str_2), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + selection_metadata = response["selection_metadata"] + vw_str = feature_embedder.format(selection_metadata) + assert vw_str == expected + + +def test_default_no_scorer_specified(): + _, PROMPT = setup() + chain_llm = FakeListChatModel(responses=[100]) + chain = pick_best_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) + # chain llm used for both basic prompt and for scoring + assert response["response"] == "100" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == 100.0 + + +def test_explicitly_no_scorer(): + llm, PROMPT = setup() + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, selection_scorer=None + ) + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) + # chain llm used for both basic prompt and for scoring + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == None + + +def test_auto_scorer_with_user_defined_llm(): + llm, PROMPT = setup() + scorer_llm = FakeListChatModel(responses=[300]) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=scorer_llm), + ) + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) + # chain llm used for both basic prompt and for scoring + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == 300.0 + + +def test_calling_chain_w_reserved_inputs_throws(): + llm, PROMPT = setup() + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + with pytest.raises(ValueError): + chain.run( + User=pick_best_chain.base.BasedOn("Context"), + rl_chain_selected_based_on=pick_best_chain.base.ToSelectFrom( + ["0", "1", "2"] + ), + ) + + with pytest.raises(ValueError): + chain.run( + User=pick_best_chain.base.BasedOn("Context"), + rl_chain_selected=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py b/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py new file mode 100644 index 0000000000..29d8d9af69 --- /dev/null +++ 
b/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py @@ -0,0 +1,334 @@ +import sys + +sys.path.append("..") + +import rl_chain.pick_best_chain as pick_best_chain +from test_utils import MockEncoder + +import pytest + +encoded_text = "[ e n c o d e d ] " + + +def test_pickbest_textembedder_missing_context_throws(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_action = {"action": ["0", "1", "2"]} + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_action, based_on={} + ) + with pytest.raises(ValueError): + feature_embedder.format(event) + + +def test_pickbest_textembedder_missing_actions_throws(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from={}, based_on={"context": "context"} + ) + with pytest.raises(ValueError): + feature_embedder.format(event) + + +def test_pickbest_textembedder_no_label_no_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_actions = {"action1": ["0", "1", "2"]} + expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on={"context": "context"} + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_w_label_no_score_no_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_actions = {"action1": ["0", "1", "2"]} + expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) + event = pick_best_chain.PickBest.Event( + inputs={}, + to_select_from=named_actions, + based_on={"context": "context"}, + selected=selected, + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_w_full_label_no_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_actions = {"action1": ["0", "1", "2"]} + expected = ( + """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ + ) + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, + to_select_from=named_actions, + based_on={"context": "context"}, + selected=selected, + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_w_full_label_w_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + + named_actions = {"action1": pick_best_chain.base.Embed([str1, str2, str3])} + context = {"context": pick_best_chain.base.Embed(ctx_str_1)} + expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, 
selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + + named_actions = {"action1": pick_best_chain.base.EmbedAndKeep([str1, str2, str3])} + context = {"context": pick_best_chain.base.EmbedAndKeep(ctx_str_1)} + expected = f"""shared |context {ctx_str_1 + " " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} + context = {"context1": "context1", "context2": "context2"} + expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} + context = {"context1": "context1", "context2": "context2"} + expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} + context = {"context1": "context1", "context2": "context2"} + expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + 
ctx_str_2 = "context2" + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + + named_actions = { + "action1": pick_best_chain.base.Embed([{"a": str1, "b": str1}, str2, str3]) + } + context = { + "context1": pick_best_chain.base.Embed(ctx_str_1), + "context2": pick_best_chain.base.Embed(ctx_str_2), + } + expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ + + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + ctx_str_2 = "context2" + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + + named_actions = { + "action1": pick_best_chain.base.EmbedAndKeep( + [{"a": str1, "b": str1}, str2, str3] + ) + } + context = { + "context1": pick_best_chain.base.EmbedAndKeep(ctx_str_1), + "context2": pick_best_chain.base.EmbedAndKeep(ctx_str_2), + } + expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ + + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + ctx_str_2 = "context2" + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + + named_actions = { + "action1": [ + {"a": str1, "b": pick_best_chain.base.Embed(str1)}, + str2, + pick_best_chain.base.Embed(str3), + ] + } + context = {"context1": ctx_str_1, "context2": pick_best_chain.base.Embed(ctx_str_2)} + expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ + + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + 
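+ # The chosen action's line carries the contextual bandit label
+ # "<index>:<cost>:<probability>", where cost is the negated score; index 0
+ # with score 0.0 and probability 1.0 renders as "0:-0.0:1.0" above.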
assert vw_ex_str == expected + + +def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + + str1 = "0" + str2 = "1" + str3 = "2" + encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_str3 = encoded_text + " ".join(char for char in str3) + + ctx_str_1 = "context1" + ctx_str_2 = "context2" + encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + + named_actions = { + "action1": [ + {"a": str1, "b": pick_best_chain.base.EmbedAndKeep(str1)}, + str2, + pick_best_chain.base.EmbedAndKeep(str3), + ] + } + context = { + "context1": ctx_str_1, + "context2": pick_best_chain.base.EmbedAndKeep(ctx_str_2), + } + expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ + + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context, selected=selected + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected + + +def test_raw_features_underscored(): + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + str1 = "this is a long string" + str1_underscored = str1.replace(" ", "_") + encoded_str1 = encoded_text + " ".join(char for char in str1) + + ctx_str = "this is a long context" + ctx_str_underscored = ctx_str.replace(" ", "_") + encoded_ctx_str = encoded_text + " ".join(char for char in ctx_str) + + # No embeddings + named_actions = {"action": [str1]} + context = {"context": ctx_str} + expected_no_embed = ( + f"""shared |context {ctx_str_underscored} \n|action {str1_underscored} """ + ) + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected_no_embed + + # Just embeddings + named_actions = {"action": pick_best_chain.base.Embed([str1])} + context = {"context": pick_best_chain.base.Embed(ctx_str)} + expected_embed = f"""shared |context {encoded_ctx_str} \n|action {encoded_str1} """ + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected_embed + + # Embeddings and raw features + named_actions = {"action": pick_best_chain.base.EmbedAndKeep([str1])} + context = {"context": pick_best_chain.base.EmbedAndKeep(ctx_str)} + expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ + event = pick_best_chain.PickBest.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_ex_str = feature_embedder.format(event) + assert vw_ex_str == expected_embed_and_keep diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py b/libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py new file mode 100644 index 0000000000..7ae232bbca --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py @@ -0,0 +1,410 @@ +import sys + +sys.path.append("..") + +import rl_chain.rl_chain_base as 
base +from test_utils import MockEncoder + +import pytest + +encoded_text = "[ e n c o d e d ] " + + +def test_simple_context_str_no_emb(): + expected = [{"a_namespace": "test"}] + assert base.embed("test", MockEncoder(), "a_namespace") == expected + + +def test_simple_context_str_w_emb(): + str1 = "test" + encoded_str1 = " ".join(char for char in str1) + expected = [{"a_namespace": encoded_text + encoded_str1}] + assert base.embed(base.Embed(str1), MockEncoder(), "a_namespace") == expected + expected_embed_and_keep = [ + {"a_namespace": str1 + " " + encoded_text + encoded_str1} + ] + assert ( + base.embed(base.EmbedAndKeep(str1), MockEncoder(), "a_namespace") + == expected_embed_and_keep + ) + + +def test_simple_context_str_w_nested_emb(): + # nested embeddings, innermost wins + str1 = "test" + encoded_str1 = " ".join(char for char in str1) + expected = [{"a_namespace": encoded_text + encoded_str1}] + assert ( + base.embed(base.EmbedAndKeep(base.Embed(str1)), MockEncoder(), "a_namespace") + == expected + ) + + expected2 = [{"a_namespace": str1 + " " + encoded_text + encoded_str1}] + assert ( + base.embed(base.Embed(base.EmbedAndKeep(str1)), MockEncoder(), "a_namespace") + == expected2 + ) + + +def test_context_w_namespace_no_emb(): + expected = [{"test_namespace": "test"}] + assert base.embed({"test_namespace": "test"}, MockEncoder()) == expected + + +def test_context_w_namespace_w_emb(): + str1 = "test" + encoded_str1 = " ".join(char for char in str1) + expected = [{"test_namespace": encoded_text + encoded_str1}] + assert base.embed({"test_namespace": base.Embed(str1)}, MockEncoder()) == expected + expected_embed_and_keep = [ + {"test_namespace": str1 + " " + encoded_text + encoded_str1} + ] + assert ( + base.embed({"test_namespace": base.EmbedAndKeep(str1)}, MockEncoder()) + == expected_embed_and_keep + ) + + +def test_context_w_namespace_w_emb2(): + str1 = "test" + encoded_str1 = " ".join(char for char in str1) + expected = [{"test_namespace": encoded_text + encoded_str1}] + assert base.embed(base.Embed({"test_namespace": str1}), MockEncoder()) == expected + expected_embed_and_keep = [ + {"test_namespace": str1 + " " + encoded_text + encoded_str1} + ] + assert ( + base.embed(base.EmbedAndKeep({"test_namespace": str1}), MockEncoder()) + == expected_embed_and_keep + ) + + +def test_context_w_namespace_w_some_emb(): + str1 = "test1" + str2 = "test2" + encoded_str2 = " ".join(char for char in str2) + expected = [ + {"test_namespace": str1, "test_namespace2": encoded_text + encoded_str2} + ] + assert ( + base.embed( + {"test_namespace": str1, "test_namespace2": base.Embed(str2)}, MockEncoder() + ) + == expected + ) + expected_embed_and_keep = [ + { + "test_namespace": str1, + "test_namespace2": str2 + " " + encoded_text + encoded_str2, + } + ] + assert ( + base.embed( + {"test_namespace": str1, "test_namespace2": base.EmbedAndKeep(str2)}, + MockEncoder(), + ) + == expected_embed_and_keep + ) + + +def test_simple_action_strlist_no_emb(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + expected = [{"a_namespace": str1}, {"a_namespace": str2}, {"a_namespace": str3}] + assert base.embed([str1, str2, str3], MockEncoder(), "a_namespace") == expected + + +def test_simple_action_strlist_w_emb(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + encoded_str1 = " ".join(char for char in str1) + encoded_str2 = " ".join(char for char in str2) + encoded_str3 = " ".join(char for char in str3) + expected = [ + {"a_namespace": encoded_text + encoded_str1}, + {"a_namespace": encoded_text + 
encoded_str2}, + {"a_namespace": encoded_text + encoded_str3}, + ] + assert ( + base.embed(base.Embed([str1, str2, str3]), MockEncoder(), "a_namespace") + == expected + ) + expected_embed_and_keep = [ + {"a_namespace": str1 + " " + encoded_text + encoded_str1}, + {"a_namespace": str2 + " " + encoded_text + encoded_str2}, + {"a_namespace": str3 + " " + encoded_text + encoded_str3}, + ] + assert ( + base.embed(base.EmbedAndKeep([str1, str2, str3]), MockEncoder(), "a_namespace") + == expected_embed_and_keep + ) + + +def test_simple_action_strlist_w_some_emb(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + encoded_str2 = " ".join(char for char in str2) + encoded_str3 = " ".join(char for char in str3) + expected = [ + {"a_namespace": str1}, + {"a_namespace": encoded_text + encoded_str2}, + {"a_namespace": encoded_text + encoded_str3}, + ] + assert ( + base.embed( + [str1, base.Embed(str2), base.Embed(str3)], MockEncoder(), "a_namespace" + ) + == expected + ) + expected_embed_and_keep = [ + {"a_namespace": str1}, + {"a_namespace": str2 + " " + encoded_text + encoded_str2}, + {"a_namespace": str3 + " " + encoded_text + encoded_str3}, + ] + assert ( + base.embed( + [str1, base.EmbedAndKeep(str2), base.EmbedAndKeep(str3)], + MockEncoder(), + "a_namespace", + ) + == expected_embed_and_keep + ) + + +def test_action_w_namespace_no_emb(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + expected = [ + {"test_namespace": str1}, + {"test_namespace": str2}, + {"test_namespace": str3}, + ] + assert ( + base.embed( + [ + {"test_namespace": str1}, + {"test_namespace": str2}, + {"test_namespace": str3}, + ], + MockEncoder(), + ) + == expected + ) + + +def test_action_w_namespace_w_emb(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + encoded_str1 = " ".join(char for char in str1) + encoded_str2 = " ".join(char for char in str2) + encoded_str3 = " ".join(char for char in str3) + expected = [ + {"test_namespace": encoded_text + encoded_str1}, + {"test_namespace": encoded_text + encoded_str2}, + {"test_namespace": encoded_text + encoded_str3}, + ] + assert ( + base.embed( + [ + {"test_namespace": base.Embed(str1)}, + {"test_namespace": base.Embed(str2)}, + {"test_namespace": base.Embed(str3)}, + ], + MockEncoder(), + ) + == expected + ) + expected_embed_and_keep = [ + {"test_namespace": str1 + " " + encoded_text + encoded_str1}, + {"test_namespace": str2 + " " + encoded_text + encoded_str2}, + {"test_namespace": str3 + " " + encoded_text + encoded_str3}, + ] + assert ( + base.embed( + [ + {"test_namespace": base.EmbedAndKeep(str1)}, + {"test_namespace": base.EmbedAndKeep(str2)}, + {"test_namespace": base.EmbedAndKeep(str3)}, + ], + MockEncoder(), + ) + == expected_embed_and_keep + ) + + +def test_action_w_namespace_w_emb2(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + encoded_str1 = " ".join(char for char in str1) + encoded_str2 = " ".join(char for char in str2) + encoded_str3 = " ".join(char for char in str3) + expected = [ + {"test_namespace1": encoded_text + encoded_str1}, + {"test_namespace2": encoded_text + encoded_str2}, + {"test_namespace3": encoded_text + encoded_str3}, + ] + assert ( + base.embed( + base.Embed( + [ + {"test_namespace1": str1}, + {"test_namespace2": str2}, + {"test_namespace3": str3}, + ] + ), + MockEncoder(), + ) + == expected + ) + expected_embed_and_keep = [ + {"test_namespace1": str1 + " " + encoded_text + encoded_str1}, + {"test_namespace2": str2 + " " + encoded_text + encoded_str2}, + {"test_namespace3": str3 + " " + encoded_text + encoded_str3}, + ] + 
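+ # EmbedAndKeep emits the raw feature followed by its encoding within the
+ # same namespace, e.g. "test1 [ e n c o d e d ] t e s t 1".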
assert ( + base.embed( + base.EmbedAndKeep( + [ + {"test_namespace1": str1}, + {"test_namespace2": str2}, + {"test_namespace3": str3}, + ] + ), + MockEncoder(), + ) + == expected_embed_and_keep + ) + + +def test_action_w_namespace_w_some_emb(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + encoded_str2 = " ".join(char for char in str2) + encoded_str3 = " ".join(char for char in str3) + expected = [ + {"test_namespace": str1}, + {"test_namespace": encoded_text + encoded_str2}, + {"test_namespace": encoded_text + encoded_str3}, + ] + assert ( + base.embed( + [ + {"test_namespace": str1}, + {"test_namespace": base.Embed(str2)}, + {"test_namespace": base.Embed(str3)}, + ], + MockEncoder(), + ) + == expected + ) + expected_embed_and_keep = [ + {"test_namespace": str1}, + {"test_namespace": str2 + " " + encoded_text + encoded_str2}, + {"test_namespace": str3 + " " + encoded_text + encoded_str3}, + ] + assert ( + base.embed( + [ + {"test_namespace": str1}, + {"test_namespace": base.EmbedAndKeep(str2)}, + {"test_namespace": base.EmbedAndKeep(str3)}, + ], + MockEncoder(), + ) + == expected_embed_and_keep + ) + + +def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict(): + str1 = "test1" + str2 = "test2" + str3 = "test3" + encoded_str1 = " ".join(char for char in str1) + encoded_str2 = " ".join(char for char in str2) + encoded_str3 = " ".join(char for char in str3) + expected = [ + {"test_namespace": encoded_text + encoded_str1, "test_namespace2": str1}, + {"test_namespace": encoded_text + encoded_str2, "test_namespace2": str2}, + {"test_namespace": encoded_text + encoded_str3, "test_namespace2": str3}, + ] + assert ( + base.embed( + [ + {"test_namespace": base.Embed(str1), "test_namespace2": str1}, + {"test_namespace": base.Embed(str2), "test_namespace2": str2}, + {"test_namespace": base.Embed(str3), "test_namespace2": str3}, + ], + MockEncoder(), + ) + == expected + ) + expected_embed_and_keep = [ + { + "test_namespace": str1 + " " + encoded_text + encoded_str1, + "test_namespace2": str1, + }, + { + "test_namespace": str2 + " " + encoded_text + encoded_str2, + "test_namespace2": str2, + }, + { + "test_namespace": str3 + " " + encoded_text + encoded_str3, + "test_namespace2": str3, + }, + ] + assert ( + base.embed( + [ + {"test_namespace": base.EmbedAndKeep(str1), "test_namespace2": str1}, + {"test_namespace": base.EmbedAndKeep(str2), "test_namespace2": str2}, + {"test_namespace": base.EmbedAndKeep(str3), "test_namespace2": str3}, + ], + MockEncoder(), + ) + == expected_embed_and_keep + ) + + +def test_one_namespace_w_list_of_features_no_emb(): + str1 = "test1" + str2 = "test2" + expected = [{"test_namespace": [str1, str2]}] + assert base.embed({"test_namespace": [str1, str2]}, MockEncoder()) == expected + + +def test_one_namespace_w_list_of_features_w_some_emb(): + str1 = "test1" + str2 = "test2" + encoded_str2 = " ".join(char for char in str2) + expected = [{"test_namespace": [str1, encoded_text + encoded_str2]}] + assert ( + base.embed({"test_namespace": [str1, base.Embed(str2)]}, MockEncoder()) + == expected + ) + + +def test_nested_list_features_throws(): + with pytest.raises(ValueError): + base.embed({"test_namespace": [[1, 2], [3, 4]]}, MockEncoder()) + + +def test_dict_in_list_throws(): + with pytest.raises(ValueError): + base.embed({"test_namespace": [{"a": 1}, {"b": 2}]}, MockEncoder()) + + +def test_nested_dict_throws(): + with pytest.raises(ValueError): + base.embed({"test_namespace": {"a": {"b": 1}}}, MockEncoder()) + + +def test_list_of_tuples_throws(): + with 
pytest.raises(ValueError): + base.embed({"test_namespace": [("a", 1), ("b", 2)]}, MockEncoder()) diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py b/libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py new file mode 100644 index 0000000000..b5c094bd55 --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py @@ -0,0 +1,124 @@ +import sys + +sys.path.append("..") + +import rl_chain.slates_chain as slates +from test_utils import MockEncoder + +import pytest + +encoded_keyword = "[encoded]" +encoded_text = "[ e n c o d e d ] " + + +def test_slate_text_creation_no_label_no_emb(): + named_actions = {"prefix": ["0", "1"], "context": ["bla"], "suffix": ["0", "1"]} + expected = """slates shared |\nslates action 0 |Action 0\nslates action 0 |Action 1\nslates action 1 |Action bla\nslates action 2 |Action 0\nslates action 2 |Action 1\nslates slot |\nslates slot |\nslates slot |""" + feature_embedder = slates.SlatesFeatureEmbedder() + event = slates.SlatesPersonalizerChain.Event( + inputs={}, to_select_from=named_actions, based_on={} + ) + vw_str_ex = feature_embedder.format(event) + assert vw_str_ex == expected + + +def _str(embedding): + return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) + + +def test_slate_text_creation_no_label_w_emb(): + action00 = "0" + action01 = "1" + action10 = "bla" + action20 = "0" + action21 = "1" + encoded_action00 = _str(encoded_keyword + action00) + encoded_action01 = _str(encoded_keyword + action01) + encoded_action10 = _str(encoded_keyword + action10) + encoded_action20 = _str(encoded_keyword + action20) + encoded_action21 = _str(encoded_keyword + action21) + + named_actions = { + "prefix": slates.base.Embed(["0", "1"]), + "context": slates.base.Embed(["bla"]), + "suffix": slates.base.Embed(["0", "1"]), + } + expected = f"""slates shared |\nslates action 0 |Action {encoded_action00}\nslates action 0 |Action {encoded_action01}\nslates action 1 |Action {encoded_action10}\nslates action 2 |Action {encoded_action20}\nslates action 2 |Action {encoded_action21}\nslates slot |\nslates slot |\nslates slot |""" + feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) + event = slates.SlatesPersonalizerChain.Event( + inputs={}, to_select_from=named_actions, based_on={} + ) + vw_str_ex = feature_embedder.format(event) + assert vw_str_ex == expected + + +def test_slate_text_create_no_label_w_embed_and_keep(): + action00 = "0" + action01 = "1" + action10 = "bla" + action20 = "0" + action21 = "1" + encoded_action00 = _str(encoded_keyword + action00) + encoded_action01 = _str(encoded_keyword + action01) + encoded_action10 = _str(encoded_keyword + action10) + encoded_action20 = _str(encoded_keyword + action20) + encoded_action21 = _str(encoded_keyword + action21) + + named_actions = { + "prefix": slates.base.EmbedAndKeep(["0", "1"]), + "context": slates.base.EmbedAndKeep(["bla"]), + "suffix": slates.base.EmbedAndKeep(["0", "1"]), + } + expected = f"""slates shared |\nslates action 0 |Action {action00 + " " + encoded_action00}\nslates action 0 |Action {action01 + " " + encoded_action01}\nslates action 1 |Action {action10 + " " + encoded_action10}\nslates action 2 |Action {action20 + " " + encoded_action20}\nslates action 2 |Action {action21 + " " + encoded_action21}\nslates slot |\nslates slot |\nslates slot |""" + feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) + event = slates.SlatesPersonalizerChain.Event( + inputs={}, 
to_select_from=named_actions, based_on={} + ) + vw_str_ex = feature_embedder.format(event) + assert vw_str_ex == expected + + +def test_slates_raw_features_underscored(): + action00 = "this is a long action 0" + action01 = "this is a long action 1" + action00_underscored = action00.replace(" ", "_") + action01_underscored = action01.replace(" ", "_") + encoded_action00 = _str(encoded_keyword + action00) + encoded_action01 = _str(encoded_keyword + action01) + + ctx_str = "this is a long context" + ctx_str_underscored = ctx_str.replace(" ", "_") + encoded_ctx_str = encoded_text + " ".join(char for char in ctx_str) + + # No Embeddings + named_actions = {"prefix": [action00, action01]} + context = {"context": ctx_str} + expected_no_embed = f"""slates shared |context {ctx_str_underscored} \nslates action 0 |Action {action00_underscored}\nslates action 0 |Action {action01_underscored}\nslates slot |""" + feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) + event = slates.SlatesPersonalizerChain.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_str_ex = feature_embedder.format(event) + assert vw_str_ex == expected_no_embed + + # Just embeddings + named_actions = {"prefix": slates.base.Embed([action00, action01])} + context = {"context": slates.base.Embed(ctx_str)} + expected_embed = f"""slates shared |context {encoded_ctx_str} \nslates action 0 |Action {encoded_action00}\nslates action 0 |Action {encoded_action01}\nslates slot |""" + feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) + event = slates.SlatesPersonalizerChain.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_str_ex = feature_embedder.format(event) + assert vw_str_ex == expected_embed + + # Embeddings and raw features + named_actions = {"prefix": slates.base.EmbedAndKeep([action00, action01])} + context = {"context": slates.base.EmbedAndKeep(ctx_str)} + expected_embed_and_keep = f"""slates shared |context {ctx_str_underscored + " " + encoded_ctx_str} \nslates action 0 |Action {action00_underscored + " " + encoded_action00}\nslates action 0 |Action {action01_underscored + " " + encoded_action01}\nslates slot |""" + feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) + event = slates.SlatesPersonalizerChain.Event( + inputs={}, to_select_from=named_actions, based_on=context + ) + vw_str_ex = feature_embedder.format(event) + assert vw_str_ex == expected_embed_and_keep diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_utils.py b/libs/langchain/langchain/chains/rl_chain/tests/test_utils.py new file mode 100644 index 0000000000..8b3773165e --- /dev/null +++ b/libs/langchain/langchain/chains/rl_chain/tests/test_utils.py @@ -0,0 +1,14 @@ +from rl_chain import SelectionScorer +from typing import Dict, Any + + +class MockScorer(SelectionScorer): + def score_response( + self, inputs: Dict[str, Any], llm_response: str, **kwargs + ) -> float: + return float(llm_response) + + +class MockEncoder: + def encode(self, to_encode): + return "[encoded]" + to_encode From 56b40beb0e3943d5235676d3f4f75b3a4491123d Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 02:04:35 -0400 Subject: [PATCH 02/65] keep only what is needed for first PR --- .../rl_chain/.github/workflows/unit_tests.yml | 23 - .../langchain/chains/rl_chain/.gitignore | 6 - .../langchain/chains/rl_chain/LICENSE | 21 - .../langchain/chains/rl_chain/README.md | 25 -- .../rl_chain/{rl_chain => }/__init__.py | 0 .../chains/rl_chain/{rl_chain => }/metrics.py | 0 
.../{rl_chain => }/model_repository.py | 0 .../{rl_chain => }/pick_best_chain.py | 0 .../chains/rl_chain/prompt_selection.ipynb | 364 ---------------- .../langchain/chains/rl_chain/rl_chain.ipynb | 403 ------------------ .../chains/rl_chain/rl_chain/slates_chain.py | 275 ------------ .../rl_chain/{rl_chain => }/rl_chain_base.py | 0 .../rl_chain/{rl_chain => }/vw_logger.py | 0 13 files changed, 1117 deletions(-) delete mode 100644 libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml delete mode 100644 libs/langchain/langchain/chains/rl_chain/.gitignore delete mode 100644 libs/langchain/langchain/chains/rl_chain/LICENSE delete mode 100644 libs/langchain/langchain/chains/rl_chain/README.md rename libs/langchain/langchain/chains/rl_chain/{rl_chain => }/__init__.py (100%) rename libs/langchain/langchain/chains/rl_chain/{rl_chain => }/metrics.py (100%) rename libs/langchain/langchain/chains/rl_chain/{rl_chain => }/model_repository.py (100%) rename libs/langchain/langchain/chains/rl_chain/{rl_chain => }/pick_best_chain.py (100%) delete mode 100644 libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb delete mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb delete mode 100644 libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py rename libs/langchain/langchain/chains/rl_chain/{rl_chain => }/rl_chain_base.py (100%) rename libs/langchain/langchain/chains/rl_chain/{rl_chain => }/vw_logger.py (100%) diff --git a/libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml b/libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml deleted file mode 100644 index 029646a11e..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/.github/workflows/unit_tests.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Unit Tests - -on: - push: - branches: - - main - pull_request: - branches: - - '*' - -jobs: - python-unit-test: - container: - image: python:3.8 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - - name: Run Tests - shell: bash - run: | - pip install -r requirements.txt - pip install pytest - python -m pytest tests/ \ No newline at end of file diff --git a/libs/langchain/langchain/chains/rl_chain/.gitignore b/libs/langchain/langchain/chains/rl_chain/.gitignore deleted file mode 100644 index 0845b27a84..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -**/__pycache__/** -models/* -logs/* -**/*.vw -.venv - diff --git a/libs/langchain/langchain/chains/rl_chain/LICENSE b/libs/langchain/langchain/chains/rl_chain/LICENSE deleted file mode 100644 index a1c616b972..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2023 Vowpal Wabbit - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/libs/langchain/langchain/chains/rl_chain/README.md b/libs/langchain/langchain/chains/rl_chain/README.md deleted file mode 100644 index 76028002ad..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# VW in a langchain chain - -Install the packages in `requirements.txt` - -[VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) - -There is an example notebook (rl_chain.ipynb) with basic usage of the chain. - -TLDR: - -- The chain is initialized and creates a Vowpal Wabbit instance - only Contextual Bandits and Slates are supported for now -- You can change the arguments at chain creation time -- There is a default prompt but it can be changed -- There is a default reward function that is triggered automatically and in turn triggers learning - - This can be turned off and the score can be specified explicitly - -Flow: - -- Developer: creates the chain -- Developer: sets the actions -- Developer: calls the chain with the context and other prompt inputs -- Chain: calls VW with the context and selects an action -- Chain: the action (and other vars) are passed to the LLM with the prompt -- Chain: if the default reward is set, the LLM is called to judge the response and give it a reward score based on the context -- Chain: VW learn is triggered with that score
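In code, the flow above corresponds roughly to the following minimal sketch (illustrative only: it assumes an `llm` and a `PROMPT` are already configured, and it uses the `PickBest` API names that appear in the notebooks and tests later in this series):

```python
import rl_chain

# Developer: creates chain (the default prompt and the default LLM-based
# scorer apply unless they are overridden at construction time)
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)

# Developer: calls the chain with context and the actions to select from
response = chain.run(
    User=rl_chain.BasedOn("Tom"),
    action=rl_chain.ToSelectFrom(["an action", "another action"]),
)

# Chain: VW selected an action, the LLM was called with it, and (when a
# scorer is set) the response was scored and VW learn was triggered
metadata = response["selection_metadata"]
print(response["response"], metadata.selected.score)

# With selection_scorer=None the score can instead be supplied later:
# chain.update_with_delayed_score(score=1.0, event=metadata)
```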
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/rl_chain/__init__.py rename to libs/langchain/langchain/chains/rl_chain/__init__.py diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/metrics.py b/libs/langchain/langchain/chains/rl_chain/metrics.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/rl_chain/metrics.py rename to libs/langchain/langchain/chains/rl_chain/metrics.py diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/model_repository.py b/libs/langchain/langchain/chains/rl_chain/model_repository.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/rl_chain/model_repository.py rename to libs/langchain/langchain/chains/rl_chain/model_repository.py diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/rl_chain/pick_best_chain.py rename to libs/langchain/langchain/chains/rl_chain/pick_best_chain.py diff --git a/libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb b/libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb deleted file mode 100644 index 0caea8b446..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/prompt_selection.ipynb +++ /dev/null @@ -1,364 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Prepare core llm chain" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import langchain\n", - "langchain.debug = False # set to True if you want to see what the LLM is doing\n", - "\n", - "from langchain.chat_models import AzureChatOpenAI\n", - "\n", - "import dotenv\n", - "dotenv.load_dotenv()\n", - "\n", - "llm = AzureChatOpenAI(\n", - " deployment_name=\"gpt-35-turbo\",\n", - " temperature=0,\n", - " request_timeout=20,\n", - " max_retries=1,\n", - " client=None,\n", - ")\n", - "\n", - "llm.predict('Are you ready?')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Vanilla LLMChain" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.chains.llm import LLMChain\n", - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "llm_chain = LLMChain(\n", - " llm = llm,\n", - " prompt = PromptTemplate(\n", - " input_variables=[\"adjective\", \"content\", \"topic\"],\n", - " template=\"Hi, please create {adjective} {content} about {topic}.\",\n", - " ))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "llm_chain.run(\n", - " adjective = \"funny\",\n", - " content = \"poem\",\n", - " topic = \"machine learning\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Variable selection" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "llm_chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", - " llm=llm,\n", - " prompt = PromptTemplate(\n", - " input_variables=[\"adjective\", \"content\", \"topic\"],\n", - " template=\"Hi, please create {adjective} {content} about {topic}\",\n", - " ))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "r = llm_chain.run(\n", - " adjective = rl_chain.ToSelectFrom([\"funny\"]),\n", - " content = rl_chain.ToSelectFrom([\"poem\"]),\n", - " topic = rl_chain.ToSelectFrom([\"machine learning\"]))\n", - "\n", - "print(r[\"response\"])\n", - "print(r[\"selection_metadata\"].to_select_from)\n", - "print(r[\"selection_metadata\"].based_on)\n", - "print(r[\"selection_metadata\"].selected.score)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "llm_chain.update_with_delayed_score(score=1, event=r[\"selection_metadata\"], force_score=True)\n", - "print(r[\"selection_metadata\"].selected.score)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "It is OK to be uncertain about some of the variable values" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "llm_chain.run(\n", - " adjective = rl_chain.ToSelectFrom([\"funny\", \"scary\"]),\n", - " content = rl_chain.ToSelectFrom([\"poem\"]),\n", - " topic = rl_chain.ToSelectFrom([\"machine learning\", \"cats\"]))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Full loop" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "from langchain.prompts.prompt import PromptTemplate\n", - "from langchain.prompts import (\n", - " ChatPromptTemplate,\n", - " HumanMessagePromptTemplate,\n", - ")\n", - "\n", - "template = \"\"\"\n", - "using style {style}\n", - "\n", - "{prefix}\n", - "{goal}: {context}.\n", - "{suffix}\n", - "\"\"\"\n", - "prompt = PromptTemplate(\n", - " input_variables=[\"prefix\",
\"goal\", \"context\", \"suffix\", \"style\"],\n", - " template=template,\n", - ")\n", - "chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", - " llm=llm,\n", - " vw_logs = 'logs/stories.txt',\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " prompt = prompt,\n", - " selection_scorer = rl_chain.AutoSelectionScorer(\n", - " llm=llm,\n", - " scoring_criteria_template_str = '''Given the task:\n", - " {goal}: {context}\n", - " rank how good or bad this response is:\n", - " {llm_response}.''',\n", - " ),\n", - " metrics_step=1\n", - ")\n", - "\n", - "chain.run(\n", - " prefix = rl_chain.ToSelectFrom([f'ALWAYS DO EXACTLY WHAT I ASK YOU!', 'Please do your best to help me.']),\n", - " goal = rl_chain.ToSelectFrom(['Write a funny story about']),\n", - " context = rl_chain.ToSelectFrom(['Friends series']),\n", - " suffix = rl_chain.ToSelectFrom(['Please try to be as funny as possible.', '']),\n", - " style = \"Shakespeare\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "template = \"\"\"\n", - "{prefix}\n", - "{goal}: {context}.\n", - "{suffix}\n", - "\"\"\"\n", - "prompt = PromptTemplate(\n", - " input_variables=[\"prefix\", \"goal\", \"context\", \"suffix\"],\n", - " template=template,\n", - ")\n", - "chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", - " llm=llm,\n", - " vw_logs = 'logs/stories.txt',\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " prompt = prompt,\n", - " selection_scorer = rl_chain.AutoSelectionScorer(\n", - " llm=llm,\n", - " scoring_criteria_template_str = '''Given the task:\n", - " {goal}: {context}\n", - " rank how good or bad this response is:\n", - " {llm_response}.'''\n", - " ),\n", - " metrics_step=1\n", - ")\n", - "chain.run(\n", - " prefix = rl_chain.ToSelectFrom(rl_chain.Embed([f'ALWAYS DO EXACTLY WHAT I ASK YOU!', 'Please do your best to help me.'])),\n", - " goal = rl_chain.ToSelectFrom([rl_chain.Embed('Write a funny story about')]),\n", - " context = rl_chain.ToSelectFrom(['Friends series']),\n", - " suffix = rl_chain.ToSelectFrom(['Please try to be as funny as possible.', '']))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Experiment with mock llm" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import List\n", - "from tests.test_utils import MockScorer\n", - "\n", - "class MockLLMChain:\n", - " outcomes: List[List[float]] = None\n", - " \n", - " def __init__(self, outcomes, prompt):\n", - " self.outcomes = outcomes\n", - " self.prompt = prompt\n", - "\n", - " def run(self, prefix, suffix, **kwargs):\n", - " return str(self.outcomes[int(prefix)][int(suffix)])\n", - "\n", - "import rl_chain\n", - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "template = \"\"\"\n", - "{prefix}\n", - "{context}\n", - "{suffix}\n", - "\"\"\"\n", - "prompt = PromptTemplate(\n", - " input_variables=[\"prefix\", \"context\", \"suffix\"],\n", - " template=template,\n", - ")\n", - "chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", - " llm=llm,\n", - " vw_logs = 'logs/mock.txt',\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " prompt = prompt,\n", - " selection_scorer = MockScorer(),\n", - " metrics_step=1\n", - ")\n", - "chain.llm_chain = MockLLMChain([\n", - " [0, 0.3],\n", - " [0.6, 
0.9]], prompt = prompt)\n", - "chain.run(\n", - " prefix = rl_chain.ToSelectFrom(['0', '1']),\n", - " context = rl_chain.ToSelectFrom(['bla']),\n", - " suffix = rl_chain.ToSelectFrom(['0', '1']))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "from matplotlib import pyplot as plt\n", - "\n", - "vw_chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", - " llm=llm,\n", - " vw_logs = 'logs/mock.txt',\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " prompt = prompt,\n", - " policy = rl_chain.VwPolicy,\n", - " selection_scorer = MockScorer(),\n", - " auto_embed=False,\n", - " metrics_step=1\n", - ")\n", - "vw_chain.llm_chain = MockLLMChain([\n", - " [0, 0.3],\n", - " [0.6, 0.9]], prompt = prompt)\n", - "\n", - "rnd_chain = rl_chain.SlatesPersonalizerChain.from_llm(\n", - " llm=llm,\n", - " vw_logs = 'logs/mock.txt',\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " prompt = prompt,\n", - " policy = rl_chain.SlatesRandomPolicy,\n", - " selection_scorer = MockScorer(),\n", - " auto_embed=False,\n", - " metrics_step=1\n", - ")\n", - "rnd_chain.llm_chain = MockLLMChain([\n", - " [0, 0.3],\n", - " [0.6, 0.9]], prompt = prompt)\n", - "\n", - "for i in range(1000):\n", - " vw_chain.run(\n", - " prefix = rl_chain.ToSelectFrom(['0', '1']),\n", - " context = rl_chain.ToSelectFrom(['bla']),\n", - " suffix = rl_chain.ToSelectFrom(['0']))\n", - " rnd_chain.run(\n", - " prefix = rl_chain.ToSelectFrom(['0', '1']),\n", - " context = rl_chain.ToSelectFrom(['bla']),\n", - " suffix = rl_chain.ToSelectFrom(['0']))\n", - "\n", - "vw_chain.metrics.to_pandas()['score'].plot(label=\"vw\")\n", - "rnd_chain.metrics.to_pandas()['score'].plot(label=\"slates\")\n", - "plt.legend()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb b/libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb deleted file mode 100644 index c39661d0cb..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/rl_chain.ipynb +++ /dev/null @@ -1,403 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class MealPlanner:\n", - " def __init__(self, name: str, desc: str, difficulty: str, tags: str):\n", - " try:\n", - " self.name = name\n", - " self.desc = desc\n", - " self.diff = difficulty\n", - " self.tags = tags\n", - " except:\n", - " print(name)\n", - " raise ValueError" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Actions\n", - "## examples copied from hello fresh website\n", - "actions = [\n", - " MealPlanner(name=\"One-Pan Beef Enchiladas Verdes with Mexican Cheese Blend & Hot Sauce Crema\", difficulty=\"Easy\", tags=\"Spicy, Easy Cleanup, Easy Prep\", desc=\"When it comes to Mexican-style cuisine, burritos typically get all the glory. In our humble opinion, enchiladas are an unsung dinner hero. 
They’re technically easier-to-assemble burritos that get smothered in a delicious sauce, but they’re really so much more than that! Ours start with spiced beef and charred green pepper that get rolled up in warm tortillas. This winning combo gets topped with tangy salsa verde and cheese, then baked until bubbly and melty. Hear that? That’s the sound of the dinner bell!\"),\n", - " MealPlanner(name=\"Chicken & Mushroom Flatbreads with Gouda Cream Sauce & Parmesan\", difficulty=\"Easy\", tags=\"\", desc=\"Yes we love our simple cheese pizza with red sauce but tonight, move over, marinara—there’s a new sauce in town. In this recipe, crispy flatbreads are slathered with a rich, creamy gouda-mustard sauce we just can’t get enough of. We top that off with a pile of caramelized onion and earthy cremini mushrooms. Shower with Parmesan, and that’s it. Simple, satisfying, and all in 30 minutes–a dinner idea you can’t pass up!\"),\n", - " MealPlanner(name=\"Sweet Potato & Pepper Quesadillas with Southwest Crema & Tomato Salsa\", difficulty=\"Easy\", tags=\"Veggie\", desc=\"This quesadilla is jam-packed with flavorful roasted sweet potato and green pepper, plus two types of gooey, melty cheese (how could we choose just one?!). Of course, we’d never forget the toppings—there’s a fresh tomato salsa and dollops of spiced lime crema. Now for the fun part: piling on a little bit of everything to construct the perfect bite!\"),\n", - " MealPlanner(name=\"One-Pan Trattoria Tortelloni Bake with a Crispy Parmesan Panko Topping\", difficulty=\"Easy\", tags=\"Veggie, Easy Cleanup, Easy Prep\", desc=\"Think a cheesy stuffed pasta can’t get any better? What about baking it in a creamy sauce with a crispy topping? In this recipe, we toss cheese-stuffed tortelloni in an herby tomato cream sauce, then top with Parmesan and panko breadcrumbs. Once broiled, it turns into a showstopping topping that’ll earn you plenty of oohs and aahs from your lucky fellow diners.\"),\n", - "]\n", - "\n", - "meals = [f'title={action.name.replace(\":\", \"\").replace(\"|\", \"\")}' for action in actions]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.chat_models import AzureChatOpenAI\n", - "import langchain\n", - "langchain.debug = False\n", - "# assuming LLM api keys have been set in the environment\n", - "# can use whatever LLM you want here doesn't have to be AzureChatOpenAI\n", - "\n", - "llm = AzureChatOpenAI(\n", - " deployment_name=\"gpt-35-turbo\",\n", - " temperature=0,\n", - " request_timeout=10,\n", - " max_retries=1,\n", - " client=None,\n", - ")\n", - "\n", - "llm.predict('Are you ready?')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "##### default chain default reward (the LLM is used to judge and rank the response)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "from langchain.prompts import PromptTemplate\n", - "\n", - "import logging\n", - "logger = logging.getLogger(\"rl_chain\")\n", - "logger.setLevel(logging.INFO)\n", - "\n", - "_PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: {meal}.\n", - "\n", - "You have to embed this into the given text where it makes sense. 
Here is the given text: {text_to_personalize}.\n", - "\n", - "\"\"\"\n", - "\n", - "PROMPT = PromptTemplate(\n", - " input_variables=[\"meal\", \"text_to_personalize\"], template=_PROMPT_TEMPLATE\n", - ")\n", - "\n", - "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "response = chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " User = rl_chain.BasedOn(\"Tom Hanks\"),\n", - " preference = rl_chain.BasedOn(\"Vegetarian, regular dairy is ok\"),\n", - " text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n", - ")\n", - "\n", - "print(response[\"response\"])\n", - "rr = response[\"selection_metadata\"]\n", - "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "_OTHER_PROMPT_TEMPLATE = \"\"\"You can use the actions that were chosen by VW like so: {action}.\n", - "\n", - "And use whatever other vars you want to pass into the chain at run: {some_text}. And {some_other_text}\n", - "\n", - "\"\"\"\n", - "\n", - "\n", - "OTHER_PROMPT = PromptTemplate(\n", - " input_variables=[\"action\", \"some_text\", \"some_other_text\"],\n", - " template=_OTHER_PROMPT_TEMPLATE,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain.pick_best_chain\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " prompt=OTHER_PROMPT,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "response = chain.run(\n", - " some_text = \"This is some text\",\n", - " some_other_text = \"This is some other text\",\n", - " action=rl_chain.ToSelectFrom([\"an action\", \"another action\", \"a third action\"]),\n", - " User = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn(\"Vegetarian\")\n", - ")\n", - "\n", - "print(response[\"response\"])\n", - "rr = response[\"selection_metadata\"]\n", - "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### actions and context with multiple namespaces" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# each action is a dictionary of namespace to action string\n", - "# this example shows that while embedding is recommended for all features, it is not required and can be customized\n", - "action_strs_w_ns = [{\"A\":\"an action feature\", \"B\" : rl_chain.Embed(\"another action feature\")}, {\"B\": \"another action\"}, {\"C\":\"a third action\"}]\n", - "\n", - "inputs = {\n", - " \"some_text\": \"This is some text\",\n", - " \"some_other_text\": \"This is some other text\",\n", - " \"action\" : rl_chain.ToSelectFrom(action_strs_w_ns)\n", - "}\n", - "\n", - "inputs[\"User\"] = rl_chain.BasedOn(\"Tom\")\n", - "inputs[\"preference\"] = rl_chain.BasedOn(rl_chain.Embed(\"Vegetarian\"))\n", - "response = chain.run(inputs)\n", - "print(response[\"response\"])\n", - "rr =
response[\"selection_metadata\"]\n", - "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "##### chain with default prompt and custom reward prompt (the LLM is used to judge and rank the response)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.llms import OpenAI\n", - "\n", - "llm = OpenAI(engine=\"text-davinci-003\")\n", - "\n", - "llm('Are you ready?')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "\n", - "human_template = \"Given {preference} rank how good or bad this selection is {action}\"\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=OTHER_PROMPT,\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=human_template),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "actions = [\"an action\", \"another action\", \"a third action\"]\n", - "\n", - "response = chain.run(\n", - " some_text = \"Some text\",\n", - " some_other_text = \"Some other text\",\n", - " action=rl_chain.ToSelectFrom(actions),\n", - " User = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn(\"Vegetarian\"),\n", - ")\n", - "print(response[\"response\"])\n", - "rr = response[\"selection_metadata\"]\n", - "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", - "\n", - "_REWARD_PROMPT_TEMPLATE = \"\"\"Given {preference} rank how good or bad this selection is {action}, IMPORANT: you MUST return a single number between 0 and 1, 0 being bad, 1 being good\"\"\"\n", - "\n", - "\n", - "REWARD_PROMPT = PromptTemplate(\n", - " input_variables=[\"preference\", \"action\"],\n", - " template=_REWARD_PROMPT_TEMPLATE,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "\n", - "human_template = \"Given {preference} rank how good or bad this selection is {action}\"\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=OTHER_PROMPT,\n", - " model_save_dir=\"./models\", # where to save the model checkpoints\n", - " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "actions = [\"an action\", \"another action\", \"a third action\"]\n", - "\n", - "response = chain.run(\n", - " some_text = \"Some text\",\n", - " some_other_text = \"Some other text\",\n", - " action=rl_chain.ToSelectFrom(actions),\n", - " User = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn(\"Vegetarian\"),\n", - ")\n", - "print(response[\"response\"])\n", - "rr = response[\"selection_metadata\"]\n", - "print(f\"score: {rr.selected.score}, selection index: {rr.selected.index}, probability: {rr.selected.probability}, \")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "##### 
- { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Asynchronous user-defined reward" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import rl_chain\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=None)\n", - "\n", - "# whenever you have the reward for the call, send it back to the chain to learn from\n", - "\n", - "response = chain.run(text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " User = rl_chain.BasedOn(rl_chain.Embed(\"Tom\")),\n", - " preference = rl_chain.BasedOn(\"Vegetarian\")\n", - " )\n", - "print(response[\"response\"])\n", - "rr = response[\"selection_metadata\"]\n", - "# score should be None here because we turned auto-scoring off\n", - "print(f\"score: {rr.selected.score}, action: {rr.selected.index}, probability: {rr.selected.probability}, \")\n", - "\n", - "# learn the delayed score/grade\n", - "chain.update_with_delayed_score(score=1.0, event=rr)\n", - "\n", - "print(f\"score: {rr.selected.score}\")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py b/libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py deleted file mode 100644 index 62beedaecf..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/rl_chain/slates_chain.py +++ /dev/null @@ -1,275 +0,0 @@ -from __future__ import annotations - -from .
import rl_chain_base as base -from langchain.prompts.prompt import PromptTemplate - -from langchain.callbacks.manager import CallbackManagerForChainRun -from langchain.chains.base import Chain - -from typing import Any, Dict, List, Optional, Tuple, Union -from itertools import chain -import random - - -from langchain.base_language import BaseLanguageModel -from langchain.chains.llm import LLMChain -from sentence_transformers import SentenceTransformer - -# sentinel object used to distinguish between user didn't supply anything or user explicitly supplied None -SENTINEL = object() - - -class SlatesFeatureEmbedder(base.Embedder): - """ - Slates Text Embedder class that embeds the context and actions and slates into a format that can be used by VW - - Attributes: - model (Any, optional): The type of embeddings to be used for feature representation. Defaults to BERT Sentence Transformer - """ - - def __init__(self, model: Optional[Any] = None, *args, **kwargs): - super().__init__(*args, **kwargs) - - if model is None: - model = SentenceTransformer("bert-base-nli-mean-tokens") - - self.model = model - - def to_action_features(self, actions: Dict[str, Any]): - def _str(embedding): - return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) - - action_features = [] - for slot in actions.values(): - slot_features = [] - for action in slot: - if isinstance(action, base._Embed) and action.keep: - feature = ( - action.value.replace(" ", "_") - + " " - + _str(self.model.encode(action.value)) - ) - elif isinstance(action, base._Embed): - feature = _str(self.model.encode(action.value)) - else: - feature = action.replace(" ", "_") - slot_features.append(feature) - action_features.append(slot_features) - - return action_features - - def format(self, event: SlatesPersonalizerChain.Event) -> str: - action_features = self.to_action_features(event.to_select_from) - - cost = ( - -1.0 * event.selected.score - if event.selected and event.selected.score is not None - else "" - ) - context_str = f"slates shared {cost} " - - if event.based_on: - embedded_context = base.embed(event.based_on, self.model) - for context_item in embedded_context: - for ns, ctx in context_item.items(): - context_str += ( - f"|{ns} {' '.join(ctx) if isinstance(ctx, list) else ctx} " - ) - else: - context_str += "|" # empty context - - actions = chain.from_iterable( - [ - [f"slates action {i} |Action {action}"] - for i, slot in enumerate(action_features) - for action in slot - ] - ) - ps = ( - [f"{a}:{p}" for a, p in event.selected.get_indexes_and_probabilities()] - if event.selected - else [""] * len(action_features) - ) - slots = [f"slates slot {p} |" for p in ps] - return "\n".join(list(chain.from_iterable([[context_str], actions, slots]))) - - -class SlatesRandomPolicy(base.Policy): - def __init__(self, feature_embedder: base.Embedder, *_, **__): - self.feature_embedder = feature_embedder - - def predict(self, event: SlatesPersonalizerChain.Event) -> Any: - return [ - [(random.randint(0, len(slot) - 1), 1.0 / len(slot))] - for _, slot in event.to_select_from.items() - ] - - def learn(self, event: SlatesPersonalizerChain.Event) -> Any: - pass - - def log(self, event: SlatesPersonalizerChain.Event) -> Any: - pass - - -class SlatesFirstChoicePolicy(base.Policy): - def __init__(self, feature_embedder: base.Embedder, *_, **__): - self.feature_embedder = feature_embedder - - def predict(self, event: SlatesPersonalizerChain.Event) -> Any: - return [[(0, 1)] for _ in event.to_select_from] - - def learn(self, event: 
SlatesPersonalizerChain.Event) -> Any: - pass - - def log(self, event: SlatesPersonalizerChain.Event) -> Any: - pass - - -class SlatesPersonalizerChain(base.RLChain): - class Selected(base.Selected): - indexes: Optional[List[int]] - probabilities: Optional[List[float]] - score: Optional[float] - - def __init__( - self, - indexes: Optional[List[int]] = None, - probabilities: Optional[List[float]] = None, - score: Optional[float] = None, - ): - self.indexes = indexes - self.probabilities = probabilities - self.score = score - - def get_indexes_and_probabilities(self): - return zip(self.indexes, self.probabilities) - - class Event(base.Event): - def __init__( - self, - inputs: Dict[str, Any], - to_select_from: Dict[str, Any], - based_on: Dict[str, Any], - selected: Optional[SlatesPersonalizerChain.Selected] = None, - ): - super().__init__(inputs=inputs, selected=selected) - self.to_select_from = to_select_from - self.based_on = based_on - - def __init__( - self, feature_embedder: Optional[base.Embedder] = None, *args, **kwargs - ): - vw_cmd = kwargs.get("vw_cmd", []) - if not vw_cmd: - vw_cmd = [ - "--slates", - "--quiet", - "--interactions=::", - "--coin", - "--squarecb", - ] - else: - if "--slates" not in vw_cmd: - raise ValueError("If vw_cmd is specified, it must include --slates") - - kwargs["vw_cmd"] = vw_cmd - - if feature_embedder is None: - feature_embedder = SlatesFeatureEmbedder() - - super().__init__(feature_embedder=feature_embedder, *args, **kwargs) - - def _call_before_predict( - self, inputs: Dict[str, Any] - ) -> SlatesPersonalizerChain.Event: - context, actions = base.get_based_on_and_to_select_from(inputs=inputs) - event = SlatesPersonalizerChain.Event( - inputs=inputs, to_select_from=actions, based_on=context - ) - return event - - def _call_after_predict_before_llm( - self, - inputs: Dict[str, Any], - event: SlatesPersonalizerChain.Event, - prediction: List[List[Tuple[int, float]]], - ) -> Tuple[Dict[str, Any], SlatesPersonalizerChain.Event]: - indexes = [p[0][0] for p in prediction] - probabilities = [p[0][1] for p in prediction] - selected = SlatesPersonalizerChain.Selected( - indexes=indexes, probabilities=probabilities - ) - event.selected = selected - - preds = {} - for i, (j, a) in enumerate( - zip(event.selected.indexes, event.to_select_from.values()) - ): - preds[list(event.to_select_from.keys())[i]] = str(a[j]) - - next_chain_inputs = inputs.copy() - next_chain_inputs.update(preds) - - return next_chain_inputs, event - - def _call_after_llm_before_scoring( - self, llm_response: str, event: SlatesPersonalizerChain.Event - ) -> Tuple[Dict[str, Any], SlatesPersonalizerChain.Event]: - next_chain_inputs = event.inputs.copy() - next_chain_inputs.update( - { - self.selected_based_on_input_key: str(event.based_on), - self.selected_input_key: str(event.to_select_from), - } - ) - return next_chain_inputs, event - - def _call_after_scoring_before_learning( - self, event: Event, score: Optional[float] - ) -> SlatesPersonalizerChain.Event: - event.selected.score = score - return event - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - return super()._call(run_manager=run_manager, inputs=inputs) - - @property - def _chain_type(self) -> str: - return "llm_personalizer_chain" - - @classmethod - def from_chain( - cls, - llm_chain: Chain, - prompt: PromptTemplate, - selection_scorer=SENTINEL, - **kwargs: Any, - ): - if selection_scorer is SENTINEL: - selection_scorer = 
base.AutoSelectionScorer(llm=llm_chain.llm) - return SlatesPersonalizerChain( - llm_chain=llm_chain, - prompt=prompt, - selection_scorer=selection_scorer, - **kwargs, - ) - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - prompt: PromptTemplate, - selection_scorer=SENTINEL, - **kwargs: Any, - ): - llm_chain = LLMChain(llm=llm, prompt=prompt) - return SlatesPersonalizerChain.from_chain( - llm_chain=llm_chain, - prompt=prompt, - selection_scorer=selection_scorer, - **kwargs, - ) diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/rl_chain_base.py b/libs/langchain/langchain/chains/rl_chain/rl_chain_base.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/rl_chain/rl_chain_base.py rename to libs/langchain/langchain/chains/rl_chain/rl_chain_base.py diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain/vw_logger.py b/libs/langchain/langchain/chains/rl_chain/vw_logger.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/rl_chain/vw_logger.py rename to libs/langchain/langchain/chains/rl_chain/vw_logger.py From c37fd29fd896695963f04217d7e554139cf5b2f8 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 02:22:00 -0400 Subject: [PATCH 03/65] move tests to correct directory and cleanup slates examples --- .../langchain/chains/rl_chain/__init__.py | 5 - .../chains/rl_chain/requirements.txt | 7 - .../tests/test_slates_text_embedder.py | 124 ------------------ .../chains/rl_chain/tests/test_utils.py | 14 -- libs/langchain/pyproject.toml | 1 + .../rl_chain}/test_pick_best_chain_call.py | 6 +- .../rl_chain}/test_pick_best_text_embedder.py | 6 +- .../rl_chain}/test_rl_chain_base_embedder.py | 6 +- .../unit_tests/chains/rl_chain/test_utils.py | 3 + 9 files changed, 7 insertions(+), 165 deletions(-) delete mode 100644 libs/langchain/langchain/chains/rl_chain/requirements.txt delete mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py delete mode 100644 libs/langchain/langchain/chains/rl_chain/tests/test_utils.py rename libs/langchain/{langchain/chains/rl_chain/tests => tests/unit_tests/chains/rl_chain}/test_pick_best_chain_call.py (99%) rename libs/langchain/{langchain/chains/rl_chain/tests => tests/unit_tests/chains/rl_chain}/test_pick_best_text_embedder.py (99%) rename libs/langchain/{langchain/chains/rl_chain/tests => tests/unit_tests/chains/rl_chain}/test_rl_chain_base_embedder.py (99%) create mode 100644 libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index 1d9c216cad..35b0ed7b49 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -1,9 +1,4 @@ from .pick_best_chain import PickBest -from .slates_chain import ( - SlatesPersonalizerChain, - SlatesRandomPolicy, - SlatesFirstChoicePolicy, -) from .rl_chain_base import ( Embed, BasedOn, diff --git a/libs/langchain/langchain/chains/rl_chain/requirements.txt b/libs/langchain/langchain/chains/rl_chain/requirements.txt deleted file mode 100644 index faf213caed..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -vowpal-wabbit-next -langchain -openai -sentence_transformers -pandas -numpy -matplotlib diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py b/libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py deleted 
file mode 100644 index b5c094bd55..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/tests/test_slates_text_embedder.py +++ /dev/null @@ -1,124 +0,0 @@ -import sys - -sys.path.append("..") - -import rl_chain.slates_chain as slates -from test_utils import MockEncoder - -import pytest - -encoded_keyword = "[encoded]" -encoded_text = "[ e n c o d e d ] " - - -def test_slate_text_creation_no_label_no_emb(): - named_actions = {"prefix": ["0", "1"], "context": ["bla"], "suffix": ["0", "1"]} - expected = """slates shared |\nslates action 0 |Action 0\nslates action 0 |Action 1\nslates action 1 |Action bla\nslates action 2 |Action 0\nslates action 2 |Action 1\nslates slot |\nslates slot |\nslates slot |""" - feature_embedder = slates.SlatesFeatureEmbedder() - event = slates.SlatesPersonalizerChain.Event( - inputs={}, to_select_from=named_actions, based_on={} - ) - vw_str_ex = feature_embedder.format(event) - assert vw_str_ex == expected - - -def _str(embedding): - return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) - - -def test_slate_text_creation_no_label_w_emb(): - action00 = "0" - action01 = "1" - action10 = "bla" - action20 = "0" - action21 = "1" - encoded_action00 = _str(encoded_keyword + action00) - encoded_action01 = _str(encoded_keyword + action01) - encoded_action10 = _str(encoded_keyword + action10) - encoded_action20 = _str(encoded_keyword + action20) - encoded_action21 = _str(encoded_keyword + action21) - - named_actions = { - "prefix": slates.base.Embed(["0", "1"]), - "context": slates.base.Embed(["bla"]), - "suffix": slates.base.Embed(["0", "1"]), - } - expected = f"""slates shared |\nslates action 0 |Action {encoded_action00}\nslates action 0 |Action {encoded_action01}\nslates action 1 |Action {encoded_action10}\nslates action 2 |Action {encoded_action20}\nslates action 2 |Action {encoded_action21}\nslates slot |\nslates slot |\nslates slot |""" - feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) - event = slates.SlatesPersonalizerChain.Event( - inputs={}, to_select_from=named_actions, based_on={} - ) - vw_str_ex = feature_embedder.format(event) - assert vw_str_ex == expected - - -def test_slate_text_create_no_label_w_embed_and_keep(): - action00 = "0" - action01 = "1" - action10 = "bla" - action20 = "0" - action21 = "1" - encoded_action00 = _str(encoded_keyword + action00) - encoded_action01 = _str(encoded_keyword + action01) - encoded_action10 = _str(encoded_keyword + action10) - encoded_action20 = _str(encoded_keyword + action20) - encoded_action21 = _str(encoded_keyword + action21) - - named_actions = { - "prefix": slates.base.EmbedAndKeep(["0", "1"]), - "context": slates.base.EmbedAndKeep(["bla"]), - "suffix": slates.base.EmbedAndKeep(["0", "1"]), - } - expected = f"""slates shared |\nslates action 0 |Action {action00 + " " + encoded_action00}\nslates action 0 |Action {action01 + " " + encoded_action01}\nslates action 1 |Action {action10 + " " + encoded_action10}\nslates action 2 |Action {action20 + " " + encoded_action20}\nslates action 2 |Action {action21 + " " + encoded_action21}\nslates slot |\nslates slot |\nslates slot |""" - feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) - event = slates.SlatesPersonalizerChain.Event( - inputs={}, to_select_from=named_actions, based_on={} - ) - vw_str_ex = feature_embedder.format(event) - assert vw_str_ex == expected - - -def test_slates_raw_features_underscored(): - action00 = "this is a long action 0" - action01 = "this is a long action 1" - action00_underscored = 
action00.replace(" ", "_") - action01_underscored = action01.replace(" ", "_") - encoded_action00 = _str(encoded_keyword + action00) - encoded_action01 = _str(encoded_keyword + action01) - - ctx_str = "this is a long context" - ctx_str_underscored = ctx_str.replace(" ", "_") - encoded_ctx_str = encoded_text + " ".join(char for char in ctx_str) - - # No Embeddings - named_actions = {"prefix": [action00, action01]} - context = {"context": ctx_str} - expected_no_embed = f"""slates shared |context {ctx_str_underscored} \nslates action 0 |Action {action00_underscored}\nslates action 0 |Action {action01_underscored}\nslates slot |""" - feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) - event = slates.SlatesPersonalizerChain.Event( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_str_ex = feature_embedder.format(event) - assert vw_str_ex == expected_no_embed - - # Just embeddings - named_actions = {"prefix": slates.base.Embed([action00, action01])} - context = {"context": slates.base.Embed(ctx_str)} - expected_embed = f"""slates shared |context {encoded_ctx_str} \nslates action 0 |Action {encoded_action00}\nslates action 0 |Action {encoded_action01}\nslates slot |""" - feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) - event = slates.SlatesPersonalizerChain.Event( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_str_ex = feature_embedder.format(event) - assert vw_str_ex == expected_embed - - # Embeddings and raw features - named_actions = {"prefix": slates.base.EmbedAndKeep([action00, action01])} - context = {"context": slates.base.EmbedAndKeep(ctx_str)} - expected_embed_and_keep = f"""slates shared |context {ctx_str_underscored + " " + encoded_ctx_str} \nslates action 0 |Action {action00_underscored + " " + encoded_action00}\nslates action 0 |Action {action01_underscored + " " + encoded_action01}\nslates slot |""" - feature_embedder = slates.SlatesFeatureEmbedder(model=MockEncoder()) - event = slates.SlatesPersonalizerChain.Event( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_str_ex = feature_embedder.format(event) - assert vw_str_ex == expected_embed_and_keep diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_utils.py b/libs/langchain/langchain/chains/rl_chain/tests/test_utils.py deleted file mode 100644 index 8b3773165e..0000000000 --- a/libs/langchain/langchain/chains/rl_chain/tests/test_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -from rl_chain import SelectionScorer -from typing import Dict, Any - - -class MockScorer(SelectionScorer): - def score_response( - self, inputs: Dict[str, Any], llm_response: str, **kwargs - ) -> float: - return float(llm_response) - - -class MockEncoder: - def encode(self, to_encode): - return "[encoded]" + to_encode diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 8c3c8c18df..25087bf228 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -125,6 +125,7 @@ newspaper3k = {version = "^0.2.8", optional = true} amazon-textract-caller = {version = "<2", optional = true} xata = {version = "^1.0.0a7", optional = true} xmltodict = {version = "^0.13.0", optional = true} +vowpal-wabbit-next = "0.6.0" [tool.poetry.group.test.dependencies] diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py similarity index 99% rename from 
libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py rename to libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 6c8db426d4..3e739f44a5 100644 --- a/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -1,8 +1,4 @@ -import sys - -sys.path.append("..") - -import rl_chain.pick_best_chain as pick_best_chain +import langchain.chains.rl_chain.pick_best_chain as pick_best_chain from test_utils import MockEncoder import pytest from langchain.prompts.prompt import PromptTemplate diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py similarity index 99% rename from libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py rename to libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 29d8d9af69..7dc6d7f474 100644 --- a/libs/langchain/langchain/chains/rl_chain/tests/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -1,8 +1,4 @@ -import sys - -sys.path.append("..") - -import rl_chain.pick_best_chain as pick_best_chain +import langchain.chains.rl_chain.pick_best_chain as pick_best_chain from test_utils import MockEncoder import pytest diff --git a/libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py similarity index 99% rename from libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py rename to libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index 7ae232bbca..4f9ddcc1ac 100644 --- a/libs/langchain/langchain/chains/rl_chain/tests/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -1,8 +1,4 @@ -import sys - -sys.path.append("..") - -import rl_chain.rl_chain_base as base +import langchain.chains.rl_chain.rl_chain_base as base from test_utils import MockEncoder import pytest diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py new file mode 100644 index 0000000000..6d54d20d92 --- /dev/null +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py @@ -0,0 +1,3 @@ +class MockEncoder: + def encode(self, to_encode): + return "[encoded]" + to_encode From b422dc035f363731559cbb8a89ebb7d533b8ab1a Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 03:23:20 -0400 Subject: [PATCH 04/65] fix imports --- .../langchain/chains/rl_chain/__init__.py | 4 +- .../chains/rl_chain/pick_best_chain.py | 2 +- .../chains/rl_chain/rl_chain_base.py | 6 +- .../rl_chain/test_pick_best_chain_call.py | 108 +++++++-------- .../rl_chain/test_pick_best_text_embedder.py | 126 +++++++++--------- .../rl_chain/test_rl_chain_base_embedder.py | 2 +- 6 files changed, 124 insertions(+), 124 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index 35b0ed7b49..1985665813 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -1,5 +1,5 @@ -from .pick_best_chain import PickBest -from .rl_chain_base import ( +from langchain.chains.rl_chain.pick_best_chain import 
PickBest +from langchain.chains.rl_chain.rl_chain_base import ( Embed, BasedOn, ToSelectFrom, diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 6fe8e828a0..5e9a4673f7 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -1,6 +1,6 @@ from __future__ import annotations -from . import rl_chain_base as base +import langchain.chains.rl_chain.rl_chain_base as base from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain_base.py b/libs/langchain/langchain/chains/rl_chain/rl_chain_base.py index a20c78fc78..4b6ff1490a 100644 --- a/libs/langchain/langchain/chains/rl_chain/rl_chain_base.py +++ b/libs/langchain/langchain/chains/rl_chain/rl_chain_base.py @@ -6,9 +6,9 @@ from typing import Any, Dict, List, Optional, Tuple, Union, Sequence from abc import ABC, abstractmethod import vowpal_wabbit_next as vw -from .vw_logger import VwLogger -from .model_repository import ModelRepository -from .metrics import MetricsTracker +from langchain.chains.rl_chain.vw_logger import VwLogger +from langchain.chains.rl_chain.model_repository import ModelRepository +from langchain.chains.rl_chain.metrics import MetricsTracker from langchain.prompts import BasePromptTemplate from langchain.pydantic_v1 import Extra, BaseModel, root_validator diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 3e739f44a5..dd7a2d3ea1 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -1,4 +1,4 @@ -import langchain.chains.rl_chain.pick_best_chain as pick_best_chain +import langchain.chains.rl_chain as rl_chain from test_utils import MockEncoder import pytest from langchain.prompts.prompt import PromptTemplate @@ -17,32 +17,32 @@ def setup(): def test_multiple_ToSelectFrom_throws(): llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = ["0", "1", "2"] with pytest.raises(ValueError): chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(actions), - another_action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(actions), + another_action=rl_chain.ToSelectFrom(actions), ) def test_missing_basedOn_from_throws(): llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = ["0", "1", "2"] with pytest.raises(ValueError): - chain.run(action=pick_best_chain.base.ToSelectFrom(actions)) + chain.run(action=rl_chain.ToSelectFrom(actions)) def test_ToSelectFrom_not_a_list_throws(): llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = {"actions": ["0", "1", "2"]} with pytest.raises(ValueError): chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(actions), ) @@ -50,15 +50,15 @@ def 
test_update_with_delayed_score_with_auto_validator_throws(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that auto_val_llm = FakeListChatModel(responses=["3"]) - chain = pick_best_chain.PickBest.from_llm( + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=auto_val_llm), + selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), ) actions = ["0", "1", "2"] response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(actions), ) assert response["response"] == "hey" selection_metadata = response["selection_metadata"] @@ -71,15 +71,15 @@ def test_update_with_delayed_score_force(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that auto_val_llm = FakeListChatModel(responses=["3"]) - chain = pick_best_chain.PickBest.from_llm( + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=auto_val_llm), + selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), ) actions = ["0", "1", "2"] response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(actions), ) assert response["response"] == "hey" selection_metadata = response["selection_metadata"] @@ -92,13 +92,13 @@ def test_update_with_delayed_score_force(): def test_update_with_delayed_score(): llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=None ) actions = ["0", "1", "2"] response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(actions), ) assert response["response"] == "hey" selection_metadata = response["selection_metadata"] @@ -110,18 +110,18 @@ def test_update_with_delayed_score(): def test_user_defined_scorer(): llm, PROMPT = setup() - class CustomSelectionScorer(pick_best_chain.base.SelectionScorer): + class CustomSelectionScorer(rl_chain.SelectionScorer): def score_response(self, inputs, llm_response: str) -> float: score = 200 return score - chain = pick_best_chain.PickBest.from_llm( + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer() ) actions = ["0", "1", "2"] response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(actions), ) assert response["response"] == "hey" selection_metadata = response["selection_metadata"] @@ -130,8 +130,8 @@ def test_user_defined_scorer(): def test_default_embeddings(): llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) - chain = pick_best_chain.PickBest.from_llm( + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder ) @@ -153,8 +153,8 @@ def test_default_embeddings(): actions = [str1, str2, str3] response = chain.run( - User=pick_best_chain.base.BasedOn(ctx_str_1), - action=pick_best_chain.base.ToSelectFrom(actions), + 
User=rl_chain.BasedOn(ctx_str_1), + action=rl_chain.ToSelectFrom(actions), ) selection_metadata = response["selection_metadata"] vw_str = feature_embedder.format(selection_metadata) @@ -163,8 +163,8 @@ def test_default_embeddings(): def test_default_embeddings_off(): llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) - chain = pick_best_chain.PickBest.from_llm( + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False ) @@ -178,8 +178,8 @@ def test_default_embeddings_off(): actions = [str1, str2, str3] response = chain.run( - User=pick_best_chain.base.BasedOn(ctx_str_1), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn(ctx_str_1), + action=rl_chain.ToSelectFrom(actions), ) selection_metadata = response["selection_metadata"] vw_str = feature_embedder.format(selection_metadata) @@ -188,8 +188,8 @@ def test_default_embeddings_off(): def test_default_embeddings_mixed_w_explicit_user_embeddings(): llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) - chain = pick_best_chain.PickBest.from_llm( + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder ) @@ -208,12 +208,12 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings(): expected = f"""shared |User {encoded_ctx_str_1} |User2 {ctx_str_2 + " " + encoded_ctx_str_2} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {encoded_str3} """ - actions = [str1, str2, pick_best_chain.base.Embed(str3)] + actions = [str1, str2, rl_chain.Embed(str3)] response = chain.run( - User=pick_best_chain.base.BasedOn(pick_best_chain.base.Embed(ctx_str_1)), - User2=pick_best_chain.base.BasedOn(ctx_str_2), - action=pick_best_chain.base.ToSelectFrom(actions), + User=rl_chain.BasedOn(rl_chain.Embed(ctx_str_1)), + User2=rl_chain.BasedOn(ctx_str_2), + action=rl_chain.ToSelectFrom(actions), ) selection_metadata = response["selection_metadata"] vw_str = feature_embedder.format(selection_metadata) @@ -223,10 +223,10 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings(): def test_default_no_scorer_specified(): _, PROMPT = setup() chain_llm = FakeListChatModel(responses=[100]) - chain = pick_best_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) + chain = rl_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring assert response["response"] == "100" @@ -236,12 +236,12 @@ def test_default_no_scorer_specified(): def test_explicitly_no_scorer(): llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=None ) response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring assert response["response"] == "hey" @@ -252,14 +252,14 @@ def test_explicitly_no_scorer(): def 
test_auto_scorer_with_user_defined_llm(): llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=[300]) - chain = pick_best_chain.PickBest.from_llm( + chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=scorer_llm), + selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), ) response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + User=rl_chain.BasedOn("Context"), + action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring assert response["response"] == "hey" @@ -269,17 +269,17 @@ def test_auto_scorer_with_user_defined_llm(): def test_calling_chain_w_reserved_inputs_throws(): llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) with pytest.raises(ValueError): chain.run( - User=pick_best_chain.base.BasedOn("Context"), - rl_chain_selected_based_on=pick_best_chain.base.ToSelectFrom( + User=rl_chain.BasedOn("Context"), + rl_chain_selected_based_on=rl_chain.ToSelectFrom( ["0", "1", "2"] ), ) with pytest.raises(ValueError): chain.run( - User=pick_best_chain.base.BasedOn("Context"), - rl_chain_selected=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + User=rl_chain.BasedOn("Context"), + rl_chain_selected=rl_chain.ToSelectFrom(["0", "1", "2"]), ) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 7dc6d7f474..eee384641c 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -1,4 +1,4 @@ -import langchain.chains.rl_chain.pick_best_chain as pick_best_chain +import langchain.chains.rl_chain as rl_chain from test_utils import MockEncoder import pytest @@ -7,9 +7,9 @@ encoded_text = "[ e n c o d e d ] " def test_pickbest_textembedder_missing_context_throws(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_action = {"action": ["0", "1", "2"]} - event = pick_best_chain.PickBest.Event( + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_action, based_on={} ) with pytest.raises(ValueError): @@ -17,8 +17,8 @@ def test_pickbest_textembedder_missing_context_throws(): def test_pickbest_textembedder_missing_actions_throws(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) - event = pick_best_chain.PickBest.Event( + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + event = rl_chain.PickBest.Event( inputs={}, to_select_from={}, based_on={"context": "context"} ) with pytest.raises(ValueError): @@ -26,10 +26,10 @@ def test_pickbest_textembedder_missing_actions_throws(): def test_pickbest_textembedder_no_label_no_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - event = pick_best_chain.PickBest.Event( + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on={"context": "context"} ) vw_ex_str = 
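
The expected strings in these embedder tests follow Vowpal Wabbit's text format: one shared line carrying the based_on context, then one line per candidate action. A compact sketch of how they are produced, using the same names as the tests (printed whitespace approximate):

    import langchain.chains.rl_chain as rl_chain
    from test_utils import MockEncoder

    feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder())
    event = rl_chain.PickBest.Event(
        inputs={},
        to_select_from={"action1": ["0", "1", "2"]},
        based_on={"context": "context"},
    )
    print(feature_embedder.format(event))
    # shared |context context
    # |action1 0
    # |action1 1
    # |action1 2
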
feature_embedder.format(event) @@ -37,11 +37,11 @@ def test_pickbest_textembedder_no_label_no_emb(): def test_pickbest_textembedder_w_label_no_score_no_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on={"context": "context"}, @@ -52,13 +52,13 @@ def test_pickbest_textembedder_w_label_no_score_no_emb(): def test_pickbest_textembedder_w_full_label_no_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = ( """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ ) - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on={"context": "context"}, @@ -69,7 +69,7 @@ def test_pickbest_textembedder_w_full_label_no_emb(): def test_pickbest_textembedder_w_full_label_w_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" str3 = "2" @@ -80,11 +80,11 @@ def test_pickbest_textembedder_w_full_label_w_emb(): ctx_str_1 = "context1" encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - named_actions = {"action1": pick_best_chain.base.Embed([str1, str2, str3])} - context = {"context": pick_best_chain.base.Embed(ctx_str_1)} + named_actions = {"action1": rl_chain.Embed([str1, str2, str3])} + context = {"context": rl_chain.Embed(ctx_str_1)} expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -92,7 +92,7 @@ def test_pickbest_textembedder_w_full_label_w_emb(): def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" str3 = "2" @@ -103,11 +103,11 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): ctx_str_1 = "context1" encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - named_actions = {"action1": pick_best_chain.base.EmbedAndKeep([str1, str2, str3])} - context = {"context": pick_best_chain.base.EmbedAndKeep(ctx_str_1)} + named_actions = {"action1": rl_chain.EmbedAndKeep([str1, str2, str3])} + context = {"context": rl_chain.EmbedAndKeep(ctx_str_1)} expected = f"""shared |context {ctx_str_1 + 
" " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -115,11 +115,11 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ - event = pick_best_chain.PickBest.Event( + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -127,12 +127,12 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -140,12 +140,12 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -153,7 +153,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -168,16 +168,16 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) named_actions = { - "action1": 
pick_best_chain.base.Embed([{"a": str1, "b": str1}, str2, str3]) + "action1": rl_chain.Embed([{"a": str1, "b": str1}, str2, str3]) } context = { - "context1": pick_best_chain.base.Embed(ctx_str_1), - "context2": pick_best_chain.base.Embed(ctx_str_2), + "context1": rl_chain.Embed(ctx_str_1), + "context2": rl_chain.Embed(ctx_str_2), } expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -185,7 +185,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -200,18 +200,18 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) named_actions = { - "action1": pick_best_chain.base.EmbedAndKeep( + "action1": rl_chain.EmbedAndKeep( [{"a": str1, "b": str1}, str2, str3] ) } context = { - "context1": pick_best_chain.base.EmbedAndKeep(ctx_str_1), - "context2": pick_best_chain.base.EmbedAndKeep(ctx_str_2), + "context1": rl_chain.EmbedAndKeep(ctx_str_1), + "context2": rl_chain.EmbedAndKeep(ctx_str_2), } expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -219,7 +219,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -235,16 +235,16 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): named_actions = { "action1": [ - {"a": str1, "b": pick_best_chain.base.Embed(str1)}, + {"a": str1, "b": rl_chain.Embed(str1)}, str2, - pick_best_chain.base.Embed(str3), + rl_chain.Embed(str3), ] } - context = {"context1": ctx_str_1, "context2": pick_best_chain.base.Embed(ctx_str_2)} + context = {"context1": ctx_str_1, "context2": rl_chain.Embed(ctx_str_2)} expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = 
rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -252,7 +252,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -268,19 +268,19 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ named_actions = { "action1": [ - {"a": str1, "b": pick_best_chain.base.EmbedAndKeep(str1)}, + {"a": str1, "b": rl_chain.EmbedAndKeep(str1)}, str2, - pick_best_chain.base.EmbedAndKeep(str3), + rl_chain.EmbedAndKeep(str3), ] } context = { "context1": ctx_str_1, - "context2": pick_best_chain.base.EmbedAndKeep(ctx_str_2), + "context2": rl_chain.EmbedAndKeep(ctx_str_2), } expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -288,7 +288,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ def test_raw_features_underscored(): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "this is a long string" str1_underscored = str1.replace(" ", "_") encoded_str1 = encoded_text + " ".join(char for char in str1) @@ -303,27 +303,27 @@ def test_raw_features_underscored(): expected_no_embed = ( f"""shared |context {ctx_str_underscored} \n|action {str1_underscored} """ ) - event = pick_best_chain.PickBest.Event( + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected_no_embed # Just embeddings - named_actions = {"action": pick_best_chain.base.Embed([str1])} - context = {"context": pick_best_chain.base.Embed(ctx_str)} + named_actions = {"action": rl_chain.Embed([str1])} + context = {"context": rl_chain.Embed(ctx_str)} expected_embed = f"""shared |context {encoded_ctx_str} \n|action {encoded_str1} """ - event = pick_best_chain.PickBest.Event( + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) assert vw_ex_str == expected_embed # Embeddings and raw features - named_actions = {"action": pick_best_chain.base.EmbedAndKeep([str1])} - context = {"context": pick_best_chain.base.EmbedAndKeep(ctx_str)} + named_actions = {"action": rl_chain.EmbedAndKeep([str1])} + context = {"context": rl_chain.EmbedAndKeep(ctx_str)} expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ - event = pick_best_chain.PickBest.Event( + event = rl_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str 
= feature_embedder.format(event)
diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py
index 4f9ddcc1ac..073bab31ad 100644
--- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py
+++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py
@@ -1,4 +1,4 @@
-import langchain.chains.rl_chain.rl_chain_base as base
+import langchain.chains.rl_chain as base
 from test_utils import MockEncoder

 import pytest

From a6f9dccc35d5420cd441383db0c5df1712c37bf8 Mon Sep 17 00:00:00 2001
From: olgavrou
Date: Fri, 18 Aug 2023 03:42:17 -0400
Subject: [PATCH 05/65] rename rl_chain_base to base and update paths and
 imports

---
 .../langchain/chains/rl_chain/__init__.py     |  2 +-
 .../rl_chain/{rl_chain_base.py => base.py}    |  0
 .../chains/rl_chain/pick_best_chain.py        |  2 +-
 .../rl_chain/test_pick_best_chain_call.py     | 37 ++++----
 .../rl_chain/test_pick_best_text_embedder.py  | 87 ++++++++++---------
 .../rl_chain/test_rl_chain_base_embedder.py   |  2 +-
 6 files changed, 66 insertions(+), 64 deletions(-)
 rename libs/langchain/langchain/chains/rl_chain/{rl_chain_base.py => base.py} (100%)

diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py
index 1985665813..d485c5d506 100644
--- a/libs/langchain/langchain/chains/rl_chain/__init__.py
+++ b/libs/langchain/langchain/chains/rl_chain/__init__.py
@@ -1,5 +1,5 @@
 from langchain.chains.rl_chain.pick_best_chain import PickBest
-from langchain.chains.rl_chain.rl_chain_base import (
+from langchain.chains.rl_chain.base import (
     Embed,
     BasedOn,
     ToSelectFrom,
diff --git a/libs/langchain/langchain/chains/rl_chain/rl_chain_base.py b/libs/langchain/langchain/chains/rl_chain/base.py
similarity index 100%
rename from libs/langchain/langchain/chains/rl_chain/rl_chain_base.py
rename to libs/langchain/langchain/chains/rl_chain/base.py
diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py
index 5e9a4673f7..28c0f509f0 100644
--- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py
+++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py
@@ -1,6 +1,6 @@
 from __future__ import annotations

-import langchain.chains.rl_chain.rl_chain_base as base
+import langchain.chains.rl_chain.base as base

 from langchain.callbacks.manager import CallbackManagerForChainRun
 from langchain.chains.base import Chain
diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py
index dd7a2d3ea1..e0e5f5dc21 100644
--- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py
+++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py
@@ -1,4 +1,5 @@
-import langchain.chains.rl_chain as rl_chain
+import langchain.chains.rl_chain.pick_best_chain as pick_best_chain
+import langchain.chains.rl_chain.base as rl_chain
 from test_utils import MockEncoder
 import pytest
 from langchain.prompts.prompt import PromptTemplate
@@ -17,7 +18,7 @@ def setup():

 def test_multiple_ToSelectFrom_throws():
     llm, PROMPT = setup()
-    chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
+    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
     actions = ["0", "1", "2"]
     with pytest.raises(ValueError):
         chain.run(
@@ -29,7 +30,7 @@ def
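
Taken together, the rename leaves the package-root import surface intact while the updated tests pin the submodules directly. As a sketch of the two resulting import styles (names taken from the __init__.py hunk above):

    # public surface, unchanged for callers:
    from langchain.chains.rl_chain import PickBest, Embed, BasedOn, ToSelectFrom

    # what the tests in this commit do instead, reaching into the submodules:
    import langchain.chains.rl_chain.pick_best_chain as pick_best_chain
    import langchain.chains.rl_chain.base as rl_chain
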
test_multiple_ToSelectFrom_throws(): def test_missing_basedOn_from_throws(): llm, PROMPT = setup() - chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = ["0", "1", "2"] with pytest.raises(ValueError): chain.run(action=rl_chain.ToSelectFrom(actions)) @@ -37,7 +38,7 @@ def test_missing_basedOn_from_throws(): def test_ToSelectFrom_not_a_list_throws(): llm, PROMPT = setup() - chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = {"actions": ["0", "1", "2"]} with pytest.raises(ValueError): chain.run( @@ -50,7 +51,7 @@ def test_update_with_delayed_score_with_auto_validator_throws(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that auto_val_llm = FakeListChatModel(responses=["3"]) - chain = rl_chain.PickBest.from_llm( + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), @@ -71,7 +72,7 @@ def test_update_with_delayed_score_force(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that auto_val_llm = FakeListChatModel(responses=["3"]) - chain = rl_chain.PickBest.from_llm( + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), @@ -92,7 +93,7 @@ def test_update_with_delayed_score_force(): def test_update_with_delayed_score(): llm, PROMPT = setup() - chain = rl_chain.PickBest.from_llm( + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=None ) actions = ["0", "1", "2"] @@ -115,7 +116,7 @@ def test_user_defined_scorer(): score = 200 return score - chain = rl_chain.PickBest.from_llm( + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer() ) actions = ["0", "1", "2"] @@ -130,8 +131,8 @@ def test_user_defined_scorer(): def test_default_embeddings(): llm, PROMPT = setup() - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) - chain = rl_chain.PickBest.from_llm( + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder ) @@ -163,8 +164,8 @@ def test_default_embeddings(): def test_default_embeddings_off(): llm, PROMPT = setup() - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) - chain = rl_chain.PickBest.from_llm( + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False ) @@ -188,8 +189,8 @@ def test_default_embeddings_off(): def test_default_embeddings_mixed_w_explicit_user_embeddings(): llm, PROMPT = setup() - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) - chain = rl_chain.PickBest.from_llm( + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder ) @@ -223,7 +224,7 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings(): def test_default_no_scorer_specified(): _, PROMPT = setup() chain_llm = FakeListChatModel(responses=[100]) - chain = rl_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) + chain = 
pick_best_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) response = chain.run( User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(["0", "1", "2"]), @@ -236,7 +237,7 @@ def test_default_no_scorer_specified(): def test_explicitly_no_scorer(): llm, PROMPT = setup() - chain = rl_chain.PickBest.from_llm( + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=None ) response = chain.run( @@ -252,7 +253,7 @@ def test_explicitly_no_scorer(): def test_auto_scorer_with_user_defined_llm(): llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=[300]) - chain = rl_chain.PickBest.from_llm( + chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), @@ -269,7 +270,7 @@ def test_auto_scorer_with_user_defined_llm(): def test_calling_chain_w_reserved_inputs_throws(): llm, PROMPT = setup() - chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) with pytest.raises(ValueError): chain.run( User=rl_chain.BasedOn("Context"), diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index eee384641c..22097c6ef3 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -1,4 +1,5 @@ -import langchain.chains.rl_chain as rl_chain +import langchain.chains.rl_chain.pick_best_chain as pick_best_chain +import langchain.chains.rl_chain.base as rl_chain from test_utils import MockEncoder import pytest @@ -7,9 +8,9 @@ encoded_text = "[ e n c o d e d ] " def test_pickbest_textembedder_missing_context_throws(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_action = {"action": ["0", "1", "2"]} - event = rl_chain.PickBest.Event( + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_action, based_on={} ) with pytest.raises(ValueError): @@ -17,8 +18,8 @@ def test_pickbest_textembedder_missing_context_throws(): def test_pickbest_textembedder_missing_actions_throws(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) - event = rl_chain.PickBest.Event( + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from={}, based_on={"context": "context"} ) with pytest.raises(ValueError): @@ -26,10 +27,10 @@ def test_pickbest_textembedder_missing_actions_throws(): def test_pickbest_textembedder_no_label_no_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - event = rl_chain.PickBest.Event( + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on={"context": "context"} ) vw_ex_str = feature_embedder.format(event) @@ -37,11 +38,11 @@ def test_pickbest_textembedder_no_label_no_emb(): def test_pickbest_textembedder_w_label_no_score_no_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = 
pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on={"context": "context"}, @@ -52,13 +53,13 @@ def test_pickbest_textembedder_w_label_no_score_no_emb(): def test_pickbest_textembedder_w_full_label_no_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = ( """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ ) - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on={"context": "context"}, @@ -69,7 +70,7 @@ def test_pickbest_textembedder_w_full_label_no_emb(): def test_pickbest_textembedder_w_full_label_w_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" str3 = "2" @@ -83,8 +84,8 @@ def test_pickbest_textembedder_w_full_label_w_emb(): named_actions = {"action1": rl_chain.Embed([str1, str2, str3])} context = {"context": rl_chain.Embed(ctx_str_1)} expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -92,7 +93,7 @@ def test_pickbest_textembedder_w_full_label_w_emb(): def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" str3 = "2" @@ -106,8 +107,8 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): named_actions = {"action1": rl_chain.EmbedAndKeep([str1, str2, str3])} context = {"context": rl_chain.EmbedAndKeep(ctx_str_1)} expected = f"""shared |context {ctx_str_1 + " " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -115,11 +116,11 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + 
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ - event = rl_chain.PickBest.Event( + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -127,12 +128,12 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -140,12 +141,12 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -153,7 +154,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -176,8 +177,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): } expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -185,7 +186,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = 
pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -210,8 +211,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee } expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -219,7 +220,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -243,8 +244,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): context = {"context1": ctx_str_1, "context2": rl_chain.Embed(ctx_str_2)} expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -252,7 +253,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -279,8 +280,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ } expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ - selected = rl_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = rl_chain.PickBest.Event( + selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -288,7 +289,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ def test_raw_features_underscored(): - feature_embedder = rl_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "this is a long string" str1_underscored = str1.replace(" ", "_") encoded_str1 = encoded_text + " ".join(char for char in str1) @@ -303,7 +304,7 @@ def test_raw_features_underscored(): expected_no_embed = ( f"""shared |context {ctx_str_underscored} \n|action {str1_underscored} """ ) - event = rl_chain.PickBest.Event( + event = 
pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -313,7 +314,7 @@ def test_raw_features_underscored(): named_actions = {"action": rl_chain.Embed([str1])} context = {"context": rl_chain.Embed(ctx_str)} expected_embed = f"""shared |context {encoded_ctx_str} \n|action {encoded_str1} """ - event = rl_chain.PickBest.Event( + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -323,7 +324,7 @@ def test_raw_features_underscored(): named_actions = {"action": rl_chain.EmbedAndKeep([str1])} context = {"context": rl_chain.EmbedAndKeep(ctx_str)} expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ - event = rl_chain.PickBest.Event( + event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index 073bab31ad..fc3d02d9b0 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -1,4 +1,4 @@ -import langchain.chains.rl_chain as base +import langchain.chains.rl_chain.base as base from test_utils import MockEncoder import pytest From 1ae5a9c7a3bb354b3f2bb23895d9ddde6a0c7031 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 05:45:21 -0400 Subject: [PATCH 06/65] fix lock, imports, deps, test w deps, typo, formatting --- .../langchain/chains/rl_chain/base.py | 31 +- .../langchain/chains/rl_chain/metrics.py | 5 +- .../chains/rl_chain/model_repository.py | 17 +- .../chains/rl_chain/pick_best_chain.py | 22 +- libs/langchain/poetry.lock | 500 ++---------------- libs/langchain/pyproject.toml | 1 + .../rl_chain/test_pick_best_chain_call.py | 28 +- .../rl_chain/test_pick_best_text_embedder.py | 29 +- .../rl_chain/test_rl_chain_base_embedder.py | 25 +- 9 files changed, 153 insertions(+), 505 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 4b6ff1490a..2d0a103679 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -2,25 +2,22 @@ from __future__ import annotations import logging import os -from typing import Any, Dict, List, Optional, Tuple, Union, Sequence from abc import ABC, abstractmethod - -import vowpal_wabbit_next as vw -from langchain.chains.rl_chain.vw_logger import VwLogger -from langchain.chains.rl_chain.model_repository import ModelRepository -from langchain.chains.rl_chain.metrics import MetricsTracker -from langchain.prompts import BasePromptTemplate - -from langchain.pydantic_v1 import Extra, BaseModel, root_validator +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain +from langchain.chains.rl_chain.metrics import MetricsTracker +from langchain.chains.rl_chain.model_repository import ModelRepository +from langchain.chains.rl_chain.vw_logger import VwLogger from langchain.prompts import ( + BasePromptTemplate, ChatPromptTemplate, - 
SystemMessagePromptTemplate, HumanMessagePromptTemplate, + SystemMessagePromptTemplate, ) +from langchain.pydantic_v1 import BaseModel, Extra, root_validator logger = logging.getLogger(__name__) @@ -87,7 +84,9 @@ def EmbedAndKeep(anything): # helper functions -def parse_lines(parser: vw.TextFormatParser, input_str: str) -> List[vw.Example]: +def parse_lines(parser: "vw.TextFormatParser", input_str: str) -> List["vw.Example"]: + import vowpal_wabbit_next as vw + return [parser.parse_line(line) for line in input_str.split("\n")] @@ -100,7 +99,8 @@ def get_based_on_and_to_select_from(inputs: Dict[str, Any]): if not to_select_from: raise ValueError( - "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." + "No variables using 'ToSelectFrom' found in the inputs. \ + Please include at least one variable containing a list to select from." ) based_on = { @@ -173,14 +173,17 @@ class VwPolicy(Policy): self.vw_logger = vw_logger def predict(self, event: Event) -> Any: + import vowpal_wabbit_next as vw + text_parser = vw.TextFormatParser(self.workspace) return self.workspace.predict_one( parse_lines(text_parser, self.feature_embedder.format(event)) ) def learn(self, event: Event): - vw_ex = self.feature_embedder.format(event) + import vowpal_wabbit_next as vw + vw_ex = self.feature_embedder.format(event) text_parser = vw.TextFormatParser(self.workspace) multi_ex = parse_lines(text_parser, vw_ex) self.workspace.learn_one(multi_ex) @@ -216,7 +219,7 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): @staticmethod def get_default_system_prompt() -> SystemMessagePromptTemplate: return SystemMessagePromptTemplate.from_template( - "PLEASE RESPOND ONLY WITH A SIGNLE FLOAT AND NO OTHER TEXT EXPLANATION\n You are a strict judge that is called on to rank a response based on given criteria.\ + "PLEASE RESPOND ONLY WITH A SINGLE FLOAT AND NO OTHER TEXT EXPLANATION\n You are a strict judge that is called on to rank a response based on given criteria.\ You must respond with your ranking by providing a single float within the range [0, 1], 0 being very bad response and 1 being very good response." 
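
A condensed view of the deferred-import pattern this commit applies across base.py, metrics.py, and model_repository.py: heavy optional dependencies are imported inside the functions that need them, and type hints name them as strings, so the module itself imports cleanly even when vowpal_wabbit_next or pandas is not installed. A minimal sketch:

    from typing import List

    def parse_into_examples(parser: "vw.TextFormatParser", text: str) -> List["vw.Example"]:
        # imported at call time: a missing optional dependency surfaces here,
        # not when the chain module is first imported
        import vowpal_wabbit_next as vw

        return [parser.parse_line(line) for line in text.split("\n")]
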
) diff --git a/libs/langchain/langchain/chains/rl_chain/metrics.py b/libs/langchain/langchain/chains/rl_chain/metrics.py index eefc6bc4de..973b778911 100644 --- a/libs/langchain/langchain/chains/rl_chain/metrics.py +++ b/libs/langchain/langchain/chains/rl_chain/metrics.py @@ -1,4 +1,3 @@ -import pandas as pd from typing import Optional @@ -23,5 +22,7 @@ class MetricsTracker: if self._step > 0 and self._i % self._step == 0: self._history.append({"step": self._i, "score": self.score}) - def to_pandas(self) -> pd.DataFrame: + def to_pandas(self) -> "pd.DataFrame": + import pandas as pd + return pd.DataFrame(self._history) diff --git a/libs/langchain/langchain/chains/rl_chain/model_repository.py b/libs/langchain/langchain/chains/rl_chain/model_repository.py index 3f3f4c1063..992fca4518 100644 --- a/libs/langchain/langchain/chains/rl_chain/model_repository.py +++ b/libs/langchain/langchain/chains/rl_chain/model_repository.py @@ -1,11 +1,10 @@ -from pathlib import Path -import shutil import datetime -import vowpal_wabbit_next as vw -from typing import Union, Sequence -import os import glob import logging +import os +import shutil +from pathlib import Path +from typing import Sequence, Union logger = logging.getLogger(__name__) @@ -35,14 +34,18 @@ class ModelRepository: def has_history(self) -> bool: return len(glob.glob(str(self.folder / "model-????????-??????.vw"))) > 0 - def save(self, workspace: vw.Workspace) -> None: + def save(self, workspace: "vw.Workspace") -> None: + import vowpal_wabbit_next as vw + with open(self.model_path, "wb") as f: logger.info(f"storing rl_chain model in: {self.model_path}") f.write(workspace.serialize()) if self.with_history: # write history shutil.copyfile(self.model_path, self.folder / f"model-{self.get_tag()}.vw") - def load(self, commandline: Sequence[str]) -> vw.Workspace: + def load(self, commandline: Sequence[str]) -> "vw.Workspace": + import vowpal_wabbit_next as vw + model_data = None if self.model_path.exists(): with open(self.model_path, "rb") as f: diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 28c0f509f0..3df1d7f9d9 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -1,19 +1,15 @@ from __future__ import annotations -import langchain.chains.rl_chain.base as base +import logging +from typing import Any, Dict, List, Optional, Tuple +import langchain.chains.rl_chain.base as base +from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain -from typing import Any, Dict, List, Optional, Tuple, Union - -import numpy as np -from langchain.base_language import BaseLanguageModel from langchain.chains.llm import LLMChain -from sentence_transformers import SentenceTransformer from langchain.prompts import BasePromptTemplate -import logging - logger = logging.getLogger(__name__) # sentinel object used to distinguish between user didn't supply anything or user explicitly supplied None @@ -23,7 +19,7 @@ SENTINEL = object() class PickBestFeatureEmbedder(base.Embedder): """ Contextual Bandit Text Embedder class that embeds the based_on and to_select_from into a format that can be used by VW - + Attributes: model name (Any, optional): The type of embeddings to be used for feature representation. Defaults to BERT SentenceTransformer. 
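
On the SENTINEL object defined just above in pick_best_chain.py: a module-level sentinel is the standard way to tell "argument omitted" apart from "argument explicitly set to None", which matters here because selection_scorer=None is a meaningful choice (scoring disabled) rather than a missing value. A hypothetical sketch of that dispatch, not the chain's actual method:

    SENTINEL = object()

    def resolve_scorer(selection_scorer=SENTINEL):
        if selection_scorer is SENTINEL:
            return "default AutoSelectionScorer"  # caller omitted the argument
        return selection_scorer  # may legitimately be None: scoring disabled
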
""" @@ -32,6 +28,8 @@ class PickBestFeatureEmbedder(base.Embedder): super().__init__(*args, **kwargs) if model is None: + from sentence_transformers import SentenceTransformer + model = SentenceTransformer("bert-base-nli-mean-tokens") self.model = model @@ -67,7 +65,7 @@ class PickBestFeatureEmbedder(base.Embedder): ) example_string = "" - example_string += f"shared " + example_string += "shared " for context_item in context_emb: for ns, based_on in context_item.items(): example_string += f"|{ns} {' '.join(based_on) if isinstance(based_on, list) else based_on} " @@ -190,6 +188,8 @@ class PickBest(base.RLChain): def _call_after_predict_before_llm( self, inputs: Dict[str, Any], event: Event, prediction: List[Tuple[int, float]] ) -> Tuple[Dict[str, Any], PickBest.Event]: + import numpy as np + prob_sum = sum(prob for _, prob in prediction) probabilities = [prob / prob_sum for _, prob in prediction] ## sample from the pmf @@ -237,7 +237,7 @@ class PickBest(base.RLChain): Attributes: inputs: (Dict, required) The inputs to the chain. The inputs must contain a input variables that are wrapped in BasedOn and ToSelectFrom. BasedOn is the based_on that will be used for selecting an ToSelectFrom action that will be passed to the LLM prompt. run_manager: (CallbackManagerForChainRun, optional) The callback manager to use for this run. If not provided, a default callback manager is used. - + Returns: A dictionary containing: - `response`: The response generated by the LLM (Language Model). diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index a0a76d0979..75ae888efd 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "absl-py" version = "1.4.0" description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "aioboto3" version = "11.2.0" description = "Async boto3 wrapper" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -35,7 +33,6 @@ s3cse = ["cryptography (>=2.3.1)"] name = "aiobotocore" version = "2.5.0" description = "Async client for aws services using botocore and aiohttp" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -58,7 +55,6 @@ boto3 = ["boto3 (>=1.26.76,<1.26.77)"] name = "aiodns" version = "3.0.0" description = "Simple DNS resolver for asyncio" -category = "main" optional = true python-versions = "*" files = [ @@ -73,7 +69,6 @@ pycares = ">=4.0.0" name = "aiofiles" version = "23.1.0" description = "File support for asyncio." 
-category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -85,7 +80,6 @@ files = [ name = "aiohttp" version = "3.8.4" description = "Async http client/server framework (asyncio)" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -194,7 +188,6 @@ speedups = ["Brotli", "aiodns", "cchardet"] name = "aiohttp-retry" version = "2.8.3" description = "Simple retry client for aiohttp" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -209,7 +202,6 @@ aiohttp = "*" name = "aioitertools" version = "0.11.0" description = "itertools and builtins for AsyncIO and mixed iterables" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -224,7 +216,6 @@ typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""} name = "aiosignal" version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -239,7 +230,6 @@ frozenlist = ">=1.1.0" name = "aleph-alpha-client" version = "2.17.0" description = "python client to interact with Aleph Alpha api endpoints" -category = "main" optional = true python-versions = "*" files = [ @@ -267,7 +257,6 @@ types = ["mypy", "types-Pillow", "types-requests"] name = "altair" version = "4.2.2" description = "Altair: A declarative statistical visualization library for Python." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -290,7 +279,6 @@ dev = ["black", "docutils", "flake8", "ipython", "m2r", "mistune (<2.0.0)", "pyt name = "amadeus" version = "8.1.0" description = "Python module for the Amadeus travel APIs" -category = "main" optional = true python-versions = ">=3.4.8" files = [ @@ -301,7 +289,6 @@ files = [ name = "amazon-textract-caller" version = "0.0.29" description = "Amazon Textract Caller tools" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -321,7 +308,6 @@ testing = ["amazon-textract-response-parser", "pytest"] name = "amazon-textract-response-parser" version = "1.0.0" description = "Easily parse JSON returned by Amazon Textract." -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -337,7 +323,6 @@ marshmallow = ">=3.14,<4" name = "anyio" version = "3.7.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -359,7 +344,6 @@ trio = ["trio (<0.22)"] name = "appdirs" version = "1.4.4" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "main" optional = true python-versions = "*" files = [ @@ -371,7 +355,6 @@ files = [ name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 10.9" -category = "dev" optional = false python-versions = "*" files = [ @@ -383,7 +366,6 @@ files = [ name = "argon2-cffi" version = "21.3.0" description = "The secure Argon2 password hashing algorithm." 
-category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -403,7 +385,6 @@ tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pytest"] name = "argon2-cffi-bindings" version = "21.2.0" description = "Low-level CFFI bindings for Argon2" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -441,7 +422,6 @@ tests = ["pytest"] name = "arrow" version = "1.2.3" description = "Better dates & times for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -456,7 +436,6 @@ python-dateutil = ">=2.7.0" name = "arxiv" version = "1.4.7" description = "Python wrapper for the arXiv API: http://arxiv.org/help/api/" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -471,7 +450,6 @@ feedparser = "*" name = "asttokens" version = "2.2.1" description = "Annotate AST trees with source code positions" -category = "dev" optional = false python-versions = "*" files = [ @@ -489,7 +467,6 @@ test = ["astroid", "pytest"] name = "astunparse" version = "1.6.3" description = "An AST unparser for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -505,7 +482,6 @@ wheel = ">=0.23.0,<1.0" name = "async-timeout" version = "4.0.2" description = "Timeout context manager for asyncio programs" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -517,7 +493,6 @@ files = [ name = "atlassian-python-api" version = "3.39.0" description = "Python Atlassian REST API Wrapper" -category = "main" optional = true python-versions = "*" files = [ @@ -538,7 +513,6 @@ kerberos = ["requests-kerberos"] name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -557,7 +531,6 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte name = "audioread" version = "3.0.0" description = "multi-library, cross-platform audio decoding" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -568,7 +541,6 @@ files = [ name = "authlib" version = "1.2.0" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
-category = "main" optional = true python-versions = "*" files = [ @@ -583,7 +555,6 @@ cryptography = ">=3.2" name = "awadb" version = "0.3.9" description = "AI Native database for embedding vectors" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -611,7 +582,6 @@ test = ["pytest (>=6.0)"] name = "azure-ai-formrecognizer" version = "3.2.1" description = "Microsoft Azure Form Recognizer Client Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -629,7 +599,6 @@ typing-extensions = ">=4.0.1" name = "azure-ai-vision" version = "0.11.1b1" description = "Microsoft Azure AI Vision SDK for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -641,7 +610,6 @@ files = [ name = "azure-cognitiveservices-speech" version = "1.29.0" description = "Microsoft Cognitive Services Speech SDK for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -657,7 +625,6 @@ files = [ name = "azure-common" version = "1.1.28" description = "Microsoft Azure Client Library for Python (Common)" -category = "main" optional = true python-versions = "*" files = [ @@ -669,7 +636,6 @@ files = [ name = "azure-core" version = "1.27.1" description = "Microsoft Azure Core Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -689,7 +655,6 @@ aio = ["aiohttp (>=3.0)"] name = "azure-cosmos" version = "4.4.0" description = "Microsoft Azure Cosmos Client Library for Python" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -704,7 +669,6 @@ azure-core = ">=1.23.0,<2.0.0" name = "azure-identity" version = "1.13.0" description = "Microsoft Azure Identity Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -723,7 +687,6 @@ six = ">=1.12.0" name = "azure-search-documents" version = "11.4.0b6" description = "Microsoft Azure Cognitive Search Client Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -740,7 +703,6 @@ isodate = ">=0.6.0" name = "backcall" version = "0.2.0" description = "Specifications for callback functions passed in to an API" -category = "dev" optional = false python-versions = "*" files = [ @@ -752,7 +714,6 @@ files = [ name = "backoff" version = "2.2.1" description = "Function decoration for backoff and retry" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -764,7 +725,6 @@ files = [ name = "backports-zoneinfo" version = "0.2.1" description = "Backport of the standard library zoneinfo module" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -793,7 +753,6 @@ tzdata = ["tzdata"] name = "beautifulsoup4" version = "4.12.2" description = "Screen-scraping library" -category = "main" optional = false python-versions = ">=3.6.0" files = [ @@ -812,7 +771,6 @@ lxml = ["lxml"] name = "bibtexparser" version = "1.4.0" description = "Bibtex parser for python 3" -category = "main" optional = true python-versions = "*" files = [ @@ -826,7 +784,6 @@ pyparsing = ">=2.0.3" name = "black" version = "23.3.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -876,7 +833,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "bleach" version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -895,7 +851,6 @@ css = ["tinycss2 (>=1.1.0,<1.2)"] name = "blinker" version = "1.6.2" description = "Fast, simple object-to-object and broadcast signaling" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -907,7 +862,6 @@ files = [ name = "boto3" version = "1.26.76" description = "The AWS SDK for Python" -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -927,7 +881,6 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] name = "botocore" version = "1.29.76" description = "Low-level, data-driven core of boto 3." -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -947,7 +900,6 @@ crt = ["awscrt (==0.16.9)"] name = "brotli" version = "1.0.9" description = "Python bindings for the Brotli compression library" -category = "main" optional = true python-versions = "*" files = [ @@ -1039,7 +991,6 @@ files = [ name = "brotlicffi" version = "1.0.9.2" description = "Python CFFI bindings to the Brotli library" -category = "main" optional = true python-versions = "*" files = [ @@ -1082,7 +1033,6 @@ cffi = ">=1.0.0" name = "build" version = "0.10.0" description = "A simple, correct Python build frontend" -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -1106,7 +1056,6 @@ virtualenv = ["virtualenv (>=20.0.35)"] name = "cachetools" version = "5.3.1" description = "Extensible memoizing collections and decorators" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1118,7 +1067,6 @@ files = [ name = "cassandra-driver" version = "3.28.0" description = "DataStax Driver for Apache Cassandra" -category = "main" optional = false python-versions = "*" files = [ @@ -1170,7 +1118,6 @@ graph = ["gremlinpython (==3.4.6)"] name = "cassio" version = "0.0.7" description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra with ML/LLM/genAI workloads." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1186,7 +1133,6 @@ numpy = ">=1.0" name = "certifi" version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1198,7 +1144,6 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." -category = "main" optional = false python-versions = "*" files = [ @@ -1275,7 +1220,6 @@ pycparser = "*" name = "chardet" version = "5.1.0" description = "Universal encoding detector for Python 3" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1287,7 +1231,6 @@ files = [ name = "charset-normalizer" version = "3.2.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -1372,7 +1315,6 @@ files = [ name = "clarifai" version = "9.1.0" description = "Clarifai Python Utilities" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1387,7 +1329,6 @@ clarifai-grpc = ">=9.1.0" name = "clarifai-grpc" version = "9.1.1" description = "Clarifai gRPC API Client" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -1405,7 +1346,6 @@ requests = ">=2.25.1" name = "click" version = "8.1.3" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1420,7 +1360,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "click-plugins" version = "1.1.1" description = "An extension module for click to enable registering CLI commands via setuptools entry-points." -category = "main" optional = true python-versions = "*" files = [ @@ -1438,7 +1377,6 @@ dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] name = "clickhouse-connect" version = "0.5.25" description = "ClickHouse core driver, SqlAlchemy, and Superset libraries" -category = "main" optional = true python-versions = "~=3.7" files = [ @@ -1528,7 +1466,6 @@ superset = ["apache-superset (>=1.4.1)"] name = "cligj" version = "0.7.2" description = "Click params for commmand line interfaces to GeoJSON" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" files = [ @@ -1546,7 +1483,6 @@ test = ["pytest-cov"] name = "codespell" version = "2.2.5" description = "Codespell" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1564,7 +1500,6 @@ types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency name = "cohere" version = "4.18.0" description = "" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -1584,7 +1519,6 @@ urllib3 = ">=1.26,<3" name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -1596,7 +1530,6 @@ files = [ name = "colored" version = "1.4.4" description = "Simple library for color and formatting to terminal" -category = "dev" optional = false python-versions = "*" files = [ @@ -1607,7 +1540,6 @@ files = [ name = "comm" version = "0.1.3" description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1627,7 +1559,6 @@ typing = ["mypy (>=0.990)"] name = "coverage" version = "7.2.7" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1703,7 +1634,6 @@ toml = ["tomli"] name = "cryptography" version = "41.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1745,7 +1675,6 @@ test-randomorder = ["pytest-randomly"] name = "cssselect" version = "1.2.0" description = "cssselect parses CSS3 Selectors and translates them to XPath 1.0" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1757,7 +1686,6 @@ files = [ name = "dataclasses-json" version = "0.5.8" description = "Easily serialize dataclasses to and from JSON" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1777,7 +1705,6 @@ dev = ["flake8", "hypothesis", "ipython", "mypy (>=0.710)", "portray", "pytest ( name = "debugpy" version = "1.6.7" description = "An implementation of the Debug Adapter Protocol for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1805,7 +1732,6 @@ files = [ name = "decorator" version = "5.1.1" description = "Decorators for Humans" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -1817,7 +1743,6 @@ files = [ name = "deeplake" version = "3.6.11" description = "Activeloop Deep Lake" -category = "main" optional = true python-versions = "*" files = [ @@ -1855,7 +1780,6 @@ visualizer = ["IPython", "flask"] name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -1867,7 +1791,6 @@ files = [ name = "deprecated" version = "1.2.14" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1885,7 +1808,6 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] name = "deprecation" version = "2.1.0" description = "A library to handle automated deprecations" -category = "main" optional = true python-versions = "*" files = [ @@ -1900,7 +1822,6 @@ packaging = "*" name = "dill" version = "0.3.6" description = "serialize all of python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1915,7 +1836,6 @@ graph = ["objgraph (>=1.7.2)"] name = "dnspython" version = "2.3.0" description = "DNS toolkit" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -1936,7 +1856,6 @@ wmi = ["wmi (>=1.5.1,<2.0.0)"] name = "docarray" version = "0.32.1" description = "The data structure for multimodal data" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -1975,7 +1894,6 @@ web = ["fastapi (>=0.87.0)"] name = "docker" version = "6.1.3" description = "A Python library for the Docker Engine API." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1997,7 +1915,6 @@ ssh = ["paramiko (>=2.4.3)"] name = "docopt" version = "0.6.2" description = "Pythonic argument parser, that will make you smile" -category = "main" optional = true python-versions = "*" files = [ @@ -2008,7 +1925,6 @@ files = [ name = "duckdb" version = "0.8.1" description = "DuckDB embedded database" -category = "dev" optional = false python-versions = "*" files = [ @@ -2070,7 +1986,6 @@ files = [ name = "duckdb-engine" version = "0.7.3" description = "SQLAlchemy driver for duckdb" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2087,7 +2002,6 @@ sqlalchemy = ">=1.3.22" name = "duckduckgo-search" version = "3.8.3" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2105,7 +2019,6 @@ lxml = ">=4.9.2" name = "elastic-transport" version = "8.4.0" description = "Transport classes and utilities shared among Python Elastic client libraries" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2124,7 +2037,6 @@ develop = ["aiohttp", "mock", "pytest", "pytest-asyncio", "pytest-cov", "pytest- name = "elasticsearch" version = "8.8.0" description = "Python client for Elasticsearch" -category = "main" optional = true python-versions = ">=3.6, <4" files = [ @@ -2143,7 +2055,6 @@ requests = ["requests (>=2.4.0,<3.0.0)"] name = "entrypoints" version = "0.4" description = "Discover and load entry points from installed packages." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -2155,7 +2066,6 @@ files = [ name = "esprima" version = "4.0.1" description = "ECMAScript parsing infrastructure for multipurpose analysis in Python" -category = "main" optional = true python-versions = "*" files = [ @@ -2166,7 +2076,6 @@ files = [ name = "exceptiongroup" version = "1.1.1" description = "Backport of PEP 654 (exception groups)" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2181,7 +2090,6 @@ test = ["pytest (>=6)"] name = "executing" version = "1.2.0" description = "Get the currently executing AST node of a frame, and other information" -category = "dev" optional = false python-versions = "*" files = [ @@ -2196,7 +2104,6 @@ tests = ["asttokens", "littleutils", "pytest", "rich"] name = "faiss-cpu" version = "1.7.4" description = "A library for efficient similarity search and clustering of dense vectors." -category = "main" optional = true python-versions = "*" files = [ @@ -2231,7 +2138,6 @@ files = [ name = "fastavro" version = "1.7.4" description = "Fast read/write of AVRO files" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2278,7 +2184,6 @@ zstandard = ["zstandard"] name = "fastjsonschema" version = "2.17.1" description = "Fastest Python implementation of JSON schema" -category = "dev" optional = false python-versions = "*" files = [ @@ -2293,7 +2198,6 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc name = "feedfinder2" version = "0.0.4" description = "Find the feed URLs for a website." 
-category = "main" optional = true python-versions = "*" files = [ @@ -2309,7 +2213,6 @@ six = "*" name = "feedparser" version = "6.0.10" description = "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2324,7 +2227,6 @@ sgmllib3k = "*" name = "filelock" version = "3.12.2" description = "A platform independent file lock." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2340,7 +2242,6 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "p name = "fiona" version = "1.9.4.post1" description = "Fiona reads and writes spatial data files" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2385,7 +2286,6 @@ test = ["Fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] name = "flatbuffers" version = "23.5.26" description = "The FlatBuffers serialization format for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -2397,7 +2297,6 @@ files = [ name = "fqdn" version = "1.5.1" description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" -category = "dev" optional = false python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" files = [ @@ -2409,7 +2308,6 @@ files = [ name = "freezegun" version = "1.2.2" description = "Let your Python tests travel through time" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2424,7 +2322,6 @@ python-dateutil = ">=2.7" name = "frozenlist" version = "1.3.3" description = "A list-like structure which implements collections.abc.MutableSequence" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2508,7 +2405,6 @@ files = [ name = "fsspec" version = "2023.6.0" description = "File-system specification" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -2544,7 +2440,6 @@ tqdm = ["tqdm"] name = "future" version = "0.18.3" description = "Clean single-source support for Python 3 and 2" -category = "main" optional = true python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -2555,7 +2450,6 @@ files = [ name = "gast" version = "0.4.0" description = "Python AST that abstracts the underlying Python version" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -2567,7 +2461,6 @@ files = [ name = "geojson" version = "2.5.0" description = "Python bindings and utilities for GeoJSON" -category = "main" optional = true python-versions = "*" files = [ @@ -2579,7 +2472,6 @@ files = [ name = "geomet" version = "0.2.1.post1" description = "GeoJSON <-> WKT/WKB conversion utilities" -category = "main" optional = false python-versions = ">2.6, !=3.3.*, <4" files = [ @@ -2595,7 +2487,6 @@ six = "*" name = "geopandas" version = "0.13.2" description = "Geographic pandas extensions" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -2614,7 +2505,6 @@ shapely = ">=1.7.1" name = "gitdb" version = "4.0.10" description = "Git Object Database" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2629,7 +2519,6 @@ smmap = ">=3.0.1,<6" name = "gitpython" version = "3.1.32" description = "GitPython is a Python library used to interact with Git repositories" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2644,7 +2533,6 @@ gitdb = ">=4.0.1,<5" name = "google-api-core" version = "2.11.1" description = "Google 
API client core library" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2667,7 +2555,6 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] name = "google-api-python-client" version = "2.70.0" description = "Google API Client Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2676,7 +2563,7 @@ files = [ ] [package.dependencies] -google-api-core = ">=1.31.5,<2.0.0 || >2.3.0,<3.0.0dev" +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" google-auth = ">=1.19.0,<3.0.0dev" google-auth-httplib2 = ">=0.1.0" httplib2 = ">=0.15.0,<1dev" @@ -2686,7 +2573,6 @@ uritemplate = ">=3.0.1,<5" name = "google-auth" version = "2.20.0" description = "Google Authentication Library" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2712,7 +2598,6 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] name = "google-auth-httplib2" version = "0.1.0" description = "Google Authentication Library: httplib2 transport" -category = "main" optional = true python-versions = "*" files = [ @@ -2729,7 +2614,6 @@ six = "*" name = "google-auth-oauthlib" version = "0.4.6" description = "Google Authentication Library" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2748,7 +2632,6 @@ tool = ["click (>=6.0.0)"] name = "google-pasta" version = "0.2.0" description = "pasta is an AST-based Python refactoring library" -category = "main" optional = true python-versions = "*" files = [ @@ -2764,7 +2647,6 @@ six = "*" name = "google-search-results" version = "2.4.2" description = "Scrape and search localized results from Google, Bing, Baidu, Yahoo, Yandex, Ebay, Homedepot, youtube at scale using SerpApi.com" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -2778,7 +2660,6 @@ requests = "*" name = "googleapis-common-protos" version = "1.59.1" description = "Common protobufs used in Google APIs" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2796,7 +2677,6 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] name = "gptcache" version = "0.1.32" description = "GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, similar to how Redis works for traditional applications." -category = "main" optional = true python-versions = ">=3.8.1" files = [ @@ -2813,7 +2693,6 @@ requests = "*" name = "gql" version = "3.4.1" description = "GraphQL client for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -2840,7 +2719,6 @@ websockets = ["websockets (>=10,<11)", "websockets (>=9,<10)"] name = "graphql-core" version = "3.2.3" description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." 
-category = "main" optional = true python-versions = ">=3.6,<4" files = [ @@ -2852,7 +2730,6 @@ files = [ name = "greenlet" version = "2.0.2" description = "Lightweight in-process concurrent programming" -category = "main" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" files = [ @@ -2926,7 +2803,6 @@ test = ["objgraph", "psutil"] name = "grpcio" version = "1.47.5" description = "HTTP/2-based RPC framework" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2988,7 +2864,6 @@ protobuf = ["grpcio-tools (>=1.47.5)"] name = "grpcio-tools" version = "1.47.5" description = "Protobuf code generator for gRPC" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -3049,7 +2924,6 @@ setuptools = "*" name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3061,7 +2935,6 @@ files = [ name = "h2" version = "4.1.0" description = "HTTP/2 State-Machine based protocol implementation" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -3077,7 +2950,6 @@ hyperframe = ">=6.0,<7" name = "h5py" version = "3.8.0" description = "Read and write HDF5 files from Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3115,7 +2987,6 @@ numpy = ">=1.14.5" name = "hnswlib" version = "0.7.0" description = "hnswlib" -category = "main" optional = true python-versions = "*" files = [ @@ -3129,7 +3000,6 @@ numpy = "*" name = "hpack" version = "4.0.0" description = "Pure-Python HPACK header compression" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -3141,7 +3011,6 @@ files = [ name = "html2text" version = "2020.1.16" description = "Turn HTML into equivalent Markdown-structured text." -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -3153,7 +3022,6 @@ files = [ name = "httpcore" version = "0.17.2" description = "A minimal low-level HTTP client." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3165,17 +3033,16 @@ files = [ anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" -sniffio = ">=1.0.0,<2.0.0" +sniffio = "==1.*" [package.extras] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "httplib2" version = "0.22.0" description = "A comprehensive HTTP client library." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -3190,7 +3057,6 @@ pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0 name = "httpx" version = "0.24.1" description = "The next generation HTTP client." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3206,19 +3072,18 @@ h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} httpcore = ">=0.15.0,<0.18.0" idna = "*" sniffio = "*" -socksio = {version = ">=1.0.0,<2.0.0", optional = true, markers = "extra == \"socks\""} +socksio = {version = "==1.*", optional = true, markers = "extra == \"socks\""} [package.extras] brotli = ["brotli", "brotlicffi"] -cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" version = "0.15.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -3250,7 +3115,6 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t name = "humbug" version = "0.3.1" description = "Humbug: Do you build developer tools? Humbug helps you know your users." -category = "main" optional = true python-versions = "*" files = [ @@ -3270,7 +3134,6 @@ profile = ["GPUtil", "psutil", "types-psutil"] name = "hyperframe" version = "6.0.1" description = "HTTP/2 framing layer for Python" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -3282,7 +3145,6 @@ files = [ name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -3294,7 +3156,6 @@ files = [ name = "importlib-metadata" version = "6.0.1" description = "Read metadata from Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3314,7 +3175,6 @@ testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packag name = "importlib-resources" version = "5.12.0" description = "Read resources from Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3333,7 +3193,6 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3345,7 +3204,6 @@ files = [ name = "ipykernel" version = "6.23.2" description = "IPython Kernel for Jupyter" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3359,7 +3217,7 @@ comm = ">=0.1.1" debugpy = ">=1.6.5" ipython = ">=7.23.1" jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" matplotlib-inline = ">=0.1" nest-asyncio = "*" packaging = "*" @@ -3379,7 +3237,6 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio" name = "ipython" version = "8.12.2" description = "IPython: Productive Interactive Computing" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3419,7 +3276,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pa name = "ipython-genutils" version = "0.2.0" description = "Vestigial utilities from IPython" -category = "dev" optional = false python-versions = "*" files = [ @@ -3431,7 +3287,6 @@ files = [ name = "ipywidgets" version = "8.0.6" description = "Jupyter interactive widgets" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ 
-3453,7 +3308,6 @@ test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] name = "isodate" version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" -category = "main" optional = true python-versions = "*" files = [ @@ -3468,7 +3322,6 @@ six = "*" name = "isoduration" version = "20.11.0" description = "Operations with ISO 8601 durations" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3483,7 +3336,6 @@ arrow = ">=0.15.0" name = "jaraco-context" version = "4.3.0" description = "Context managers by jaraco" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3499,7 +3351,6 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec name = "jedi" version = "0.18.2" description = "An autocompletion tool for Python that can be used for text editors." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -3519,7 +3370,6 @@ testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] name = "jieba3k" version = "0.35.1" description = "Chinese Words Segementation Utilities" -category = "main" optional = true python-versions = "*" files = [ @@ -3530,7 +3380,6 @@ files = [ name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3548,7 +3397,6 @@ i18n = ["Babel (>=2.7)"] name = "jmespath" version = "1.0.1" description = "JSON Matching Expressions" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3560,7 +3408,6 @@ files = [ name = "joblib" version = "1.2.0" description = "Lightweight pipelining with Python functions" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3572,7 +3419,6 @@ files = [ name = "jq" version = "1.4.1" description = "jq is a lightweight and flexible JSON processor." -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -3637,7 +3483,6 @@ files = [ name = "jsonable" version = "0.3.1" description = "An abstract class that supports jsonserialization/deserialization." -category = "main" optional = true python-versions = "*" files = [ @@ -3649,7 +3494,6 @@ files = [ name = "jsonlines" version = "3.1.0" description = "Library with helpers for the jsonlines file format" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -3664,18 +3508,17 @@ attrs = ">=19.2.0" name = "jsonpointer" version = "2.4" description = "Identify specific nodes in a JSON document (RFC 6901)" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] name = "jsonschema" version = "4.17.3" description = "An implementation of JSON Schema validation for Python" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3705,7 +3548,6 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "jupyter" version = "1.0.0" description = "Jupyter metapackage. Install all the Jupyter components in one go." 
-category = "dev" optional = false python-versions = "*" files = [ @@ -3726,7 +3568,6 @@ qtconsole = "*" name = "jupyter-client" version = "7.4.9" description = "Jupyter protocol implementation and client libraries" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3751,7 +3592,6 @@ test = ["codecov", "coverage", "ipykernel (>=6.12)", "ipython", "mypy", "pre-com name = "jupyter-console" version = "6.6.3" description = "Jupyter terminal console" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3763,7 +3603,7 @@ files = [ ipykernel = ">=6.14" ipython = "*" jupyter-client = ">=7.0.0" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" prompt-toolkit = ">=3.0.30" pygments = "*" pyzmq = ">=17" @@ -3776,7 +3616,6 @@ test = ["flaky", "pexpect", "pytest"] name = "jupyter-core" version = "5.3.1" description = "Jupyter core package. A base package on which Jupyter projects rely." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3797,7 +3636,6 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] name = "jupyter-events" version = "0.6.3" description = "Jupyter Event System library" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3822,7 +3660,6 @@ test = ["click", "coverage", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>= name = "jupyter-server" version = "2.6.0" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3835,7 +3672,7 @@ anyio = ">=3.1.0" argon2-cffi = "*" jinja2 = "*" jupyter-client = ">=7.4.4" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" jupyter-events = ">=0.6.0" jupyter-server-terminals = "*" nbconvert = ">=6.4.4" @@ -3859,7 +3696,6 @@ test = ["ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", " name = "jupyter-server-terminals" version = "0.4.4" description = "A Jupyter Server Extension Providing Terminals." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3879,7 +3715,6 @@ test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", name = "jupyterlab-pygments" version = "0.2.2" description = "Pygments theme using JupyterLab CSS variables" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3891,7 +3726,6 @@ files = [ name = "jupyterlab-widgets" version = "3.0.7" description = "Jupyter interactive widgets for JupyterLab" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3903,7 +3737,6 @@ files = [ name = "keras" version = "2.11.0" description = "Deep learning for humans." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3914,7 +3747,6 @@ files = [ name = "lancedb" version = "0.1.8" description = "lancedb" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -3937,7 +3769,6 @@ tests = ["doctest", "pytest", "pytest-mock"] name = "langkit" version = "0.0.6" description = "A collection of text metric udfs for whylogs profiling and monitoring in WhyLabs" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -3957,7 +3788,6 @@ all = ["datasets (>=2.12.0,<3.0.0)", "nltk (>=3.8.1,<4.0.0)", "openai (>=0.27.6, name = "langsmith" version = "0.0.22" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
-category = "main" optional = false python-versions = ">=3.8.1,<4.0" files = [ @@ -3973,7 +3803,6 @@ requests = ">=2,<3" name = "lark" version = "1.1.5" description = "a modern parsing library" -category = "main" optional = false python-versions = "*" files = [ @@ -3990,7 +3819,6 @@ regex = ["regex"] name = "lazy-loader" version = "0.3" description = "lazy_loader" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4006,7 +3834,6 @@ test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] name = "libclang" version = "16.0.0" description = "Clang Python Bindings, mirrored from the official LLVM repo: https://github.com/llvm/llvm-project/tree/main/clang/bindings/python, to make the installation process easier." -category = "main" optional = true python-versions = "*" files = [ @@ -4024,7 +3851,6 @@ files = [ name = "libdeeplake" version = "0.0.60" description = "C++ backend for Deep Lake" -category = "main" optional = true python-versions = "*" files = [ @@ -4057,7 +3883,6 @@ numpy = "*" name = "librosa" version = "0.10.0.post2" description = "Python module for audio and music processing" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4082,14 +3907,13 @@ typing-extensions = ">=4.1.1" [package.extras] display = ["matplotlib (>=3.3.0)"] -docs = ["ipython (>=7.0)", "matplotlib (>=3.3.0)", "mir-eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1,<6)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (>=1.0.0,<2.0.0)", "sphinxcontrib-svg2pdfconverter"] +docs = ["ipython (>=7.0)", "matplotlib (>=3.3.0)", "mir-eval (>=0.5)", "numba (>=0.51)", "numpydoc", "presets", "sphinx (!=1.3.1,<6)", "sphinx-gallery (>=0.7)", "sphinx-multiversion (>=0.2.3)", "sphinx-rtd-theme (==1.*)", "sphinxcontrib-svg2pdfconverter"] tests = ["matplotlib (>=3.3.0)", "packaging (>=20.0)", "pytest", "pytest-cov", "pytest-mpl", "resampy (>=0.2.2)", "samplerate", "types-decorator"] [[package]] name = "llvmlite" version = "0.40.1" description = "lightweight wrapper around basic LLVM functionality" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -4123,7 +3947,6 @@ files = [ name = "loguru" version = "0.7.0" description = "Python logging made (stupidly) simple" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -4142,7 +3965,6 @@ dev = ["Sphinx (==5.3.0)", "colorama (==0.4.5)", "colorama (==0.4.6)", "freezegu name = "lxml" version = "4.9.2" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" files = [ @@ -4235,7 +4057,6 @@ source = ["Cython (>=0.29.7)"] name = "lz4" version = "4.3.2" description = "LZ4 Bindings for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4285,7 +4106,6 @@ tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"] name = "manifest-ml" version = "0.0.1" description = "Manifest for Prompt Programming Foundation Models." -category = "main" optional = true python-versions = ">=3.8.0" files = [ @@ -4309,7 +4129,6 @@ dev = ["autopep8 (>=1.6.0)", "black (>=22.3.0)", "docformatter (>=1.4)", "flake8 name = "markdown" version = "3.4.3" description = "Python implementation of John Gruber's Markdown." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4324,7 +4143,6 @@ testing = ["coverage", "pyyaml"] name = "markdown-it-py" version = "2.2.0" description = "Python port of markdown-it. Markdown parsing, done right!" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4349,7 +4167,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -4409,7 +4226,6 @@ files = [ name = "marqo" version = "1.2.4" description = "Tensor search for humans" -category = "main" optional = true python-versions = ">=3" files = [ @@ -4428,7 +4244,6 @@ urllib3 = "*" name = "marshmallow" version = "3.19.0" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -4449,7 +4264,6 @@ tests = ["pytest", "pytz", "simplejson"] name = "marshmallow-enum" version = "1.5.1" description = "Enum field for Marshmallow" -category = "main" optional = false python-versions = "*" files = [ @@ -4464,7 +4278,6 @@ marshmallow = ">=2.0.0" name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -4479,7 +4292,6 @@ traitlets = "*" name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4491,7 +4303,6 @@ files = [ name = "mistune" version = "2.0.5" description = "A sane Markdown parser with useful plugins and renderers" -category = "dev" optional = false python-versions = "*" files = [ @@ -4503,7 +4314,6 @@ files = [ name = "mmh3" version = "3.1.0" description = "Python wrapper for MurmurHash (MurmurHash3), a set of fast and robust hash functions." -category = "main" optional = true python-versions = "*" files = [ @@ -4548,7 +4358,6 @@ files = [ name = "momento" version = "1.6.0" description = "SDK for Momento" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -4565,7 +4374,6 @@ pyjwt = ">=2.4.0,<3.0.0" name = "momento-wire-types" version = "0.64.1" description = "Momento Client Proto Generated Files" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -4581,7 +4389,6 @@ protobuf = ">=3,<5" name = "more-itertools" version = "9.1.0" description = "More routines for operating on iterables, beyond itertools" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4593,7 +4400,6 @@ files = [ name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" -category = "main" optional = true python-versions = "*" files = [ @@ -4611,7 +4417,6 @@ tests = ["pytest (>=4.6)"] name = "msal" version = "1.22.0" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." 
-category = "main" optional = true python-versions = "*" files = [ @@ -4631,7 +4436,6 @@ broker = ["pymsalruntime (>=0.13.2,<0.14)"] name = "msal-extensions" version = "1.0.0" description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." -category = "main" optional = true python-versions = "*" files = [ @@ -4650,7 +4454,6 @@ portalocker = [ name = "msgpack" version = "1.0.5" description = "MessagePack serializer" -category = "main" optional = true python-versions = "*" files = [ @@ -4723,7 +4526,6 @@ files = [ name = "msrest" version = "0.7.1" description = "AutoRest swagger generator Python client runtime." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -4745,7 +4547,6 @@ async = ["aiodns", "aiohttp (>=3.0)"] name = "multidict" version = "6.0.4" description = "multidict implementation" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -4829,7 +4630,6 @@ files = [ name = "multiprocess" version = "0.70.14" description = "better multiprocessing and multithreading in python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4856,7 +4656,6 @@ dill = ">=0.3.6" name = "mwcli" version = "0.0.3" description = "Utilities for processing MediaWiki on the command line." -category = "main" optional = true python-versions = "*" files = [ @@ -4873,7 +4672,6 @@ para = "*" name = "mwparserfromhell" version = "0.6.4" description = "MWParserFromHell is a parser for MediaWiki wikicode." -category = "main" optional = true python-versions = ">= 3.6" files = [ @@ -4911,7 +4709,6 @@ files = [ name = "mwtypes" version = "0.3.2" description = "A set of types for processing MediaWiki data." -category = "main" optional = true python-versions = "*" files = [ @@ -4926,7 +4723,6 @@ jsonable = ">=0.3.0" name = "mwxml" version = "0.3.3" description = "A set of utilities for processing MediaWiki XML dump data." -category = "main" optional = true python-versions = "*" files = [ @@ -4944,7 +4740,6 @@ para = ">=0.0.1" name = "mypy" version = "0.991" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -4995,7 +4790,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -5007,7 +4801,6 @@ files = [ name = "mypy-protobuf" version = "3.3.0" description = "Generate mypy stub files from protobuf specs" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5023,7 +4816,6 @@ types-protobuf = ">=3.19.12" name = "nbclassic" version = "1.0.0" description = "Jupyter Notebook as a Jupyter Server extension." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5059,7 +4851,6 @@ test = ["coverage", "nbval", "pytest", "pytest-cov", "pytest-jupyter", "pytest-p name = "nbclient" version = "0.7.4" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
-category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -5069,7 +4860,7 @@ files = [ [package.dependencies] jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" nbformat = ">=5.1" traitlets = ">=5.3" @@ -5082,7 +4873,6 @@ test = ["flaky", "ipykernel", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "p name = "nbconvert" version = "7.5.0" description = "Converting Jupyter Notebooks" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5121,7 +4911,6 @@ webpdf = ["pyppeteer (>=1,<1.1)"] name = "nbformat" version = "5.9.0" description = "The Jupyter Notebook format" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -5143,7 +4932,6 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] name = "nebula3-python" version = "3.4.0" description = "Python client for NebulaGraph V3.4" -category = "main" optional = true python-versions = "*" files = [ @@ -5161,7 +4949,6 @@ six = ">=1.16.0" name = "neo4j" version = "5.9.0" description = "Neo4j Bolt driver for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5179,7 +4966,6 @@ pandas = ["numpy (>=1.7.0,<2.0.0)", "pandas (>=1.1.0,<3.0.0)"] name = "nest-asyncio" version = "1.5.6" description = "Patch asyncio to allow nested event loops" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -5191,7 +4977,6 @@ files = [ name = "networkx" version = "2.8.8" description = "Python package for creating and manipulating graphs and networks" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -5210,7 +4995,6 @@ test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] name = "newspaper3k" version = "0.2.8" description = "Simplified python article discovery & extraction." -category = "main" optional = true python-versions = "*" files = [ @@ -5237,7 +5021,6 @@ tldextract = ">=2.0.1" name = "nlpcloud" version = "1.0.42" description = "Python client for the NLP Cloud API" -category = "main" optional = true python-versions = "*" files = [ @@ -5252,7 +5035,6 @@ requests = "*" name = "nltk" version = "3.8.1" description = "Natural Language Toolkit" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5278,7 +5060,6 @@ twitter = ["twython"] name = "nomic" version = "1.1.14" description = "The offical Nomic python client." 
-category = "main" optional = true python-versions = "*" files = [ @@ -5306,7 +5087,6 @@ gpt4all = ["peft (==0.3.0.dev0)", "sentencepiece", "torch", "transformers (==4.2 name = "notebook" version = "6.5.4" description = "A web-based notebook environment for interactive computing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5341,7 +5121,6 @@ test = ["coverage", "nbval", "pytest", "pytest-cov", "requests", "requests-unixs name = "notebook-shim" version = "0.2.3" description = "A shim layer for notebook traits and config" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5359,7 +5138,6 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync" name = "numba" version = "0.57.1" description = "compiling Python code using LLVM" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -5391,14 +5169,13 @@ files = [ [package.dependencies] importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} -llvmlite = ">=0.40.0dev0,<0.41" +llvmlite = "==0.40.*" numpy = ">=1.21,<1.25" [[package]] name = "numcodecs" version = "0.11.0" description = "A Python package providing buffer compression and transformation codecs for use" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -5431,7 +5208,6 @@ zfpy = ["zfpy (>=1.0.0)"] name = "numexpr" version = "2.8.4" description = "Fast numerical expression evaluator for NumPy" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -5474,7 +5250,6 @@ numpy = ">=1.13.3" name = "numpy" version = "1.24.3" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -5512,7 +5287,6 @@ files = [ name = "nvidia-cublas-cu11" version = "11.10.3.66" description = "CUBLAS native runtime libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5528,7 +5302,6 @@ wheel = "*" name = "nvidia-cuda-nvrtc-cu11" version = "11.7.99" description = "NVRTC native runtime libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5545,7 +5318,6 @@ wheel = "*" name = "nvidia-cuda-runtime-cu11" version = "11.7.99" description = "CUDA Runtime native Libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5561,7 +5333,6 @@ wheel = "*" name = "nvidia-cudnn-cu11" version = "8.5.0.96" description = "cuDNN runtime libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5577,7 +5348,6 @@ wheel = "*" name = "o365" version = "2.0.27" description = "Microsoft Graph and Office 365 API made easy" -category = "main" optional = true python-versions = ">=3.4" files = [ @@ -5598,7 +5368,6 @@ tzlocal = ">=4.0,<5.0" name = "oauthlib" version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -5615,7 +5384,6 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] name = "openai" version = "0.27.8" description = "Python client library for the OpenAI API" -category = "main" optional = false python-versions = ">=3.7.1" files = [ @@ -5630,7 +5398,7 @@ tqdm = "*" [package.extras] datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -dev = ["black (>=21.6b0,<22.0)", "pytest (>=6.0.0,<7.0.0)", "pytest-asyncio", "pytest-mock"] +dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", 
"pytest-mock"] embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] @@ -5638,7 +5406,6 @@ wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1 name = "openapi-schema-pydantic" version = "1.2.4" description = "OpenAPI (v3) specification schema as pydantic class" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -5653,7 +5420,6 @@ pydantic = ">=1.8.2" name = "openlm" version = "0.0.5" description = "Drop-in OpenAI-compatible that can call LLMs from other providers" -category = "main" optional = true python-versions = ">=3.8.1,<4.0" files = [ @@ -5668,7 +5434,6 @@ requests = ">=2,<3" name = "opensearch-py" version = "2.2.0" description = "Python client for OpenSearch" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" files = [ @@ -5693,7 +5458,6 @@ kerberos = ["requests-kerberos"] name = "opt-einsum" version = "3.3.0" description = "Optimizing numpys einsum function" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -5712,7 +5476,6 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"] name = "orjson" version = "3.9.1" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5768,7 +5531,6 @@ files = [ name = "overrides" version = "7.3.1" description = "A decorator to automatically detect mismatch when overriding a method." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -5780,7 +5542,6 @@ files = [ name = "packaging" version = "23.1" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -5792,7 +5553,6 @@ files = [ name = "pandas" version = "2.0.2" description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -5826,8 +5586,8 @@ files = [ [package.dependencies] numpy = [ {version = ">=1.20.3", markers = "python_version < \"3.10\""}, - {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -5860,7 +5620,6 @@ xml = ["lxml (>=4.6.3)"] name = "pandocfilters" version = "1.5.0" description = "Utilities for writing pandoc filters in python" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -5872,7 +5631,6 @@ files = [ name = "para" version = "0.0.8" description = "a set utilities that ake advantage of python's 'multiprocessing' module to distribute CPU-intensive tasks" -category = "main" optional = true python-versions = "*" files = [ @@ -5884,7 +5642,6 @@ files = [ name = "parso" version = "0.8.3" description = "A Python Parser" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -5900,7 +5657,6 @@ testing = ["docopt", "pytest (<6.0.0)"] name = "pathos" version = "0.3.0" description = "parallel graph management and execution in heterogeneous computing" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5918,7 +5674,6 @@ ppft = 
">=1.7.6.6" name = "pathspec" version = "0.11.1" description = "Utility library for gitignore style pattern matching of file paths." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5930,7 +5685,6 @@ files = [ name = "pdfminer-six" version = "20221105" description = "PDF parser and analyzer" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -5951,7 +5705,6 @@ image = ["Pillow"] name = "pexpect" version = "4.8.0" description = "Pexpect allows easy control of interactive console applications." -category = "main" optional = false python-versions = "*" files = [ @@ -5966,7 +5719,6 @@ ptyprocess = ">=0.5" name = "pgvector" version = "0.1.8" description = "pgvector support for Python" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -5980,7 +5732,6 @@ numpy = "*" name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" -category = "dev" optional = false python-versions = "*" files = [ @@ -5992,7 +5743,6 @@ files = [ name = "pillow" version = "9.5.0" description = "Python Imaging Library (Fork)" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6072,7 +5822,6 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa name = "pinecone-client" version = "2.2.2" description = "Pinecone client and SDK" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -6098,7 +5847,6 @@ grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv name = "pinecone-text" version = "0.4.2" description = "Text utilities library by Pinecone.io" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -6118,7 +5866,6 @@ wget = ">=3.2,<4.0" name = "pkgutil-resolve-name" version = "1.3.10" description = "Resolve a name to an object." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -6130,7 +5877,6 @@ files = [ name = "platformdirs" version = "3.6.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6146,7 +5892,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest- name = "playwright" version = "1.35.0" description = "A high-level API to automate web browsers" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -6168,7 +5913,6 @@ typing-extensions = {version = "*", markers = "python_version <= \"3.8\""} name = "pluggy" version = "1.0.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -6184,7 +5928,6 @@ testing = ["pytest", "pytest-benchmark"] name = "pooch" version = "1.6.0" description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -6206,7 +5949,6 @@ xxhash = ["xxhash (>=1.4.3)"] name = "portalocker" version = "2.7.0" description = "Wraps the portalocker recipe for easy usage" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -6226,7 +5968,6 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p name = "pox" version = "0.3.2" description = "utilities for filesystem exploration and automated builds" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6238,7 +5979,6 @@ files = [ name = "ppft" version = "1.7.6.6" description = "distributed and parallel python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6253,7 +5993,6 @@ dill = ["dill (>=0.3.6)"] name = "prometheus-client" version = "0.17.0" description = "Python client for the Prometheus monitoring system." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -6268,7 +6007,6 @@ twisted = ["twisted"] name = "prompt-toolkit" version = "3.0.38" description = "Library for building powerful interactive command lines in Python" -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -6283,7 +6021,6 @@ wcwidth = "*" name = "protobuf" version = "3.19.6" description = "Protocol Buffers" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -6318,7 +6055,6 @@ files = [ name = "psutil" version = "5.9.5" description = "Cross-platform lib for process and system monitoring in Python." -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -6345,7 +6081,6 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] name = "psychicapi" version = "0.8.0" description = "Psychic.dev is an open-source data integration platform for LLMs. 
This is the Python client for Psychic" -category = "main" optional = true python-versions = "*" files = [ @@ -6360,7 +6095,6 @@ requests = "*" name = "psycopg2-binary" version = "2.9.6" description = "psycopg2 - Python-PostgreSQL Database Adapter" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -6432,7 +6166,6 @@ files = [ name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -category = "main" optional = false python-versions = "*" files = [ @@ -6444,7 +6177,6 @@ files = [ name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" -category = "dev" optional = false python-versions = "*" files = [ @@ -6459,7 +6191,6 @@ tests = ["pytest"] name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -6471,7 +6202,6 @@ files = [ name = "py-trello" version = "0.19.0" description = "Python wrapper around the Trello API" -category = "main" optional = true python-versions = "*" files = [ @@ -6488,7 +6218,6 @@ requests-oauthlib = ">=0.4.1" name = "py4j" version = "0.10.9.7" description = "Enables Python programs to dynamically access arbitrary Java objects" -category = "main" optional = true python-versions = "*" files = [ @@ -6500,7 +6229,6 @@ files = [ name = "pyaes" version = "1.6.1" description = "Pure-Python Implementation of the AES block-cipher and common modes of operation" -category = "main" optional = true python-versions = "*" files = [ @@ -6511,7 +6239,6 @@ files = [ name = "pyarrow" version = "12.0.1" description = "Python library for Apache Arrow" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6549,7 +6276,6 @@ numpy = ">=1.16.6" name = "pyasn1" version = "0.5.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -6561,7 +6287,6 @@ files = [ name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -6576,7 +6301,6 @@ pyasn1 = ">=0.4.6,<0.6.0" name = "pycares" version = "4.3.0" description = "Python interface for c-ares" -category = "main" optional = true python-versions = "*" files = [ @@ -6644,7 +6368,6 @@ idna = ["idna (>=2.1)"] name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -6656,7 +6379,6 @@ files = [ name = "pydantic" version = "1.10.12" description = "Data validation and settings management using python type hints" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6709,7 +6431,6 @@ email = ["email-validator (>=1.0.3)"] name = "pydeck" version = "0.8.0" description = "Widget for deck.gl maps" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6729,7 +6450,6 @@ jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "t name = "pyee" version = "9.0.4" description = "A port of node.js's EventEmitter to python." 
-category = "dev" optional = false python-versions = "*" files = [ @@ -6744,7 +6464,6 @@ typing-extensions = "*" name = "pygments" version = "2.15.1" description = "Pygments is a syntax highlighting package written in Python." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6759,7 +6478,6 @@ plugins = ["importlib-metadata"] name = "pyjwt" version = "2.7.0" description = "JSON Web Token implementation in Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6780,7 +6498,6 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] name = "pylance" version = "0.4.21" description = "python wrapper for lance-rs" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -6802,7 +6519,6 @@ tests = ["duckdb", "polars[pandas,pyarrow]", "pytest"] name = "pymongo" version = "4.3.3" description = "Python driver for MongoDB " -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6897,7 +6613,6 @@ zstd = ["zstandard"] name = "pympler" version = "1.0.1" description = "A development tool to measure, monitor and analyze the memory behavior of Python objects." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -6909,7 +6624,6 @@ files = [ name = "pymupdf" version = "1.22.3" description = "Python bindings for the PDF toolkit and renderer MuPDF" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6949,7 +6663,6 @@ files = [ name = "pyowm" version = "3.3.0" description = "A Python wrapper around OpenWeatherMap web APIs" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6969,7 +6682,6 @@ requests = [ name = "pyparsing" version = "3.0.9" description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "main" optional = true python-versions = ">=3.6.8" files = [ @@ -6984,7 +6696,6 @@ diagrams = ["jinja2", "railroad-diagrams"] name = "pypdf" version = "3.9.1" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7006,7 +6717,6 @@ image = ["Pillow"] name = "pypdfium2" version = "4.15.0" description = "Python bindings to PDFium" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7028,7 +6738,6 @@ files = [ name = "pyphen" version = "0.14.0" description = "Pure Python module to hyphenate text" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7044,7 +6753,6 @@ test = ["flake8", "isort", "pytest"] name = "pyproj" version = "3.5.0" description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -7092,7 +6800,6 @@ certifi = "*" name = "pyproject-hooks" version = "1.0.0" description = "Wrappers to call pyproject.toml-based build backend hooks." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7107,7 +6814,6 @@ tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} name = "pyrsistent" version = "0.19.3" description = "Persistent/Functional/Immutable data structures" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -7144,7 +6850,6 @@ files = [ name = "pysocks" version = "1.7.1" description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
-category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -7157,7 +6862,6 @@ files = [ name = "pyspark" version = "3.4.0" description = "Apache Spark Python API" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7178,7 +6882,6 @@ sql = ["numpy (>=1.15)", "pandas (>=1.0.5)", "pyarrow (>=1.0.0)"] name = "pytesseract" version = "0.3.10" description = "Python-tesseract is a python wrapper for Google's Tesseract-OCR" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7194,7 +6897,6 @@ Pillow = ">=8.0.0" name = "pytest" version = "7.3.2" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7217,7 +6919,6 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "pytest-asyncio" version = "0.20.3" description = "Pytest support for asyncio" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7236,7 +6937,6 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy name = "pytest-cov" version = "4.1.0" description = "Pytest plugin for measuring coverage." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7255,7 +6955,6 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytest-dotenv" version = "0.5.2" description = "A py.test plugin that parses environment files before running tests" -category = "dev" optional = false python-versions = "*" files = [ @@ -7271,7 +6970,6 @@ python-dotenv = ">=0.9.1" name = "pytest-mock" version = "3.11.1" description = "Thin-wrapper around the mock package for easier use with pytest" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7289,7 +6987,6 @@ dev = ["pre-commit", "pytest-asyncio", "tox"] name = "pytest-socket" version = "0.6.0" description = "Pytest Plugin to disable socket calls during tests" -category = "dev" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -7304,7 +7001,6 @@ pytest = ">=3.6.3" name = "pytest-vcr" version = "1.0.2" description = "Plugin for managing VCR.py cassettes" -category = "dev" optional = false python-versions = "*" files = [ @@ -7320,7 +7016,6 @@ vcrpy = "*" name = "pytest-watcher" version = "0.2.6" description = "Continiously runs pytest on changes in *.py files" -category = "dev" optional = false python-versions = ">=3.7.0,<4.0.0" files = [ @@ -7335,7 +7030,6 @@ watchdog = ">=2.0.0" name = "python-arango" version = "7.5.9" description = "Python Driver for ArangoDB" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -7359,7 +7053,6 @@ dev = ["black (>=22.3.0)", "flake8 (>=4.0.1)", "isort (>=5.10.1)", "mock", "mypy name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -7374,7 +7067,6 @@ six = ">=1.5" name = "python-dotenv" version = "1.0.0" description = "Read key-value pairs from a .env file and set them as environment variables" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -7389,7 +7081,6 @@ cli = ["click (>=5.0)"] name = "python-json-logger" version = "2.0.7" description = "A python library adding a json log formatter" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -7401,7 +7092,6 @@ files = [ name = "pytz" version = "2023.3" description = 
"World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -7413,7 +7103,6 @@ files = [ name = "pytz-deprecation-shim" version = "0.1.0.post0" description = "Shims to make deprecation of pytz easier" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -7429,7 +7118,6 @@ tzdata = {version = "*", markers = "python_version >= \"3.6\""} name = "pyvespa" version = "0.33.0" description = "Python API for vespa.ai" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7454,7 +7142,6 @@ ml = ["keras-tuner", "tensorflow", "tensorflow-ranking", "torch (<1.13)", "trans name = "pywin32" version = "306" description = "Python for Window Extensions" -category = "main" optional = false python-versions = "*" files = [ @@ -7478,7 +7165,6 @@ files = [ name = "pywinpty" version = "2.0.10" description = "Pseudo terminal support for Windows from Python." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7494,7 +7180,6 @@ files = [ name = "pyyaml" version = "6.0" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -7544,7 +7229,6 @@ files = [ name = "pyzmq" version = "25.1.0" description = "Python bindings for 0MQ" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -7634,7 +7318,6 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} name = "qdrant-client" version = "1.4.0" description = "Client library for the Qdrant vector search engine" -category = "main" optional = true python-versions = ">=3.7,<3.12" files = [ @@ -7655,7 +7338,6 @@ urllib3 = ">=1.26.14,<2.0.0" name = "qtconsole" version = "5.4.3" description = "Jupyter Qt console" -category = "dev" optional = false python-versions = ">= 3.7" files = [ @@ -7682,7 +7364,6 @@ test = ["flaky", "pytest", "pytest-qt"] name = "qtpy" version = "2.3.1" description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7700,7 +7381,6 @@ test = ["pytest (>=6,!=7.0.0,!=7.0.1)", "pytest-cov (>=3.0.0)", "pytest-qt"] name = "rank-bm25" version = "0.2.2" description = "Various BM25 algorithms for document ranking" -category = "main" optional = true python-versions = "*" files = [ @@ -7718,7 +7398,6 @@ dev = ["pytest"] name = "rapidfuzz" version = "3.1.1" description = "rapid fuzzy string matching" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7823,7 +7502,6 @@ full = ["numpy"] name = "ratelimiter" version = "1.2.0.post0" description = "Simple python rate limiting object" -category = "main" optional = true python-versions = "*" files = [ @@ -7838,7 +7516,6 @@ test = ["pytest (>=3.0)", "pytest-asyncio"] name = "rdflib" version = "6.3.2" description = "RDFLib is a Python library for working with RDF, a simple yet powerful language for representing information." 
-category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -7860,7 +7537,6 @@ networkx = ["networkx (>=2.0.0,<3.0.0)"] name = "redis" version = "4.5.5" description = "Python client for Redis database and key-value store" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7879,7 +7555,6 @@ ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)" name = "regex" version = "2023.6.3" description = "Alternative regular expression module, to replace re." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -7977,7 +7652,6 @@ files = [ name = "requests" version = "2.28.2" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7, <4" files = [ @@ -8000,7 +7674,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "requests-file" version = "1.5.1" description = "File transport adapter for Requests" -category = "main" optional = true python-versions = "*" files = [ @@ -8016,7 +7689,6 @@ six = "*" name = "requests-oauthlib" version = "1.3.1" description = "OAuthlib authentication support for Requests." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -8035,7 +7707,6 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] name = "requests-toolbelt" version = "1.0.0" description = "A utility belt for advanced users of python-requests" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -8050,7 +7721,6 @@ requests = ">=2.0.1,<3.0.0" name = "responses" version = "0.22.0" description = "A utility library for mocking out the `requests` Python library." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -8071,7 +7741,6 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy name = "retry" version = "0.9.2" description = "Easy to use retry decorator." -category = "main" optional = true python-versions = "*" files = [ @@ -8087,7 +7756,6 @@ py = ">=1.4.26,<2.0.0" name = "rfc3339-validator" version = "0.1.4" description = "A pure python RFC3339 validator" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -8102,7 +7770,6 @@ six = "*" name = "rfc3986-validator" version = "0.1.1" description = "Pure python rfc3986 validator" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -8114,7 +7781,6 @@ files = [ name = "rich" version = "13.4.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -8134,7 +7800,6 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" -category = "main" optional = true python-versions = ">=3.6,<4" files = [ @@ -8149,7 +7814,6 @@ pyasn1 = ">=0.1.3" name = "ruff" version = "0.0.249" description = "An extremely fast Python linter, written in Rust." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -8176,7 +7840,6 @@ files = [ name = "s3transfer" version = "0.6.1" description = "An Amazon S3 Transfer Manager" -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -8194,7 +7857,6 @@ crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] name = "safetensors" version = "0.3.1" description = "Fast and Safe Tensor serialization" -category = "main" optional = true python-versions = "*" files = [ @@ -8255,7 +7917,6 @@ torch = ["torch (>=1.10)"] name = "scikit-learn" version = "1.2.2" description = "A set of python modules for machine learning and data mining" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -8298,7 +7959,6 @@ tests = ["black (>=22.3.0)", "flake8 (>=3.8.2)", "matplotlib (>=3.1.3)", "mypy ( name = "scipy" version = "1.9.3" description = "Fundamental algorithms for scientific computing in Python" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -8337,7 +7997,6 @@ test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "sciki name = "send2trash" version = "1.8.2" description = "Send file to trash natively under Mac OS X, Windows and Linux" -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -8354,7 +8013,6 @@ win32 = ["pywin32"] name = "sentence-transformers" version = "2.2.2" description = "Multilingual text embeddings" -category = "main" optional = true python-versions = ">=3.6.0" files = [ @@ -8377,7 +8035,6 @@ transformers = ">=4.6.0,<5.0.0" name = "sentencepiece" version = "0.1.99" description = "SentencePiece python wrapper" -category = "main" optional = true python-versions = "*" files = [ @@ -8432,7 +8089,6 @@ files = [ name = "setuptools" version = "67.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8449,7 +8105,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( name = "sgmllib3k" version = "1.0.0" description = "Py3k port of sgmllib." 
-category = "main" optional = true python-versions = "*" files = [ @@ -8460,7 +8115,6 @@ files = [ name = "shapely" version = "2.0.1" description = "Manipulation and analysis of geometric objects" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8508,14 +8162,13 @@ files = [ numpy = ">=1.14" [package.extras] -docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] test = ["pytest", "pytest-cov"] [[package]] name = "singlestoredb" version = "0.7.1" description = "Interface to the SingleStore database and cluster management APIs" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8548,7 +8201,6 @@ sqlalchemy = ["sqlalchemy-singlestoredb"] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -8560,7 +8212,6 @@ files = [ name = "smmap" version = "5.0.0" description = "A pure Python implementation of a sliding window memory map manager" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8572,7 +8223,6 @@ files = [ name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8584,7 +8234,6 @@ files = [ name = "socksio" version = "1.0.0" description = "Sans-I/O implementation of SOCKS4, SOCKS4A, and SOCKS5." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8596,7 +8245,6 @@ files = [ name = "soundfile" version = "0.12.1" description = "An audio library based on libsndfile, CFFI and NumPy" -category = "main" optional = true python-versions = "*" files = [ @@ -8620,7 +8268,6 @@ numpy = ["numpy"] name = "soupsieve" version = "2.4.1" description = "A modern CSS selector implementation for Beautiful Soup." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8632,7 +8279,6 @@ files = [ name = "soxr" version = "0.3.5" description = "High quality, one-dimensional sample-rate conversion library" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8677,7 +8323,6 @@ test = ["pytest"] name = "sqlalchemy" version = "2.0.16" description = "Database Abstraction Library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8756,7 +8401,6 @@ sqlcipher = ["sqlcipher3-binary"] name = "sqlitedict" version = "2.1.0" description = "Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe." -category = "main" optional = true python-versions = "*" files = [ @@ -8767,7 +8411,6 @@ files = [ name = "sqlparams" version = "5.1.0" description = "Convert between various DB API 2.0 parameter styles." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8779,7 +8422,6 @@ files = [ name = "stack-data" version = "0.6.2" description = "Extract data from python stack frames and tracebacks for informative displays" -category = "dev" optional = false python-versions = "*" files = [ @@ -8799,7 +8441,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "streamlit" version = "1.22.0" description = "A faster way to build and share data apps" -category = "main" optional = true python-versions = ">=3.7, !=3.9.7" files = [ @@ -8840,7 +8481,6 @@ snowflake = ["snowflake-snowpark-python"] name = "stringcase" version = "1.2.0" description = "String case converter." -category = "main" optional = true python-versions = "*" files = [ @@ -8851,7 +8491,6 @@ files = [ name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -8866,7 +8505,6 @@ mpmath = ">=0.19" name = "syrupy" version = "4.0.2" description = "Pytest Snapshot Test Utility" -category = "dev" optional = false python-versions = ">=3.8.1,<4" files = [ @@ -8882,7 +8520,6 @@ pytest = ">=7.0.0,<8.0.0" name = "telethon" version = "1.28.5" description = "Full-featured Telegram client library for Python 3" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -8901,7 +8538,6 @@ cryptg = ["cryptg"] name = "tenacity" version = "8.2.2" description = "Retry code until it succeeds" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -8916,7 +8552,6 @@ doc = ["reno", "sphinx", "tornado (>=4.5)"] name = "tensorboard" version = "2.11.2" description = "TensorBoard lets you watch Tensors Flow" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8942,7 +8577,6 @@ wheel = ">=0.26" name = "tensorboard-data-server" version = "0.6.1" description = "Fast data loading for TensorBoard" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8955,7 +8589,6 @@ files = [ name = "tensorboard-plugin-wit" version = "1.8.1" description = "What-If Tool TensorBoard plugin." -category = "main" optional = true python-versions = "*" files = [ @@ -8966,7 +8599,6 @@ files = [ name = "tensorflow" version = "2.11.1" description = "TensorFlow is an open source machine learning framework for everyone." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9011,7 +8643,6 @@ wrapt = ">=1.11.0" name = "tensorflow-estimator" version = "2.11.0" description = "TensorFlow Estimator." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9022,7 +8653,6 @@ files = [ name = "tensorflow-hub" version = "0.13.0" description = "TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models." -category = "main" optional = true python-versions = "*" files = [ @@ -9041,7 +8671,6 @@ make-nearest-neighbour-index = ["annoy", "apache-beam"] name = "tensorflow-io-gcs-filesystem" version = "0.32.0" description = "TensorFlow IO" -category = "main" optional = true python-versions = ">=3.7, <3.12" files = [ @@ -9072,7 +8701,6 @@ tensorflow-rocm = ["tensorflow-rocm (>=2.12.0,<2.13.0)"] name = "tensorflow-macos" version = "2.11.0" description = "TensorFlow is an open source machine learning framework for everyone." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9110,7 +8738,6 @@ wrapt = ">=1.11.0" name = "tensorflow-text" version = "2.11.0" description = "TF.Text is a TensorFlow library of text related ops, modules, and subgraphs." -category = "main" optional = true python-versions = "*" files = [ @@ -9137,7 +8764,6 @@ tests = ["absl-py", "pytest", "tensorflow-datasets (>=3.2.0)"] name = "termcolor" version = "2.3.0" description = "ANSI color formatting for output in terminal" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9152,7 +8778,6 @@ tests = ["pytest", "pytest-cov"] name = "terminado" version = "0.17.1" description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9173,7 +8798,6 @@ test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] name = "textstat" version = "0.7.3" description = "Calculate statistical features from text" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -9188,7 +8812,6 @@ pyphen = "*" name = "threadpoolctl" version = "3.1.0" description = "threadpoolctl" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -9200,7 +8823,6 @@ files = [ name = "tigrisdb" version = "1.0.0b6" description = "Python SDK for Tigris " -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -9216,7 +8838,6 @@ protobuf = ">=3.19.6" name = "tiktoken" version = "0.3.3" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -9262,7 +8883,6 @@ blobfile = ["blobfile (>=2)"] name = "tinycss2" version = "1.2.1" description = "A tiny CSS parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9281,7 +8901,6 @@ test = ["flake8", "isort", "pytest"] name = "tinysegmenter" version = "0.3" description = "Very compact Japanese tokenizer" -category = "main" optional = true python-versions = "*" files = [ @@ -9292,7 +8911,6 @@ files = [ name = "tldextract" version = "3.4.4" description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9310,7 +8928,6 @@ requests-file = ">=1.4" name = "tokenizers" version = "0.13.3" description = "Fast and Customizable Tokenizers" -category = "main" optional = true python-versions = "*" files = [ @@ -9365,7 +8982,6 @@ testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -9377,7 +8993,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9389,7 +9004,6 @@ files = [ name = "toolz" version = "0.12.0" description = "List processing tools and functional utilities" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -9401,7 +9015,6 @@ files = [ name = "torch" version = "1.13.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -9442,7 +9055,6 @@ opt-einsum = ["opt-einsum (>=3.3)"] name = "torchvision" version = "0.14.1" description = "image and video datasets and models for torch deep learning" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9469,7 +9081,7 @@ files = [ [package.dependencies] numpy = "*" -pillow = ">=5.3.0,<8.3.0 || >=8.4.0" +pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" requests = "*" torch = "1.13.1" typing-extensions = "*" @@ -9481,7 +9093,6 @@ scipy = ["scipy"] name = "tornado" version = "6.3.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-category = "main" optional = false python-versions = ">= 3.8" files = [ @@ -9502,7 +9113,6 @@ files = [ name = "tqdm" version = "4.65.0" description = "Fast, Extensible Progress Meter" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9523,7 +9133,6 @@ telegram = ["requests"] name = "traitlets" version = "5.9.0" description = "Traitlets Python configuration system" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9539,7 +9148,6 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] name = "transformers" version = "4.30.2" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -9609,7 +9217,6 @@ vision = ["Pillow"] name = "types-chardet" version = "5.0.4.6" description = "Typing stubs for chardet" -category = "dev" optional = false python-versions = "*" files = [ @@ -9621,7 +9228,6 @@ files = [ name = "types-protobuf" version = "4.23.0.1" description = "Typing stubs for protobuf" -category = "dev" optional = false python-versions = "*" files = [ @@ -9633,7 +9239,6 @@ files = [ name = "types-pyopenssl" version = "23.2.0.0" description = "Typing stubs for pyOpenSSL" -category = "dev" optional = false python-versions = "*" files = [ @@ -9648,7 +9253,6 @@ cryptography = ">=35.0.0" name = "types-pytz" version = "2023.3.0.0" description = "Typing stubs for pytz" -category = "dev" optional = false python-versions = "*" files = [ @@ -9660,7 +9264,6 @@ files = [ name = "types-pyyaml" version = "6.0.12.10" description = "Typing stubs for PyYAML" -category = "dev" optional = false python-versions = "*" files = [ @@ -9672,7 +9275,6 @@ files = [ name = "types-redis" version = "4.5.5.2" description = "Typing stubs for redis" -category = "dev" optional = false python-versions = "*" files = [ @@ -9688,7 +9290,6 @@ types-pyOpenSSL = "*" name = "types-requests" version = "2.31.0.1" description = "Typing stubs for requests" -category = "main" optional = false python-versions = "*" files = [ @@ -9703,7 +9304,6 @@ types-urllib3 = "*" name = "types-toml" version = "0.10.8.6" description = "Typing stubs for toml" -category = "dev" optional = false python-versions = "*" files = [ @@ -9715,7 +9315,6 @@ files = [ name = "types-urllib3" version = "1.26.25.13" description = "Typing stubs for urllib3" -category = "main" optional = false python-versions = "*" files = [ @@ -9727,7 +9326,6 @@ files = [ name = "typing-extensions" version = "4.7.1" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9739,7 +9337,6 @@ files = [ name = "typing-inspect" version = "0.9.0" description = "Runtime inspection utilities for typing module." 
-category = "main" optional = false python-versions = "*" files = [ @@ -9755,7 +9352,6 @@ typing-extensions = ">=3.7.4" name = "tzdata" version = "2023.3" description = "Provider of IANA time zone data" -category = "main" optional = false python-versions = ">=2" files = [ @@ -9767,7 +9363,6 @@ files = [ name = "tzlocal" version = "4.3" description = "tzinfo object for the local timezone" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9787,7 +9382,6 @@ devenv = ["black", "check-manifest", "flake8", "pyroma", "pytest (>=4.3)", "pyte name = "uri-template" version = "1.2.0" description = "RFC 6570 URI Template Processor" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -9802,7 +9396,6 @@ dev = ["flake8 (<4.0.0)", "flake8-annotations", "flake8-bugbear", "flake8-commas name = "uritemplate" version = "4.1.1" description = "Implementation of RFC 6570 URI Templates" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -9814,7 +9407,6 @@ files = [ name = "urllib3" version = "1.26.16" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ @@ -9831,7 +9423,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] name = "validators" version = "0.20.0" description = "Python Data Validation for Humans™." -category = "main" optional = true python-versions = ">=3.4" files = [ @@ -9848,7 +9439,6 @@ test = ["flake8 (>=2.4.0)", "isort (>=4.2.2)", "pytest (>=2.2.3)"] name = "vcrpy" version = "4.3.1" description = "Automatically mock your HTTP interactions to simplify and speed up testing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9863,11 +9453,37 @@ urllib3 = {version = "<2", markers = "python_version < \"3.10\""} wrapt = "*" yarl = "*" +[[package]] +name = "vowpal-wabbit-next" +version = "0.6.0" +description = "Experimental python bindings for VowpalWabbit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "vowpal-wabbit-next-0.6.0.tar.gz", hash = "sha256:f0381614d99fac6a0f52e995ee0bfc7b681054f397bea7ff08b8a523d5315a54"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:cfbb831cfe9eb81185aff7cdca437ae17c6d9aca8d74e26c326e3ef4ee8e81e7"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d31829778f9c600f5c121f614516ca1bc9ede5d1bc77b1eb3b59b32d9138db9"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:714347606ab302a2f72870b6ae6dce58de4bec1b489f4bd65d80a8e326e1db8a"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-macosx_10_13_universal2.whl", hash = "sha256:3a8482d5c0b9357fdb36b62d659e6b74e93aeab165b910292572a98e91d7a014"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e4349099b938102f51fb6fedf035bc1deacb2971cd2a48641ca7d45186efda0"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:c8f58cdc49f270b1bed6f0fdd7520c8ba1b328de5cd8a2760c0ec70a630de92e"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b7052ce7212fd1cae8ffd966e240c814f3c1df08fd612437d48f0f23e7694c"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d24d9c380d0e9b41151337c7f9e2a33ec5bfd738fdee9f65c1a40e486234aca3"}, + {file = 
"vowpal_wabbit_next-0.6.0-cp38-cp38-macosx_10_13_universal2.whl", hash = "sha256:0d77a8c55249ec9a7f404939ecc6948db0527e522e8a7ae149ec7cd29b3ade04"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa2f52f1267fbc26c7757335f9c76a0f00b112971e04c85b8a9bc9e82300597"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d04f91200ecae73196d9f5601853d63afce8c1c8a0d310a608e8ddfa3b190cb"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-macosx_10_13_universal2.whl", hash = "sha256:2df4a652729c0db34afd8fb4fc49b0090d6f061e2d49899e5f092fd4c3d23253"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c289a260ab759f04903b441701cff66ea74d6c061d966caaba0c65ac12d05528"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d022cab07274f227df159a81bccf034def7dd54ad70392ee98743ffa4953072"}, +] + +[package.dependencies] +numpy = "*" + [[package]] name = "watchdog" version = "3.0.0" description = "Filesystem events monitoring" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9907,7 +9523,6 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wcwidth" version = "0.2.6" description = "Measures the displayed width of unicode strings in a terminal" -category = "dev" optional = false python-versions = "*" files = [ @@ -9919,7 +9534,6 @@ files = [ name = "weaviate-client" version = "3.20.1" description = "A python native Weaviate client" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -9940,7 +9554,6 @@ grpc = ["grpcio", "grpcio-tools"] name = "webcolors" version = "1.13" description = "A library for working with the color formats defined by HTML and CSS." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9956,7 +9569,6 @@ tests = ["pytest", "pytest-cov"] name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -category = "dev" optional = false python-versions = "*" files = [ @@ -9968,7 +9580,6 @@ files = [ name = "websocket-client" version = "1.6.0" description = "WebSocket client for Python with low level API options" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9985,7 +9596,6 @@ test = ["websockets"] name = "werkzeug" version = "2.3.6" description = "The comprehensive WSGI web application library." 
-category = "main" optional = true python-versions = ">=3.8" files = [ @@ -10003,7 +9613,6 @@ watchdog = ["watchdog (>=2.3)"] name = "wget" version = "3.2" description = "pure python download utility" -category = "main" optional = true python-versions = "*" files = [ @@ -10014,7 +9623,6 @@ files = [ name = "wheel" version = "0.40.0" description = "A built-package format for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -10029,7 +9637,6 @@ test = ["pytest (>=6.0.0)"] name = "whylabs-client" version = "0.5.1" description = "WhyLabs API client" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10045,7 +9652,6 @@ urllib3 = ">=1.25.3" name = "whylogs" version = "1.2.3" description = "Profile and monitor your ML data pipeline end-to-end" -category = "main" optional = true python-versions = ">=3.7.1,<4" files = [ @@ -10079,7 +9685,6 @@ viz = ["Pillow (>=9.2.0,<10.0.0)", "ipython", "numpy", "numpy (>=1.23.2)", "pyba name = "whylogs-sketching" version = "3.4.1.dev3" description = "sketching library of whylogs" -category = "main" optional = true python-versions = "*" files = [ @@ -10120,7 +9725,6 @@ files = [ name = "widgetsnbextension" version = "4.0.7" description = "Jupyter interactive widgets for Jupyter Notebook" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -10132,7 +9736,6 @@ files = [ name = "wikipedia" version = "1.4.0" description = "Wikipedia API for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -10147,7 +9750,6 @@ requests = ">=2.0.0,<3.0.0" name = "win32-setctime" version = "1.1.0" description = "A small Python utility to set file creation time on Windows" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -10162,7 +9764,6 @@ dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] name = "wolframalpha" version = "5.0.0" description = "Wolfram|Alpha 2.0 API client" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10183,7 +9784,6 @@ testing = ["keyring", "pmxbot", "pytest (>=3.5,!=3.7.3)", "pytest-black (>=0.3.7 name = "wonderwords" version = "2.2.0" description = "A python package for random words and sentences in the english language" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10198,7 +9798,6 @@ cli = ["rich (==9.10.0)"] name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." 
-category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -10283,7 +9882,6 @@ files = [ name = "xata" version = "1.0.0a7" description = "Python client for Xata.io" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -10301,7 +9899,6 @@ requests = ">=2.28.1,<3.0.0" name = "xmltodict" version = "0.13.0" description = "Makes working with XML feel like you are working with JSON" -category = "main" optional = true python-versions = ">=3.4" files = [ @@ -10313,7 +9910,6 @@ files = [ name = "yarl" version = "1.9.2" description = "Yet another URL library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -10401,7 +9997,6 @@ multidict = ">=4.0" name = "zipp" version = "3.15.0" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -10417,7 +10012,6 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more name = "zstandard" version = "0.21.0" description = "Zstandard bindings for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -10479,7 +10073,7 @@ clarifai = ["clarifai"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -extended-testing = ["amazon-textract-caller", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "streamlit", "sympy", "telethon", "tqdm", "xata", "xmltodict"] +extended-testing = ["amazon-textract-caller", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "streamlit", "sympy", "telethon", "tqdm", "vowpal-wabbit-next", "xata", "xmltodict"] javascript = ["esprima"] llms = ["clarifai", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"] openai = ["openai", "tiktoken"] @@ -10489,4 +10083,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "0247674f3f274fd2249ceb02c23a468f911a7c482796ea67252b203d1ab938ae" +content-hash = "505c324e9a84f481084f62ebccf3091e18a165b753d96bd43ec60344c33dc01d" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 25087bf228..575b0ba089 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -338,6 +338,7 @@ extended_testing = [ "xmltodict", "faiss-cpu", "openapi-schema-pydantic", + "vowpal-wabbit-next" ] [tool.ruff] diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index e0e5f5dc21..0a5ba9dd31 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ 
b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -1,13 +1,15 @@ -import langchain.chains.rl_chain.pick_best_chain as pick_best_chain -import langchain.chains.rl_chain.base as rl_chain -from test_utils import MockEncoder import pytest -from langchain.prompts.prompt import PromptTemplate +from test_utils import MockEncoder + +import langchain.chains.rl_chain.base as rl_chain +import langchain.chains.rl_chain.pick_best_chain as pick_best_chain from langchain.chat_models import FakeListChatModel +from langchain.prompts.prompt import PromptTemplate encoded_text = "[ e n c o d e d ] " +@pytest.mark.requires("vowpal_wabbit_next") def setup(): _PROMPT_TEMPLATE = """This is a dummy prompt that will be ignored by the fake llm""" PROMPT = PromptTemplate(input_variables=[], template=_PROMPT_TEMPLATE) @@ -16,6 +18,7 @@ def setup(): return llm, PROMPT +@pytest.mark.requires("vowpal_wabbit_next") def test_multiple_ToSelectFrom_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) @@ -28,6 +31,7 @@ def test_multiple_ToSelectFrom_throws(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_missing_basedOn_from_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) @@ -36,6 +40,7 @@ def test_missing_basedOn_from_throws(): chain.run(action=rl_chain.ToSelectFrom(actions)) +@pytest.mark.requires("vowpal_wabbit_next") def test_ToSelectFrom_not_a_list_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) @@ -47,6 +52,7 @@ def test_ToSelectFrom_not_a_list_throws(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_update_with_delayed_score_with_auto_validator_throws(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that @@ -68,6 +74,7 @@ def test_update_with_delayed_score_with_auto_validator_throws(): chain.update_with_delayed_score(event=selection_metadata, score=100) +@pytest.mark.requires("vowpal_wabbit_next") def test_update_with_delayed_score_force(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that @@ -91,6 +98,7 @@ def test_update_with_delayed_score_force(): assert selection_metadata.selected.score == 100.0 +@pytest.mark.requires("vowpal_wabbit_next") def test_update_with_delayed_score(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( @@ -108,6 +116,7 @@ def test_update_with_delayed_score(): assert selection_metadata.selected.score == 100.0 +@pytest.mark.requires("vowpal_wabbit_next") def test_user_defined_scorer(): llm, PROMPT = setup() @@ -129,6 +138,7 @@ def test_user_defined_scorer(): assert selection_metadata.selected.score == 200.0 +@pytest.mark.requires("vowpal_wabbit_next") def test_default_embeddings(): llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -162,6 +172,7 @@ def test_default_embeddings(): assert vw_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_default_embeddings_off(): llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -187,6 +198,7 @@ def test_default_embeddings_off(): assert vw_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_default_embeddings_mixed_w_explicit_user_embeddings(): llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -221,6 +233,7 @@ def 
test_default_embeddings_mixed_w_explicit_user_embeddings(): assert vw_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_default_no_scorer_specified(): _, PROMPT = setup() chain_llm = FakeListChatModel(responses=[100]) @@ -235,6 +248,7 @@ def test_default_no_scorer_specified(): assert selection_metadata.selected.score == 100.0 +@pytest.mark.requires("vowpal_wabbit_next") def test_explicitly_no_scorer(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( @@ -250,6 +264,7 @@ def test_explicitly_no_scorer(): assert selection_metadata.selected.score == None +@pytest.mark.requires("vowpal_wabbit_next") def test_auto_scorer_with_user_defined_llm(): llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=[300]) @@ -268,15 +283,14 @@ def test_auto_scorer_with_user_defined_llm(): assert selection_metadata.selected.score == 300.0 +@pytest.mark.requires("vowpal_wabbit_next") def test_calling_chain_w_reserved_inputs_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) with pytest.raises(ValueError): chain.run( User=rl_chain.BasedOn("Context"), - rl_chain_selected_based_on=rl_chain.ToSelectFrom( - ["0", "1", "2"] - ), + rl_chain_selected_based_on=rl_chain.ToSelectFrom(["0", "1", "2"]), ) with pytest.raises(ValueError): diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 22097c6ef3..501700a69a 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -1,12 +1,13 @@ -import langchain.chains.rl_chain.pick_best_chain as pick_best_chain -import langchain.chains.rl_chain.base as rl_chain +import pytest from test_utils import MockEncoder -import pytest +import langchain.chains.rl_chain.base as rl_chain +import langchain.chains.rl_chain.pick_best_chain as pick_best_chain encoded_text = "[ e n c o d e d ] " +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_context_throws(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_action = {"action": ["0", "1", "2"]} @@ -17,6 +18,7 @@ def test_pickbest_textembedder_missing_context_throws(): feature_embedder.format(event) +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_actions_throws(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) event = pick_best_chain.PickBest.Event( @@ -26,6 +28,7 @@ def test_pickbest_textembedder_missing_actions_throws(): feature_embedder.format(event) +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_no_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} @@ -37,6 +40,7 @@ def test_pickbest_textembedder_no_label_no_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_label_no_score_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} @@ -52,6 +56,7 @@ def test_pickbest_textembedder_w_label_no_score_no_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions 
= {"action1": ["0", "1", "2"]} @@ -69,6 +74,7 @@ def test_pickbest_textembedder_w_full_label_no_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_w_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" @@ -92,6 +98,7 @@ def test_pickbest_textembedder_w_full_label_w_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" @@ -115,6 +122,7 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} @@ -127,6 +135,7 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} @@ -140,6 +149,7 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} @@ -153,6 +163,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -168,9 +179,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) - named_actions = { - "action1": rl_chain.Embed([{"a": str1, "b": str1}, str2, str3]) - } + named_actions = {"action1": rl_chain.Embed([{"a": str1, "b": str1}, str2, str3])} context = { "context1": rl_chain.Embed(ctx_str_1), "context2": rl_chain.Embed(ctx_str_2), @@ -185,6 +194,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -201,9 +211,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) named_actions = { - "action1": rl_chain.EmbedAndKeep( - [{"a": str1, "b": str1}, str2, str3] - ) + "action1": rl_chain.EmbedAndKeep([{"a": str1, "b": str1}, str2, str3]) } context = { "context1": rl_chain.EmbedAndKeep(ctx_str_1), @@ -219,6 +227,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): feature_embedder = 
pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -252,6 +261,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -288,6 +298,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ assert vw_ex_str == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_raw_features_underscored(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "this is a long string" diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index fc3d02d9b0..895fa8ebb6 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -1,16 +1,18 @@ -import langchain.chains.rl_chain.base as base +import pytest from test_utils import MockEncoder -import pytest +import langchain.chains.rl_chain.base as base encoded_text = "[ e n c o d e d ] " +@pytest.mark.requires("vowpal_wabbit_next") def test_simple_context_str_no_emb(): expected = [{"a_namespace": "test"}] assert base.embed("test", MockEncoder(), "a_namespace") == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_simple_context_str_w_emb(): str1 = "test" encoded_str1 = " ".join(char for char in str1) @@ -25,6 +27,7 @@ def test_simple_context_str_w_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_simple_context_str_w_nested_emb(): # nested embeddings, innermost wins str1 = "test" @@ -42,11 +45,13 @@ def test_simple_context_str_w_nested_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_context_w_namespace_no_emb(): expected = [{"test_namespace": "test"}] assert base.embed({"test_namespace": "test"}, MockEncoder()) == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_context_w_namespace_w_emb(): str1 = "test" encoded_str1 = " ".join(char for char in str1) @@ -61,6 +66,7 @@ def test_context_w_namespace_w_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_context_w_namespace_w_emb2(): str1 = "test" encoded_str1 = " ".join(char for char in str1) @@ -75,6 +81,7 @@ def test_context_w_namespace_w_emb2(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_context_w_namespace_w_some_emb(): str1 = "test1" str2 = "test2" @@ -103,6 +110,7 @@ def test_context_w_namespace_w_some_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_simple_action_strlist_no_emb(): str1 = "test1" str2 = "test2" @@ -111,6 +119,7 @@ def test_simple_action_strlist_no_emb(): assert base.embed([str1, str2, str3], MockEncoder(), "a_namespace") == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_simple_action_strlist_w_emb(): str1 = "test1" str2 = "test2" @@ -138,6 +147,7 @@ def test_simple_action_strlist_w_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_simple_action_strlist_w_some_emb(): str1 = "test1" str2 = "test2" @@ -170,6 +180,7 @@ def test_simple_action_strlist_w_some_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_action_w_namespace_no_emb(): str1 = "test1" str2 = "test2" @@ -192,6 +203,7 @@ def test_action_w_namespace_no_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def 
test_action_w_namespace_w_emb(): str1 = "test1" str2 = "test2" @@ -233,6 +245,7 @@ def test_action_w_namespace_w_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_action_w_namespace_w_emb2(): str1 = "test1" str2 = "test2" @@ -278,6 +291,7 @@ def test_action_w_namespace_w_emb2(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_action_w_namespace_w_some_emb(): str1 = "test1" str2 = "test2" @@ -318,6 +332,7 @@ def test_action_w_namespace_w_some_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict(): str1 = "test1" str2 = "test2" @@ -368,6 +383,7 @@ def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_one_namespace_w_list_of_features_no_emb(): str1 = "test1" str2 = "test2" @@ -375,6 +391,7 @@ def test_one_namespace_w_list_of_features_no_emb(): assert base.embed({"test_namespace": [str1, str2]}, MockEncoder()) == expected +@pytest.mark.requires("vowpal_wabbit_next") def test_one_namespace_w_list_of_features_w_some_emb(): str1 = "test1" str2 = "test2" @@ -386,21 +403,25 @@ def test_one_namespace_w_list_of_features_w_some_emb(): ) +@pytest.mark.requires("vowpal_wabbit_next") def test_nested_list_features_throws(): with pytest.raises(ValueError): base.embed({"test_namespace": [[1, 2], [3, 4]]}, MockEncoder()) +@pytest.mark.requires("vowpal_wabbit_next") def test_dict_in_list_throws(): with pytest.raises(ValueError): base.embed({"test_namespace": [{"a": 1}, {"b": 2}]}, MockEncoder()) +@pytest.mark.requires("vowpal_wabbit_next") def test_nested_dict_throws(): with pytest.raises(ValueError): base.embed({"test_namespace": {"a": {"b": 1}}}, MockEncoder()) +@pytest.mark.requires("vowpal_wabbit_next") def test_list_of_tuples_throws(): with pytest.raises(ValueError): base.embed({"test_namespace": [("a", 1), ("b", 2)]}, MockEncoder()) From a2f807e0554924217133675d7c3a6e4ac875b2b2 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 05:51:26 -0400 Subject: [PATCH 07/65] make vw dependency optional --- libs/langchain/poetry.lock | 4 ++-- libs/langchain/pyproject.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 75ae888efd..84d8763646 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -9457,7 +9457,7 @@ yarl = "*" name = "vowpal-wabbit-next" version = "0.6.0" description = "Experimental python bindings for VowpalWabbit" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "vowpal-wabbit-next-0.6.0.tar.gz", hash = "sha256:f0381614d99fac6a0f52e995ee0bfc7b681054f397bea7ff08b8a523d5315a54"}, @@ -10083,4 +10083,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "505c324e9a84f481084f62ebccf3091e18a165b753d96bd43ec60344c33dc01d" +content-hash = "eba7c01296c1948ab432ca2bc70e274e79a4135d66b4c189d6bb95b5e0c41198" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 575b0ba089..3d48a55968 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -125,7 +125,7 @@ newspaper3k = {version = "^0.2.8", optional = true} amazon-textract-caller = {version = "<2", optional = true} xata = {version = "^1.0.0a7", optional = true} xmltodict = {version = "^0.13.0", optional = true} -vowpal-wabbit-next = "0.6.0" +vowpal-wabbit-next = {version = "0.6.0", optional = true} [tool.poetry.group.test.dependencies] From 
5aafb3bc46b5e1b436281aa5b2b7536385a8a352 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 07:09:30 -0400 Subject: [PATCH 08/65] resolving linting and formatting errors --- .../langchain/chains/rl_chain/__init__.py | 26 +++-- .../langchain/chains/rl_chain/base.py | 98 +++++++++++-------- .../langchain/chains/rl_chain/metrics.py | 5 +- .../chains/rl_chain/model_repository.py | 7 +- .../chains/rl_chain/pick_best_chain.py | 88 ++++++++--------- .../langchain/chains/rl_chain/vw_logger.py | 4 +- .../rl_chain/test_pick_best_chain_call.py | 6 +- .../rl_chain/test_pick_best_text_embedder.py | 8 +- 8 files changed, 135 insertions(+), 107 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index d485c5d506..e71de1da6c 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -1,16 +1,16 @@ -from langchain.chains.rl_chain.pick_best_chain import PickBest +import logging + from langchain.chains.rl_chain.base import ( - Embed, - BasedOn, - ToSelectFrom, - SelectionScorer, AutoSelectionScorer, + BasedOn, + Embed, Embedder, Policy, + SelectionScorer, + ToSelectFrom, VwPolicy, ) - -import logging +from langchain.chains.rl_chain.pick_best_chain import PickBest def configure_logger(): @@ -26,3 +26,15 @@ def configure_logger(): configure_logger() + +__all__ = [ + "PickBest", + "Embed", + "BasedOn", + "ToSelectFrom", + "SelectionScorer", + "AutoSelectionScorer", + "Embedder", + "Policy", + "VwPolicy", +] diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 2d0a103679..437053f2dc 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -3,7 +3,7 @@ from __future__ import annotations import logging import os from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -19,6 +19,9 @@ from langchain.prompts import ( ) from langchain.pydantic_v1 import BaseModel, Extra, root_validator +if TYPE_CHECKING: + import vowpal_wabbit_next as vw + logger = logging.getLogger(__name__) @@ -85,8 +88,6 @@ def EmbedAndKeep(anything): def parse_lines(parser: "vw.TextFormatParser", input_str: str) -> List["vw.Example"]: - import vowpal_wabbit_next as vw - return [parser.parse_line(line) for line in input_str.split("\n")] @@ -113,8 +114,11 @@ def get_based_on_and_to_select_from(inputs: Dict[str, Any]): def prepare_inputs_for_autoembed(inputs: Dict[str, Any]): - # go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if - # their inner values are not already _Embed, then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status + """ + go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if their inner values are not already _Embed, + then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status + """ # noqa: E501 + next_inputs = inputs.copy() for k, v in next_inputs.items(): if isinstance(v, _ToSelectFrom) or isinstance(v, _BasedOn): @@ -219,13 +223,18 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): @staticmethod def get_default_system_prompt() -> SystemMessagePromptTemplate: 
return SystemMessagePromptTemplate.from_template( - "PLEASE RESPOND ONLY WITH A SINGLE FLOAT AND NO OTHER TEXT EXPLANATION\n You are a strict judge that is called on to rank a response based on given criteria.\ - You must respond with your ranking by providing a single float within the range [0, 1], 0 being very bad response and 1 being very good response." + "PLEASE RESPOND ONLY WITH A SINGLE FLOAT AND NO OTHER TEXT EXPLANATION\n \ + You are a strict judge that is called on to rank a response based on \ + given criteria. You must respond with your ranking by providing a \ + single float within the range [0, 1], 0 being very bad \ + response and 1 being very good response." ) @staticmethod def get_default_prompt() -> ChatPromptTemplate: - human_template = 'Given this based_on "{rl_chain_selected_based_on}" as the most important attribute, rank how good or bad this text is: "{llm_response}".' + human_template = 'Given this based_on "{rl_chain_selected_based_on}" \ + as the most important attribute, rank how good or bad this text is: \ + "{llm_response}".' human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) default_system_prompt = AutoSelectionScorer.get_default_system_prompt() chat_prompt = ChatPromptTemplate.from_messages( @@ -260,25 +269,36 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): return resp except Exception as e: raise RuntimeError( - f"The llm did not manage to rank the response as expected, there is always the option to try again or tweak the reward prompt. Error: {e}" + f"The auto selection scorer did not manage to score the response, \ + there is always the option to try again or tweak the reward prompt.\ + Error: {e}" ) class RLChain(Chain): """ - RLChain class that utilizes the Vowpal Wabbit (VW) model for personalization. + The `RLChain` class leverages the Vowpal Wabbit (VW) model as a learned policy for reinforcement learning. Attributes: - model_loading (bool, optional): If set to True, the chain will attempt to load an existing VW model from the latest checkpoint file in the {model_save_dir} directory (current directory if none specified). If set to False, it will start training from scratch, potentially overwriting existing files. Defaults to True. - large_action_spaces (bool, optional): If set to True and vw_cmd has not been specified in the constructor, it will enable large action spaces - vw_cmd (List[str], optional): Advanced users can set the VW command line to whatever they want, as long as it is compatible with the Type that is specified (Type Enum) - model_save_dir (str, optional): The directory to save the VW model to. Defaults to the current directory. - selection_scorer (SelectionScorer): If set, the chain will check the response using the provided selection_scorer and the VW model will be updated with the result. Defaults to None. + - llm_chain (Chain): Represents the underlying Language Model chain. + - prompt (BasePromptTemplate): The template for the base prompt. + - selection_scorer (Union[SelectionScorer, None]): Scorer for the selection. Can be set to None. + - policy (Optional[Policy]): The policy used by the chain to learn to populate a dynamic prompt. + - auto_embed (bool): Determines if embedding should be automatic. Default is True. + - metrics (Optional[MetricsTracker]): Tracker for metrics, can be set to None. + + Initialization Attributes: + - feature_embedder (Embedder): Embedder used for the `BasedOn` and `ToSelectFrom` inputs. + - model_save_dir (str, optional): Directory for saving the VW model. 
Default is the current directory. + - reset_model (bool): If set to True, the model starts training from scratch. Default is False. + - vw_cmd (List[str], optional): Command line arguments for the VW model. + - policy (VwPolicy): Policy used by the chain. + - vw_logs (Optional[Union[str, os.PathLike]]): Path for the VW logs. + - metrics_step (int): Step for the metrics tracker. Default is -1. Notes: - The class creates a VW model instance using the provided arguments. Before the chain object is destroyed the save_progress() function can be called. If it is called, the learned VW model is saved to a file in the current directory named `model-.vw`. Checkpoints start at 1 and increment monotonically. - When making predictions, VW is first called to choose action(s) which are then passed into the prompt with the key `{actions}`. After action selection, the LLM (Language Model) is called with the prompt populated by the chosen action(s), and the response is returned. - """ + The class initializes the VW model using the provided arguments. If `selection_scorer` is not provided, a warning is logged, indicating that no reinforcement learning will occur unless the `update_with_delayed_score` method is called. + """ # noqa: E501 llm_chain: Chain @@ -306,7 +326,9 @@ class RLChain(Chain): super().__init__(*args, **kwargs) if self.selection_scorer is None: logger.warning( - "No response validator provided, which means that no reinforcement learning will be done in the RL chain unless update_with_delayed_score is called." + "No selection scorer provided, which means that no \ + reinforcement learning will be done in the RL chain \ + unless update_with_delayed_score is called." ) self.policy = policy( model_repo=ModelRepository( @@ -346,7 +368,9 @@ class RLChain(Chain): or self.selected_based_on_input_key in inputs.keys() ): raise ValueError( - f"The rl chain does not accept '{self.selected_input_key}' or '{self.selected_based_on_input_key}' as input keys, they are reserved for internal use during auto reward." + f"The rl chain does not accept '{self.selected_input_key}' \ + or '{self.selected_based_on_input_key}' as input keys, \ + they are reserved for internal use during auto reward." ) @abstractmethod @@ -375,13 +399,13 @@ class RLChain(Chain): self, score: float, event: Event, force_score=False ) -> None: """ - Learn will be called with the score specified and the actions/embeddings/etc stored in event - + Updates the learned policy with the score provided. Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call - """ + """ # noqa: E501 if self.selection_scorer and not force_score: raise RuntimeError( - "The selection scorer is set, and force_score was not set to True. Please set force_score=True to use this function." + "The selection scorer is set, and force_score was not set to True. \ + Please set force_score=True to use this function." ) self.metrics.on_feedback(score) self._call_after_scoring_before_learning(event=event, score=score) @@ -390,10 +414,7 @@ class RLChain(Chain): def set_auto_embed(self, auto_embed: bool) -> None: """ - Set whether the chain should auto embed the inputs or not. If set to False, the inputs will not be embedded and the user will need to embed the inputs themselves before calling run. - - Args: - auto_embed (bool): Whether the chain should auto embed the inputs or not. + Sets whether the chain should auto embed the inputs or not. 
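
        A minimal illustrative sketch (assuming `chain` is an already-constructed
        chain and that `BasedOn`, `Embed`, and `ToSelectFrom` are the wrappers
        defined in this module; the action strings are placeholders):

            chain.set_auto_embed(False)
            # with auto-embedding off, the caller opts in per input
            chain.run(
                user=BasedOn(Embed("Tom")),
                action=ToSelectFrom(["action1", "action2"]),
            )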
""" self.auto_embed = auto_embed @@ -438,7 +459,8 @@ class RLChain(Chain): ) except Exception as e: logger.info( - f"The LLM was not able to rank and the chain was not able to adjust to this response, error: {e}" + f"The selection scorer was not able to score, \ + and the chain was not able to adjust to this response, error: {e}" ) self.metrics.on_feedback(score) event = self._call_after_scoring_before_learning(score=score, event=event) @@ -449,16 +471,7 @@ class RLChain(Chain): def save_progress(self) -> None: """ - This function should be called whenever there is a need to save the progress of the VW (Vowpal Wabbit) model within the chain. It saves the current state of the VW model to a file. - - File Naming Convention: - The file will be named using the pattern `model-.vw`, where `` is a monotonically increasing number. The numbering starts from 1, and increments by 1 for each subsequent save. If there are already saved checkpoints, the number used for `` will be the next in the sequence. - - Example: - If there are already two saved checkpoints, `model-1.vw` and `model-2.vw`, the next time this function is called, it will save the model as `model-3.vw`. - - Note: - Be cautious when deleting or renaming checkpoint files manually, as this could cause the function to reuse checkpoint numbers. + This function should be called to save the state of the Vowpal Wabbit model. """ self.policy.save() @@ -493,7 +506,8 @@ def embed_string_type( if namespace is None: raise ValueError( - "The default namespace must be provided when embedding a string or _Embed object." + "The default namespace must be \ + provided when embedding a string or _Embed object." ) return {namespace: keep_str + join_char.join(map(str, encoded))} @@ -533,7 +547,7 @@ def embed( namespace: Optional[str] = None, ) -> List[Dict[str, Union[str, List[str]]]]: """ - Embeds the actions or context using the SentenceTransformer model + Embeds the actions or context using the SentenceTransformer model (or a model that has an `encode` function) Attributes: to_embed: (Union[Union(str, _Embed(str)), Dict, List[Union(str, _Embed(str))], List[Dict]], required) The text to be embedded, either a string, a list of strings or a dictionary or a list of dictionaries. 
@@ -541,7 +555,7 @@ def embed( model: (Any, required) The model to use for embedding Returns: List[Dict[str, str]]: A list of dictionaries where each dictionary has the namespace as the key and the embedded string as the value - """ + """ # noqa: E501 if (isinstance(to_embed, _Embed) and isinstance(to_embed.value, str)) or isinstance( to_embed, str ): diff --git a/libs/langchain/langchain/chains/rl_chain/metrics.py b/libs/langchain/langchain/chains/rl_chain/metrics.py index 973b778911..b7ec949c9e 100644 --- a/libs/langchain/langchain/chains/rl_chain/metrics.py +++ b/libs/langchain/langchain/chains/rl_chain/metrics.py @@ -1,4 +1,7 @@ -from typing import Optional +from typing import TYPE_CHECKING, Optional + +if TYPE_CHECKING: + import pandas as pd class MetricsTracker: diff --git a/libs/langchain/langchain/chains/rl_chain/model_repository.py b/libs/langchain/langchain/chains/rl_chain/model_repository.py index 992fca4518..eea866d1cf 100644 --- a/libs/langchain/langchain/chains/rl_chain/model_repository.py +++ b/libs/langchain/langchain/chains/rl_chain/model_repository.py @@ -4,7 +4,10 @@ import logging import os import shutil from pathlib import Path -from typing import Sequence, Union +from typing import TYPE_CHECKING, Sequence, Union + +if TYPE_CHECKING: + import vowpal_wabbit_next as vw logger = logging.getLogger(__name__) @@ -35,8 +38,6 @@ class ModelRepository: return len(glob.glob(str(self.folder / "model-????????-??????.vw"))) > 0 def save(self, workspace: "vw.Workspace") -> None: - import vowpal_wabbit_next as vw - with open(self.model_path, "wb") as f: logger.info(f"storing rl_chain model in: {self.model_path}") f.write(workspace.serialize()) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 3df1d7f9d9..6e1a1a5eff 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -12,17 +12,18 @@ from langchain.prompts import BasePromptTemplate logger = logging.getLogger(__name__) -# sentinel object used to distinguish between user didn't supply anything or user explicitly supplied None +# sentinel object used to distinguish between +# user didn't supply anything or user explicitly supplied None SENTINEL = object() class PickBestFeatureEmbedder(base.Embedder): """ - Contextual Bandit Text Embedder class that embeds the based_on and to_select_from into a format that can be used by VW + Text Embedder class that embeds the `BasedOn` and `ToSelectFrom` inputs into a format that can be used by the learning policy Attributes: model name (Any, optional): The type of embeddings to be used for feature representation. Defaults to BERT SentenceTransformer. 
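
To make the embedding behavior described above concrete, here is an illustrative sketch (not part of this patch): `TinyEncoder` is a made-up stand-in for any object exposing an `encode` method, the same contract the tests' `MockEncoder` satisfies, and the commented outputs mirror the expectations in this PR's unit tests.

    from langchain.chains.rl_chain import base

    class TinyEncoder:
        # hypothetical stand-in: a real SentenceTransformer-style model
        # would return an embedding vector from encode()
        def encode(self, to_encode: str):
            return [0.5, 1.5]

    # plain strings pass through unchanged under the given namespace
    base.embed("test", TinyEncoder(), "ns")
    # -> [{'ns': 'test'}]
    # Embed() replaces the raw text with its space-joined encoding
    base.embed(base.Embed("test"), TinyEncoder(), "ns")
    # -> [{'ns': '0.5 1.5'}]
    # EmbedAndKeep() keeps the raw text and appends the encoding
    base.embed(base.EmbedAndKeep("test"), TinyEncoder(), "ns")
    # -> [{'ns': 'test 0.5 1.5'}]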
- """ + """ # noqa E501 def __init__(self, model: Optional[Any] = None, *args, **kwargs): super().__init__(*args, **kwargs) @@ -36,7 +37,7 @@ class PickBestFeatureEmbedder(base.Embedder): def format(self, event: PickBest.Event) -> str: """ - Converts the based_on and to_select_from into a format that can be used by VW + Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW """ cost = None @@ -68,14 +69,20 @@ class PickBestFeatureEmbedder(base.Embedder): example_string += "shared " for context_item in context_emb: for ns, based_on in context_item.items(): - example_string += f"|{ns} {' '.join(based_on) if isinstance(based_on, list) else based_on} " + e = " ".join(based_on) if isinstance(based_on, list) else based_on + example_string += f"|{ns} {e} " example_string += "\n" for i, action in enumerate(action_embs): if cost is not None and chosen_action == i: example_string += f"{chosen_action}:{cost}:{prob} " for ns, action_embedding in action.items(): - example_string += f"|{ns} {' '.join(action_embedding) if isinstance(action_embedding, list) else action_embedding} " + e = ( + " ".join(action_embedding) + if isinstance(action_embedding, list) + else action_embedding + ) + example_string += f"|{ns} {e} " example_string += "\n" # Strip the last newline return example_string[:-1] @@ -83,33 +90,31 @@ class PickBestFeatureEmbedder(base.Embedder): class PickBest(base.RLChain): """ - PickBest class that utilizes the Vowpal Wabbit (VW) model for personalization. - - The Chain is initialized with a set of potential to_select_from. For each call to the Chain, a specific action will be chosen based on an input based_on. - This chosen action is then passed to the prompt that will be utilized in the subsequent call to the LLM (Language Model). - - The flow of this chain is: - - Chain is initialized - - Chain is called input containing the based_on and the List of potential to_select_from - - Chain chooses an action based on the based_on - - Chain calls the LLM with the chosen action - - LLM returns a response - - If the selection_scorer is specified, the response is checked against the selection_scorer - - The internal model will be updated with the based_on, action, and reward of the response (how good or bad the response was) - - The response is returned - - input dictionary expects: - - at least one variable wrapped in BasedOn which will be the based_on to use for personalization - - one variable of a list wrapped in ToSelectFrom which will be the list of to_select_from for the Vowpal Wabbit model to choose from. - This list can either be a List of str's or a List of Dict's. - - Actions provided as a list of strings e.g. to_select_from = ["action1", "action2", "action3"] - - If to_select_from are provided as a list of dictionaries, each action should be a dictionary where the keys are namespace names and the values are the corresponding action strings e.g. to_select_from = [{"namespace1": "action1", "namespace2": "action2"}, {"namespace1": "action3", "namespace2": "action4"}] + `PickBest` is a class designed to leverage the Vowpal Wabbit (VW) model for reinforcement learning with a context, with the goal of modifying the prompt before the LLM call. + + Each invocation of the chain's `run()` method should be equipped with a set of potential actions (`ToSelectFrom`) and will result in the selection of a specific action based on the `BasedOn` input. This chosen action then informs the LLM (Language Model) prompt for the subsequent response generation. 
+ + The standard operation flow of this Chain includes: + 1. The Chain is invoked with inputs containing the `BasedOn` criteria and a list of potential actions (`ToSelectFrom`). + 2. An action is selected based on the `BasedOn` input. + 3. The LLM is called with the dynamic prompt, producing a response. + 4. If a `selection_scorer` is provided, it is used to score the selection. + 5. The internal Vowpal Wabbit model is updated with the `BasedOn` input, the chosen `ToSelectFrom` action, and the resulting score from the scorer. + 6. The final response is returned. + + Expected input dictionary format: + - At least one variable encapsulated within `BasedOn` to serve as the selection criteria. + - A single list variable within `ToSelectFrom`, representing potential actions for the VW model. This list can take the form of: + - A list of strings, e.g., `action = ToSelectFrom(["action1", "action2", "action3"])` + - A list of lists of strings, e.g., `action = ToSelectFrom([["action1", "another identifier of action1"], ["action2", "another identifier of action2"]])` + - A list of dictionaries, where each dictionary represents an action with namespace names as keys and corresponding action strings as values. For instance, `action = ToSelectFrom([{"namespace1": ["action1", "another identifier of action1"], "namespace2": "action2"}, {"namespace1": "action3", "namespace2": "action4"}])`. + Extends: RLChain Attributes: feature_embedder (PickBestFeatureEmbedder, optional): Is an advanced attribute. Responsible for embedding the `BasedOn` and `ToSelectFrom` inputs. If omitted, a default embedder is utilized. """ # noqa E501 class Selected(base.Selected): index: Optional[int] @@ -169,17 +174,23 @@ class PickBest(base.RLChain): context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( - "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." + "No variables using 'ToSelectFrom' found in the inputs. \ Please include at least one variable containing \ a list to select from." ) if len(list(actions.values())) > 1: raise ValueError( - "Only one variable using 'ToSelectFrom' can be provided in the inputs for the PickBest chain. Please provide only one variable containing a list to select from." + "Only one variable using 'ToSelectFrom' can be provided in the inputs \ for the PickBest chain. Please provide only one variable \ containing a list to select from." ) if not context: raise ValueError( - "No variables using 'BasedOn' found in the inputs. Please include at least one variable containing information to base the selected of ToSelectFrom on." + "No variables using 'BasedOn' found in the inputs. \ Please include at least one variable containing information \ to base the selection of ToSelectFrom on." ) event = PickBest.Event(inputs=inputs, to_select_from=actions, based_on=context) @@ -231,19 +242,6 @@ class PickBest(base.RLChain): inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, Any]: - """ - When chain.run() is called with the given inputs, this function is called.
It is responsible for calling the VW model to choose an action (ToSelectFrom) based on the (BasedOn) based_on, and then calling the LLM (Language Model) with the chosen action to generate a response. - - Attributes: - inputs: (Dict, required) The inputs to the chain. The inputs must contain a input variables that are wrapped in BasedOn and ToSelectFrom. BasedOn is the based_on that will be used for selecting an ToSelectFrom action that will be passed to the LLM prompt. - run_manager: (CallbackManagerForChainRun, optional) The callback manager to use for this run. If not provided, a default callback manager is used. - - Returns: - A dictionary containing: - - `response`: The response generated by the LLM (Language Model). - - `selection_metadata`: A Event object containing all the information needed to learn the reward for the chosen action at a later point. If an automatic selection_scorer is not provided, then this object can be used at a later point with the `update_with_delayed_score()` function to learn the delayed reward and update the Vowpal Wabbit model. - - the `score` in the `selection_metadata` object is set to None if an automatic selection_scorer is not provided or if the selection_scorer failed (e.g. LLM timeout or LLM failed to rank correctly). - """ return super()._call(run_manager=run_manager, inputs=inputs) @property diff --git a/libs/langchain/langchain/chains/rl_chain/vw_logger.py b/libs/langchain/langchain/chains/rl_chain/vw_logger.py index 0d4cce2144..4fa4717539 100644 --- a/libs/langchain/langchain/chains/rl_chain/vw_logger.py +++ b/libs/langchain/langchain/chains/rl_chain/vw_logger.py @@ -1,6 +1,6 @@ -from typing import Union, Optional -from pathlib import Path from os import PathLike +from pathlib import Path +from typing import Optional, Union class VwLogger: diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 0a5ba9dd31..1d8045e7c9 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -111,7 +111,7 @@ def test_update_with_delayed_score(): ) assert response["response"] == "hey" selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == None + assert selection_metadata.selected.score is None chain.update_with_delayed_score(event=selection_metadata, score=100) assert selection_metadata.selected.score == 100.0 @@ -157,7 +157,7 @@ def test_default_embeddings(): ctx_str_2 = "context2" encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + encoded_text + " ".join(char for char in ctx_str_2) expected = f"""shared |User {ctx_str_1 + " " + encoded_ctx_str_1} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {str3 + " " + encoded_str3} """ @@ -261,7 +261,7 @@ def test_explicitly_no_scorer(): # chain llm used for both basic prompt and for scoring assert response["response"] == "hey" selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == None + assert selection_metadata.selected.score is None @pytest.mark.requires("vowpal_wabbit_next") diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 
501700a69a..2eda4b4488 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -235,12 +235,12 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): str2 = "1" str3 = "2" encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_text + " ".join(char for char in str2) encoded_str3 = encoded_text + " ".join(char for char in str3) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_text + " ".join(char for char in ctx_str_1) encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) named_actions = { @@ -269,12 +269,12 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ str2 = "1" str3 = "2" encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) + encoded_text + " ".join(char for char in str2) encoded_str3 = encoded_text + " ".join(char for char in str3) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_text + " ".join(char for char in ctx_str_1) encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) named_actions = { From e276ae26160693dd6a1ce14b353453c7e9938a2b Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 07:12:39 -0400 Subject: [PATCH 09/65] linting and formatting --- .../rl_chain/test_pick_best_chain_call.py | 6 +++--- .../rl_chain/test_pick_best_text_embedder.py | 20 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 1d8045e7c9..e42818ea8c 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -159,7 +159,7 @@ def test_default_embeddings(): encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) encoded_text + " ".join(char for char in ctx_str_2) - expected = f"""shared |User {ctx_str_1 + " " + encoded_ctx_str_1} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {str3 + " " + encoded_str3} """ + expected = f"""shared |User {ctx_str_1 + " " + encoded_ctx_str_1} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {str3 + " " + encoded_str3} """ # noqa actions = [str1, str2, str3] @@ -185,7 +185,7 @@ def test_default_embeddings_off(): str3 = "2" ctx_str_1 = "context1" - expected = f"""shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} """ + expected = f"""shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} """ # noqa actions = [str1, str2, str3] @@ -219,7 +219,7 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings(): encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) - expected = f"""shared |User {encoded_ctx_str_1} |User2 {ctx_str_2 + " " + encoded_ctx_str_2} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {encoded_str3} """ + expected = f"""shared |User {encoded_ctx_str_1} 
|User2 {ctx_str_2 + " " + encoded_ctx_str_2} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {encoded_str3} """ # noqa actions = [str1, str2, rl_chain.Embed(str3)] diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 2eda4b4488..d8ea85c6eb 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -89,7 +89,7 @@ def test_pickbest_textembedder_w_full_label_w_emb(): named_actions = {"action1": rl_chain.Embed([str1, str2, str3])} context = {"context": rl_chain.Embed(ctx_str_1)} - expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ + expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected @@ -113,7 +113,7 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): named_actions = {"action1": rl_chain.EmbedAndKeep([str1, str2, str3])} context = {"context": rl_chain.EmbedAndKeep(ctx_str_1)} - expected = f"""shared |context {ctx_str_1 + " " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ + expected = f"""shared |context {ctx_str_1 + " " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected @@ -127,7 +127,7 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} - expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ + expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) @@ -140,7 +140,7 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} - expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ + expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected @@ -154,7 +154,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): feature_embedder = 
pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} - expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ + expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context, selected=selected @@ -184,7 +184,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): "context1": rl_chain.Embed(ctx_str_1), "context2": rl_chain.Embed(ctx_str_2), } - expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ + expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBest.Event( @@ -217,7 +217,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee "context1": rl_chain.EmbedAndKeep(ctx_str_1), "context2": rl_chain.EmbedAndKeep(ctx_str_2), } - expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ + expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBest.Event( @@ -251,7 +251,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): ] } context = {"context1": ctx_str_1, "context2": rl_chain.Embed(ctx_str_2)} - expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ + expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = pick_best_chain.PickBest.Event( @@ -288,7 +288,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ "context1": ctx_str_1, "context2": rl_chain.EmbedAndKeep(ctx_str_2), } - expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ + expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) event = 
pick_best_chain.PickBest.Event( @@ -334,7 +334,7 @@ def test_raw_features_underscored(): # Embeddings and raw features named_actions = {"action": rl_chain.EmbedAndKeep([str1])} context = {"context": rl_chain.EmbedAndKeep(ctx_str)} - expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ + expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ # noqa: E501 event = pick_best_chain.PickBest.Event( inputs={}, to_select_from=named_actions, based_on=context ) From 44badd07077879d092fdd53ae29fc8fc8f418756 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 07:19:56 -0400 Subject: [PATCH 10/65] add dependency requirements to test file --- .../langchain/chains/rl_chain/base.py | 2 +- .../rl_chain/test_pick_best_chain_call.py | 30 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 437053f2dc..28baf898d2 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -471,7 +471,7 @@ class RLChain(Chain): def save_progress(self) -> None: """ - This function should be called to save the state of the Vowpal Wabbit model. + This function should be called to save the state of the learned policy model. """ self.policy.save() diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index e42818ea8c..3fad1667d9 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -9,7 +9,7 @@ from langchain.prompts.prompt import PromptTemplate encoded_text = "[ e n c o d e d ] " -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def setup(): _PROMPT_TEMPLATE = """This is a dummy prompt that will be ignored by the fake llm""" PROMPT = PromptTemplate(input_variables=[], template=_PROMPT_TEMPLATE) @@ -18,7 +18,7 @@ def setup(): return llm, PROMPT -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_multiple_ToSelectFrom_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) @@ -31,7 +31,7 @@ def test_multiple_ToSelectFrom_throws(): ) -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_missing_basedOn_from_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) @@ -40,7 +40,7 @@ def test_missing_basedOn_from_throws(): chain.run(action=rl_chain.ToSelectFrom(actions)) -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_ToSelectFrom_not_a_list_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) @@ -52,7 +52,7 @@ def test_ToSelectFrom_not_a_list_throws(): ) -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_update_with_delayed_score_with_auto_validator_throws(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will 
return that @@ -74,7 +74,7 @@ def test_update_with_delayed_score_with_auto_validator_throws(): chain.update_with_delayed_score(event=selection_metadata, score=100) -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_update_with_delayed_score_force(): llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that @@ -98,7 +98,7 @@ def test_update_with_delayed_score_force(): assert selection_metadata.selected.score == 100.0 -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_update_with_delayed_score(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( @@ -116,7 +116,7 @@ def test_update_with_delayed_score(): assert selection_metadata.selected.score == 100.0 -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_user_defined_scorer(): llm, PROMPT = setup() @@ -138,7 +138,7 @@ def test_user_defined_scorer(): assert selection_metadata.selected.score == 200.0 -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_embeddings(): llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -172,7 +172,7 @@ def test_default_embeddings(): assert vw_str == expected -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_embeddings_off(): llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -198,7 +198,7 @@ def test_default_embeddings_off(): assert vw_str == expected -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_embeddings_mixed_w_explicit_user_embeddings(): llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) @@ -233,7 +233,7 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings(): assert vw_str == expected -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_no_scorer_specified(): _, PROMPT = setup() chain_llm = FakeListChatModel(responses=[100]) @@ -248,7 +248,7 @@ def test_default_no_scorer_specified(): assert selection_metadata.selected.score == 100.0 -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_explicitly_no_scorer(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( @@ -264,7 +264,7 @@ def test_explicitly_no_scorer(): assert selection_metadata.selected.score is None -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_auto_scorer_with_user_defined_llm(): llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=[300]) @@ -283,7 +283,7 @@ def test_auto_scorer_with_user_defined_llm(): assert selection_metadata.selected.score == 300.0 -@pytest.mark.requires("vowpal_wabbit_next") +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_calling_chain_w_reserved_inputs_throws(): llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) From c9e9c0eeae8c80dcf5babeca8b9223c8c0079810 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 18 Aug 2023 07:56:20 -0400 
Subject: [PATCH 11/65] add sentence transformers to extended test deps --- libs/langchain/poetry.lock | 4 ++-- libs/langchain/pyproject.toml | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 84d8763646..cff786972e 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -10073,7 +10073,7 @@ clarifai = ["clarifai"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -extended-testing = ["amazon-textract-caller", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "streamlit", "sympy", "telethon", "tqdm", "vowpal-wabbit-next", "xata", "xmltodict"] +extended-testing = ["amazon-textract-caller", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "sentence-transformers", "streamlit", "sympy", "telethon", "tqdm", "vowpal-wabbit-next", "xata", "xmltodict"] javascript = ["esprima"] llms = ["clarifai", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"] openai = ["openai", "tiktoken"] @@ -10083,4 +10083,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "eba7c01296c1948ab432ca2bc70e274e79a4135d66b4c189d6bb95b5e0c41198" +content-hash = "706edba1e67116864e3245fe5851902ce987283b4f143fd9e15f094ecad87deb" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 3d48a55968..f60ef26af9 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -338,7 +338,8 @@ extended_testing = [ "xmltodict", "faiss-cpu", "openapi-schema-pydantic", - "vowpal-wabbit-next" + "vowpal-wabbit-next", + "sentence-transformers" ] [tool.ruff] From 7725192a0da202c3a59ef986dd436190b5892f08 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 28 Aug 2023 04:58:55 -0400 Subject: [PATCH 12/65] update deps for vw --- libs/langchain/poetry.lock | 32 ++++++++++++++++++++++++++++++-- libs/langchain/pyproject.toml | 2 +- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index e21acf426b..c003e52f28 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -3542,6 +3542,7 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -9765,6 +9766,33 @@ urllib3 = {version = "<2", markers = "python_version < \"3.10\""} wrapt = "*" yarl = "*" +[[package]] +name = 
"vowpal-wabbit-next" +version = "0.6.0" +description = "Experimental python bindings for VowpalWabbit" +optional = true +python-versions = ">=3.7" +files = [ + {file = "vowpal-wabbit-next-0.6.0.tar.gz", hash = "sha256:f0381614d99fac6a0f52e995ee0bfc7b681054f397bea7ff08b8a523d5315a54"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:cfbb831cfe9eb81185aff7cdca437ae17c6d9aca8d74e26c326e3ef4ee8e81e7"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d31829778f9c600f5c121f614516ca1bc9ede5d1bc77b1eb3b59b32d9138db9"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:714347606ab302a2f72870b6ae6dce58de4bec1b489f4bd65d80a8e326e1db8a"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-macosx_10_13_universal2.whl", hash = "sha256:3a8482d5c0b9357fdb36b62d659e6b74e93aeab165b910292572a98e91d7a014"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e4349099b938102f51fb6fedf035bc1deacb2971cd2a48641ca7d45186efda0"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:c8f58cdc49f270b1bed6f0fdd7520c8ba1b328de5cd8a2760c0ec70a630de92e"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b7052ce7212fd1cae8ffd966e240c814f3c1df08fd612437d48f0f23e7694c"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d24d9c380d0e9b41151337c7f9e2a33ec5bfd738fdee9f65c1a40e486234aca3"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-macosx_10_13_universal2.whl", hash = "sha256:0d77a8c55249ec9a7f404939ecc6948db0527e522e8a7ae149ec7cd29b3ade04"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa2f52f1267fbc26c7757335f9c76a0f00b112971e04c85b8a9bc9e82300597"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d04f91200ecae73196d9f5601853d63afce8c1c8a0d310a608e8ddfa3b190cb"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-macosx_10_13_universal2.whl", hash = "sha256:2df4a652729c0db34afd8fb4fc49b0090d6f061e2d49899e5f092fd4c3d23253"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c289a260ab759f04903b441701cff66ea74d6c061d966caaba0c65ac12d05528"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d022cab07274f227df159a81bccf034def7dd54ad70392ee98743ffa4953072"}, +] + +[package.dependencies] +numpy = "*" + [[package]] name = "watchdog" version = "3.0.0" @@ -10437,7 +10465,7 @@ clarifai = ["clarifai"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -extended-testing = ["amazon-textract-caller", "assemblyai", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "markdownify", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "streamlit", "sympy", "telethon", "tqdm", "xata", "xmltodict"] +extended-testing = ["amazon-textract-caller", "assemblyai", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "faiss-cpu", "feedparser", "geopandas", 
"gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "markdownify", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "sentence-transformers", "streamlit", "sympy", "telethon", "tqdm", "vowpal-wabbit-next", "xata", "xmltodict"] javascript = ["esprima"] llms = ["clarifai", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"] openai = ["openai", "tiktoken"] @@ -10447,4 +10475,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "43a6bd42efc0baf917418087f788aaf3b1bc793cb4aa81de99c52ed6a7d54d26" +content-hash = "54927d0a4c82ca2038d109f0415f56b0d4b1dd6dbdc35a4eab2a2c6cd0a4c443" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 1e15dcd7c3..c05ad28773 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -342,7 +342,7 @@ extended_testing = [ "faiss-cpu", "openapi-schema-pydantic", "vowpal-wabbit-next", - "sentence-transformers" + "sentence-transformers", "markdownify", ] From 6a1102d4c094b48afe43818a079f5758dc186a98 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 28 Aug 2023 06:58:33 -0400 Subject: [PATCH 13/65] mypy fixes and formatting --- .../langchain/chains/rl_chain/base.py | 148 ++++++++++-------- .../langchain/chains/rl_chain/metrics.py | 12 +- .../chains/rl_chain/model_repository.py | 6 +- .../chains/rl_chain/pick_best_chain.py | 22 +-- .../langchain/chains/rl_chain/vw_logger.py | 4 +- .../unit_tests/chains/rl_chain/test_utils.py | 2 +- 6 files changed, 107 insertions(+), 87 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 28baf898d2..d97dd255af 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -3,7 +3,18 @@ from __future__ import annotations import logging import os from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain @@ -26,47 +37,47 @@ logger = logging.getLogger(__name__) class _BasedOn: - def __init__(self, value): + def __init__(self, value: Any): self.value = value - def __str__(self): + def __str__(self) -> str: return str(self.value) __repr__ = __str__ -def BasedOn(anything): +def BasedOn(anything: Any) -> _BasedOn: return _BasedOn(anything) class _ToSelectFrom: - def __init__(self, value): + def __init__(self, value: Any): self.value = value - def __str__(self): + def __str__(self) -> str: return str(self.value) __repr__ = __str__ -def ToSelectFrom(anything): +def ToSelectFrom(anything: Any) -> _ToSelectFrom: if not isinstance(anything, list): raise ValueError("ToSelectFrom must be a list to select from") return _ToSelectFrom(anything) class _Embed: - def __init__(self, value, keep=False): + def __init__(self, value: Any, keep: bool = False): self.value = value self.keep = keep - def __str__(self): + def __str__(self) -> str: return str(self.value) __repr__ = __str__ -def Embed(anything, keep=False): +def Embed(anything: Any, keep: bool = False) -> Any: if 
isinstance(anything, _ToSelectFrom): return ToSelectFrom(Embed(anything.value, keep=keep)) elif isinstance(anything, _BasedOn): @@ -80,7 +91,7 @@ def Embed(anything, keep=False): return _Embed(anything, keep=keep) -def EmbedAndKeep(anything): +def EmbedAndKeep(anything: Any) -> Any: return Embed(anything, keep=True) @@ -91,7 +102,7 @@ def parse_lines(parser: "vw.TextFormatParser", input_str: str) -> List["vw.Examp return [parser.parse_line(line) for line in input_str.split("\n")] -def get_based_on_and_to_select_from(inputs: Dict[str, Any]): +def get_based_on_and_to_select_from(inputs: Dict[str, Any]) -> Tuple[Dict, Dict]: to_select_from = { k: inputs[k].value for k in inputs.keys() @@ -113,7 +124,7 @@ def get_based_on_and_to_select_from(inputs: Dict[str, Any]): return based_on, to_select_from -def prepare_inputs_for_autoembed(inputs: Dict[str, Any]): +def prepare_inputs_for_autoembed(inputs: Dict[str, Any]) -> Dict[str, Any]: """ go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if their inner values are not already _Embed, then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status @@ -134,29 +145,35 @@ class Selected(ABC): pass -class Event(ABC): +TSelected = TypeVar("TSelected", bound=Selected) + + +class Event(Generic[TSelected], ABC): inputs: Dict[str, Any] - selected: Optional[Selected] + selected: Optional[TSelected] - def __init__(self, inputs: Dict[str, Any], selected: Optional[Selected] = None): + def __init__(self, inputs: Dict[str, Any], selected: Optional[TSelected] = None): self.inputs = inputs self.selected = selected +TEvent = TypeVar("TEvent", bound=Event) + + class Policy(ABC): @abstractmethod - def predict(self, event: Event) -> Any: - pass + def predict(self, event: TEvent) -> Any: + ... @abstractmethod - def learn(self, event: Event): - pass + def learn(self, event: TEvent) -> None: + ... @abstractmethod - def log(self, event: Event): - pass + def log(self, event: TEvent) -> None: + ... - def save(self): + def save(self) -> None: pass @@ -164,11 +181,11 @@ class VwPolicy(Policy): def __init__( self, model_repo: ModelRepository, - vw_cmd: Sequence[str], + vw_cmd: List[str], feature_embedder: Embedder, vw_logger: VwLogger, - *args, - **kwargs, + *args: Any, + **kwargs: Any, ): super().__init__(*args, **kwargs) self.model_repo = model_repo @@ -176,7 +193,7 @@ class VwPolicy(Policy): self.feature_embedder = feature_embedder self.vw_logger = vw_logger - def predict(self, event: Event) -> Any: + def predict(self, event: TEvent) -> Any: import vowpal_wabbit_next as vw text_parser = vw.TextFormatParser(self.workspace) @@ -184,7 +201,7 @@ class VwPolicy(Policy): parse_lines(text_parser, self.feature_embedder.format(event)) ) - def learn(self, event: Event): + def learn(self, event: TEvent) -> None: import vowpal_wabbit_next as vw vw_ex = self.feature_embedder.format(event) @@ -192,19 +209,19 @@ class VwPolicy(Policy): multi_ex = parse_lines(text_parser, vw_ex) self.workspace.learn_one(multi_ex) - def log(self, event: Event): + def log(self, event: TEvent) -> None: if self.vw_logger.logging_enabled(): vw_ex = self.feature_embedder.format(event) self.vw_logger.log(vw_ex) - def save(self): - self.model_repo.save() + def save(self) -> None: + self.model_repo.save(self.workspace) -class Embedder(ABC): +class Embedder(Generic[TEvent], ABC): @abstractmethod - def format(self, event: Event) -> str: - pass + def format(self, event: TEvent) -> str: + ... 
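
As a sketch of what this generic contract asks of implementers (an illustrative toy, not part of the patch): an Embedder only has to turn an event into VW text format, for example:

    from langchain.chains.rl_chain import base

    class StaticFeatureEmbedder(base.Embedder[base.Event]):
        # toy embedder: serialize each raw input under its own VW
        # namespace; no embedding model is involved at all
        def format(self, event: base.Event) -> str:
            return "\n".join(f"|{ns} {v}" for ns, v in event.inputs.items())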
class SelectionScorer(ABC, BaseModel): @@ -212,7 +229,7 @@ class SelectionScorer(ABC, BaseModel): @abstractmethod def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float: - pass + ... class AutoSelectionScorer(SelectionScorer, BaseModel): @@ -243,7 +260,7 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): return chat_prompt @root_validator(pre=True) - def set_prompt_and_llm_chain(cls, values): + def set_prompt_and_llm_chain(cls, values: Dict[str, Any]) -> Dict[str, Any]: llm = values.get("llm") prompt = values.get("prompt") scoring_criteria_template_str = values.get("scoring_criteria_template_str") @@ -275,7 +292,7 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): ) -class RLChain(Chain): +class RLChain(Generic[TEvent], Chain): """ The `RLChain` class leverages the Vowpal Wabbit (VW) model as a learned policy for reinforcement learning. @@ -305,7 +322,7 @@ class RLChain(Chain): output_key: str = "result" #: :meta private: prompt: BasePromptTemplate selection_scorer: Union[SelectionScorer, None] - policy: Optional[Policy] + policy: Policy auto_embed: bool = True selected_input_key = "rl_chain_selected" selected_based_on_input_key = "rl_chain_selected_based_on" @@ -314,14 +331,14 @@ class RLChain(Chain): def __init__( self, feature_embedder: Embedder, - model_save_dir="./", - reset_model=False, - vw_cmd=None, - policy=VwPolicy, + model_save_dir: str = "./", + reset_model: bool = False, + vw_cmd: Optional[List[str]] = None, + policy: Type[Policy] = VwPolicy, vw_logs: Optional[Union[str, os.PathLike]] = None, - metrics_step=-1, - *args, - **kwargs, + metrics_step: int = -1, + *args: Any, + **kwargs: Any, ): super().__init__(*args, **kwargs) if self.selection_scorer is None: @@ -374,29 +391,29 @@ class RLChain(Chain): ) @abstractmethod - def _call_before_predict(self, inputs: Dict[str, Any]) -> Event: - pass + def _call_before_predict(self, inputs: Dict[str, Any]) -> TEvent: + ... @abstractmethod def _call_after_predict_before_llm( - self, inputs: Dict[str, Any], event: Event, prediction: Any - ) -> Tuple[Dict[str, Any], Event]: - pass + self, inputs: Dict[str, Any], event: TEvent, prediction: Any + ) -> Tuple[Dict[str, Any], TEvent]: + ... @abstractmethod def _call_after_llm_before_scoring( - self, llm_response: str, event: Event - ) -> Tuple[Dict[str, Any], Event]: - pass + self, llm_response: str, event: TEvent + ) -> Tuple[Dict[str, Any], TEvent]: + ... @abstractmethod def _call_after_scoring_before_learning( - self, event: Event, score: Optional[float] - ) -> Event: - pass + self, event: TEvent, score: Optional[float] + ) -> TEvent: + ... def update_with_delayed_score( - self, score: float, event: Event, force_score=False + self, score: float, event: TEvent, force_score: bool = False ) -> None: """ Updates the learned policy with the score provided. @@ -407,7 +424,8 @@ class RLChain(Chain): "The selection scorer is set, and force_score was not set to True. \ Please set force_score=True to use this function." 
) - self.metrics.on_feedback(score) + if self.metrics: + self.metrics.on_feedback(score) self._call_after_scoring_before_learning(event=event, score=score) self.policy.learn(event=event) self.policy.log(event=event) @@ -422,15 +440,16 @@ class RLChain(Chain): self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: + ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() if self.auto_embed: inputs = prepare_inputs_for_autoembed(inputs=inputs) - event = self._call_before_predict(inputs=inputs) + event: TEvent = self._call_before_predict(inputs=inputs) prediction = self.policy.predict(event=event) - self.metrics.on_decision() + if self.metrics: + self.metrics.on_decision() next_chain_inputs, event = self._call_after_predict_before_llm( inputs=inputs, event=event, prediction=prediction @@ -462,7 +481,8 @@ class RLChain(Chain): f"The selection scorer was not able to score, \ and the chain was not able to adjust to this response, error: {e}" ) - self.metrics.on_feedback(score) + if self.metrics: + self.metrics.on_feedback(score) event = self._call_after_scoring_before_learning(score=score, event=event) self.policy.learn(event=event) self.policy.log(event=event) @@ -515,7 +535,7 @@ def embed_string_type( def embed_dict_type(item: Dict, model: Any) -> Dict[str, Union[str, List[str]]]: """Helper function to embed a dictionary item.""" - inner_dict = {} + inner_dict: Dict[str, Union[str, List[str]]] = {} for ns, embed_item in item.items(): if isinstance(embed_item, list): inner_dict[ns] = [] @@ -530,7 +550,7 @@ def embed_dict_type(item: Dict, model: Any) -> Dict[str, Union[str, List[str]]]: def embed_list_type( item: list, model: Any, namespace: Optional[str] = None ) -> List[Dict[str, Union[str, List[str]]]]: - ret_list = [] + ret_list: List[Dict[str, Union[str, List[str]]]] = [] for embed_item in item: if isinstance(embed_item, dict): ret_list.append(embed_dict_type(embed_item, model)) diff --git a/libs/langchain/langchain/chains/rl_chain/metrics.py b/libs/langchain/langchain/chains/rl_chain/metrics.py index b7ec949c9e..4d6306f776 100644 --- a/libs/langchain/langchain/chains/rl_chain/metrics.py +++ b/libs/langchain/langchain/chains/rl_chain/metrics.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Dict, List, Optional, Union if TYPE_CHECKING: import pandas as pd @@ -6,11 +6,11 @@ if TYPE_CHECKING: class MetricsTracker: def __init__(self, step: int): - self._history = [] - self._step = step - self._i = 0 - self._num = 0 - self._denom = 0 + self._history: List[Dict[str, Union[int, float]]] = [] + self._step: int = step + self._i: int = 0 + self._num: float = 0 + self._denom: float = 0 @property def score(self) -> float: diff --git a/libs/langchain/langchain/chains/rl_chain/model_repository.py b/libs/langchain/langchain/chains/rl_chain/model_repository.py index eea866d1cf..87f162df0a 100644 --- a/libs/langchain/langchain/chains/rl_chain/model_repository.py +++ b/libs/langchain/langchain/chains/rl_chain/model_repository.py @@ -4,7 +4,7 @@ import logging import os import shutil from pathlib import Path -from typing import TYPE_CHECKING, Sequence, Union +from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: import vowpal_wabbit_next as vw @@ -22,7 +22,7 @@ class ModelRepository: self.folder = Path(folder) self.model_path = self.folder / "latest.vw" self.with_history = with_history - if reset and self.has_history: + if reset and self.has_history(): 
logger.warning( "There is non empty history which is recommended to be cleaned up" ) @@ -44,7 +44,7 @@ class ModelRepository: if self.with_history: # write history shutil.copyfile(self.model_path, self.folder / f"model-{self.get_tag()}.vw") - def load(self, commandline: Sequence[str]) -> "vw.Workspace": + def load(self, commandline: List[str]) -> "vw.Workspace": import vowpal_wabbit_next as vw model_data = None diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 6e1a1a5eff..e60e685a0b 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__) SENTINEL = object() -class PickBestFeatureEmbedder(base.Embedder): +class PickBestFeatureEmbedder(base.Embedder[PickBest.Event]): """ Text Embedder class that embeds the `BasedOn` and `ToSelectFrom` inputs into a format that can be used by the learning policy @@ -25,7 +25,7 @@ class PickBestFeatureEmbedder(base.Embedder): model name (Any, optional): The type of embeddings to be used for feature representation. Defaults to BERT SentenceTransformer. """ # noqa E501 - def __init__(self, model: Optional[Any] = None, *args, **kwargs): + def __init__(self, model: Optional[Any] = None, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) if model is None: @@ -88,7 +88,7 @@ class PickBestFeatureEmbedder(base.Embedder): return example_string[:-1] -class PickBest(base.RLChain): +class PickBest(base.RLChain[PickBest.Event]): """ `PickBest` is a class designed to leverage the Vowpal Wabbit (VW) model for reinforcement learning with a context, with the goal of modifying the prompt before the LLM call. 
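For reference between these hunks: `PickBestFeatureEmbedder.format` turns an event's `BasedOn`/`ToSelectFrom` inputs into VW's text format, one `shared` line for the context plus one line per candidate action. A hedged sketch follows; the nested `PickBest.Event` name matches this patch (a later patch in this series lifts it to module level as `PickBestEvent`), the stub encoder mirrors `MockEncoder` from the unit tests, and the expected output is taken from those tests:

```python
# Sketch of the VW text format produced by PickBestFeatureEmbedder.format.
# Assumes this patch is applied; the stub encoder mirrors MockEncoder from
# tests/unit_tests/chains/rl_chain/test_utils.py.
import langchain.chains.rl_chain.pick_best_chain as pick_best_chain


class StubEncoder:
    def encode(self, to_encode: str) -> str:
        return "[encoded]" + to_encode


embedder = pick_best_chain.PickBestFeatureEmbedder(model=StubEncoder())
event = pick_best_chain.PickBest.Event(
    inputs={},
    to_select_from={"action1": ["0", "1", "2"]},
    based_on={"context": "context"},
)
print(embedder.format(event))
# shared |context context
# |action1 0
# |action1 1
# |action1 2
```

No label prefix (e.g. `0:-0.0:1.0`) appears on the chosen action's line until the event carries a `Selected` with index, probability, and score, which is exactly what the labeled test expectations further down exercise.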
@@ -131,7 +131,7 @@ class PickBest(base.RLChain): self.probability = probability self.score = score - class Event(base.Event): + class Event(base.Event[PickBest.Selected]): def __init__( self, inputs: Dict[str, Any], @@ -146,8 +146,8 @@ class PickBest(base.RLChain): def __init__( self, feature_embedder: Optional[PickBestFeatureEmbedder] = None, - *args, - **kwargs, + *args: Any, + **kwargs: Any, ): vw_cmd = kwargs.get("vw_cmd", []) if not vw_cmd: @@ -170,7 +170,7 @@ class PickBest(base.RLChain): super().__init__(feature_embedder=feature_embedder, *args, **kwargs) - def _call_before_predict(self, inputs: Dict[str, Any]) -> PickBest.Event: + def _call_before_predict(self, inputs: Dict[str, Any]) -> Event: context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( @@ -198,7 +198,7 @@ class PickBest(base.RLChain): def _call_after_predict_before_llm( self, inputs: Dict[str, Any], event: Event, prediction: List[Tuple[int, float]] - ) -> Tuple[Dict[str, Any], PickBest.Event]: + ) -> Tuple[Dict[str, Any], Event]: import numpy as np prob_sum = sum(prob for _, prob in prediction) @@ -218,8 +218,8 @@ class PickBest(base.RLChain): return next_chain_inputs, event def _call_after_llm_before_scoring( - self, llm_response: str, event: PickBest.Event - ) -> Tuple[Dict[str, Any], PickBest.Event]: + self, llm_response: str, event: Event + ) -> Tuple[Dict[str, Any], Event]: next_chain_inputs = event.inputs.copy() # only one key, value pair in event.to_select_from value = next(iter(event.to_select_from.values())) @@ -232,7 +232,7 @@ class PickBest(base.RLChain): return next_chain_inputs, event def _call_after_scoring_before_learning( - self, event: PickBest.Event, score: Optional[float] + self, event: Event, score: Optional[float] ) -> Event: event.selected.score = score return event diff --git a/libs/langchain/langchain/chains/rl_chain/vw_logger.py b/libs/langchain/langchain/chains/rl_chain/vw_logger.py index 4fa4717539..e8d2e1541f 100644 --- a/libs/langchain/langchain/chains/rl_chain/vw_logger.py +++ b/libs/langchain/langchain/chains/rl_chain/vw_logger.py @@ -9,10 +9,10 @@ class VwLogger: if self.path: self.path.parent.mkdir(parents=True, exist_ok=True) - def log(self, vw_ex: str): + def log(self, vw_ex: str) -> None: if self.path: with open(self.path, "a") as f: f.write(f"{vw_ex}\n\n") - def logging_enabled(self): + def logging_enabled(self) -> bool: return bool(self.path) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py index 6d54d20d92..625c37ee00 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py @@ -1,3 +1,3 @@ class MockEncoder: - def encode(self, to_encode): + def encode(self, to_encode: str) -> str: return "[encoded]" + to_encode From dd6fff1c6209f6b05cfac0f343654947d6bd9e2e Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 28 Aug 2023 08:13:23 -0400 Subject: [PATCH 14/65] no errors in pick best chain --- .../langchain/chains/rl_chain/__init__.py | 2 +- .../chains/rl_chain/pick_best_chain.py | 47 ++++++++----------- 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index e71de1da6c..6d5cfc3e29 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -13,7 +13,7 @@ from 
langchain.chains.rl_chain.base import ( from langchain.chains.rl_chain.pick_best_chain import PickBest -def configure_logger(): +def configure_logger() -> None: logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) ch = logging.StreamHandler() diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index e60e685a0b..ca92052268 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -1,7 +1,7 @@ from __future__ import annotations import logging -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple, Type, Union import langchain.chains.rl_chain.base as base from langchain.base_language import BaseLanguageModel @@ -145,7 +145,6 @@ class PickBest(base.RLChain[PickBest.Event]): def __init__( self, - feature_embedder: Optional[PickBestFeatureEmbedder] = None, *args: Any, **kwargs: Any, ): @@ -163,12 +162,14 @@ class PickBest(base.RLChain[PickBest.Event]): raise ValueError( "If vw_cmd is specified, it must include --cb_explore_adf" ) - kwargs["vw_cmd"] = vw_cmd + + feature_embedder = kwargs.get("feature_embedder", None) if not feature_embedder: feature_embedder = PickBestFeatureEmbedder() + kwargs["feature_embedder"] = feature_embedder - super().__init__(feature_embedder=feature_embedder, *args, **kwargs) + super().__init__(*args, **kwargs) def _call_before_predict(self, inputs: Dict[str, Any]) -> Event: context, actions = base.get_based_on_and_to_select_from(inputs=inputs) @@ -223,10 +224,15 @@ class PickBest(base.RLChain[PickBest.Event]): next_chain_inputs = event.inputs.copy() # only one key, value pair in event.to_select_from value = next(iter(event.to_select_from.values())) + v = ( + value[event.selected.index] + if event.selected + else event.to_select_from.values() + ) next_chain_inputs.update( { self.selected_based_on_input_key: str(event.based_on), - self.selected_input_key: value[event.selected.index], + self.selected_input_key: v, } ) return next_chain_inputs, event @@ -234,7 +240,8 @@ class PickBest(base.RLChain[PickBest.Event]): def _call_after_scoring_before_learning( self, event: Event, score: Optional[float] ) -> Event: - event.selected.score = score + if event.selected: + event.selected.score = score return event def _call( @@ -248,33 +255,19 @@ class PickBest(base.RLChain[PickBest.Event]): def _chain_type(self) -> str: return "rl_chain_pick_best" - @classmethod - def from_chain( - cls, - llm_chain: Chain, - prompt: BasePromptTemplate, - selection_scorer=SENTINEL, - **kwargs: Any, - ): - if selection_scorer is SENTINEL: - selection_scorer = base.AutoSelectionScorer(llm=llm_chain.llm) - return PickBest( - llm_chain=llm_chain, - prompt=prompt, - selection_scorer=selection_scorer, - **kwargs, - ) - @classmethod def from_llm( - cls, + cls: Type[PickBest], llm: BaseLanguageModel, prompt: BasePromptTemplate, - selection_scorer=SENTINEL, + selection_scorer: Union[base.AutoSelectionScorer, object] = SENTINEL, **kwargs: Any, - ): + ) -> PickBest: llm_chain = LLMChain(llm=llm, prompt=prompt) - return PickBest.from_chain( + if selection_scorer is SENTINEL: + selection_scorer = base.AutoSelectionScorer(llm=llm_chain.llm) + + return PickBest( llm_chain=llm_chain, prompt=prompt, selection_scorer=selection_scorer, From a11ad11d063e8f5553bd25b8bd74e629e1e31dd6 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 03:59:01 -0400 Subject: [PATCH 15/65] fix all 
mypy errors --- .../langchain/chains/rl_chain/base.py | 50 ++++++++++--------- .../chains/rl_chain/pick_best_chain.py | 9 +++- 2 files changed, 34 insertions(+), 25 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index d97dd255af..22ff60a403 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -161,6 +161,9 @@ TEvent = TypeVar("TEvent", bound=Event) class Policy(ABC): + def __init__(self, **kwargs: Any): + pass + @abstractmethod def predict(self, event: TEvent) -> Any: ... @@ -233,7 +236,7 @@ class SelectionScorer(ABC, BaseModel): class AutoSelectionScorer(SelectionScorer, BaseModel): - llm_chain: Union[LLMChain, None] = None + llm_chain: LLMChain prompt: Union[BasePromptTemplate, None] = None scoring_criteria_template_str: Optional[str] = None @@ -309,7 +312,7 @@ class RLChain(Generic[TEvent], Chain): - model_save_dir (str, optional): Directory for saving the VW model. Default is the current directory. - reset_model (bool): If set to True, the model starts training from scratch. Default is False. - vw_cmd (List[str], optional): Command line arguments for the VW model. - - policy (VwPolicy): Policy used by the chain. + - policy (Type[VwPolicy]): Policy used by the chain. - vw_logs (Optional[Union[str, os.PathLike]]): Path for the VW logs. - metrics_step (int): Step for the metrics tracker. Default is -1. @@ -322,7 +325,7 @@ class RLChain(Generic[TEvent], Chain): output_key: str = "result" #: :meta private: prompt: BasePromptTemplate selection_scorer: Union[SelectionScorer, None] - policy: Policy + active_policy: Policy auto_embed: bool = True selected_input_key = "rl_chain_selected" selected_based_on_input_key = "rl_chain_selected_based_on" @@ -347,14 +350,17 @@ class RLChain(Generic[TEvent], Chain): reinforcement learning will be done in the RL chain \ unless update_with_delayed_score is called." 
) - self.policy = policy( - model_repo=ModelRepository( - model_save_dir, with_history=True, reset=reset_model - ), - vw_cmd=vw_cmd or [], - feature_embedder=feature_embedder, - vw_logger=VwLogger(vw_logs), - ) + + if self.active_policy is None: + self.active_policy = policy( + model_repo=ModelRepository( + model_save_dir, with_history=True, reset=reset_model + ), + vw_cmd=vw_cmd or [], + feature_embedder=feature_embedder, + vw_logger=VwLogger(vw_logs), + ) + self.metrics = MetricsTracker(step=metrics_step) class Config: @@ -427,8 +433,8 @@ class RLChain(Generic[TEvent], Chain): if self.metrics: self.metrics.on_feedback(score) self._call_after_scoring_before_learning(event=event, score=score) - self.policy.learn(event=event) - self.policy.log(event=event) + self.active_policy.learn(event=event) + self.active_policy.log(event=event) def set_auto_embed(self, auto_embed: bool) -> None: """ @@ -447,7 +453,7 @@ class RLChain(Generic[TEvent], Chain): inputs = prepare_inputs_for_autoembed(inputs=inputs) event: TEvent = self._call_before_predict(inputs=inputs) - prediction = self.policy.predict(event=event) + prediction = self.active_policy.predict(event=event) if self.metrics: self.metrics.on_decision() @@ -484,8 +490,8 @@ class RLChain(Generic[TEvent], Chain): if self.metrics: self.metrics.on_feedback(score) event = self._call_after_scoring_before_learning(score=score, event=event) - self.policy.learn(event=event) - self.policy.log(event=event) + self.active_policy.learn(event=event) + self.active_policy.log(event=event) return {self.output_key: {"response": output, "selection_metadata": event}} @@ -493,7 +499,7 @@ class RLChain(Generic[TEvent], Chain): """ This function should be called to save the state of the learned policy model. """ - self.policy.save() + self.active_policy.save() @property def _chain_type(self) -> str: @@ -509,7 +515,7 @@ def is_stringtype_instance(item: Any) -> bool: def embed_string_type( item: Union[str, _Embed], model: Any, namespace: Optional[str] = None -) -> Dict[str, str]: +) -> Dict[str, Union[str, List[str]]]: """Helper function to embed a string or an _Embed object.""" join_char = "" keep_str = "" @@ -533,9 +539,9 @@ def embed_string_type( return {namespace: keep_str + join_char.join(map(str, encoded))} -def embed_dict_type(item: Dict, model: Any) -> Dict[str, Union[str, List[str]]]: +def embed_dict_type(item: Dict, model: Any) -> Dict[str, Any]: """Helper function to embed a dictionary item.""" - inner_dict: Dict[str, Union[str, List[str]]] = {} + inner_dict: Dict[str, Any] = {} for ns, embed_item in item.items(): if isinstance(embed_item, list): inner_dict[ns] = [] @@ -560,9 +566,7 @@ def embed_list_type( def embed( - to_embed: Union[ - Union(str, _Embed(str)), Dict, List[Union(str, _Embed(str))], List[Dict] - ], + to_embed: Union[Union[str, _Embed], Dict, List[Union[str, _Embed]], List[Dict]], model: Any, namespace: Optional[str] = None, ) -> List[Dict[str, Union[str, List[str]]]]: diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index ca92052268..691e0a99ce 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -54,9 +54,14 @@ class PickBestFeatureEmbedder(base.Embedder[PickBest.Event]): to_select_from_var_name, to_select_from = next( iter(event.to_select_from.items()), (None, None) ) + action_embs = ( - base.embed(to_select_from, self.model, to_select_from_var_name) - if 
event.to_select_from + ( + base.embed(to_select_from, self.model, to_select_from_var_name) + if event.to_select_from + else None + ) + if to_select_from else None ) From 0b8691c6e5de170bbe574a4cf4f9c1bc68c453d2 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:19:19 -0400 Subject: [PATCH 16/65] fix all mypy errors and some renaming and refactoring --- .../langchain/chains/rl_chain/base.py | 18 +++- .../chains/rl_chain/pick_best_chain.py | 83 ++++++++++--------- .../rl_chain/test_pick_best_text_embedder.py | 54 ++++++------ 3 files changed, 86 insertions(+), 69 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 22ff60a403..721b7d35de 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -295,7 +295,7 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): ) -class RLChain(Generic[TEvent], Chain): +class RLChain(Chain, Generic[TEvent]): """ The `RLChain` class leverages the Vowpal Wabbit (VW) model as a learned policy for reinforcement learning. @@ -320,12 +320,24 @@ class RLChain(Generic[TEvent], Chain): The class initializes the VW model using the provided arguments. If `selection_scorer` is not provided, a warning is logged, indicating that no reinforcement learning will occur unless the `update_with_delayed_score` method is called. """ # noqa: E501 + class _NoOpPolicy(Policy): + """Placeholder policy that does nothing""" + + def predict(self, event: TEvent) -> Any: + return None + + def learn(self, event: TEvent) -> None: + pass + + def log(self, event: TEvent) -> None: + pass + llm_chain: Chain output_key: str = "result" #: :meta private: prompt: BasePromptTemplate selection_scorer: Union[SelectionScorer, None] - active_policy: Policy + active_policy: Policy = _NoOpPolicy() auto_embed: bool = True selected_input_key = "rl_chain_selected" selected_based_on_input_key = "rl_chain_selected_based_on" @@ -351,7 +363,7 @@ class RLChain(Generic[TEvent], Chain): unless update_with_delayed_score is called." 
) - if self.active_policy is None: + if isinstance(self.active_policy, RLChain._NoOpPolicy): self.active_policy = policy( model_repo=ModelRepository( model_save_dir, with_history=True, reset=reset_model diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 691e0a99ce..16e8bf598c 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -17,7 +17,36 @@ logger = logging.getLogger(__name__) SENTINEL = object() -class PickBestFeatureEmbedder(base.Embedder[PickBest.Event]): +class PickBestSelected(base.Selected): + index: Optional[int] + probability: Optional[float] + score: Optional[float] + + def __init__( + self, + index: Optional[int] = None, + probability: Optional[float] = None, + score: Optional[float] = None, + ): + self.index = index + self.probability = probability + self.score = score + + +class PickBestEvent(base.Event[PickBestSelected]): + def __init__( + self, + inputs: Dict[str, Any], + to_select_from: Dict[str, Any], + based_on: Dict[str, Any], + selected: Optional[PickBestSelected] = None, + ): + super().__init__(inputs=inputs, selected=selected) + self.to_select_from = to_select_from + self.based_on = based_on + + +class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): """ Text Embedder class that embeds the `BasedOn` and `ToSelectFrom` inputs into a format that can be used by the learning policy @@ -35,7 +64,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBest.Event]): self.model = model - def format(self, event: PickBest.Event) -> str: + def format(self, event: PickBestEvent) -> str: """ Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW """ @@ -93,7 +122,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBest.Event]): return example_string[:-1] -class PickBest(base.RLChain[PickBest.Event]): +class PickBest(base.RLChain[PickBestEvent]): """ `PickBest` is a class designed to leverage the Vowpal Wabbit (VW) model for reinforcement learning with a context, with the goal of modifying the prompt before the LLM call. @@ -121,33 +150,6 @@ class PickBest(base.RLChain[PickBest.Event]): feature_embedder (PickBestFeatureEmbedder, optional): Is an advanced attribute. Responsible for embedding the `BasedOn` and `ToSelectFrom` inputs. If omitted, a default embedder is utilized. 
""" # noqa E501 - class Selected(base.Selected): - index: Optional[int] - probability: Optional[float] - score: Optional[float] - - def __init__( - self, - index: Optional[int] = None, - probability: Optional[float] = None, - score: Optional[float] = None, - ): - self.index = index - self.probability = probability - self.score = score - - class Event(base.Event[PickBest.Selected]): - def __init__( - self, - inputs: Dict[str, Any], - to_select_from: Dict[str, Any], - based_on: Dict[str, Any], - selected: Optional[PickBest.Selected] = None, - ): - super().__init__(inputs=inputs, selected=selected) - self.to_select_from = to_select_from - self.based_on = based_on - def __init__( self, *args: Any, @@ -176,7 +178,7 @@ class PickBest(base.RLChain[PickBest.Event]): super().__init__(*args, **kwargs) - def _call_before_predict(self, inputs: Dict[str, Any]) -> Event: + def _call_before_predict(self, inputs: Dict[str, Any]) -> PickBestEvent: context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( @@ -199,12 +201,15 @@ class PickBest(base.RLChain[PickBest.Event]): to base the selected of ToSelectFrom on." ) - event = PickBest.Event(inputs=inputs, to_select_from=actions, based_on=context) + event = PickBestEvent(inputs=inputs, to_select_from=actions, based_on=context) return event def _call_after_predict_before_llm( - self, inputs: Dict[str, Any], event: Event, prediction: List[Tuple[int, float]] - ) -> Tuple[Dict[str, Any], Event]: + self, + inputs: Dict[str, Any], + event: PickBestEvent, + prediction: List[Tuple[int, float]], + ) -> Tuple[Dict[str, Any], PickBestEvent]: import numpy as np prob_sum = sum(prob for _, prob in prediction) @@ -214,7 +219,7 @@ class PickBest(base.RLChain[PickBest.Event]): sampled_ap = prediction[sampled_index] sampled_action = sampled_ap[0] sampled_prob = sampled_ap[1] - selected = PickBest.Selected(index=sampled_action, probability=sampled_prob) + selected = PickBestSelected(index=sampled_action, probability=sampled_prob) event.selected = selected # only one key, value pair in event.to_select_from @@ -224,8 +229,8 @@ class PickBest(base.RLChain[PickBest.Event]): return next_chain_inputs, event def _call_after_llm_before_scoring( - self, llm_response: str, event: Event - ) -> Tuple[Dict[str, Any], Event]: + self, llm_response: str, event: PickBestEvent + ) -> Tuple[Dict[str, Any], PickBestEvent]: next_chain_inputs = event.inputs.copy() # only one key, value pair in event.to_select_from value = next(iter(event.to_select_from.values())) @@ -243,8 +248,8 @@ class PickBest(base.RLChain[PickBest.Event]): return next_chain_inputs, event def _call_after_scoring_before_learning( - self, event: Event, score: Optional[float] - ) -> Event: + self, event: PickBestEvent, score: Optional[float] + ) -> PickBestEvent: if event.selected: event.selected.score = score return event diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index d8ea85c6eb..c299b18720 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -11,7 +11,7 @@ encoded_text = "[ e n c o d e d ] " def test_pickbest_textembedder_missing_context_throws(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_action = {"action": ["0", "1", "2"]} - event = pick_best_chain.PickBest.Event( + event = 
pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_action, based_on={} ) with pytest.raises(ValueError): @@ -21,7 +21,7 @@ def test_pickbest_textembedder_missing_context_throws(): @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_actions_throws(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) - event = pick_best_chain.PickBest.Event( + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from={}, based_on={"context": "context"} ) with pytest.raises(ValueError): @@ -33,7 +33,7 @@ def test_pickbest_textembedder_no_label_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - event = pick_best_chain.PickBest.Event( + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on={"context": "context"} ) vw_ex_str = feature_embedder.format(event) @@ -45,8 +45,8 @@ def test_pickbest_textembedder_w_label_no_score_no_emb(): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on={"context": "context"}, @@ -63,8 +63,8 @@ def test_pickbest_textembedder_w_full_label_no_emb(): expected = ( """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ ) - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on={"context": "context"}, @@ -90,8 +90,8 @@ def test_pickbest_textembedder_w_full_label_w_emb(): named_actions = {"action1": rl_chain.Embed([str1, str2, str3])} context = {"context": rl_chain.Embed(ctx_str_1)} expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -114,8 +114,8 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): named_actions = {"action1": rl_chain.EmbedAndKeep([str1, str2, str3])} context = {"context": rl_chain.EmbedAndKeep(ctx_str_1)} expected = f"""shared |context {ctx_str_1 + " " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, 
selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -128,7 +128,7 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 - event = pick_best_chain.PickBest.Event( + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -141,8 +141,8 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -155,8 +155,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -186,8 +186,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): } expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -219,8 +219,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee } expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -253,8 +253,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): context = {"context1": ctx_str_1, "context2": rl_chain.Embed(ctx_str_2)} 
expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -290,8 +290,8 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ } expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBest.Selected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBest.Event( + selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context, selected=selected ) vw_ex_str = feature_embedder.format(event) @@ -315,7 +315,7 @@ def test_raw_features_underscored(): expected_no_embed = ( f"""shared |context {ctx_str_underscored} \n|action {str1_underscored} """ ) - event = pick_best_chain.PickBest.Event( + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -325,7 +325,7 @@ def test_raw_features_underscored(): named_actions = {"action": rl_chain.Embed([str1])} context = {"context": rl_chain.Embed(ctx_str)} expected_embed = f"""shared |context {encoded_ctx_str} \n|action {encoded_str1} """ - event = pick_best_chain.PickBest.Event( + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) @@ -335,7 +335,7 @@ def test_raw_features_underscored(): named_actions = {"action": rl_chain.EmbedAndKeep([str1])} context = {"context": rl_chain.EmbedAndKeep(ctx_str)} expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ # noqa: E501 - event = pick_best_chain.PickBest.Event( + event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_actions, based_on=context ) vw_ex_str = feature_embedder.format(event) From b3c0728de2893b924f34b75f8107430024154669 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:28:43 -0400 Subject: [PATCH 17/65] fix mypy errors in tests --- .../rl_chain/test_pick_best_chain_call.py | 34 ++++++++------- .../rl_chain/test_pick_best_text_embedder.py | 34 ++++++++------- .../rl_chain/test_rl_chain_base_embedder.py | 42 +++++++++---------- 3 files changed, 58 insertions(+), 52 deletions(-) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 3fad1667d9..7bca6b470d 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -1,3 +1,5 @@ +from typing import Any, Dict + import pytest from test_utils import MockEncoder @@ -10,7 +12,7 @@ encoded_text = "[ e n c o d e d ] " @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def setup(): 
+def setup() -> tuple: _PROMPT_TEMPLATE = """This is a dummy prompt that will be ignored by the fake llm""" PROMPT = PromptTemplate(input_variables=[], template=_PROMPT_TEMPLATE) @@ -19,7 +21,7 @@ def setup(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_multiple_ToSelectFrom_throws(): +def test_multiple_ToSelectFrom_throws() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = ["0", "1", "2"] @@ -32,7 +34,7 @@ def test_multiple_ToSelectFrom_throws(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_missing_basedOn_from_throws(): +def test_missing_basedOn_from_throws() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = ["0", "1", "2"] @@ -41,7 +43,7 @@ def test_missing_basedOn_from_throws(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_ToSelectFrom_not_a_list_throws(): +def test_ToSelectFrom_not_a_list_throws() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) actions = {"actions": ["0", "1", "2"]} @@ -53,7 +55,7 @@ def test_ToSelectFrom_not_a_list_throws(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_update_with_delayed_score_with_auto_validator_throws(): +def test_update_with_delayed_score_with_auto_validator_throws() -> None: llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that auto_val_llm = FakeListChatModel(responses=["3"]) @@ -75,7 +77,7 @@ def test_update_with_delayed_score_with_auto_validator_throws(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_update_with_delayed_score_force(): +def test_update_with_delayed_score_force() -> None: llm, PROMPT = setup() # this LLM returns a number so that the auto validator will return that auto_val_llm = FakeListChatModel(responses=["3"]) @@ -99,7 +101,7 @@ def test_update_with_delayed_score_force(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_update_with_delayed_score(): +def test_update_with_delayed_score() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=None @@ -117,11 +119,11 @@ def test_update_with_delayed_score(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_user_defined_scorer(): +def test_user_defined_scorer() -> None: llm, PROMPT = setup() class CustomSelectionScorer(rl_chain.SelectionScorer): - def score_response(self, inputs, llm_response: str) -> float: + def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float: score = 200 return score @@ -139,7 +141,7 @@ def test_user_defined_scorer(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_embeddings(): +def test_default_embeddings() -> None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( @@ -173,7 +175,7 @@ def test_default_embeddings(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_embeddings_off(): +def test_default_embeddings_off() -> None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( @@ -199,7 +201,7 @@ def test_default_embeddings_off(): @pytest.mark.requires("vowpal_wabbit_next", 
"sentence_transformers") -def test_default_embeddings_mixed_w_explicit_user_embeddings(): +def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( @@ -234,7 +236,7 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_no_scorer_specified(): +def test_default_no_scorer_specified() -> None: _, PROMPT = setup() chain_llm = FakeListChatModel(responses=[100]) chain = pick_best_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) @@ -249,7 +251,7 @@ def test_default_no_scorer_specified(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_explicitly_no_scorer(): +def test_explicitly_no_scorer() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=None @@ -265,7 +267,7 @@ def test_explicitly_no_scorer(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_auto_scorer_with_user_defined_llm(): +def test_auto_scorer_with_user_defined_llm() -> None: llm, PROMPT = setup() scorer_llm = FakeListChatModel(responses=[300]) chain = pick_best_chain.PickBest.from_llm( @@ -284,7 +286,7 @@ def test_auto_scorer_with_user_defined_llm(): @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_calling_chain_w_reserved_inputs_throws(): +def test_calling_chain_w_reserved_inputs_throws() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) with pytest.raises(ValueError): diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index c299b18720..acc7491c40 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -8,7 +8,7 @@ encoded_text = "[ e n c o d e d ] " @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_missing_context_throws(): +def test_pickbest_textembedder_missing_context_throws() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_action = {"action": ["0", "1", "2"]} event = pick_best_chain.PickBestEvent( @@ -19,7 +19,7 @@ def test_pickbest_textembedder_missing_context_throws(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_missing_actions_throws(): +def test_pickbest_textembedder_missing_actions_throws() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) event = pick_best_chain.PickBestEvent( inputs={}, to_select_from={}, based_on={"context": "context"} @@ -29,7 +29,7 @@ def test_pickbest_textembedder_missing_actions_throws(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_no_label_no_emb(): +def test_pickbest_textembedder_no_label_no_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ @@ -41,7 +41,7 @@ def test_pickbest_textembedder_no_label_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_label_no_score_no_emb(): +def 
test_pickbest_textembedder_w_label_no_score_no_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ @@ -57,7 +57,7 @@ def test_pickbest_textembedder_w_label_no_score_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_full_label_no_emb(): +def test_pickbest_textembedder_w_full_label_no_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = ( @@ -75,7 +75,7 @@ def test_pickbest_textembedder_w_full_label_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_full_label_w_emb(): +def test_pickbest_textembedder_w_full_label_w_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -99,7 +99,7 @@ def test_pickbest_textembedder_w_full_label_w_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): +def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" str2 = "1" @@ -123,7 +123,7 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): +def test_pickbest_textembedder_more_namespaces_no_label_no_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} @@ -136,7 +136,7 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): +def test_pickbest_textembedder_more_namespaces_w_label_no_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} @@ -150,7 +150,7 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): +def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} @@ -164,7 +164,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): +def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" @@ -195,7 +195,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep(): +def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep() -> ( + None +): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" 
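As a quick key to the `encoded_*` expectations in these tests, here is a hedged sketch; it assumes it runs from tests/unit_tests/chains/rl_chain/ so `test_utils.MockEncoder` is importable, and the commented outputs are taken from the test assertions in this series:

```python
# Key to the encoded_* fixtures: MockEncoder.encode returns "[encoded]" + input,
# and the embedder space-joins the encoder output character by character.
import langchain.chains.rl_chain.base as base
from test_utils import MockEncoder

# Raw feature: passed through unchanged.
print(base.embed("test", MockEncoder(), "a_namespace"))
# [{'a_namespace': 'test'}]

# Embed: the raw value is replaced by the encoded form.
print(base.embed(base.Embed("test"), MockEncoder(), "a_namespace"))
# [{'a_namespace': '[ e n c o d e d ] t e s t'}]

# EmbedAndKeep: the raw value is kept, followed by the encoded form.
print(base.embed(base.EmbedAndKeep("test"), MockEncoder(), "a_namespace"))
# [{'a_namespace': 'test [ e n c o d e d ] t e s t'}]
```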
@@ -228,7 +230,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): +def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" @@ -262,7 +264,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep(): +def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep() -> ( + None +): feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" @@ -299,7 +303,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_ @pytest.mark.requires("vowpal_wabbit_next") -def test_raw_features_underscored(): +def test_raw_features_underscored() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "this is a long string" str1_underscored = str1.replace(" ", "_") diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index 895fa8ebb6..c9f8416ceb 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -7,13 +7,13 @@ encoded_text = "[ e n c o d e d ] " @pytest.mark.requires("vowpal_wabbit_next") -def test_simple_context_str_no_emb(): +def test_simple_context_str_no_emb() -> None: expected = [{"a_namespace": "test"}] assert base.embed("test", MockEncoder(), "a_namespace") == expected @pytest.mark.requires("vowpal_wabbit_next") -def test_simple_context_str_w_emb(): +def test_simple_context_str_w_emb() -> None: str1 = "test" encoded_str1 = " ".join(char for char in str1) expected = [{"a_namespace": encoded_text + encoded_str1}] @@ -28,7 +28,7 @@ def test_simple_context_str_w_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_simple_context_str_w_nested_emb(): +def test_simple_context_str_w_nested_emb() -> None: # nested embeddings, innermost wins str1 = "test" encoded_str1 = " ".join(char for char in str1) @@ -46,13 +46,13 @@ def test_simple_context_str_w_nested_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_no_emb(): +def test_context_w_namespace_no_emb() -> None: expected = [{"test_namespace": "test"}] assert base.embed({"test_namespace": "test"}, MockEncoder()) == expected @pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_w_emb(): +def test_context_w_namespace_w_emb() -> None: str1 = "test" encoded_str1 = " ".join(char for char in str1) expected = [{"test_namespace": encoded_text + encoded_str1}] @@ -67,7 +67,7 @@ def test_context_w_namespace_w_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_w_emb2(): +def test_context_w_namespace_w_emb2() -> None: str1 = "test" encoded_str1 = " ".join(char for char in str1) expected = [{"test_namespace": encoded_text + encoded_str1}] @@ -82,7 +82,7 @@ def test_context_w_namespace_w_emb2(): @pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_w_some_emb(): +def test_context_w_namespace_w_some_emb() -> None: str1 = "test1" str2 = "test2" encoded_str2 = " ".join(char for char in 
str2) @@ -111,7 +111,7 @@ def test_context_w_namespace_w_some_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_simple_action_strlist_no_emb(): +def test_simple_action_strlist_no_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -120,7 +120,7 @@ def test_simple_action_strlist_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_simple_action_strlist_w_emb(): +def test_simple_action_strlist_w_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -148,7 +148,7 @@ def test_simple_action_strlist_w_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_simple_action_strlist_w_some_emb(): +def test_simple_action_strlist_w_some_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -181,7 +181,7 @@ def test_simple_action_strlist_w_some_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_no_emb(): +def test_action_w_namespace_no_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -204,7 +204,7 @@ def test_action_w_namespace_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_emb(): +def test_action_w_namespace_w_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -246,7 +246,7 @@ def test_action_w_namespace_w_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_emb2(): +def test_action_w_namespace_w_emb2() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -292,7 +292,7 @@ def test_action_w_namespace_w_emb2(): @pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_some_emb(): +def test_action_w_namespace_w_some_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -333,7 +333,7 @@ def test_action_w_namespace_w_some_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict(): +def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict() -> None: str1 = "test1" str2 = "test2" str3 = "test3" @@ -384,7 +384,7 @@ def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict(): @pytest.mark.requires("vowpal_wabbit_next") -def test_one_namespace_w_list_of_features_no_emb(): +def test_one_namespace_w_list_of_features_no_emb() -> None: str1 = "test1" str2 = "test2" expected = [{"test_namespace": [str1, str2]}] @@ -392,7 +392,7 @@ def test_one_namespace_w_list_of_features_no_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_one_namespace_w_list_of_features_w_some_emb(): +def test_one_namespace_w_list_of_features_w_some_emb() -> None: str1 = "test1" str2 = "test2" encoded_str2 = " ".join(char for char in str2) @@ -404,24 +404,24 @@ def test_one_namespace_w_list_of_features_w_some_emb(): @pytest.mark.requires("vowpal_wabbit_next") -def test_nested_list_features_throws(): +def test_nested_list_features_throws() -> None: with pytest.raises(ValueError): base.embed({"test_namespace": [[1, 2], [3, 4]]}, MockEncoder()) @pytest.mark.requires("vowpal_wabbit_next") -def test_dict_in_list_throws(): +def test_dict_in_list_throws() -> None: with pytest.raises(ValueError): base.embed({"test_namespace": [{"a": 1}, {"b": 2}]}, MockEncoder()) @pytest.mark.requires("vowpal_wabbit_next") -def test_nested_dict_throws(): +def test_nested_dict_throws() -> None: with pytest.raises(ValueError): base.embed({"test_namespace": {"a": {"b": 1}}}, MockEncoder()) @pytest.mark.requires("vowpal_wabbit_next") -def test_list_of_tuples_throws(): +def test_list_of_tuples_throws() -> None: with pytest.raises(ValueError): base.embed({"test_namespace": 
[("a", 1), ("b", 2)]}, MockEncoder()) From 8d10a52525e8e85a2b6930be37e44de7f2b9bcbd Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:36:45 -0400 Subject: [PATCH 18/65] fix linting complaints --- libs/langchain/langchain/chains/rl_chain/pick_best_chain.py | 1 - .../chains/rl_chain/test_pick_best_text_embedder.py | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 16e8bf598c..fa7f18f8fb 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -6,7 +6,6 @@ from typing import Any, Dict, List, Optional, Tuple, Type, Union import langchain.chains.rl_chain.base as base from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun -from langchain.chains.base import Chain from langchain.chains.llm import LLMChain from langchain.prompts import BasePromptTemplate diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index acc7491c40..c49bacac60 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -264,9 +264,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> N @pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_embed_and_keep() -> ( - None -): +def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "0" From 44485c2b26a052939ca910c5a80a0e10dc18adaa Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:42:45 -0400 Subject: [PATCH 19/65] make input arg type more explicit --- .../unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index c9f8416ceb..7402c64d38 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -116,7 +116,8 @@ def test_simple_action_strlist_no_emb() -> None: str2 = "test2" str3 = "test3" expected = [{"a_namespace": str1}, {"a_namespace": str2}, {"a_namespace": str3}] - assert base.embed([str1, str2, str3], MockEncoder(), "a_namespace") == expected + to_embed: List[str] = [str1, str2, str3] + assert base.embed(to_embed, MockEncoder(), "a_namespace") == expected @pytest.mark.requires("vowpal_wabbit_next") From 758225dc17c90548d205edfafe79d833dedbe825 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:44:09 -0400 Subject: [PATCH 20/65] include type --- .../unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index 7402c64d38..4ccc75868d 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ 
b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -1,5 +1,6 @@ import pytest from test_utils import MockEncoder +from typing import List import langchain.chains.rl_chain.base as base From d50c0f139de710a685c276ec373bb8c4433bfc81 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:46:56 -0400 Subject: [PATCH 21/65] re order imports --- .../unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index 4ccc75868d..d0abc97e75 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -1,6 +1,7 @@ +from typing import List + import pytest from test_utils import MockEncoder -from typing import List import langchain.chains.rl_chain.base as base From 4e6e03ef50bf76887776c6a4a2fa2057047b7976 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 05:51:52 -0400 Subject: [PATCH 22/65] fix mypy complaint --- .../unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index d0abc97e75..bd0cc584ef 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -1,4 +1,4 @@ -from typing import List +from typing import List, Union import pytest from test_utils import MockEncoder @@ -118,7 +118,7 @@ def test_simple_action_strlist_no_emb() -> None: str2 = "test2" str3 = "test3" expected = [{"a_namespace": str1}, {"a_namespace": str2}, {"a_namespace": str3}] - to_embed: List[str] = [str1, str2, str3] + to_embed: List[Union[str, base._Embed]] = [str1, str2, str3] assert base.embed(to_embed, MockEncoder(), "a_namespace") == expected From 0a2724d8c740354acf4df0c5e1f32acba337dbd8 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 06:27:56 -0400 Subject: [PATCH 23/65] test --- .github/workflows/langchain_ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 8f1fc5d874..8cd13b40ee 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -60,7 +60,7 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11" + - "3.11.3" name: Python ${{ matrix.python-version }} extended tests steps: - uses: actions/checkout@v3 From 4b930f58e91b94824971623d0ead6809388a6743 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 06:28:07 -0400 Subject: [PATCH 24/65] test --- .github/workflows/langchain_ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 8cd13b40ee..30317dafb2 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -60,7 +60,7 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.3" + - "3.11.0" name: Python ${{ matrix.python-version }} extended tests steps: - uses: actions/checkout@v3 From 72eab3b37e7fbe5406a04691dc727bd7c2627955 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 06:35:27 -0400 Subject: 
[PATCH 25/65] test --- .github/workflows/langchain_ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 30317dafb2..955d307445 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -60,7 +60,6 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.0" name: Python ${{ matrix.python-version }} extended tests steps: - uses: actions/checkout@v3 From 5727148f2b0d3ad82ea7f8015394e24cc1f8567c Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 07:09:58 -0400 Subject: [PATCH 26/65] make sure test don't try to download sentence transformer models --- .../rl_chain/test_pick_best_chain_call.py | 48 +++++++++++++++---- 1 file changed, 40 insertions(+), 8 deletions(-) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 7bca6b470d..1b882e932d 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -23,7 +23,11 @@ def setup() -> tuple: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_multiple_ToSelectFrom_throws() -> None: llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + ) actions = ["0", "1", "2"] with pytest.raises(ValueError): chain.run( @@ -36,7 +40,11 @@ def test_multiple_ToSelectFrom_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_missing_basedOn_from_throws() -> None: llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + ) actions = ["0", "1", "2"] with pytest.raises(ValueError): chain.run(action=rl_chain.ToSelectFrom(actions)) @@ -45,7 +53,11 @@ def test_missing_basedOn_from_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_ToSelectFrom_not_a_list_throws() -> None: llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + ) actions = {"actions": ["0", "1", "2"]} with pytest.raises(ValueError): chain.run( @@ -63,6 +75,7 @@ def test_update_with_delayed_score_with_auto_validator_throws() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run( @@ -85,6 +98,7 @@ def test_update_with_delayed_score_force() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run( @@ -104,7 +118,10 @@ def test_update_with_delayed_score_force() -> None: def test_update_with_delayed_score() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, selection_scorer=None + llm=llm, + prompt=PROMPT, + 
selection_scorer=None, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run( @@ -128,7 +145,10 @@ def test_user_defined_scorer() -> None: return score chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer() + llm=llm, + prompt=PROMPT, + selection_scorer=CustomSelectionScorer(), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run( @@ -239,7 +259,11 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: def test_default_no_scorer_specified() -> None: _, PROMPT = setup() chain_llm = FakeListChatModel(responses=[100]) - chain = pick_best_chain.PickBest.from_llm(llm=chain_llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm( + llm=chain_llm, + prompt=PROMPT, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + ) response = chain.run( User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(["0", "1", "2"]), @@ -254,7 +278,10 @@ def test_default_no_scorer_specified() -> None: def test_explicitly_no_scorer() -> None: llm, PROMPT = setup() chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, selection_scorer=None + llm=llm, + prompt=PROMPT, + selection_scorer=None, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -274,6 +301,7 @@ def test_auto_scorer_with_user_defined_llm() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -288,7 +316,11 @@ def test_auto_scorer_with_user_defined_llm() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_calling_chain_w_reserved_inputs_throws() -> None: llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + ) with pytest.raises(ValueError): chain.run( User=rl_chain.BasedOn("Context"), From f8b5c2977a7678b9a515b7c58b371e2a27183e9c Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 07:17:40 -0400 Subject: [PATCH 27/65] restore ci workflow --- .github/workflows/langchain_ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 955d307445..8f1fc5d874 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -60,6 +60,7 @@ jobs: - "3.8" - "3.9" - "3.10" + - "3.11" name: Python ${{ matrix.python-version }} extended tests steps: - uses: actions/checkout@v3 From 7eaaad51de825931366c1bf86f5fb61ebe6b0791 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 07:42:45 -0400 Subject: [PATCH 28/65] cb defaults and some fixes --- .github/workflows/langchain_ci.yml | 2 +- .../langchain/chains/rl_chain/base.py | 31 ++++++++++----- .../chains/rl_chain/pick_best_chain.py | 2 +- .../rl_chain/test_pick_best_chain_call.py | 39 +++++++++++++++++-- 4 files changed, 58 insertions(+), 16 deletions(-) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 8f1fc5d874..06d04b2f47 100644 --- a/.github/workflows/langchain_ci.yml +++ 
b/.github/workflows/langchain_ci.yml @@ -60,7 +60,7 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11" + # - "3.11" name: Python ${{ matrix.python-version }} extended tests steps: - uses: actions/checkout@v3 diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 721b7d35de..fb4143f465 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -227,15 +227,17 @@ class Embedder(Generic[TEvent], ABC): ... -class SelectionScorer(ABC, BaseModel): +class SelectionScorer(Generic[TEvent], ABC, BaseModel): """Abstract method to grade the chosen selection or the response of the llm""" @abstractmethod - def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float: + def score_response( + self, inputs: Dict[str, Any], llm_response: str, event: TEvent + ) -> float: ... -class AutoSelectionScorer(SelectionScorer, BaseModel): +class AutoSelectionScorer(SelectionScorer[Event], BaseModel): llm_chain: LLMChain prompt: Union[BasePromptTemplate, None] = None scoring_criteria_template_str: Optional[str] = None @@ -254,7 +256,7 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): def get_default_prompt() -> ChatPromptTemplate: human_template = 'Given this based_on "{rl_chain_selected_based_on}" \ as the most important attribute, rank how good or bad this text is: \ - "{llm_response}".' + "{rl_chain_selected}".' human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) default_system_prompt = AutoSelectionScorer.get_default_system_prompt() chat_prompt = ChatPromptTemplate.from_messages( @@ -281,7 +283,9 @@ class AutoSelectionScorer(SelectionScorer, BaseModel): values["llm_chain"] = LLMChain(llm=llm, prompt=prompt) return values - def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float: + def score_response( + self, inputs: Dict[str, Any], llm_response: str, event: Event + ) -> float: ranking = self.llm_chain.predict(llm_response=llm_response, **inputs) ranking = ranking.strip() try: @@ -304,7 +308,7 @@ class RLChain(Chain, Generic[TEvent]): - prompt (BasePromptTemplate): The template for the base prompt. - selection_scorer (Union[SelectionScorer, None]): Scorer for the selection. Can be set to None. - policy (Optional[Policy]): The policy used by the chain to learn to populate a dynamic prompt. - - auto_embed (bool): Determines if embedding should be automatic. Default is True. + - auto_embed (bool): Determines if embedding should be automatic. Default is False. - metrics (Optional[MetricsTracker]): Tracker for metrics, can be set to None. 
Initialization Attributes: @@ -338,7 +342,7 @@ class RLChain(Chain, Generic[TEvent]): prompt: BasePromptTemplate selection_scorer: Union[SelectionScorer, None] active_policy: Policy = _NoOpPolicy() - auto_embed: bool = True + auto_embed: bool = False selected_input_key = "rl_chain_selected" selected_based_on_input_key = "rl_chain_selected_based_on" metrics: Optional[MetricsTracker] = None @@ -492,7 +496,7 @@ class RLChain(Chain, Generic[TEvent]): try: if self.selection_scorer: score = self.selection_scorer.score_response( - inputs=next_chain_inputs, llm_response=output + inputs=next_chain_inputs, llm_response=output, event=event ) except Exception as e: logger.info( @@ -553,7 +557,7 @@ def embed_string_type( def embed_dict_type(item: Dict, model: Any) -> Dict[str, Any]: """Helper function to embed a dictionary item.""" - inner_dict: Dict[str, Any] = {} + inner_dict: Dict = {} for ns, embed_item in item.items(): if isinstance(embed_item, list): inner_dict[ns] = [] @@ -568,10 +572,17 @@ def embed_dict_type(item: Dict, model: Any) -> Dict[str, Any]: def embed_list_type( item: list, model: Any, namespace: Optional[str] = None ) -> List[Dict[str, Union[str, List[str]]]]: - ret_list: List[Dict[str, Union[str, List[str]]]] = [] + ret_list: List = [] for embed_item in item: if isinstance(embed_item, dict): ret_list.append(embed_dict_type(embed_item, model)) + elif isinstance(embed_item, list): + item_embedding = embed_list_type(embed_item, model, namespace) + # Get the first key from the first dictionary + first_key = next(iter(item_embedding[0])) + # Group the values under that key + grouping = {first_key: [item[first_key] for item in item_embedding]} + ret_list.append(grouping) else: ret_list.append(embed_string_type(embed_item, model, namespace)) return ret_list diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index fa7f18f8fb..04218d2934 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -161,7 +161,7 @@ class PickBest(base.RLChain[PickBestEvent]): "--quiet", "--interactions=::", "--coin", - "--epsilon=0.2", + "--squarecb", ] else: if "--cb_explore_adf" not in vw_cmd: diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 1b882e932d..2af08840b5 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -140,7 +140,12 @@ def test_user_defined_scorer() -> None: llm, PROMPT = setup() class CustomSelectionScorer(rl_chain.SelectionScorer): - def score_response(self, inputs: Dict[str, Any], llm_response: str) -> float: + def score_response( + self, + inputs: Dict[str, Any], + llm_response: str, + event: pick_best_chain.PickBestEvent, + ) -> float: score = 200 return score @@ -161,11 +166,11 @@ def test_user_defined_scorer() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_embeddings() -> None: +def test_auto_embeddings_on() -> None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True ) str1 = "0" @@ -194,6 +199,32 @@ def 
test_default_embeddings() -> None: assert vw_str == expected +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") +def test_default_auto_embedder_is_off() -> None: + llm, PROMPT = setup() + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder + ) + + str1 = "0" + str2 = "1" + str3 = "2" + ctx_str_1 = "context1" + + expected = f"""shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} """ # noqa + + actions = [str1, str2, str3] + + response = chain.run( + User=pick_best_chain.base.BasedOn(ctx_str_1), + action=pick_best_chain.base.ToSelectFrom(actions), + ) + selection_metadata = response["selection_metadata"] + vw_str = feature_embedder.format(selection_metadata) + assert vw_str == expected + + @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_embeddings_off() -> None: llm, PROMPT = setup() @@ -225,7 +256,7 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: llm, PROMPT = setup() feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True ) str1 = "0" From c4ccaebbbb4a32a541c29747f92ff66ff2e37ac7 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 08:37:59 -0400 Subject: [PATCH 29/65] activate and deactivate scorer --- .../langchain/chains/rl_chain/base.py | 77 +++++++++++-------- .../rl_chain/test_pick_best_chain_call.py | 38 +++++++++ 2 files changed, 83 insertions(+), 32 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index fb4143f465..c250815943 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -343,6 +343,7 @@ class RLChain(Chain, Generic[TEvent]): selection_scorer: Union[SelectionScorer, None] active_policy: Policy = _NoOpPolicy() auto_embed: bool = False + selection_scorer_activated: bool = True selected_input_key = "rl_chain_selected" selected_based_on_input_key = "rl_chain_selected_based_on" metrics: Optional[MetricsTracker] = None @@ -400,6 +401,42 @@ class RLChain(Chain, Generic[TEvent]): """ return [self.output_key] + def update_with_delayed_score( + self, score: float, event: TEvent, force_score: bool = False + ) -> None: + """ + Updates the learned policy with the score provided. + Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call + """ # noqa: E501 + if self._can_use_selection_scorer() and not force_score: + raise RuntimeError( + "The selection scorer is set, and force_score was not set to True. \ + Please set force_score=True to use this function." + ) + if self.metrics: + self.metrics.on_feedback(score) + self._call_after_scoring_before_learning(event=event, score=score) + self.active_policy.learn(event=event) + self.active_policy.log(event=event) + + def deactivate_selection_scorer(self) -> None: + """ + Deactivates the selection scorer, meaning that the chain will no longer attempt to use the selection scorer to score responses. 
+ """ # noqa: E501 + self.selection_scorer_activated = False + + def activate_selection_scorer(self) -> None: + """ + Activates the selection scorer, meaning that the chain will attempt to use the selection scorer to score responses. + """ # noqa: E501 + self.selection_scorer_activated = True + + def save_progress(self) -> None: + """ + This function should be called to save the state of the learned policy model. + """ # noqa: E501 + self.active_policy.save() + def _validate_inputs(self, inputs: Dict[str, Any]) -> None: super()._validate_inputs(inputs) if ( @@ -412,6 +449,12 @@ class RLChain(Chain, Generic[TEvent]): they are reserved for internal use during auto reward." ) + def _can_use_selection_scorer(self) -> bool: + """ + Returns whether the chain can use the selection scorer to score responses or not. + """ # noqa: E501 + return self.selection_scorer is not None and self.selection_scorer_activated + @abstractmethod def _call_before_predict(self, inputs: Dict[str, Any]) -> TEvent: ... @@ -434,30 +477,6 @@ class RLChain(Chain, Generic[TEvent]): ) -> TEvent: ... - def update_with_delayed_score( - self, score: float, event: TEvent, force_score: bool = False - ) -> None: - """ - Updates the learned policy with the score provided. - Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call - """ # noqa: E501 - if self.selection_scorer and not force_score: - raise RuntimeError( - "The selection scorer is set, and force_score was not set to True. \ - Please set force_score=True to use this function." - ) - if self.metrics: - self.metrics.on_feedback(score) - self._call_after_scoring_before_learning(event=event, score=score) - self.active_policy.learn(event=event) - self.active_policy.log(event=event) - - def set_auto_embed(self, auto_embed: bool) -> None: - """ - Sets whether the chain should auto embed the inputs or not. - """ - self.auto_embed = auto_embed - def _call( self, inputs: Dict[str, Any], @@ -494,8 +513,8 @@ class RLChain(Chain, Generic[TEvent]): score = None try: - if self.selection_scorer: - score = self.selection_scorer.score_response( + if self._can_use_selection_scorer(): + score = self.selection_scorer.score_response( # type: ignore inputs=next_chain_inputs, llm_response=output, event=event ) except Exception as e: @@ -511,12 +530,6 @@ class RLChain(Chain, Generic[TEvent]): return {self.output_key: {"response": output, "selection_metadata": event}} - def save_progress(self) -> None: - """ - This function should be called to save the state of the learned policy model. 
- """ - self.active_policy.save() - @property def _chain_type(self) -> str: return "llm_personalizer_chain" diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 2af08840b5..d7dee7fdf6 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -363,3 +363,41 @@ def test_calling_chain_w_reserved_inputs_throws() -> None: User=rl_chain.BasedOn("Context"), rl_chain_selected=rl_chain.ToSelectFrom(["0", "1", "2"]), ) + + +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") +def test_activate_and_deactivate_scorer() -> None: + llm, PROMPT = setup() + scorer_llm = FakeListChatModel(responses=[300]) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, + prompt=PROMPT, + selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=scorer_llm), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + ) + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) + # chain llm used for both basic prompt and for scoring + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == 300.0 + + chain.deactivate_selection_scorer() + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score is None + + chain.activate_selection_scorer() + response = chain.run( + User=pick_best_chain.base.BasedOn("Context"), + action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), + ) + assert response["response"] == "hey" + selection_metadata = response["selection_metadata"] + assert selection_metadata.selected.score == 300.0 From 48aaa27bf7f94ebf85d22ce243cbda1bc545b19f Mon Sep 17 00:00:00 2001 From: olgavrou Date: Tue, 29 Aug 2023 08:46:55 -0400 Subject: [PATCH 30/65] update score to take entire response object to make it easier for user --- libs/langchain/langchain/chains/rl_chain/base.py | 3 ++- .../unit_tests/chains/rl_chain/test_pick_best_chain_call.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index c250815943..d08200c709 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -402,7 +402,7 @@ class RLChain(Chain, Generic[TEvent]): return [self.output_key] def update_with_delayed_score( - self, score: float, event: TEvent, force_score: bool = False + self, score: float, chain_response: Dict[str, Any], force_score: bool = False ) -> None: """ Updates the learned policy with the score provided. 
@@ -415,6 +415,7 @@ class RLChain(Chain, Generic[TEvent]): ) if self.metrics: self.metrics.on_feedback(score) + event: TEvent = chain_response["selection_metadata"] self._call_after_scoring_before_learning(event=event, score=score) self.active_policy.learn(event=event) self.active_policy.log(event=event) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index d7dee7fdf6..d4576ce254 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -86,7 +86,7 @@ def test_update_with_delayed_score_with_auto_validator_throws() -> None: selection_metadata = response["selection_metadata"] assert selection_metadata.selected.score == 3.0 with pytest.raises(RuntimeError): - chain.update_with_delayed_score(event=selection_metadata, score=100) + chain.update_with_delayed_score(chain_response=response, score=100) @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -109,7 +109,7 @@ def test_update_with_delayed_score_force() -> None: selection_metadata = response["selection_metadata"] assert selection_metadata.selected.score == 3.0 chain.update_with_delayed_score( - event=selection_metadata, score=100, force_score=True + chain_response=response, score=100, force_score=True ) assert selection_metadata.selected.score == 100.0 @@ -131,7 +131,7 @@ def test_update_with_delayed_score() -> None: assert response["response"] == "hey" selection_metadata = response["selection_metadata"] assert selection_metadata.selected.score is None - chain.update_with_delayed_score(event=selection_metadata, score=100) + chain.update_with_delayed_score(chain_response=response, score=100) assert selection_metadata.selected.score == 100.0 From 2c877a4a3400b27e1e698c4736c48d3e0bf7372c Mon Sep 17 00:00:00 2001 From: olgavrou Date: Thu, 31 Aug 2023 20:14:41 -0400 Subject: [PATCH 31/65] proper embeddings and rolling window average --- .../langchain/chains/rl_chain/__init__.py | 12 +- .../langchain/chains/rl_chain/base.py | 29 ++-- .../langchain/chains/rl_chain/metrics.py | 65 ++++++-- .../rl_chain/test_pick_best_chain_call.py | 22 ++- .../rl_chain/test_pick_best_text_embedder.py | 58 ++++---- .../rl_chain/test_rl_chain_base_embedder.py | 140 +++++++++--------- 6 files changed, 184 insertions(+), 142 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index 6d5cfc3e29..3a14861bd7 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -9,8 +9,14 @@ from langchain.chains.rl_chain.base import ( SelectionScorer, ToSelectFrom, VwPolicy, + embed, + stringify_embedding, +) +from langchain.chains.rl_chain.pick_best_chain import ( + PickBest, + PickBestEvent, + PickBestSelected, ) -from langchain.chains.rl_chain.pick_best_chain import PickBest def configure_logger() -> None: @@ -29,6 +35,8 @@ configure_logger() __all__ = [ "PickBest", + "PickBestEvent", + "PickBestSelected", "Embed", "BasedOn", "ToSelectFrom", @@ -37,4 +45,6 @@ __all__ = [ "Embedder", "Policy", "VwPolicy", + "embed", + "stringify_embedding", ] diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index d08200c709..6e01bb5063 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ 
b/libs/langchain/langchain/chains/rl_chain/base.py @@ -19,7 +19,10 @@ from typing import ( from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain -from langchain.chains.rl_chain.metrics import MetricsTracker +from langchain.chains.rl_chain.metrics import ( + MetricsTrackerAverage, + MetricsTrackerRollingWindow, +) from langchain.chains.rl_chain.model_repository import ModelRepository from langchain.chains.rl_chain.vw_logger import VwLogger from langchain.prompts import ( @@ -98,6 +101,10 @@ def EmbedAndKeep(anything: Any) -> Any: # helper functions +def stringify_embedding(embedding: List) -> str: + return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) + + def parse_lines(parser: "vw.TextFormatParser", input_str: str) -> List["vw.Example"]: return [parser.parse_line(line) for line in input_str.split("\n")] @@ -346,7 +353,7 @@ class RLChain(Chain, Generic[TEvent]): selection_scorer_activated: bool = True selected_input_key = "rl_chain_selected" selected_based_on_input_key = "rl_chain_selected_based_on" - metrics: Optional[MetricsTracker] = None + metrics: Optional[Union[MetricsTrackerRollingWindow, MetricsTrackerAverage]] = None def __init__( self, @@ -357,6 +364,7 @@ class RLChain(Chain, Generic[TEvent]): policy: Type[Policy] = VwPolicy, vw_logs: Optional[Union[str, os.PathLike]] = None, metrics_step: int = -1, + metrics_window_size: int = -1, *args: Any, **kwargs: Any, ): @@ -378,7 +386,12 @@ class RLChain(Chain, Generic[TEvent]): vw_logger=VwLogger(vw_logs), ) - self.metrics = MetricsTracker(step=metrics_step) + if metrics_window_size > 0: + self.metrics = MetricsTrackerRollingWindow( + step=metrics_step, window_size=metrics_window_size + ) + else: + self.metrics = MetricsTrackerAverage(step=metrics_step) class Config: """Configuration for this pydantic object.""" @@ -523,8 +536,9 @@ class RLChain(Chain, Generic[TEvent]): f"The selection scorer was not able to score, \ and the chain was not able to adjust to this response, error: {e}" ) - if self.metrics: + if self.metrics and score is not None: self.metrics.on_feedback(score) + event = self._call_after_scoring_before_learning(score=score, event=event) self.active_policy.learn(event=event) self.active_policy.log(event=event) @@ -547,16 +561,13 @@ def embed_string_type( item: Union[str, _Embed], model: Any, namespace: Optional[str] = None ) -> Dict[str, Union[str, List[str]]]: """Helper function to embed a string or an _Embed object.""" - join_char = "" keep_str = "" if isinstance(item, _Embed): - encoded = model.encode(item.value) - join_char = " " + encoded = stringify_embedding(model.encode(item.value)) if item.keep: keep_str = item.value.replace(" ", "_") + " " elif isinstance(item, str): encoded = item.replace(" ", "_") - join_char = "" else: raise ValueError(f"Unsupported type {type(item)} for embedding") @@ -566,7 +577,7 @@ def embed_string_type( provided when embedding a string or _Embed object." 
) - return {namespace: keep_str + join_char.join(map(str, encoded))} + return {namespace: keep_str + encoded} def embed_dict_type(item: Dict, model: Any) -> Dict[str, Any]: diff --git a/libs/langchain/langchain/chains/rl_chain/metrics.py b/libs/langchain/langchain/chains/rl_chain/metrics.py index 4d6306f776..4bd65da3ae 100644 --- a/libs/langchain/langchain/chains/rl_chain/metrics.py +++ b/libs/langchain/langchain/chains/rl_chain/metrics.py @@ -1,31 +1,66 @@ -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from collections import deque +from typing import TYPE_CHECKING, Dict, List, Union if TYPE_CHECKING: import pandas as pd -class MetricsTracker: +class MetricsTrackerAverage: def __init__(self, step: int): - self._history: List[Dict[str, Union[int, float]]] = [] - self._step: int = step - self._i: int = 0 - self._num: float = 0 - self._denom: float = 0 + self.history: List[Dict[str, Union[int, float]]] = [{"step": 0, "score": 0}] + self.step: int = step + self.i: int = 0 + self.num: float = 0 + self.denom: float = 0 @property def score(self) -> float: - return self._num / self._denom if self._denom > 0 else 0 + return self.num / self.denom if self.denom > 0 else 0 def on_decision(self) -> None: - self._denom += 1 + self.denom += 1 - def on_feedback(self, score: Optional[float]) -> None: - self._num += score or 0 - self._i += 1 - if self._step > 0 and self._i % self._step == 0: - self._history.append({"step": self._i, "score": self.score}) + def on_feedback(self, score: float) -> None: + self.num += score or 0 + self.i += 1 + if self.step > 0 and self.i % self.step == 0: + self.history.append({"step": self.i, "score": self.score}) def to_pandas(self) -> "pd.DataFrame": import pandas as pd - return pd.DataFrame(self._history) + return pd.DataFrame(self.history) + + +class MetricsTrackerRollingWindow: + def __init__(self, window_size: int, step: int): + self.history: List[Dict[str, Union[int, float]]] = [{"step": 0, "score": 0}] + self.step: int = step + self.i: int = 0 + self.window_size: int = window_size + self.queue: deque = deque() + self.sum: float = 0.0 + + @property + def score(self) -> float: + return self.sum / len(self.queue) if len(self.queue) > 0 else 0 + + def on_decision(self) -> None: + pass + + def on_feedback(self, value: float) -> None: + self.sum += value + self.queue.append(value) + self.i += 1 + + if len(self.queue) > self.window_size: + old_val = self.queue.popleft() + self.sum -= old_val + + if self.step > 0 and self.i % self.step == 0: + self.history.append({"step": self.i, "score": self.sum / len(self.queue)}) + + def to_pandas(self) -> "pd.DataFrame": + import pandas as pd + + return pd.DataFrame(self.history) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index d4576ce254..7bfa5ad550 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -8,7 +8,7 @@ import langchain.chains.rl_chain.pick_best_chain as pick_best_chain from langchain.chat_models import FakeListChatModel from langchain.prompts.prompt import PromptTemplate -encoded_text = "[ e n c o d e d ] " +encoded_keyword = "[encoded]" @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -176,15 +176,13 @@ def test_auto_embeddings_on() -> None: str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - 
encoded_str2 = encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" - ctx_str_2 = "context2" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - encoded_text + " ".join(char for char in ctx_str_2) + encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) expected = f"""shared |User {ctx_str_1 + " " + encoded_ctx_str_1} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {str3 + " " + encoded_str3} """ # noqa @@ -262,15 +260,15 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) + encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) expected = f"""shared |User {encoded_ctx_str_1} |User2 {ctx_str_2 + " " + encoded_ctx_str_2} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {encoded_str3} """ # noqa diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index c49bacac60..8683e3b0e5 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -4,7 +4,7 @@ from test_utils import MockEncoder import langchain.chains.rl_chain.base as rl_chain import langchain.chains.rl_chain.pick_best_chain as pick_best_chain -encoded_text = "[ e n c o d e d ] " +encoded_keyword = "[encoded]" @pytest.mark.requires("vowpal_wabbit_next") @@ -80,12 +80,12 @@ def test_pickbest_textembedder_w_full_label_w_emb() -> None: str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) named_actions = {"action1": rl_chain.Embed([str1, str2, str3])} context = {"context": rl_chain.Embed(ctx_str_1)} @@ -104,12 +104,12 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: str1 = "0" str2 = "1" str3 = "2" - 
encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) + encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) named_actions = {"action1": rl_chain.EmbedAndKeep([str1, str2, str3])} context = {"context": rl_chain.EmbedAndKeep(ctx_str_1)} @@ -170,14 +170,14 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) + encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) named_actions = {"action1": rl_chain.Embed([{"a": str1, "b": str1}, str2, str3])} context = { @@ -203,14 +203,14 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_str2 = encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_ctx_str_1 = encoded_text + " ".join(char for char in ctx_str_1) - encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) + encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) named_actions = { "action1": rl_chain.EmbedAndKeep([{"a": str1, "b": str1}, str2, str3]) @@ -236,14 +236,12 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> N str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_text + " ".join(char for char in ctx_str_1) - encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) named_actions = { "action1": [ @@ -270,14 +268,12 @@ def 
test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() str1 = "0" str2 = "1" str3 = "2" - encoded_str1 = encoded_text + " ".join(char for char in str1) - encoded_text + " ".join(char for char in str2) - encoded_str3 = encoded_text + " ".join(char for char in str3) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) + encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) ctx_str_1 = "context1" ctx_str_2 = "context2" - encoded_text + " ".join(char for char in ctx_str_1) - encoded_ctx_str_2 = encoded_text + " ".join(char for char in ctx_str_2) + encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) named_actions = { "action1": [ @@ -305,11 +301,11 @@ def test_raw_features_underscored() -> None: feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) str1 = "this is a long string" str1_underscored = str1.replace(" ", "_") - encoded_str1 = encoded_text + " ".join(char for char in str1) + encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) ctx_str = "this is a long context" ctx_str_underscored = ctx_str.replace(" ", "_") - encoded_ctx_str = encoded_text + " ".join(char for char in ctx_str) + encoded_ctx_str = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str)) # No embeddings named_actions = {"action": [str1]} diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py index bd0cc584ef..1928eb26c6 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py @@ -5,7 +5,7 @@ from test_utils import MockEncoder import langchain.chains.rl_chain.base as base -encoded_text = "[ e n c o d e d ] " +encoded_keyword = "[encoded]" @pytest.mark.requires("vowpal_wabbit_next") @@ -17,12 +17,10 @@ def test_simple_context_str_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_simple_context_str_w_emb() -> None: str1 = "test" - encoded_str1 = " ".join(char for char in str1) - expected = [{"a_namespace": encoded_text + encoded_str1}] + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + expected = [{"a_namespace": encoded_str1}] assert base.embed(base.Embed(str1), MockEncoder(), "a_namespace") == expected - expected_embed_and_keep = [ - {"a_namespace": str1 + " " + encoded_text + encoded_str1} - ] + expected_embed_and_keep = [{"a_namespace": str1 + " " + encoded_str1}] assert ( base.embed(base.EmbedAndKeep(str1), MockEncoder(), "a_namespace") == expected_embed_and_keep @@ -33,14 +31,14 @@ def test_simple_context_str_w_emb() -> None: def test_simple_context_str_w_nested_emb() -> None: # nested embeddings, innermost wins str1 = "test" - encoded_str1 = " ".join(char for char in str1) - expected = [{"a_namespace": encoded_text + encoded_str1}] + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + expected = [{"a_namespace": encoded_str1}] assert ( base.embed(base.EmbedAndKeep(base.Embed(str1)), MockEncoder(), "a_namespace") == expected ) - expected2 = [{"a_namespace": str1 + " " + encoded_text + encoded_str1}] + expected2 = [{"a_namespace": str1 + " " + encoded_str1}] assert ( base.embed(base.Embed(base.EmbedAndKeep(str1)), MockEncoder(), "a_namespace") == expected2 @@ -56,12 +54,10 @@ def test_context_w_namespace_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def 
test_context_w_namespace_w_emb() -> None: str1 = "test" - encoded_str1 = " ".join(char for char in str1) - expected = [{"test_namespace": encoded_text + encoded_str1}] + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + expected = [{"test_namespace": encoded_str1}] assert base.embed({"test_namespace": base.Embed(str1)}, MockEncoder()) == expected - expected_embed_and_keep = [ - {"test_namespace": str1 + " " + encoded_text + encoded_str1} - ] + expected_embed_and_keep = [{"test_namespace": str1 + " " + encoded_str1}] assert ( base.embed({"test_namespace": base.EmbedAndKeep(str1)}, MockEncoder()) == expected_embed_and_keep @@ -71,12 +67,10 @@ def test_context_w_namespace_w_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_context_w_namespace_w_emb2() -> None: str1 = "test" - encoded_str1 = " ".join(char for char in str1) - expected = [{"test_namespace": encoded_text + encoded_str1}] + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + expected = [{"test_namespace": encoded_str1}] assert base.embed(base.Embed({"test_namespace": str1}), MockEncoder()) == expected - expected_embed_and_keep = [ - {"test_namespace": str1 + " " + encoded_text + encoded_str1} - ] + expected_embed_and_keep = [{"test_namespace": str1 + " " + encoded_str1}] assert ( base.embed(base.EmbedAndKeep({"test_namespace": str1}), MockEncoder()) == expected_embed_and_keep @@ -87,10 +81,8 @@ def test_context_w_namespace_w_emb2() -> None: def test_context_w_namespace_w_some_emb() -> None: str1 = "test1" str2 = "test2" - encoded_str2 = " ".join(char for char in str2) - expected = [ - {"test_namespace": str1, "test_namespace2": encoded_text + encoded_str2} - ] + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + expected = [{"test_namespace": str1, "test_namespace2": encoded_str2}] assert ( base.embed( {"test_namespace": str1, "test_namespace2": base.Embed(str2)}, MockEncoder() @@ -100,7 +92,7 @@ def test_context_w_namespace_w_some_emb() -> None: expected_embed_and_keep = [ { "test_namespace": str1, - "test_namespace2": str2 + " " + encoded_text + encoded_str2, + "test_namespace2": str2 + " " + encoded_str2, } ] assert ( @@ -127,22 +119,22 @@ def test_simple_action_strlist_w_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" - encoded_str1 = " ".join(char for char in str1) - encoded_str2 = " ".join(char for char in str2) - encoded_str3 = " ".join(char for char in str3) + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [ - {"a_namespace": encoded_text + encoded_str1}, - {"a_namespace": encoded_text + encoded_str2}, - {"a_namespace": encoded_text + encoded_str3}, + {"a_namespace": encoded_str1}, + {"a_namespace": encoded_str2}, + {"a_namespace": encoded_str3}, ] assert ( base.embed(base.Embed([str1, str2, str3]), MockEncoder(), "a_namespace") == expected ) expected_embed_and_keep = [ - {"a_namespace": str1 + " " + encoded_text + encoded_str1}, - {"a_namespace": str2 + " " + encoded_text + encoded_str2}, - {"a_namespace": str3 + " " + encoded_text + encoded_str3}, + {"a_namespace": str1 + " " + encoded_str1}, + {"a_namespace": str2 + " " + encoded_str2}, + {"a_namespace": str3 + " " + encoded_str3}, ] assert ( base.embed(base.EmbedAndKeep([str1, str2, str3]), MockEncoder(), "a_namespace") @@ -155,12 +147,12 @@ def test_simple_action_strlist_w_some_emb() -> None: str1 = 
"test1" str2 = "test2" str3 = "test3" - encoded_str2 = " ".join(char for char in str2) - encoded_str3 = " ".join(char for char in str3) + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [ {"a_namespace": str1}, - {"a_namespace": encoded_text + encoded_str2}, - {"a_namespace": encoded_text + encoded_str3}, + {"a_namespace": encoded_str2}, + {"a_namespace": encoded_str3}, ] assert ( base.embed( @@ -170,8 +162,8 @@ def test_simple_action_strlist_w_some_emb() -> None: ) expected_embed_and_keep = [ {"a_namespace": str1}, - {"a_namespace": str2 + " " + encoded_text + encoded_str2}, - {"a_namespace": str3 + " " + encoded_text + encoded_str3}, + {"a_namespace": str2 + " " + encoded_str2}, + {"a_namespace": str3 + " " + encoded_str3}, ] assert ( base.embed( @@ -211,13 +203,13 @@ def test_action_w_namespace_w_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" - encoded_str1 = " ".join(char for char in str1) - encoded_str2 = " ".join(char for char in str2) - encoded_str3 = " ".join(char for char in str3) + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [ - {"test_namespace": encoded_text + encoded_str1}, - {"test_namespace": encoded_text + encoded_str2}, - {"test_namespace": encoded_text + encoded_str3}, + {"test_namespace": encoded_str1}, + {"test_namespace": encoded_str2}, + {"test_namespace": encoded_str3}, ] assert ( base.embed( @@ -231,9 +223,9 @@ def test_action_w_namespace_w_emb() -> None: == expected ) expected_embed_and_keep = [ - {"test_namespace": str1 + " " + encoded_text + encoded_str1}, - {"test_namespace": str2 + " " + encoded_text + encoded_str2}, - {"test_namespace": str3 + " " + encoded_text + encoded_str3}, + {"test_namespace": str1 + " " + encoded_str1}, + {"test_namespace": str2 + " " + encoded_str2}, + {"test_namespace": str3 + " " + encoded_str3}, ] assert ( base.embed( @@ -253,13 +245,13 @@ def test_action_w_namespace_w_emb2() -> None: str1 = "test1" str2 = "test2" str3 = "test3" - encoded_str1 = " ".join(char for char in str1) - encoded_str2 = " ".join(char for char in str2) - encoded_str3 = " ".join(char for char in str3) + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [ - {"test_namespace1": encoded_text + encoded_str1}, - {"test_namespace2": encoded_text + encoded_str2}, - {"test_namespace3": encoded_text + encoded_str3}, + {"test_namespace1": encoded_str1}, + {"test_namespace2": encoded_str2}, + {"test_namespace3": encoded_str3}, ] assert ( base.embed( @@ -275,9 +267,9 @@ def test_action_w_namespace_w_emb2() -> None: == expected ) expected_embed_and_keep = [ - {"test_namespace1": str1 + " " + encoded_text + encoded_str1}, - {"test_namespace2": str2 + " " + encoded_text + encoded_str2}, - {"test_namespace3": str3 + " " + encoded_text + encoded_str3}, + {"test_namespace1": str1 + " " + encoded_str1}, + {"test_namespace2": str2 + " " + encoded_str2}, + {"test_namespace3": str3 + " " + encoded_str3}, ] assert ( base.embed( @@ -299,12 +291,12 @@ def test_action_w_namespace_w_some_emb() -> None: str1 = "test1" str2 = "test2" str3 = "test3" - encoded_str2 = " ".join(char for char in str2) - encoded_str3 = " 
".join(char for char in str3) + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [ {"test_namespace": str1}, - {"test_namespace": encoded_text + encoded_str2}, - {"test_namespace": encoded_text + encoded_str3}, + {"test_namespace": encoded_str2}, + {"test_namespace": encoded_str3}, ] assert ( base.embed( @@ -319,8 +311,8 @@ def test_action_w_namespace_w_some_emb() -> None: ) expected_embed_and_keep = [ {"test_namespace": str1}, - {"test_namespace": str2 + " " + encoded_text + encoded_str2}, - {"test_namespace": str3 + " " + encoded_text + encoded_str3}, + {"test_namespace": str2 + " " + encoded_str2}, + {"test_namespace": str3 + " " + encoded_str3}, ] assert ( base.embed( @@ -340,13 +332,13 @@ def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict() -> None: str1 = "test1" str2 = "test2" str3 = "test3" - encoded_str1 = " ".join(char for char in str1) - encoded_str2 = " ".join(char for char in str2) - encoded_str3 = " ".join(char for char in str3) + encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1)) + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3)) expected = [ - {"test_namespace": encoded_text + encoded_str1, "test_namespace2": str1}, - {"test_namespace": encoded_text + encoded_str2, "test_namespace2": str2}, - {"test_namespace": encoded_text + encoded_str3, "test_namespace2": str3}, + {"test_namespace": encoded_str1, "test_namespace2": str1}, + {"test_namespace": encoded_str2, "test_namespace2": str2}, + {"test_namespace": encoded_str3, "test_namespace2": str3}, ] assert ( base.embed( @@ -361,15 +353,15 @@ def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict() -> None: ) expected_embed_and_keep = [ { - "test_namespace": str1 + " " + encoded_text + encoded_str1, + "test_namespace": str1 + " " + encoded_str1, "test_namespace2": str1, }, { - "test_namespace": str2 + " " + encoded_text + encoded_str2, + "test_namespace": str2 + " " + encoded_str2, "test_namespace2": str2, }, { - "test_namespace": str3 + " " + encoded_text + encoded_str3, + "test_namespace": str3 + " " + encoded_str3, "test_namespace2": str3, }, ] @@ -398,8 +390,8 @@ def test_one_namespace_w_list_of_features_no_emb() -> None: def test_one_namespace_w_list_of_features_w_some_emb() -> None: str1 = "test1" str2 = "test2" - encoded_str2 = " ".join(char for char in str2) - expected = [{"test_namespace": [str1, encoded_text + encoded_str2]}] + encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2)) + expected = [{"test_namespace": [str1, encoded_str2]}] assert ( base.embed({"test_namespace": [str1, base.Embed(str2)]}, MockEncoder()) == expected From b162f1c8e1e2823fc17242563bb469107651bdb3 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 05:50:15 -0400 Subject: [PATCH 32/65] dot product of encodings as default auto_embed --- .../langchain/chains/rl_chain/base.py | 7 +- .../chains/rl_chain/pick_best_chain.py | 133 ++++++++++++++++-- .../rl_chain/test_pick_best_chain_call.py | 40 +++--- .../rl_chain/test_pick_best_text_embedder.py | 30 ++-- 4 files changed, 163 insertions(+), 47 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 6e01bb5063..66ead42e71 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -229,6 +229,9 @@ 
class VwPolicy(Policy):
 class Embedder(Generic[TEvent], ABC):
+    def __init__(self, *args: Any, **kwargs: Any):
+        pass
+
     @abstractmethod
     def format(self, event: TEvent) -> str:
         ...
@@ -498,8 +501,8 @@ class RLChain(Chain, Generic[TEvent]):
     ) -> Dict[str, Any]:
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
 
-        if self.auto_embed:
-            inputs = prepare_inputs_for_autoembed(inputs=inputs)
+        # if self.auto_embed:
+        #     inputs = prepare_inputs_for_autoembed(inputs=inputs)
 
         event: TEvent = self._call_before_predict(inputs=inputs)
         prediction = self.active_policy.predict(event=event)
diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py
index 04218d2934..5ed32c4cad 100644
--- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py
+++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py
@@ -53,21 +53,25 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]):
         model name (Any, optional): The type of embeddings to be used for feature representation. Defaults to BERT SentenceTransformer.
     """  # noqa E501
 
-    def __init__(self, model: Optional[Any] = None, *args: Any, **kwargs: Any):
+    def __init__(
+        self, auto_embed: bool, model: Optional[Any] = None, *args: Any, **kwargs: Any
+    ):
         super().__init__(*args, **kwargs)
 
         if model is None:
             from sentence_transformers import SentenceTransformer
 
-            model = SentenceTransformer("bert-base-nli-mean-tokens")
+            model = SentenceTransformer("all-mpnet-base-v2")
+            # model = SentenceTransformer("all-MiniLM-L6-v2")
 
         self.model = model
+        self.auto_embed = auto_embed
 
-    def format(self, event: PickBestEvent) -> str:
-        """
-        Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW
-        """
+    @staticmethod
+    def _str(embedding):
+        return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)])
 
+    def get_label(self, event: PickBestEvent) -> tuple:
         cost = None
         if event.selected:
             chosen_action = event.selected.index
@@ -77,7 +81,11 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]):
                 else None
             )
             prob = event.selected.probability
+            return chosen_action, cost, prob
+        else:
+            return None, None, None
 
+    def get_context_and_action_embeddings(self, event: PickBestEvent) -> tuple:
         context_emb = base.embed(event.based_on, self.model) if event.based_on else None
         to_select_from_var_name, to_select_from = next(
             iter(event.to_select_from.items()), (None, None)
@@ -97,6 +105,97 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]):
             raise ValueError(
                 "Context and to_select_from must be provided in the inputs dictionary"
             )
+        return context_emb, action_embs
+
+    def get_indexed_dot_product(self, context_emb: List, action_embs: List) -> Dict:
+        import numpy as np
+
+        unique_contexts = set()
+        for context_item in context_emb:
+            for ns, ee in context_item.items():
+                if isinstance(ee, list):
+                    for ea in ee:
+                        unique_contexts.add(f"{ns}={ea}")
+                else:
+                    unique_contexts.add(f"{ns}={ee}")
+
+        encoded_contexts = self.model.encode(list(unique_contexts))
+        context_embeddings = dict(zip(unique_contexts, encoded_contexts))
+
+        unique_actions = set()
+        for action in action_embs:
+            for ns, e in action.items():
+                if isinstance(e, list):
+                    for ea in e:
+                        unique_actions.add(f"{ns}={ea}")
+                else:
+                    unique_actions.add(f"{ns}={e}")
+
+        encoded_actions = self.model.encode(list(unique_actions))
+        action_embeddings = dict(zip(unique_actions, encoded_actions))
+
+        action_matrix = np.stack([v for k, v in action_embeddings.items()])
+        context_matrix = np.stack([v for k, v in context_embeddings.items()])
+        dot_product_matrix = np.dot(context_matrix, action_matrix.T)
+
+        indexed_dot_product = {}
+
+        for i, context_key in enumerate(context_embeddings.keys()):
+            indexed_dot_product[context_key] = {}
+            for j, action_key in enumerate(action_embeddings.keys()):
+                indexed_dot_product[context_key][action_key] = dot_product_matrix[i, j]
+
+        return indexed_dot_product
+
+    def format_auto_embed_on(self, event: PickBestEvent) -> str:
+        chosen_action, cost, prob = self.get_label(event)
+        context_emb, action_embs = self.get_context_and_action_embeddings(event)
+        indexed_dot_product = self.get_indexed_dot_product(context_emb, action_embs)
+
+        action_lines = []
+        for i, action in enumerate(action_embs):
+            line_parts = []
+            dot_prods = []
+            if cost is not None and chosen_action == i:
+                line_parts.append(f"{chosen_action}:{cost}:{prob}")
+            for ns, action in action.items():
+                line_parts.append(f"|{ns}")
+                elements = action if isinstance(action, list) else [action]
+                nsa = []
+                for elem in elements:
+                    line_parts.append(f"{elem}")
+                    ns_a = f"{ns}={elem}"
+                    nsa.append(ns_a)
+                    for k,v in indexed_dot_product.items():
+                        dot_prods.append(v[ns_a])
+                nsa = " ".join(nsa)
+                line_parts.append(f"|# {nsa}")
+
+            line_parts.append(f"|embedding {self._str(dot_prods)}")
+            action_lines.append(" ".join(line_parts))
+
+        shared = []
+        for item in context_emb:
+            for ns, context in item.items():
+                shared.append(f"|{ns}")
+                elements = context if isinstance(context, list) else [context]
+                nsc = []
+                for elem in elements:
+                    shared.append(f"{elem}")
+                    nsc.append(f"{ns}={elem}")
+                nsc = " ".join(nsc)
+                shared.append(f"|@ {nsc}")
+
+        r = "shared " + " ".join(shared) + "\n" + "\n".join(action_lines)
+        print(r)
+        return r
+
+    def format_auto_embed_off(self, event: PickBestEvent) -> str:
+        """
+        Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW
+        """
+        chosen_action, cost, prob = self.get_label(event)
+        context_emb, action_embs = self.get_context_and_action_embeddings(event)
 
         example_string = ""
         example_string += "shared "
@@ -120,6 +219,12 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]):
         # Strip the last newline
         return example_string[:-1]
 
+    def format(self, event: PickBestEvent) -> str:
+        if self.auto_embed:
+            return self.format_auto_embed_on(event)
+        else:
+            return self.format_auto_embed_off(event)
+
 
 class PickBest(base.RLChain[PickBestEvent]):
     """
@@ -154,12 +259,20 @@ class PickBest(base.RLChain[PickBestEvent]):
         *args: Any,
         **kwargs: Any,
     ):
+        auto_embed = kwargs.get("auto_embed", False)
+
         vw_cmd = kwargs.get("vw_cmd", [])
         if not vw_cmd:
-            vw_cmd = [
+            interactions = ["--interactions=::"]
+            if auto_embed:
+                interactions = [
+                    "--interactions=@#",
+                    "--ignore_linear=@",
+                    "--ignore_linear=#",
+                    "--noconstant",
+                ]
+            vw_cmd = interactions + [
                 "--cb_explore_adf",
-                "--quiet",
-                "--interactions=::",
                 "--coin",
                 "--squarecb",
             ]
         else:
             if "--cb_explore_adf" not in vw_cmd:
                 raise ValueError(
                     "If vw_cmd is specified, it must include --cb_explore_adf"
                 )
@@ -172,7 +285,7 @@ class PickBest(base.RLChain[PickBestEvent]):
 
         feature_embedder = kwargs.get("feature_embedder", None)
         if not feature_embedder:
-            feature_embedder = PickBestFeatureEmbedder()
+            feature_embedder = PickBestFeatureEmbedder(auto_embed=auto_embed)
         kwargs["feature_embedder"] = feature_embedder
 
         super().__init__(*args, **kwargs)
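The diff above is the core of the new auto_embed behavior: every context feature is emitted both as its raw value and as a marker feature in the `@` namespace, every action feature likewise in the `#` namespace, and each action line carries a dense namespace holding the dot products of that action's encoding with every context encoding, which the `--interactions=@#` flag then lets VW cross. The following is a minimal, self-contained sketch of the same featurization, not the chain's actual code: `ToyEncoder` and `dot_product_features` are illustrative names, and the toy encoder stands in for the SentenceTransformer model.

import numpy as np


class ToyEncoder:
    """Stand-in for the sentence transformer: encodes each string to a 2-d vector."""

    def encode(self, texts):
        # Deterministic toy embedding: [length of string, count of lowercase vowels].
        return np.array(
            [[float(len(t)), float(sum(c in "aeiou" for c in t))] for t in texts]
        )


def dot_product_features(context_feats, action_feats, model):
    # Encode every unique "namespace=value" string once, as get_indexed_dot_product does.
    ctx = model.encode(context_feats)  # shape (C, d)
    act = model.encode(action_feats)  # shape (A, d)
    dots = ctx @ act.T  # shape (C, A): every context/action dot product
    lines = []
    for j, a in enumerate(action_feats):
        # One VW line per action: raw value, '#' marker feature, dense dot products.
        dense = " ".join(f"{i}:{dots[i, j]}" for i in range(len(context_feats)))
        lines.append(f"|action {a.split('=', 1)[1]} |# {a} |embedding {dense}")
    shared = " ".join(f"|User {c.split('=', 1)[1]} |@ {c}" for c in context_feats)
    return "shared " + shared + "\n" + "\n".join(lines)


print(dot_product_features(["User=context1"], ["action=0", "action=1"], ToyEncoder()))

For intuition about the numbers in the unit tests added later in this series: their mock encoder maps every string to [1.0, 2.0], so every context/action dot product is 1.0 * 1.0 + 2.0 * 2.0 = 5.0, which is exactly the `dotprod 0:5.0` value the expected VW strings assert (patch 33 renames the dense namespace from `embedding` to `dotprod`).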
diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py
index 7bfa5ad550..3678523a04 100644
--- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py
+++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py
@@ -26,7 +26,7 @@ def test_multiple_ToSelectFrom_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = ["0", "1", "2"] with pytest.raises(ValueError):
@@ -43,7 +43,7 @@ def test_missing_basedOn_from_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = ["0", "1", "2"] with pytest.raises(ValueError):
@@ -56,7 +56,7 @@ def test_ToSelectFrom_not_a_list_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = {"actions": ["0", "1", "2"]} with pytest.raises(ValueError):
@@ -75,7 +75,7 @@ def test_update_with_delayed_score_with_auto_validator_throws() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run(
@@ -98,7 +98,7 @@ def test_update_with_delayed_score_force() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run(
@@ -121,7 +121,7 @@ def test_update_with_delayed_score() -> None: llm=llm, prompt=PROMPT, selection_scorer=None, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run(
@@ -153,7 +153,7 @@ def test_user_defined_scorer() -> None: llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) actions = ["0", "1", "2"] response = chain.run(
@@ -166,11 +166,11 @@ -def test_auto_embeddings_on() -> None: +def test_everything_embedded() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False ) str1 = "0"
@@ -189,8 +189,8 @@ actions = [str1, str2, str3] response = chain.run( -
User=rl_chain.EmbedAndKeep(rl_chain.BasedOn(ctx_str_1)), + action=rl_chain.EmbedAndKeep(rl_chain.ToSelectFrom(actions)), ) selection_metadata = response["selection_metadata"] vw_str = feature_embedder.format(selection_metadata) @@ -200,7 +200,7 @@ def test_auto_embeddings_on() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_auto_embedder_is_off() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder ) @@ -226,7 +226,7 @@ def test_default_auto_embedder_is_off() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_embeddings_off() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False ) @@ -252,7 +252,7 @@ def test_default_embeddings_off() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=True, model=MockEncoder()) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True ) @@ -291,7 +291,7 @@ def test_default_no_scorer_specified() -> None: chain = pick_best_chain.PickBest.from_llm( llm=chain_llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -310,7 +310,7 @@ def test_explicitly_no_scorer() -> None: llm=llm, prompt=PROMPT, selection_scorer=None, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -330,7 +330,7 @@ def test_auto_scorer_with_user_defined_llm() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -348,7 +348,7 @@ def test_calling_chain_w_reserved_inputs_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) with pytest.raises(ValueError): chain.run( @@ -371,7 +371,7 @@ def test_activate_and_deactivate_scorer() -> None: llm=llm, prompt=PROMPT, selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=scorer_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()), + 
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), ) response = chain.run( User=pick_best_chain.base.BasedOn("Context"), diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 8683e3b0e5..734dae8d25 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -9,7 +9,7 @@ encoded_keyword = "[encoded]" @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_context_throws() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_action = {"action": ["0", "1", "2"]} event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_action, based_on={} @@ -20,7 +20,7 @@ def test_pickbest_textembedder_missing_context_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_actions_throws() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) event = pick_best_chain.PickBestEvent( inputs={}, to_select_from={}, based_on={"context": "context"} ) @@ -30,7 +30,7 @@ def test_pickbest_textembedder_missing_actions_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_no_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ event = pick_best_chain.PickBestEvent( @@ -42,7 +42,7 @@ def test_pickbest_textembedder_no_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_label_no_score_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0) @@ -58,7 +58,7 @@ def test_pickbest_textembedder_w_label_no_score_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {"action1": ["0", "1", "2"]} expected = ( """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ @@ -76,7 +76,7 @@ def test_pickbest_textembedder_w_full_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_w_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "0" str2 = "1" str3 = "2" @@ -100,7 +100,7 @@ def 
test_pickbest_textembedder_w_full_label_w_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "0" str2 = "1" str3 = "2" @@ -124,7 +124,7 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_no_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 @@ -137,7 +137,7 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 @@ -151,7 +151,7 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 @@ -165,7 +165,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "0" str2 = "1" @@ -198,7 +198,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep() -> ( None ): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "0" str2 = "1" @@ -231,7 +231,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "0" str2 = "1" @@ 
-263,7 +263,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> N @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "0" str2 = "1" @@ -298,7 +298,7 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() @pytest.mark.requires("vowpal_wabbit_next") def test_raw_features_underscored() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) str1 = "this is a long string" str1_underscored = str1.replace(" ", "_") encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) From ca163f0ee698e3cf7216cf597904c5a855717585 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 07:10:44 -0400 Subject: [PATCH 33/65] fixes and tests --- .../langchain/chains/rl_chain/base.py | 20 +--- .../chains/rl_chain/pick_best_chain.py | 48 ++++---- .../rl_chain/test_pick_best_chain_call.py | 111 +++++++++++++----- .../rl_chain/test_pick_best_text_embedder.py | 60 +++++++--- .../unit_tests/chains/rl_chain/test_utils.py | 12 ++ 5 files changed, 168 insertions(+), 83 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 66ead42e71..c69b21dd1f 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -118,8 +118,7 @@ def get_based_on_and_to_select_from(inputs: Dict[str, Any]) -> Tuple[Dict, Dict] if not to_select_from: raise ValueError( - "No variables using 'ToSelectFrom' found in the inputs. \ - Please include at least one variable containing a list to select from." + "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." # noqa: E501 ) based_on = { @@ -303,9 +302,7 @@ class AutoSelectionScorer(SelectionScorer[Event], BaseModel): return resp except Exception as e: raise RuntimeError( - f"The auto selection scorer did not manage to score the response, \ - there is always the option to try again or tweak the reward prompt.\ - Error: {e}" + f"The auto selection scorer did not manage to score the response, there is always the option to try again or tweak the reward prompt. Error: {e}" # noqa: E501 ) @@ -426,8 +423,7 @@ class RLChain(Chain, Generic[TEvent]): """ # noqa: E501 if self._can_use_selection_scorer() and not force_score: raise RuntimeError( - "The selection scorer is set, and force_score was not set to True. \ - Please set force_score=True to use this function." + "The selection scorer is set, and force_score was not set to True. Please set force_score=True to use this function." # noqa: E501 ) if self.metrics: self.metrics.on_feedback(score) @@ -461,9 +457,7 @@ class RLChain(Chain, Generic[TEvent]): or self.selected_based_on_input_key in inputs.keys() ): raise ValueError( - f"The rl chain does not accept '{self.selected_input_key}' \ - or '{self.selected_based_on_input_key}' as input keys, \ - they are reserved for internal use during auto reward." + f"The rl chain does not accept '{self.selected_input_key}' or '{self.selected_based_on_input_key}' as input keys, they are reserved for internal use during auto reward." 
# noqa: E501 ) def _can_use_selection_scorer(self) -> bool: @@ -501,9 +495,6 @@ class RLChain(Chain, Generic[TEvent]): ) -> Dict[str, Any]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - # if self.auto_embed: - # inputs = prepare_inputs_for_autoembed(inputs=inputs) - event: TEvent = self._call_before_predict(inputs=inputs) prediction = self.active_policy.predict(event=event) if self.metrics: @@ -576,8 +567,7 @@ def embed_string_type( if namespace is None: raise ValueError( - "The default namespace must be \ - provided when embedding a string or _Embed object." + "The default namespace must be provided when embedding a string or _Embed object." # noqa: E501 ) return {namespace: keep_str + encoded} diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 5ed32c4cad..e3e93b138e 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -118,7 +118,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): unique_contexts.add(f"{ns}={ea}") else: unique_contexts.add(f"{ns}={ee}") - + encoded_contexts = self.model.encode(list(unique_contexts)) context_embeddings = dict(zip(unique_contexts, encoded_contexts)) @@ -144,9 +144,9 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): indexed_dot_product[context_key] = {} for j, action_key in enumerate(action_embeddings.keys()): indexed_dot_product[context_key][action_key] = dot_product_matrix[i, j] - + return indexed_dot_product - + def format_auto_embed_on(self, event: PickBestEvent) -> str: chosen_action, cost, prob = self.get_label(event) context_emb, action_embs = self.get_context_and_action_embeddings(event) @@ -166,12 +166,12 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): line_parts.append(f"{elem}") ns_a = f"{ns}={elem}" nsa.append(ns_a) - for k,v in indexed_dot_product.items(): + for k, v in indexed_dot_product.items(): dot_prods.append(v[ns_a]) nsa = " ".join(nsa) line_parts.append(f"|# {nsa}") - line_parts.append(f"|embedding {self._str(dot_prods)}") + line_parts.append(f"|dotprod {self._str(dot_prods)}") action_lines.append(" ".join(line_parts)) shared = [] @@ -186,9 +186,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): nsc = " ".join(nsc) shared.append(f"|@ {nsc}") - r = "shared " + " ".join(shared) + "\n" + "\n".join(action_lines) - print(r) - return r + return "shared " + " ".join(shared) + "\n" + "\n".join(action_lines) def format_auto_embed_off(self, event: PickBestEvent) -> str: """ @@ -262,29 +260,35 @@ class PickBest(base.RLChain[PickBestEvent]): auto_embed = kwargs.get("auto_embed", False) vw_cmd = kwargs.get("vw_cmd", []) - if not vw_cmd: + if vw_cmd: + if "--cb_explore_adf" not in vw_cmd: + raise ValueError( + "If vw_cmd is specified, it must include --cb_explore_adf" + ) + else: interactions = ["--interactions=::"] if auto_embed: interactions = [ "--interactions=@#", "--ignore_linear=@", "--ignore_linear=#", - "--noconstant", ] vw_cmd = interactions + [ "--cb_explore_adf", "--coin", "--squarecb", + "--quiet", ] - else: - if "--cb_explore_adf" not in vw_cmd: - raise ValueError( - "If vw_cmd is specified, it must include --cb_explore_adf" - ) + kwargs["vw_cmd"] = vw_cmd feature_embedder = kwargs.get("feature_embedder", None) - if not feature_embedder: + if feature_embedder: + if "auto_embed" in kwargs: + logger.warning( + "auto_embed will take no effect when explicit feature_embedder is 
provided" # noqa E501 + ) + else: feature_embedder = PickBestFeatureEmbedder(auto_embed=auto_embed) kwargs["feature_embedder"] = feature_embedder @@ -294,23 +298,17 @@ class PickBest(base.RLChain[PickBestEvent]): context, actions = base.get_based_on_and_to_select_from(inputs=inputs) if not actions: raise ValueError( - "No variables using 'ToSelectFrom' found in the inputs. \ - Please include at least one variable containing \ - a list to select from." + "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." # noqa E501 ) if len(list(actions.values())) > 1: raise ValueError( - "Only one variable using 'ToSelectFrom' can be provided in the inputs \ - for the PickBest chain. Please provide only one variable \ - containing a list to select from." + "Only one variable using 'ToSelectFrom' can be provided in the inputs for the PickBest chain. Please provide only one variable containing a list to select from." # noqa E501 ) if not context: raise ValueError( - "No variables using 'BasedOn' found in the inputs. \ - Please include at least one variable containing information \ - to base the selected of ToSelectFrom on." + "No variables using 'BasedOn' found in the inputs. Please include at least one variable containing information to base the selected of ToSelectFrom on." # noqa E501 ) event = PickBestEvent(inputs=inputs, to_select_from=actions, based_on=context) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py index 3678523a04..7eb7ca2aea 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -1,7 +1,7 @@ from typing import Any, Dict import pytest -from test_utils import MockEncoder +from test_utils import MockEncoder, MockEncoderReturnsList import langchain.chains.rl_chain.base as rl_chain import langchain.chains.rl_chain.pick_best_chain as pick_best_chain @@ -26,7 +26,9 @@ def test_multiple_ToSelectFrom_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = ["0", "1", "2"] with pytest.raises(ValueError): @@ -43,7 +45,9 @@ def test_missing_basedOn_from_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = ["0", "1", "2"] with pytest.raises(ValueError): @@ -56,7 +60,9 @@ def test_ToSelectFrom_not_a_list_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = {"actions": ["0", "1", "2"]} with pytest.raises(ValueError): @@ -75,7 +81,9 @@ def test_update_with_delayed_score_with_auto_validator_throws() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), - 
feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = ["0", "1", "2"] response = chain.run( @@ -98,7 +106,9 @@ def test_update_with_delayed_score_force() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = ["0", "1", "2"] response = chain.run( @@ -121,7 +131,9 @@ def test_update_with_delayed_score() -> None: llm=llm, prompt=PROMPT, selection_scorer=None, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = ["0", "1", "2"] response = chain.run( @@ -153,7 +165,9 @@ def test_user_defined_scorer() -> None: llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) actions = ["0", "1", "2"] response = chain.run( @@ -168,7 +182,9 @@ def test_user_defined_scorer() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_everything_embedded() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False ) @@ -200,7 +216,9 @@ def test_everything_embedded() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_auto_embedder_is_off() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder ) @@ -224,9 +242,11 @@ def test_default_auto_embedder_is_off() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_embeddings_off() -> None: +def test_default_w_embeddings_off() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False ) @@ -250,29 +270,54 @@ def test_default_embeddings_off() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: +def test_default_w_embeddings_on() -> None: llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=True, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=True, model=MockEncoderReturnsList() + ) chain = pick_best_chain.PickBest.from_llm( 
llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True ) str1 = "0" str2 = "1" - str3 = "2" - encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) - encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2)) - encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3)) + ctx_str_1 = "context1" + dot_prod = "dotprod 0:5.0" # dot prod of [1.0, 2.0] and [1.0, 2.0] + + expected = f"""shared |User {ctx_str_1} |@ User={ctx_str_1}\n|action {str1} |# action={str1} |{dot_prod}\n|action {str2} |# action={str2} |{dot_prod}""" # noqa + actions = [str1, str2] + + response = chain.run( + User=rl_chain.BasedOn(ctx_str_1), + action=rl_chain.ToSelectFrom(actions), + ) + selection_metadata = response["selection_metadata"] + vw_str = feature_embedder.format(selection_metadata) + assert vw_str == expected + + +@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") +def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: + llm, PROMPT = setup() + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=True, model=MockEncoderReturnsList() + ) + chain = pick_best_chain.PickBest.from_llm( + llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True + ) + + str1 = "0" + str2 = "1" + encoded_str2 = rl_chain.stringify_embedding([1.0, 2.0]) ctx_str_1 = "context1" ctx_str_2 = "context2" + encoded_ctx_str_1 = rl_chain.stringify_embedding([1.0, 2.0]) + dot_prod = "dotprod 0:5.0 1:5.0" # dot prod of [1.0, 2.0] and [1.0, 2.0] - encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_1)) - encoded_ctx_str_2 = rl_chain.stringify_embedding(list(encoded_keyword + ctx_str_2)) - - expected = f"""shared |User {encoded_ctx_str_1} |User2 {ctx_str_2 + " " + encoded_ctx_str_2} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {encoded_str3} """ # noqa + expected = f"""shared |User {encoded_ctx_str_1} |@ User={encoded_ctx_str_1} |User2 {ctx_str_2} |@ User2={ctx_str_2}\n|action {str1} |# action={str1} |{dot_prod}\n|action {encoded_str2} |# action={encoded_str2} |{dot_prod}""" # noqa - actions = [str1, str2, rl_chain.Embed(str3)] + actions = [str1, rl_chain.Embed(str2)] response = chain.run( User=rl_chain.BasedOn(rl_chain.Embed(ctx_str_1)), @@ -291,7 +336,9 @@ def test_default_no_scorer_specified() -> None: chain = pick_best_chain.PickBest.from_llm( llm=chain_llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -310,7 +357,9 @@ def test_explicitly_no_scorer() -> None: llm=llm, prompt=PROMPT, selection_scorer=None, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -330,7 +379,9 @@ def test_auto_scorer_with_user_defined_llm() -> None: llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) response = chain.run( User=rl_chain.BasedOn("Context"), @@ -348,7 +399,9 @@ def 
test_calling_chain_w_reserved_inputs_throws() -> None: chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) with pytest.raises(ValueError): chain.run( @@ -371,7 +424,9 @@ def test_activate_and_deactivate_scorer() -> None: llm=llm, prompt=PROMPT, selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=scorer_llm), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()), + feature_embedder=pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ), ) response = chain.run( User=pick_best_chain.base.BasedOn("Context"), diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py index 734dae8d25..1fdbdff644 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py @@ -9,7 +9,9 @@ encoded_keyword = "[encoded]" @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_context_throws() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_action = {"action": ["0", "1", "2"]} event = pick_best_chain.PickBestEvent( inputs={}, to_select_from=named_action, based_on={} @@ -20,7 +22,9 @@ def test_pickbest_textembedder_missing_context_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_missing_actions_throws() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) event = pick_best_chain.PickBestEvent( inputs={}, to_select_from={}, based_on={"context": "context"} ) @@ -30,7 +34,9 @@ def test_pickbest_textembedder_missing_actions_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_no_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ event = pick_best_chain.PickBestEvent( @@ -42,7 +48,9 @@ def test_pickbest_textembedder_no_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_label_no_score_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_actions = {"action1": ["0", "1", "2"]} expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ selected = pick_best_chain.PickBestSelected(index=0, probability=1.0) @@ -58,7 +66,9 @@ def test_pickbest_textembedder_w_label_no_score_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_no_emb() -> None: - feature_embedder = 
pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_actions = {"action1": ["0", "1", "2"]} expected = ( """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ @@ -76,7 +86,9 @@ def test_pickbest_textembedder_w_full_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_w_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) str1 = "0" str2 = "1" str3 = "2" @@ -100,7 +112,9 @@ def test_pickbest_textembedder_w_full_label_w_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) str1 = "0" str2 = "1" str3 = "2" @@ -124,7 +138,9 @@ def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_no_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 @@ -137,7 +153,9 @@ def test_pickbest_textembedder_more_namespaces_no_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 @@ -151,7 +169,9 @@ def test_pickbest_textembedder_more_namespaces_w_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} context = {"context1": "context1", "context2": "context2"} expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 @@ -165,7 +185,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() -> None: @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + 
auto_embed=False, model=MockEncoder() + ) str1 = "0" str2 = "1" @@ -198,7 +220,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep() -> ( None ): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) str1 = "0" str2 = "1" @@ -231,7 +255,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_kee @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) str1 = "0" str2 = "1" @@ -263,7 +289,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> N @pytest.mark.requires("vowpal_wabbit_next") def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) str1 = "0" str2 = "1" @@ -298,7 +326,9 @@ def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() @pytest.mark.requires("vowpal_wabbit_next") def test_raw_features_underscored() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False, model=MockEncoder()) + feature_embedder = pick_best_chain.PickBestFeatureEmbedder( + auto_embed=False, model=MockEncoder() + ) str1 = "this is a long string" str1_underscored = str1.replace(" ", "_") encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1)) diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py b/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py index 625c37ee00..b2cc90b1bc 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py +++ b/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py @@ -1,3 +1,15 @@ +from typing import Any, List + + class MockEncoder: def encode(self, to_encode: str) -> str: return "[encoded]" + to_encode + + +class MockEncoderReturnsList: + def encode(self, to_encode: Any) -> List: + if isinstance(to_encode, str): + return [1.0, 2.0] + elif isinstance(to_encode, List): + return [[1.0, 2.0] for _ in range(len(to_encode))] + raise ValueError("Invalid input type for unit test") From 67dc1a9dd20ab4a6c41498962fa3bd6e5ae1e710 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 07:36:47 -0400 Subject: [PATCH 34/65] cleanup --- libs/langchain/langchain/chains/rl_chain/__init__.py | 2 ++ libs/langchain/langchain/chains/rl_chain/pick_best_chain.py | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index 3a14861bd7..80242139f5 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -15,6 +15,7 @@ from langchain.chains.rl_chain.base import ( from langchain.chains.rl_chain.pick_best_chain import ( PickBest, PickBestEvent, + PickBestFeatureEmbedder, PickBestSelected, ) @@ -37,6 +38,7 @@ __all__ = [ "PickBest", "PickBestEvent", 
"PickBestSelected", + "PickBestFeatureEmbedder", "Embed", "BasedOn", "ToSelectFrom", diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index e3e93b138e..afc7fc1e30 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -60,9 +60,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): if model is None: from sentence_transformers import SentenceTransformer - model = SentenceTransformer("all-mpnet-base-v2") - # model = SentenceTransformer("all-MiniLM-L6-v2") self.model = model self.auto_embed = auto_embed From 4e9aecda9068806c515dc8dddbcd967845e1be65 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 08:35:29 -0400 Subject: [PATCH 35/65] formatting --- libs/langchain/langchain/chains/rl_chain/pick_best_chain.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index afc7fc1e30..1155d03a1b 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -60,6 +60,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): if model is None: from sentence_transformers import SentenceTransformer + model = SentenceTransformer("all-mpnet-base-v2") self.model = model From 0f7cde023bc259073d5c79d985492623015047ea Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 08:43:48 -0400 Subject: [PATCH 36/65] fix linting errors --- .../langchain/chains/rl_chain/pick_best_chain.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 1155d03a1b..34fc3584f5 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -67,7 +67,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): self.auto_embed = auto_embed @staticmethod - def _str(embedding): + def _str(embedding: List[float]): return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) def get_label(self, event: PickBestEvent) -> tuple: @@ -137,7 +137,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): context_matrix = np.stack([v for k, v in context_embeddings.items()]) dot_product_matrix = np.dot(context_matrix, action_matrix.T) - indexed_dot_product = {} + indexed_dot_product: Dict[Dict] = {} for i, context_key in enumerate(context_embeddings.keys()): indexed_dot_product[context_key] = {} @@ -167,8 +167,8 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): nsa.append(ns_a) for k, v in indexed_dot_product.items(): dot_prods.append(v[ns_a]) - nsa = " ".join(nsa) - line_parts.append(f"|# {nsa}") + nsa_str = " ".join(nsa) + line_parts.append(f"|# {nsa_str}") line_parts.append(f"|dotprod {self._str(dot_prods)}") action_lines.append(" ".join(line_parts)) @@ -182,8 +182,8 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): for elem in elements: shared.append(f"{elem}") nsc.append(f"{ns}={elem}") - nsc = " ".join(nsc) - shared.append(f"|@ {nsc}") + nsc_str = " ".join(nsc) + shared.append(f"|@ {nsc_str}") return "shared " + " ".join(shared) + "\n" + "\n".join(action_lines) From e10980d445a0f0eea98d58d773bd884f2abdb8b3 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 
08:56:34 -0400 Subject: [PATCH 37/65] fix linting error --- libs/langchain/langchain/chains/rl_chain/pick_best_chain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 34fc3584f5..e6a3007a56 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -67,7 +67,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): self.auto_embed = auto_embed @staticmethod - def _str(embedding: List[float]): + def _str(embedding: List[float]) -> str: return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) def get_label(self, event: PickBestEvent) -> tuple: From ae5edefdcdd0c93440512a48cc797c51d639989e Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 16:36:29 -0400 Subject: [PATCH 38/65] cleanup --- .../langchain/chains/rl_chain/base.py | 5 ++-- .../chains/rl_chain/pick_best_chain.py | 24 ++++++++++--------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index c69b21dd1f..4b5ac572f9 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -316,7 +316,7 @@ class RLChain(Chain, Generic[TEvent]): - selection_scorer (Union[SelectionScorer, None]): Scorer for the selection. Can be set to None. - policy (Optional[Policy]): The policy used by the chain to learn to populate a dynamic prompt. - auto_embed (bool): Determines if embedding should be automatic. Default is False. - - metrics (Optional[MetricsTracker]): Tracker for metrics, can be set to None. + - metrics (Optional[Union[MetricsTrackerRollingWindow, MetricsTrackerAverage]]): Tracker for metrics, can be set to None. Initialization Attributes: - feature_embedder (Embedder): Embedder used for the `BasedOn` and `ToSelectFrom` inputs. @@ -325,7 +325,8 @@ class RLChain(Chain, Generic[TEvent]): - vw_cmd (List[str], optional): Command line arguments for the VW model. - policy (Type[VwPolicy]): Policy used by the chain. - vw_logs (Optional[Union[str, os.PathLike]]): Path for the VW logs. - - metrics_step (int): Step for the metrics tracker. Default is -1. + - metrics_step (int): Step for the metrics tracker. Default is -1. If set without metrics_window_size, average metrics will be tracked, otherwise rolling window metrics will be tracked. + - metrics_window_size (int): Window size for the metrics tracker. Default is -1. If set, rolling window metrics will be tracked. Notes: The class initializes the VW model using the provided arguments. If `selection_scorer` is not provided, a warning is logged, indicating that no reinforcement learning will occur unless the `update_with_delayed_score` method is called. 
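The two metrics attributes described in the docstring above select between the trackers it names: `metrics_step` alone gives a running average over the whole session, while adding `metrics_window_size` switches to a rolling window. A small usage sketch follows; it assumes the FakeListLLM fake used elsewhere in this test suite and passes arbitrary step/window values, so treat it as illustrative rather than part of the patch.

from langchain.llms import FakeListLLM
from langchain.prompts import PromptTemplate

import langchain.chains.rl_chain as rl_chain

llm = FakeListLLM(responses=["hey"])
prompt = PromptTemplate(
    input_variables=["User", "action"], template="{User} -> {action}"
)

# metrics_step alone: average tracker, score aggregated since the start of the run.
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=prompt, metrics_step=10)

# metrics_window_size set: rolling-window tracker, only the most recent 25 scored
# events contribute, so the curve reacts to recent policy behaviour.
chain = rl_chain.PickBest.from_llm(
    llm=llm, prompt=prompt, metrics_step=10, metrics_window_size=25
)

# After some chain.run(...) calls, chain.metrics holds the tracked score.

The design point is that the average answers how well the chain has done overall, while the rolling window shows whether the policy is improving right now, which is usually what you want to watch during online learning.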
diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index e6a3007a56..791d12cdb4 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -137,7 +137,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): context_matrix = np.stack([v for k, v in context_embeddings.items()]) dot_product_matrix = np.dot(context_matrix, action_matrix.T) - indexed_dot_product: Dict[Dict] = {} + indexed_dot_product: Dict = {} for i, context_key in enumerate(context_embeddings.keys()): indexed_dot_product[context_key] = {} @@ -258,6 +258,18 @@ class PickBest(base.RLChain[PickBestEvent]): ): auto_embed = kwargs.get("auto_embed", False) + feature_embedder = kwargs.get("feature_embedder", None) + if feature_embedder: + if "auto_embed" in kwargs: + logger.warning( + "auto_embed will take no effect when explicit feature_embedder is provided" # noqa E501 + ) + # turning auto_embed off for cli setting below + auto_embed = False + else: + feature_embedder = PickBestFeatureEmbedder(auto_embed=auto_embed) + kwargs["feature_embedder"] = feature_embedder + vw_cmd = kwargs.get("vw_cmd", []) if vw_cmd: if "--cb_explore_adf" not in vw_cmd: @@ -281,16 +293,6 @@ class PickBest(base.RLChain[PickBestEvent]): kwargs["vw_cmd"] = vw_cmd - feature_embedder = kwargs.get("feature_embedder", None) - if feature_embedder: - if "auto_embed" in kwargs: - logger.warning( - "auto_embed will take no effect when explicit feature_embedder is provided" # noqa E501 - ) - else: - feature_embedder = PickBestFeatureEmbedder(auto_embed=auto_embed) - kwargs["feature_embedder"] = feature_embedder - super().__init__(*args, **kwargs) def _call_before_predict(self, inputs: Dict[str, Any]) -> PickBestEvent: From af4b560b8686a905c0f7d5d877ecae9edb20c306 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 17:28:11 -0400 Subject: [PATCH 39/65] fix poetry after merge --- libs/langchain/poetry.lock | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 0f67f9379a..067524c6ce 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -10939,15 +10939,15 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [extras] -all = ["clarifai", "cohere", "openai", "nlpcloud", "huggingface_hub", "manifest-ml", "elasticsearch", "opensearch-py", "google-search-results", "faiss-cpu", "sentence-transformers", "transformers", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch", "jinja2", "pinecone-client", "pinecone-text", "marqo", "pymongo", "weaviate-client", "redis", "google-api-python-client", "google-auth", "wolframalpha", "qdrant-client", "tensorflow-text", "pypdf", "networkx", "nomic", "aleph-alpha-client", "deeplake", "libdeeplake", "pgvector", "psycopg2-binary", "pyowm", "pytesseract", "html2text", "atlassian-python-api", "gptcache", "duckduckgo-search", "arxiv", "azure-identity", "clickhouse-connect", "azure-cosmos", "lancedb", "langkit", "lark", "pexpect", "pyvespa", "O365", "jq", "docarray", "pdfminer-six", "lxml", "requests-toolbelt", "neo4j", "openlm", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "momento", "singlestoredb", "tigrisdb", "nebula3-python", "awadb", "esprima", "rdflib", "amadeus", "librosa", "python-arango"] -azure = ["azure-identity", "azure-cosmos", "openai", 
"azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-search-documents"] +all = ["O365", "aleph-alpha-client", "amadeus", "arxiv", "atlassian-python-api", "awadb", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clarifai", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "esprima", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jinja2", "jq", "lancedb", "langkit", "lark", "libdeeplake", "librosa", "lxml", "manifest-ml", "marqo", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", "pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "python-arango", "pyvespa", "qdrant-client", "rdflib", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "weaviate-client", "wikipedia", "wolframalpha"] +azure = ["azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "openai"] clarifai = ["clarifai"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -extended-testing = ["amazon-textract-caller", "assemblyai", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "esprima", "jq", "pdfminer-six", "pgvector", "pypdf", "pymupdf", "pypdfium2", "tqdm", "lxml", "atlassian-python-api", "mwparserfromhell", "mwxml", "pandas", "telethon", "psychicapi", "gql", "requests-toolbelt", "html2text", "py-trello", "scikit-learn", "streamlit", "pyspark", "openai", "sympy", "rapidfuzz", "openai", "rank-bm25", "geopandas", "jinja2", "gitpython", "newspaper3k", "feedparser", "xata", "xmltodict", "faiss-cpu", "openapi-schema-pydantic", "markdownify", "dashvector", "sqlite-vss"] +extended-testing = ["amazon-textract-caller", "assemblyai", "atlassian-python-api", "beautifulsoup4", "bibtexparser", "cassio", "chardet", "dashvector", "esprima", "faiss-cpu", "feedparser", "geopandas", "gitpython", "gql", "html2text", "jinja2", "jq", "lxml", "markdownify", "mwparserfromhell", "mwxml", "newspaper3k", "openai", "openai", "openapi-schema-pydantic", "pandas", "pdfminer-six", "pgvector", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "rank-bm25", "rapidfuzz", "requests-toolbelt", "scikit-learn", "sentence-transformers", "sqlite-vss", "streamlit", "sympy", "telethon", "tqdm", "vowpal-wabbit-next", "xata", "xmltodict"] javascript = ["esprima"] -llms = ["clarifai", "cohere", "openai", "openlm", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers"] +llms = ["clarifai", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"] openai = ["openai", "tiktoken"] qdrant = ["qdrant-client"] text-helpers = ["chardet"] @@ -10955,4 +10955,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "b63078268a80c07577b432114302f4f86d47be25b83a245affb0dbc999fb2c1f" +content-hash = "71842b0ce1bd5c663e96a8ef14f71ce42667833cab72de4273ca07241c4465a9" From 62cf108700a8d86121430e2f8ba24f765e1eb868 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 18:08:46 -0400 Subject: [PATCH 40/65] add random policy and 
notebook --- .../reinforcement_learning/rl_chain.ipynb | 646 ++++++++++++++++++ .../langchain/chains/rl_chain/__init__.py | 2 + .../chains/rl_chain/pick_best_chain.py | 15 + libs/langchain/poetry.lock | 4 +- libs/langchain/pyproject.toml | 1 + 5 files changed, 666 insertions(+), 2 deletions(-) create mode 100644 docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb diff --git a/docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb b/docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb new file mode 100644 index 0000000000..6b01620336 --- /dev/null +++ b/docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb @@ -0,0 +1,646 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Learned prompt variable injection via rl chain\n", + "\n", + "The rl_chain is used primarily for prompt variable injection: when we want to enhance a prompt with a value but we are not sure which of the available variable values will make the prompt achieve what we want.\n", + "\n", + "It provides a way to learn a specific prompt engineering policy without fine-tuning the underlying foundational model.\n", + "\n", + "The example laid out below is trivial and a strong llm could make a good variable selection and injection without the intervention of this chain, but it is perfect for showcasing the chain's usage. Advanced options and explanations are provided at the end.\n", + "\n", + "The goal of the below scenario is for the chain to select a meal based on the user-declared preferences, and inject the meal into the prompt template. The final prompt will then be sent to the llm of choice and the llm output will be returned to the user." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# four meals defined, some vegetarian, some not\n", + "\n", + "meals = [\n", + " \"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\",\n", + " \"Chicken Flatbreads with red sauce. 
Italian-Mexican fusion\",\n", + " \"Veggie sweet potato quesadillas with vegan cheese\",\n", + " \"One-Pan Tortelonni bake with peppers and onions\",\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# pick and configure the LLM of your choice\n", + "\n", + "from langchain.llms import OpenAI\n", + "llm = OpenAI(engine=\"text-davinci-003\")\n", + "\n", + "llm.predict(\"are you ready?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Initialize the rl chain with provided defaults\n", + "\n", + "The prompt template which will be used to query the LLM needs to be defined.\n", + "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above; the rl chain will try to pick and inject the best meal.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.prompts import PromptTemplate\n", + "\n", + "# here I am using the variable meal which will be replaced by one of the meals above\n", + "# and some variables like user, preference, and text_to_personalize which I will provide at chain run time\n", + "\n", + "PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: \"{meal}\".\n", + "\n", + "Embed the meal into the given text: \"{text_to_personalize}\".\n", + "\n", + "Prepend a personalized message including the user's name {user} and their preference {preference}.\n", + "\n", + "Make it sound good.\n", + "\"\"\"\n", + "\n", + "PROMPT = PromptTemplate(\n", + " input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], template=PROMPT_TEMPLATE\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, the rl chain's PickBest chain is being initialized. We must provide the llm of choice and the defined prompt. As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import langchain.chains.rl_chain as rl_chain\n", + "\n", + "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the chain is set up I am going to call it with the meals I want to be selected from, and some context based on which the chain will select a meal."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(response[\"response\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is the chain doing\n", + "\n", + "What is happening behind the scenes here is that the rl chain will\n", + "\n", + "- take the meals\n", + "- take the user and their preference\n", + "- based on the user and their preference (context) it will select a meal\n", + "- it will auto-evaluate if that meal selection was good or bad\n", + "- it will finally inject the meal into the prompt and query the llm\n", + "- the user will get the llm response back\n", + "\n", + "Now, the way the chain is doing this is that it is learning a contextual bandit rl model that is trained to make good selections (specifically the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library is being used).\n", + "\n", + "Since this rl model will be untrained when we first start, it might make a random selection that doesn't fit the user and their preferences. But if we give it time to learn the user and their preferences, it should start to make better selections (or quickly learn a good one and just pick that!)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for _ in range(5):\n", + " try:\n", + " response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " except Exception as e:\n", + " print(e)\n", + " print(response[\"response\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## How is the chain learning\n", + "\n", + "The way the chain is learning that Tom prefers vegetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in `BasedOn`.\n", + "\n", + "You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", + "\n", + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to examine the score and other selection metadata, you can do so by examining the metadata object returned by the chain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")\n", + "print(response[\"response\"])\n", + "selection_metadata = response[\"selection_metadata\"]\n", + "print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In a more realistic scenario it is likely that you have a well-defined scoring function for what was selected. For example, you might be doing few-shot prompting and want to select prompt examples for a natural language to sql translation task. In that case the scorer could be: did the sql that was generated run in an sql engine? In that case you want to plug in a scoring function; a rough sketch of one is shown below. In the example that follows, I will just check if the meal picked was vegetarian or not.\n",
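+ "\n", + "The sketch below assumes the llm response is the generated sql, and uses sqlite purely as a stand-in for whatever engine you actually target:\n", + "\n", + "```\n", + "import sqlite3\n", + "\n", + "class SQLValidityScorer(rl_chain.SelectionScorer):\n", + "    def score_response(self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", + "        try:\n", + "            # run the generated sql against a scratch in-memory database\n", + "            sqlite3.connect(\":memory:\").execute(llm_response)\n", + "            return 1.0  # the sql ran without errors\n", + "        except sqlite3.Error:\n", + "            return 0.0  # the sql failed to run\n", + "```"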
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", + " def score_response(\n", + " self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", + "\n", + " print(event.based_on)\n", + " print(event.to_select_from)\n", + "\n", + " # you can build a complex scoring function here\n", + " # it is preferable that the score ranges between 0 and 1 but it is not enforced\n", + "\n", + " selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", + " print(f\"selected meal: {selected_meal}\")\n", + "\n", + " if \"Tom\" in event.based_on[\"user\"]:\n", + " if \"Vegetarian\" in event.based_on[\"preference\"]:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 0.0\n", + " else:\n", + " return 1.0\n", + " else:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 1.0\n", + " else:\n", + " return 0.0\n", + " else:\n", + " raise NotImplementedError(\"I don't know how to score this user\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=CustomSelectionScorer(),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## How can I track the chain's progress\n", + "\n", + "You can track the chain's progress by using the metrics mechanism provided. I am going to expand the users to Tom and Anna, and extend the scoring function. I am going to initialize two chains, one with the default learning policy and one with a built-in random policy (i.e. selects a meal randomly), and plot their scoring progress."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", + " def score_preference(self, preference, selected_meal):\n", + " if \"Vegetarian\" in preference:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 0.0\n", + " else:\n", + " return 1.0\n", + " else:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 1.0\n", + " else:\n", + " return 0.0\n", + " def score_response(\n", + " self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", + "\n", + " selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", + "\n", + " if \"Tom\" in event.based_on[\"user\"]:\n", + " return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", + " elif \"Anna\" in event.based_on[\"user\"]:\n", + " return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", + " else:\n", + " raise NotImplementedError(\"I don't know how to score this user\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=CustomSelectionScorer(),\n", + " metrics_step=5,\n", + " metrics_window_size=5, # rolling window average\n", + ")\n", + "\n", + "random_chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=CustomSelectionScorer(),\n", + " metrics_step=5,\n", + " metrics_window_size=5, # rolling window average\n", + " policy=rl_chain.PickBestRandomPolicy # set the random policy instead of default\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(40):\n", + " try:\n", + " if i % 2:\n", + " chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " random_chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " else:\n", + " chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Anna\"),\n", + " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " random_chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Anna\"),\n", + " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " except Exception as e:\n", + " print(e)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# note matplotlib is not a dependency of langchain so you need to install it\n", + "\n", + "from matplotlib import pyplot as plt\n", + "chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", 
+ "random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", + "plt.legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is a bit of randomness involved in the rl_chain's selection since the chain explores the selection space in order to learn the world as best as it can (see details of default exploration algorithm used [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB)), but overall, default chain policy should be doing better than random as it learns" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Advanced options\n", + "\n", + "The rl chain is highly configurable in order to be able to adjust to various selection scenarios. If you want to learn more about the ML library that powers it please take a look at tutorials [here](https://vowpalwabbit.org/)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "| Section | Description | Example / Usage |\n", + "|---------|-------------|-----------------|\n", + "| [**Set Chain Logging Level**](#set-chain-logging-level) | Set up the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", + "| [**Featurization**](#featurization) | Adjusts the input to the RL chain. Can set auto-embeddings ON for more complex embeddings. | `chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])` |\n", + "| [**Learned Policy to Learn Asynchronously**](#learned-policy-to-learn-asynchronously) | Score asynchronously if user input is needed for scoring. | `chain.update_with_delayed_score(score=, chain_response=response)` |\n", + "| [**Store Progress of Learned Policy**](#store-progress-of-learned-policy) | Option to store the progress of the variable injection learned policy. | `chain.save_progress()` |\n", + "| [**Stop Learning of Learned Policy**](#stop-learning-of-learned-policy) | Toggle the RL chain's learned policy updates ON/OFF. | `chain.deactivate_selection_scorer()` |\n", + "| [**Set a Different Policy**](#set-a-different-policy) | Choose between different policies: default, random, or custom. | Custom policy creation at chain creation time. |\n", + "| [**Different Exploration Algorithms for Default Learned Policy**](#different-exploration-algorithms-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. | `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", + "| [**Learn Policy's Data Logs**](#learn-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", + "| [**Other Advanced Featurization Options**](#other-advanced-featurization-options) | Specify advanced featurization options for the RL chain. | `age = rl_chain.BasedOn(\"age:32\")` |\n", + "| [**More Info on Auto or Custom SelectionScorer**](#more-info-on-auto-or-custom-selectionscorer) | Dive deeper into how selection scoring is determined. | `selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template)` |\n", + "\n", + "\n", + "### set chain logging level\n", + "\n", + "```\n", + "import logging\n", + "logger = logging.getLogger(\"rl_chain\")\n", + "logger.setLevel(logging.INFO)\n", + "```\n", + "\n", + "### featurization\n", + "\n", + "By default the input to the rl chain (`ToSelectFrom`, `BasedOn`) is not tampered with. 
This might not be sufficient featurization, so based on how complex the scenario is you can set auto-embeddings to ON\n", + "\n", + "`chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])`\n", + "\n", + "This will produce more complex embeddings and featurizations of the inputs, likely accelerating RL chain learning, albeit at the cost of increased runtime..\n", + "\n", + "By default, [sbert.net's sentence_transformers's ](https://www.sbert.net/docs/pretrained_models.html#model-overview) `all-mpnet-base-v2` model will be used for these embeddings but you can set a different model by initializing the chain with it, or set an entirely different encoding object as long as it has an `encode` function that returns a list of the encodings:\n", + "\n", + "```\n", + "from sentence_transformers import SentenceTransformer\n", + "\n", + "chain = rl_chain.PickBest.from_llm(\n", + " [...]\n", + " feature_embedder=rl_chain.PickBestFeatureEmbedder(\n", + " auto_embed=True,\n", + " model=SentenceTransformer(\"all-mpnet-base-v2\")\n", + " )\n", + ")\n", + "```\n", + "\n", + "Another option is to define what inputs you think should be embedded manually:\n", + "- `auto_embed = False`\n", + "- Can wrap individual variables in `rl_chain.Embed()` or `rl_chain.EmbedAndKeep()` e.g. `user = rl_chain.BasedOn(rl_chain.Embed(\"Tom\"))`\n", + "\n", + "Final option is to define and set your own feature embedder that returns a valid input for the learned policy.\n", + "\n", + "## learned policy to learn asynchronously\n", + "\n", + "If to score the result you need input from the user (e.g. my application showed Tom the selected meal and Tom clicked on it, but Anna did not), then the scoring can be done asynchronously. The way to do that is:\n", + "\n", + "- set `selection_scorer=None` on the chain creation OR call `chain.deactivate_selection_scorer()`\n", + "- call the chain for a specific input\n", + "- keep the chain's response (`response = chain.run([...])`)\n", + "- once you have determined the score of the response/chain selection call the chain with it: `chain.update_with_delayed_score(score=, chain_response=response)`\n", + "\n", + "### store progress of learned policy\n", + "\n", + "Since the variable injection learned policy evolves over time, there is the option to store its progress and continue learning. This can be done by calling:\n", + "\n", + "`chain.save_progress()`\n", + "\n", + "which will store the rl chain's learned policy in a file called `latest.vw`. It will also store it in a file with a timestamp. 
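+ "\n", + "A minimal sketch of that delayed-scoring flow; the `clicked` signal is made up for illustration and stands in for whatever feedback your application gathers:\n", + "\n", + "```\n", + "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, selection_scorer=None)\n", + "\n", + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")\n", + "\n", + "# ... later, once the user feedback (e.g. a click) has been collected ...\n", + "clicked = True\n", + "chain.update_with_delayed_score(score=1.0 if clicked else 0.0, chain_response=response)\n", + "```\n",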
+ "\n", + "### store progress of learned policy\n", + "\n", + "Since the variable injection learned policy evolves over time, there is the option to store its progress and continue learning. This can be done by calling:\n", + "\n", + "`chain.save_progress()`\n", + "\n", + "which will store the rl chain's learned policy in a file called `latest.vw`. It will also store it in a file with a timestamp. That way, if `save_progress()` is called more than once, multiple checkpoints will be created, but the latest one will always be in `latest.vw`.\n", + "\n", + "The next time the chain is loaded, it will look for a file called `latest.vw` and, if the file exists, it will be loaded into the chain and the learning will continue from there.\n", + "\n", + "By default the rl chain model checkpoints will be stored in the current directory, but you can specify the save/load location at chain creation time:\n", + "\n", + "`chain = rl_chain.PickBest.from_llm(model_save_dir=<path>, [...])`\n", + "\n", + "### stop learning of learned policy\n", + "\n", + "If you want the rl chain's learned policy to stop updating, you can turn it off/on:\n", + "\n", + "`chain.deactivate_selection_scorer()` and `chain.activate_selection_scorer()`\n", + "\n", + "### set a different policy\n", + "\n", + "There are two built-in policies, and custom ones can be added:\n", + "\n", + "- default policy: `VwPolicy` which learns a [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) [Contextual Bandit](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms) model\n", + "\n", + "- random policy: `RandomPolicy` which doesn't learn anything and just selects a value randomly. This policy can be used to compare other policies against a random baseline.\n", + "\n", + "- custom policies: a custom policy could be created and set at chain creation time, as sketched below\n",
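+ "\n", + "As a toy sketch of a custom policy, one could subclass the `PickBestRandomPolicy` shipped with the chain and override its prediction; everything here besides the class body reuses names defined earlier in this notebook:\n", + "\n", + "```\n", + "class FirstChoicePolicy(rl_chain.PickBestRandomPolicy):\n", + " # deterministic toy policy: put all probability mass on the first candidate\n", + " def predict(self, event: rl_chain.PickBestEvent):\n", + " num_items = len(event.to_select_from)\n", + " return [(i, 1.0 if i == 0 else 0.0) for i in range(num_items)]\n", + "\n", + "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, policy=FirstChoicePolicy)\n", + "```\n",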
+ "\n", + "### different exploration algorithms for the default learned policy\n", + "\n", + "The default `VwPolicy` is initialized with some default arguments. The default exploration algorithm is [SquareCB](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB), but other Contextual Bandit exploration algorithms, as well as other hyperparameters, can be set:\n", + "\n", + "`vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]`\n", + "\n", + "`chain = rl_chain.PickBest.from_llm(vw_cmd = vw_cmd, [...])`\n", + "\n", + "### learned policy's data logs\n", + "\n", + "The `VwPolicy`'s data files can be stored and examined, or used to do [off policy evaluation](https://vowpalwabbit.org/docs/vowpal_wabbit/python/latest/tutorials/off_policy_evaluation.html) for hyperparameter tuning.\n", + "\n", + "The way to do this is to set a log file path to `vw_logs` on chain creation:\n", + "\n", + "`chain = rl_chain.PickBest.from_llm(vw_logs=<path>, [...])`\n", + "\n", + "### other advanced featurization options\n", + "\n", + "Numerical features can be provided explicitly with a colon separator:\n", + "`age = rl_chain.BasedOn(\"age:32\")`\n", + "\n", + "`ToSelectFrom` can be a bit more complex if the scenario demands it; instead of being a list of strings it can be:\n", + "- a list of lists of strings:\n", + " ```\n", + " meal = rl_chain.ToSelectFrom([\n", + " [\"meal 1 name\", \"meal 1 description\"],\n", + " [\"meal 2 name\", \"meal 2 description\"]\n", + " ])\n", + " ```\n", + "- a list of dictionaries:\n", + " ```\n", + " meal = rl_chain.ToSelectFrom([\n", + " {\"name\":\"meal 1 name\", \"description\" : \"meal 1 description\"},\n", + " {\"name\":\"meal 2 name\", \"description\" : \"meal 2 description\"}\n", + " ])\n", + " ```\n", + "- a list of dictionaries containing lists:\n", + " ```\n", + " meal = rl_chain.ToSelectFrom([\n", + " {\"name\":[\"meal 1\", \"complex name\"], \"description\" : \"meal 1 description\"},\n", + " {\"name\":[\"meal 2\", \"complex name\"], \"description\" : \"meal 2 description\"}\n", + " ])\n", + " ```\n", + "\n", + "`BasedOn` can also take a list of strings:\n", + "```\n", + "user = rl_chain.BasedOn([\"Tom Joe\", \"age:32\", \"state of california\"])\n", + "```\n", + "\n", + "There is no dictionary option here, since multiple variables can each be supplied wrapped in `BasedOn`.\n", + "\n", + "Storing the data logs in a file allows you to examine what different inputs do to the data format.\n", + "\n", + "### More info on Auto or Custom SelectionScorer\n", + "\n", + "The selection scorer is very important to get right since the policy uses it to learn. It determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n", + "\n", + "The general advice is to keep the score between [0, 1], 0 being the worst selection and 1 being the best selection from the available `ToSelectFrom` variables, based on the `BasedOn` variables, but this can be adjusted if the need arises.\n", + "\n", + "In the examples provided above, the AutoSelectionScorer is used mostly to get users started, but in real-world scenarios it will most likely not be an adequate scorer function.\n", + "\n", + "The examples also showed the option to change part of the scoring prompt template that the AutoSelectionScorer uses to determine whether a selection was good or not:\n", + "\n", + "```\n", + "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", + ")\n", + "```\n", + "\n", + "Internally, the AutoSelectionScorer adjusts the scoring prompt to make sure that the llm scoring returns a single float.\n", + "\n", + "However, if needed, a FULL scoring prompt can also be provided:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.prompts.prompt import PromptTemplate\n", + "import langchain\n", + "langchain.debug = True\n", + "\n", + "REWARD_PROMPT_TEMPLATE = \"\"\"Given {preference} rank how good or bad this selection is {meal}, IMPORTANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\"\"\n", + "\n", + "\n", + "REWARD_PROMPT = PromptTemplate(\n", + " input_variables=[\"preference\", \"meal\"],\n", + " template=REWARD_PROMPT_TEMPLATE,\n", + ")\n", + "\n", + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n", + ")\n", + "\n", + "chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/langchain/langchain/chains/rl_chain/__init__.py index
80242139f5..f112dcea09 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/langchain/langchain/chains/rl_chain/__init__.py @@ -16,6 +16,7 @@ from langchain.chains.rl_chain.pick_best_chain import ( PickBest, PickBestEvent, PickBestFeatureEmbedder, + PickBestRandomPolicy, PickBestSelected, ) @@ -39,6 +40,7 @@ __all__ = [ "PickBestEvent", "PickBestSelected", "PickBestFeatureEmbedder", + "PickBestRandomPolicy", "Embed", "BasedOn", "ToSelectFrom", diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 791d12cdb4..31f3cece49 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -223,6 +223,21 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): return self.format_auto_embed_off(event) +class PickBestRandomPolicy(base.Policy): + def __init__(self, feature_embedder: base.Embedder, **kwargs): + self.feature_embedder = feature_embedder + + def predict(self, event: PickBestEvent): + num_items = len(event.to_select_from) + return [(i, 1.0 / num_items) for i in range(num_items)] + + def learn(self, event): + pass + + def log(self, event): + pass + + class PickBest(base.RLChain[PickBestEvent]): """ `PickBest` is a class designed to leverage the Vowpal Wabbit (VW) model for reinforcement learning with a context, with the goal of modifying the prompt before the LLM call. diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 067524c6ce..75aeba8d08 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -10939,7 +10939,7 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [extras] -all = ["O365", "aleph-alpha-client", "amadeus", "arxiv", "atlassian-python-api", "awadb", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clarifai", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "esprima", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jinja2", "jq", "lancedb", "langkit", "lark", "libdeeplake", "librosa", "lxml", "manifest-ml", "marqo", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", "pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "python-arango", "pyvespa", "qdrant-client", "rdflib", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "weaviate-client", "wikipedia", "wolframalpha"] +all = ["O365", "aleph-alpha-client", "amadeus", "arxiv", "atlassian-python-api", "awadb", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clarifai", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "esprima", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jinja2", "jq", "lancedb", "langkit", "lark", "libdeeplake", "librosa", "lxml", "manifest-ml", "marqo", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", 
"pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "python-arango", "pyvespa", "qdrant-client", "rdflib", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "vowpal-wabbit-next", "weaviate-client", "wikipedia", "wolframalpha"] azure = ["azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "openai"] clarifai = ["clarifai"] cohere = ["cohere"] @@ -10955,4 +10955,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "71842b0ce1bd5c663e96a8ef14f71ce42667833cab72de4273ca07241c4465a9" +content-hash = "7bffde1b8d57bad4b5a48d73250cb8276eb7e40dfe19f8490d5f4a25cb15322d" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 00b90087ef..198836e418 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -295,6 +295,7 @@ all = [ "amadeus", "librosa", "python-arango", + "vowpal-wabbit-next", ] # An extra used to be able to add extended testing. From f1d144cd6c199ef9c90d69aebf57d6adc0f40b00 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 18:33:05 -0400 Subject: [PATCH 41/65] run notebook and change location --- .../how_to/learned_prompt_optimization.ipynb | 828 ++++++++++++++++++ .../reinforcement_learning/rl_chain.ipynb | 646 -------------- 2 files changed, 828 insertions(+), 646 deletions(-) create mode 100644 docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb delete mode 100644 docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb new file mode 100644 index 0000000000..87bb3613c4 --- /dev/null +++ b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb @@ -0,0 +1,828 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Learned prompt variable injection via rl chain\n", + "\n", + "The rl_chain (reinforcement learning chain) is used primarily for prompt variable injection: when we want to enhance a prompt with a value but we are not sure which of the available variable values will make the prompt achieve what we want.\n", + "\n", + "It provides a way to learn a specific prompt engineering policy without fine tuning the underlying foundational model.\n", + "\n", + "The example layed out below is trivial and a strong llm could make a good variable selection and injection without the intervention of this chain, but it is perfect for showcasing the chain's usage. Advanced options and explanations are provided at the end.\n", + "\n", + "The goal of the below scenario is for the chain to select a meal based on the user declared preferences, and inject the meal into the prompt template. The final prompt will then be sent to the llm of choice and the llm output will be returned to the user." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# four meals defined, some vegetarian some not\n", + "\n", + "meals = [\n", + " \"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\",\n", + " \"Chicken Flatbreads with red sauce. 
Italian-Mexican fusion\",\n", + " \"Veggie sweet potato quesadillas with vegan cheese\",\n", + " \"One-Pan Tortelonni bake with peppers and onions\",\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/olgavrou/langchain/.testlcvenv/lib/python3.8/site-packages/langchain/utils/utils.py:155: UserWarning: WARNING! engine is not default parameter.\n", + " engine was transferred to model_kwargs.\n", + " Please confirm that engine is what you intended.\n", + " warnings.warn(\n" + ] + }, + { + "data": { + "text/plain": [ + "\"\\n\\nYes, I'm ready.\"" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# pick and configure the LLM of your choice\n", + "\n", + "from langchain.llms import OpenAI\n", + "llm = OpenAI(engine=\"text-davinci-003\")\n", + "\n", + "llm.predict(\"are you ready?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Initialize the rl chain with provided defaults\n", + "\n", + "The prompt template which will be used to query the LLM needs to be defined.\n", + "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above; the rl chain will try to pick and inject the best meal.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.prompts import PromptTemplate\n", + "\n", + "# here I am using the variable meal which will be replaced by one of the meals above\n", + "# and some variables like user, preference, and text_to_personalize which I will provide at chain run time\n", + "\n", + "PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: \"{meal}\".\n", + "\n", + "Embed the meal into the given text: \"{text_to_personalize}\".\n", + "\n", + "Prepend a personalized message including the user's name {user} and their preference {preference}.\n", + "\n", + "Make it sound good.\n", + "\"\"\"\n", + "\n", + "PROMPT = PromptTemplate(\n", + " input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], template=PROMPT_TEMPLATE\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, the rl chain's PickBest chain is being initialized. We must provide the llm of choice and the defined prompt. As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/olgavrou/langchain/.testlcvenv/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "import langchain.chains.rl_chain as rl_chain\n", + "\n", + "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the chain is set up I am going to call it with the meals I want to be selected from, and some context based on which the chain will select a meal."
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Hey Tom! Our chefs have put together something special for you this week! We know you're a vegetarian who is ok with regular dairy, so they've crafted a delicious and unique Italian-Mexican fusion dish: Chicken Flatbreads with red sauce. We think you'll absolutely love it!\n" + ] + } + ], + "source": [ + "print(response[\"response\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What is the chain doing\n", + "\n", + "What is happening behind the scenes here is that the rl chain will\n", + "\n", + "- take the meals\n", + "- take the user and their preference\n", + "- based on the user and their preference (context) it will select a meal\n", + "- it will auto-evaluate if that meal selection was good or bad\n", + "- it will finally inject the meal into the prompt and query the llm\n", + "- the user will get the llm response back\n", + "\n", + "Now, the way the chain is doing this is that it is learning a contextual bandit rl model that is trained to make good selections (specifically the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library is being used).\n", + "\n", + "Since this rl model will be untrained when we first start, it might make a random selection that doesn't fit the user and their preferences. But if we give it time to learn the user and their preferences, it should start to make better selections (or quickly learn a good one and just pick that!)." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tom, our chefs have crafted a delicious fusion dish that we think you'll love - Beef Enchiladas with Feta cheese - a Mexican-Greek fusion, and as a vegetarian who can tolerate regular dairy, it's the perfect treat for you!\n", + "Hey Tom! Our master chefs have outdone themselves this week with an amazing dish that you're sure to love. Our specialty dish is a Mexican-Greek fusion of Beef Enchiladas with Feta cheese, and it's perfectly suited for your Vegetarian preferences with regular dairy being ok. Enjoy!\n", + "Hey Tom, we have a special treat for you this week - veggie sweet potato quesadillas with vegan cheese! Our master chefs have put together something delicious and just perfect for your vegetarian preferences, with regular dairy ok as well. We hope you love it!\n", + "Hey Tom, we've got something special for you this week! Our master chefs have crafted delicious veggie sweet potato quesadillas with vegan cheese for our vegetarian friends, but regular dairy is ok too! Enjoy!\n", + "Hey Tom, we've got something special for you this week! Our master chefs have created a delicious veggie sweet potato quesadilla with vegan cheese - perfect for your vegetarian diet, with regular dairy also OK. 
Enjoy!\n" + ] + } + ], + "source": [ + "for _ in range(5):\n", + " try:\n", + " response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " except Exception as e:\n", + " print(e)\n", + " print(response[\"response\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## How is the chain learning\n", + "\n", + "The way the chain is learning that Tom prefers vegetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in `BasedOn`.\n", + "\n", + "You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", + "\n", + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to examine the score and other selection metadata, you can do so by examining the metadata object returned by the chain" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\"Hey Tom! We're so excited for you to try out this week's specialty dish. Our master chefs have put together some delicious veggie sweet potato quesadillas with vegan cheese for you, perfect for vegetarians or anyone who's ok with regular dairy. We can't wait for you to enjoy it!\"\n", + "selected index: 2, score: 0.5\n" + ] + } + ], + "source": [ + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")\n", + "print(response[\"response\"])\n", + "selection_metadata = response[\"selection_metadata\"]\n", + "print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In a more realistic scenario it is likely that you have a well-defined scoring function for what was selected. For example, you might be doing few-shot prompting and want to select prompt examples for a natural language to sql translation task. In that case the scorer could be: did the sql that was generated run in an sql engine? In that case you want to plug in a scoring function. In the example below I will just check if the meal picked was vegetarian or not."
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", + " def score_response(\n", + " self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", + "\n", + " print(event.based_on)\n", + " print(event.to_select_from)\n", + "\n", + " # you can build a complex scoring function here\n", + " # it is preferable that the score ranges between 0 and 1 but it is not enforced\n", + "\n", + " selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", + " print(f\"selected meal: {selected_meal}\")\n", + "\n", + " if \"Tom\" in event.based_on[\"user\"]:\n", + " if \"Vegetarian\" in event.based_on[\"preference\"]:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 0.0\n", + " else:\n", + " return 1.0\n", + " else:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 1.0\n", + " else:\n", + " return 0.0\n", + " else:\n", + " raise NotImplementedError(\"I don't know how to score this user\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=CustomSelectionScorer(),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'user': ['Tom'], 'preference': ['Vegetarian', 'regular dairy is ok']}\n", + "{'meal': ['Beef Enchiladas with Feta cheese. Mexican-Greek fusion', 'Chicken Flatbreads with red sauce. Italian-Mexican fusion', 'Veggie sweet potato quesadillas with vegan cheese', 'One-Pan Tortelonni bake with peppers and onions']}\n", + "selected meal: Veggie sweet potato quesadillas with vegan cheese\n" + ] + } + ], + "source": [ + "response = chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## How can I track the chain's progress\n", + "\n", + "You can track the chain's progress by using the metrics mechanism provided. I am going to expand the users to Tom and Anna, and extend the scoring function. I am going to initialize two chains, one with the default learning policy and one with a built-in random policy (i.e. selects a meal randomly), and plot their scoring progress."
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", + " def score_preference(self, preference, selected_meal):\n", + " if \"Vegetarian\" in preference:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 0.0\n", + " else:\n", + " return 1.0\n", + " else:\n", + " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", + " return 1.0\n", + " else:\n", + " return 0.0\n", + " def score_response(\n", + " self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", + "\n", + " selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", + "\n", + " if \"Tom\" in event.based_on[\"user\"]:\n", + " return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", + " elif \"Anna\" in event.based_on[\"user\"]:\n", + " return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", + " else:\n", + " raise NotImplementedError(\"I don't know how to score this user\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=CustomSelectionScorer(),\n", + " metrics_step=5,\n", + " metrics_window_size=5, # rolling window average\n", + ")\n", + "\n", + "random_chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=CustomSelectionScorer(),\n", + " metrics_step=5,\n", + " metrics_window_size=5, # rolling window average\n", + " policy=rl_chain.PickBestRandomPolicy # set the random policy instead of default\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(40):\n", + " try:\n", + " if i % 2:\n", + " chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " random_chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " else:\n", + " chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Anna\"),\n", + " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " random_chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Anna\"),\n", + " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " except Exception as e:\n", + " print(e)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The final average score for the default policy, calculated over a rolling window, is: 1.0\n", + "The final average score for the random policy, calculated over a rolling 
window, is: 0.4\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGdCAYAAADAAnMpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABmX0lEQVR4nO3dd3RU5drG4d+kh4SEHhIIhN5rgAiIoKIREUWPioB0PBZQEFHBAqhHgaP4oYJgRSw0D4qNIkVAECGELh1Ch4SakARSZvb3x4ZIhEDKJDuZ3NdaWWtmsss9KTPP7P3u57UZhmEgIiIiYhE3qwOIiIhI8aZiRERERCylYkREREQspWJERERELKViRERERCylYkREREQspWJERERELKViRERERCzlYXWA7HA4HBw7doySJUtis9msjiMiIiLZYBgG58+fJyQkBDe3rI9/FIli5NixY4SGhlodQ0RERHLh8OHDVK5cOcvvF4lipGTJkoD5ZAICAixOIyIiItmRkJBAaGhoxvt4VopEMXL51ExAQICKERERkSLmRkMsNIBVRERELKViRERERCylYkREREQsVSTGjGSH3W4nLS3N6hgiLsPd3R0PDw9dTi8i+c4lipHExESOHDmCYRhWRxFxKSVKlCA4OBgvLy+ro4iICyvyxYjdbufIkSOUKFGC8uXL61OciBMYhkFqaionT54kJiaGWrVqXbdhkYhIXhT5YiQtLQ3DMChfvjy+vr5WxxFxGb6+vnh6enLw4EFSU1Px8fGxOpKIuCiX+aijIyIizqejISJSEPRKIyIiIpbKcTGycuVKunTpQkhICDabjXnz5t1wneXLl9O8eXO8vb2pWbMmX3zxRS6iur4OHTowdOjQHK0zb948atasibu7e47XvZ7s/m6vtHz5cmw2G+fOnXNajrzIzXOwyj9/92FhYUycONGyPCIiBSnHxUhSUhJNmjRh8uTJ2Vo+JiaGzp07c+utt7Jp0yaGDh3KwIEDWbRoUY7DytUef/xxHnzwQQ4fPswbb7yRL/s4cOAANpuNTZs25cv288vx48fp1KmT1TFyJSoqin//+99WxxARKRA5HsDaqVOnHL3AT506lWrVqjFhwgQA6tWrx6pVq/i///s/IiMjc7p7uUJiYiJxcXFERkYSEhJidZwCYRgGdrsdD48b/+lWrFixABLlj/Lly1sdQUSkwOT71TRr1qyhY8eOmR6LjIy87imFlJQUUlJSMu4nJCTkVzzLJCUl8eSTT/Ldd99RsmRJhg8fftUyKSkpvPzyy8ycOZNz587RsGFDxo8fT4cOHVi+fDm33norALfddhsAv/32G40aNWLw4MGsXLmSs2fPUqNGDV566SW6d++esd2wsDCGDh2a6XfQtGlTunbtypgxY67KUa1aNQCaNWsGQPv27Vm+fHm2nueqVasYOXIk69evp1y5ctx///2MHTsWPz8/AL766ivee+89du3ahZ+fH7fddhsTJ06kQoUKABnPc/78+bzyyits3bqVX3/9lTFjxtC4cWN8fHz49NNP8fLy4oknnsiU32az8f3339O1a1cOHDhAtWrVmDt3Lh988AFr166lVq1aTJ06ldatW2es88knn/D6669z+vRpIiMjadeuHa+//nqWp54ub3fmzJm8//77bNiwgZo1azJ58mTat2+fsdyKFSt4/vnn2bx5M2XKlKFPnz785z//ybKo+ufv6Ny5c7z44ovMmzeP+Ph4atasybhx47j11lsJDg7m888/58EHH8xYf968efTs2ZMTJ07ccLZMkbw4du4C36w9SHKq3eookkf921YjtEwJS/ad78XIiRMnCAoKyvRYUFAQCQkJXLhw4ZqX444dO5bXXnstV/szDIMLadb8U/h6umf7qp7nn3+eFStW8MMPP1ChQgVeeuklNmzYQNOmTTOWGTx4MNu3b2fWrFmEhITw/fffc9ddd7F161batGnDrl27qFOnDnPnzqVNmzaUKVOGkydPEh4ezosvvkhAQAC//PILvXr1okaNGrRq1SpXz2vdunW0atWKJUuW0KBBg2w3wNq3bx933XUX//nPf/j88885efIkgwcPZvDgwUybNg0wL81+4403qFOnDnFxcQwbNoy+ffsyf/78TNsaMWIE77zzDtWrV6d06dIATJ8+nWHDhrF27VrWrFlD3759adu2LXfccUeWmV5++WXeeecdatWqxcsvv0z37t3Zu3cvHh4erF69mieeeILx48dz7733smTJEl599dVsPdfnn3+eiRMnUr9+fd599126dOlCTEwMZcuW5ejRo9x999307duXL7/8kp07d/LYY4/h4+NzzeLvnxwOB506deL8+fN8/fXX1KhRg+3bt+Pu7o6fnx+PPPII06ZNy1SMXL6vQkTy09mkVLp/8icHTydbHUWcoEuTENctRnJj5MiRDBs2LON+QkICoaGh2Vr3Qpqd+qOsGY+y/fVISnjd+EeamJjIZ599xtdff83tt98OmG+slStXzljm0KFDTJs2jUOHDmWcghk+fDgLFy5k2rRpvPXWWxlHD8qUKZNxSqJSpUqZjrI8/fTTLFq0iDlz5uS6GLl8yqBs2bI5OvUxduxYevbsmfHpvlatWrz//vu0b9+eKVOm4OPjQ//+/TOWr169Ou+//z4tW7YkMTERf3//jO+9/vrrVxUZjRs3ZvTo0RnbnjRpEkuXLr1uMTJ8+HA6d+4MwGuvvUaDBg3Yu3cvdevW5YMPPqBTp04ZP7/atWvzxx9/8PPPP9/wuQ4ePJh//etfAEyZMoWFCxfy2Wef8cILL/Dhhx8SGhrKpEmTsNls1K1bl2PHjvHiiy8yatSoG14+u2TJEtatW8eOHTuoXbt2xs/qsoEDB9KmTRuOHz9OcHAwcXFxzJ8/nyVLltwwt0hupaY7eOLraA6eTqZyaV/ua1o8ThW7sqAA63oJ5XsxUrFiRWJjYzM9FhsbS0BAQJZNyry9vfH29s7vaJbZt28fqampREREZDxWpkwZ6tSpk3F/69at2O32jDefy1JSUihbtmyW27bb7bz11lvMmTOHo0ePkpqaSkpKCiVKFHy1u3nzZrZs2cI333yT8ZhhGDgcDmJiYqhXrx7R0dGMGTOGzZs3c/bsWRwOB2AWY/Xr189Yr0WLFldtv3HjxpnuX34jvp4r1wkODgYgLi6OunXrsmvXLu6///5My7dq1SpbxciVp3o8PDxo0aIFO3bsAGDHjh20bt0601Gztm3bZkxjUKVKletue9OmTVSuXPmqv4UrMzZo0IDp06czYsQIvv76a6pWrcott9xyw9wiuWEYBq/O28bamDP4e3vwed+W1A7SUTjJvXwvRlq3bn3VIffFixdnevF2Jl9Pd7a/bs3AWF9Pd6dtKzExEXd3d6Kjo3F3z7zdK48Y/NPbb7/Ne++9x8SJE2nUqBF+fn4MHTqU1NTUjGXc3NyumscnPyYZTExM5PHHH+eZZ5656ntVqlQhKSmJyMhIIiM
j+eabbyhfvjyHDh0iMjIyU14gY4zJlTw9PTPdt9lsGcVMVq5c53JxcKN1rJadzsIDBw5k8uTJjBgxgmnTptGvXz81ApR889mqGGavP4ybDT7o0UyFiORZjouRxMRE9u7dm3E/JiaGTZs2UaZMGapUqcLIkSM5evQoX375JQBPPPEEkyZN4oUXXqB///4sW7aMOXPm8MsvvzjvWVzBZrNl61SJlWrUqIGnpydr167N+FR89uxZdu/enTHosVmzZtjtduLi4mjXrl22t7169Wruu+8+Hn30UcB8o929e3emowzly5fn+PHjGfcTEhKIiYnJcpuXx4jY7Tkbi9O8eXO2b99OzZo1r/n9rVu3cvr0acaNG5dxGm79+vU52ocz1alTh6ioqEyP/fN+Vv7888+MIxHp6elER0czePBgwLyCbO7cuRiGkVEgrF69mpIlS2Y6NZeVxo0bc+TIEXbv3p3l0ZFHH32UF154gffff5/t27fTp0+fbOUWyamlO2J5c7551O+VzvW5tU4FixOJK8hxn5H169fTrFmzjCsrhg0bRrNmzRg1ahRg9nY4dOhQxvLVqlXjl19+YfHixTRp0oQJEybw6aefFuvLev39/RkwYADPP/88y5YtY9u2bfTt2zfT2IHatWvTs2dPevfuzXfffUdMTAzr1q1j7Nix1y3katWqxeLFi/njjz/YsWMHjz/++FWnyW677Ta++uorfv/9d7Zu3UqfPn2uOvpypQoVKuDr68vChQuJjY0lPj4+W8/zxRdf5I8//mDw4MFs2rSJPXv28MMPP2S8SVepUgUvLy8++OAD9u/fz48//phvvVKy4+mnn2b+/Pm8++677Nmzh48++ogFCxZk6wjD5MmT+f7779m5cyeDBg3i7NmzGeNhnnrqKQ4fPszTTz/Nzp07+eGHHxg9ejTDhg3LVrv19u3bc8stt/Cvf/2LxYsXExMTw4IFC1i4cGHGMqVLl+aBBx7g+eef584778xWkSOSUzuOJ/DMzI0YBvSIqEK/tmFWRxIXkeNipEOHDhiGcdXX5a6qX3zxxVWXfXbo0IGNGzeSkpLCvn376Nu3rxOiF21vv/027dq1o0uXLnTs2JGbb76Z8PDwTMtMmzaN3r1789xzz1GnTh26du1KVFTUdccYvPLKKzRv3pzIyEg6dOhAxYoV6dq1a6ZlRo4cSfv27bnnnnvo3LkzXbt2pUaNGllu08PDg/fff5+PPvqIkJAQ7rvvvmw9x8aNG7NixQp2795Nu3btMorWywNyy5cvzxdffMG3335L/fr1GTduHO+88062tp0f2rZty9SpU3n33Xdp0qQJCxcu5Nlnn83WBHHjxo1j3LhxNGnShFWrVvHjjz9Srlw5wBxUPH/+fNatW0eTJk144oknGDBgAK+88kq2s82dO5eWLVvSvXt36tevzwsvvHDVkaoBAwaQmpqaaVCwiLOcPJ/CwOnrSUq106ZGWV67t4FOBYrT2Ix/Dh4ohBISEggMDCQ+Pp6AgIBM37t48SIxMTFUq1ZNs4qK0z322GPs3LmT33///Zrfv9xnZOPGjZkuy7bCV199xbPPPsuxY8eyffn1jej/SwAuptnp8cmfbDh0jmrl/Pj+qTaUKuGcvzFxbdd7/75S4R5cIVLA3nnnHe644w78/PxYsGAB06dP58MPP7Q61nUlJydz/Phxxo0bx+OPP+60QkQEzCtnRszdwoZD5wj09eSzPi1UiIjTadZekSusW7eOO+64g0aNGjF16lTef/99Bg4caHWs6/rvf/9L3bp1qVixIiNHjrQ6jriYyb/tZd6mY3i42ZjSsznVy2d9NZ9Ibuk0jYhkSf9fxduCrcd58psNALx5f0N6RlS1OJEUNdk9TaMjIyIicpWtR+J5ds4mAPq1DVMhIvlKxYiIiGRyIv4iA7+M4mKagw51yvNK5/o3XkkkD1SMiIhIhgupdh77cj2xCSnUquDPB92b4e6mS3glf6kYERERABwOg+e+3cTWo/GU8fPi874tKenjeeMVRfJIxYiIiADwf0t2M3/rCbzc3fioV7hl08lL8aNiREREmLfxKB8sM+cde+uBRrQMK2NxIilOVIwUE3379r2qLXxR0KFDB4YOHZqv+1i+fDk2m41z587l636c4YsvvqBUqVIZ98eMGWN551cp+qIPnuWFuVsAeKJ9DR4M19xGUrBUjEixcq3ipk2bNhw/fpzAwEBrQuXB8OHDWbp0qdUxpAg7cjaZx79aT2q6gzvrB/FCZB2rI0kxpHbwhURqaqraeFvEy8uLihUrWh0jV/z9/fH3V0dMyZ3ElHQGTl/PqcRU6gcH8H/dmuKmK2fEAjoyYpEOHTowePBghg4dSrly5YiMjATg3XffpVGjRvj5+REaGspTTz1FYmJixnqXD9MvWrSIevXq4e/vz1133cXx48czlrHb7QwbNoxSpUpRtmxZXnjhBf7ZaDclJYVnnnmGChUq4OPjw80330xUVFTG9y+fuli0aBHNmjXD19eX2267jbi4OBYsWEC9evUICAigR48eJCcnZ/k8Dx48SJcuXShdujR+fn40aNCA+fPnZ3x/27ZtdOrUCX9/f4KCgujVqxenTp3KcnspKSkMHz6cSpUq4efnR0RExFWzRK9evZoOHTpQokQJSpcuTWRkJGfPnqVv376sWLGC9957D5vNhs1m48CBA9c8TTN37lwaNGiAt7c3YWFhTJgwIdM+wsLCeOutt+jfvz8lS5akSpUqfPzxx1nmhr9/54MHDyYwMJBy5crx6quvZvrdnD17lt69e1O6dGlKlChBp06d2LNnT5bbvNZpms8//zwje3BwMIMHDwagf//+3HPPPZmWTUtLo0KFCnz22WfXzS6ux+4wGDJzIztPnKd8SW8+7dMCP299PhVruF4xYhiQmmTNVw4760+fPh0vLy9Wr17N1KlTAXBzc+P999/nr7/+Yvr06SxbtowXXngh03rJycm88847fPXVV6xcuZJDhw4xfPjwjO9PmDCBL774gs8//5xVq1Zx5swZvv/++0zbeOGFF5g7dy7Tp09nw4YN1KxZk8jISM6cOZNpuTFjxjBp0iT++OMPDh8+zMMPP8zEiROZMWMGv/zyC7/++isffPBBls9x0KBBpKSksHLlSrZu3cr48eMzPsmfO3eO2267jWbNmrF+/XoWLlxIbGwsDz/8cJbbGzx4MGvWrGHWrFls2bKFhx56iLvuuivjDXvTpk3cfvvt1K9fnzVr1rBq1Sq6dOmC3W7nvffeo3Xr1jz22GMcP36c48ePExoaetU+oqOjefjhh3nkkUfYunUrY8aM4dVXX+WLL77ItNyECRNo0aIFGzdu5KmnnuLJJ59k165dWWYH83fu4eHBunXreO+993j33Xf59NNPM77ft29f1q9fz48//siaNWswDIO7776btLS06273silTpjBo0CD+/e9/s3XrVn788Udq1qwJwMCBA1m4cGGmwvXnn38mOTmZbt26ZWv74jrGL9zJ0p1xeHu48UnvFoSU8rU6khRnRhEQHx9vAEZ8fPxV37tw4YKxff
t248KFC+YDKYmGMTrAmq+UxGw/p/bt2xvNmjW74XLffvutUbZs2Yz706ZNMwBj7969GY9NnjzZCAoKyrgfHBxs/Pe//824n5aWZlSuXNm47777DMMwjMTERMPT09P45ptvMpZJTU01QkJCMtb77bffDMBYsmRJxjJjx441AGPfvn0Zjz3++ONGZGRklvkbNWpkjBkz5prfe+ONN4w777wz02OHDx82AGPXrl2GYZg/pyFDhhiGYRgHDx403N3djaNHj2Za5/bbbzdGjhxpGIZhdO/e3Wjbtm2Wea7c3mWXn+vZs2cNwzCMHj16GHfccUemZZ5//nmjfv36GferVq1qPProoxn3HQ6HUaFCBWPKlCnX3Xe9evUMh8OR8diLL75o1KtXzzAMw9i9e7cBGKtXr874/qlTpwxfX19jzpw5hmGYv//AwMCM748ePdpo0qRJxv2QkBDj5ZdfzjJD/fr1jfHjx2fc79Kli9G3b98sl7/q/0tcwqx1B42qL/5sVH3xZ+PHTUdvvIJILl3v/ftKrndkpAgJDw+/6rElS5Zw++23U6lSJUqWLEmvXr04ffp0plMhJUqUoEaNGhn3g4ODiYuLAyA+Pp7jx48TERGR8X0PDw9atGiRcX/fvn2kpaXRtm3bjMc8PT1p1aoVO3bsyJSncePGGbeDgoIoUaIE1atXz/TY5X1fyzPPPMN//vMf2rZty+jRo9myZUvG9zZv3sxvv/2WMe7B39+funXrZmT8p61bt2K326ldu3amdVasWJGx/OUjI3mxY8eOTD8bgLZt27Jnzx7sdnvGY1f+bGw2GxUrVrzuzwLgpptuwmb7+5x869atM7a7Y8cOPDw8Mv3uypYtS506da76vVxLXFwcx44du+7zHzhwINOmTQMgNjaWBQsW0L9//xtuW1zHn/tP8/L32wAY2rEWXZqEWJxIxBUHsHqWgJeOWbfvHPDz88t0/8CBA9xzzz08+eSTvPnmm5QpU4ZVq1YxYMAAUlNTKVHC3L6nZ+aOiDab7aoxIc5y5b5sNts19+1wOLJcf+DAgURGRmac0hk7diwTJkzg6aefJjExkS5dujB+/Pir1gsODr7qscTERNzd3YmOjsbd3T3T9y6f+vH1LbhDzTn9WeS37Dz33r17M2LECNasWcMff/xBtWrVaNeuXQGkk8Lg4Okknvg6mnSHwT2Ngxlyey2rI4kArjhmxGYDLz9rvmx5G4UeHR2Nw+FgwoQJ3HTTTdSuXZtjx3JWWAUGBhIcHMzatWszHktPTyc6Ojrjfo0aNTLGqlyWlpZGVFQU9es7f0Ks0NBQnnjiCb777juee+45PvnkEwCaN2/OX3/9RVhYGDVr1sz09c9CDaBZs2bY7Xbi4uKuWv7y1TCNGze+7qWuXl5emY5uXEu9evUy/WzAHBRbu3btq4qgnLry9wLw559/UqtWLdzd3alXrx7p6emZljl9+jS7du3K1u+lZMmShIWFXff5ly1blq5duzJt2jS++OIL+vXrl/snI0VK/IU0+n8RxbnkNJqEluKdh5pkOkonYiXXK0aKsJo1a5KWlsYHH3zA/v37+eqrrzIGtubEkCFDGDduHPPmzWPnzp089dRTma4U8fPz48knn+T5559n4cKFbN++nccee4zk5GQGDBjgxGcEQ4cOZdGiRcTExLBhwwZ+++036tWrB5iDW8+cOUP37t2Jiopi3759LFq0iH79+l2zYKhduzY9e/akd+/efPfdd8TExLBu3TrGjh3LL7/8AsDIkSOJioriqaeeYsuWLezcuZMpU6ZkXKETFhbG2rVrOXDgAKdOnbrmkYznnnuOpUuX8sYbb7B7926mT5/OpEmTMg0Szq1Dhw4xbNgwdu3axcyZM/nggw8YMmQIALVq1eK+++7jscceY9WqVWzevJlHH32USpUqcd9992Vr+2PGjGHChAm8//777Nmzhw0bNlw1wHjgwIFMnz6dHTt20KdPnzw/Jyn80u0OBs/YwL6TSQQH+vBJr3B8PPNWWIs4k4qRQqRJkya8++67jB8/noYNG/LNN98wduzYHG/nueeeo1evXvTp04fWrVtTsmRJ7r///kzLjBs3jn/961/06tWL5s2bs3fvXhYtWkTp0qWd9XQA8zLjQYMGUa9ePe666y5q167Nhx9+CEBISAirV6/Gbrdz55130qhRI4YOHUqpUqVwc7v2n+a0adPo3bs3zz33HHXq1KFr165ERUVRpUoVwCxYfv31VzZv3kyrVq1o3bo1P/zwAx4e5hnJ4cOH4+7uTv369SlfvjyHDh26ah/Nmzdnzpw5zJo1i4YNGzJq1Chef/11+vbtm+efR+/evblw4QKtWrVi0KBBDBkyhH//+9+Znl94eDj33HMPrVu3xjAM5s+ff9Upoaz06dOHiRMn8uGHH9KgQQPuueeeqy4N7tixI8HBwURGRhISovECxcHrP2/n9z2n8PV059M+LagQ4GN1JJFMbEZ+DTZwooSEBAIDA4mPjycgICDT9y5evEhMTAzVqlXDx0f/YFJ4dejQgaZNmzJx4kRLcyQmJlKpUiWmTZvGAw88cN1l9f9V9H255gCjfvgLmw2mPhpOZIOi2eBPiqbrvX9fyfUGsIrINTkcDk6dOsWECRMoVaoU9957r9WRJJ/9vuckr/20HYAXIuuqEJFCS8WISDFx6NAhqlWrRuXKlfniiy8yTl2Ja9obl8hT32zA7jD4V/PKPNG++o1XErGIXo1ECsg/29YXtLCwsHy7BFwKl7NJqQyYHsX5i+m0DCvNWw801JUzUqhpAKuIiAtJTXfwxNfRHDydTGgZX6Y+Go63h66ckcJNxYiIiIswDINX521jbcwZSnp78FmflpT197Y6lsgNuUwxosPPIs6n/6ui5dPfY5i9/jBuNni/RzNqB5W0OpJIthT5YuRyR8zU1FSLk4i4nstzImW3z4lYZ+mOWN5aYM5h9Ern+txap4LFiUSyr8gPYPXw8KBEiRKcPHkST0/PLJtliUj2GYZBcnIycXFxlCpVKs9t8CV/7TiewDMzN2IY0COiCv3ahlkdSSRHinwxYrPZCA4OJiYmhoMHD1odR8SllCpVKmPeHymcTp5PYeD09SSl2mlToyyv3dtAV85IkVPkixEwJz+rVauWTtWIOJGnp6eOiBRyF9PsPP7Veo6eu0C1cn582LM5nu46OixFj0sUIwBubm5qVy0ixYZhGIyYu4UNh84R6OvJZ31aUKqEl9WxRHJFJbSISBE0+be9zNt0DA83G1N6Nqd6eX+rI4nkmooREZEiZv7W47zz624AXruvAW1qlrM4kUjeqBgRESlCth6JZ9icTQD0axtGz4iq1gYScQIVIyIiRcSJ+IsM/DKKi2kOOtQpzyud61sdScQpVIyIiBQBF1LtPPblemITUqhVwZ8PujfD3U2X8IprUDEiIlLIORwGz327ia1H4ynj58XnfVtS0kddccV1qBgRESnk/m/JbuZvPYGXuxsf9QontEwJqyOJOJWKERGRQmzexqN8sGwvAGMfaETLsDIWJxJxPhUjIiKFVPTBs7wwd
wsAT3aowb/CK1ucSCR/qBgRESmEjpxN5vGv1pOa7uDO+kE8f2cdqyOJ5BsVIyIihUxiSjoDp6/nVGIq9YMD+L9uTXHTlTPiwlSMiIgUInaHwZCZG9l54jzlS3rzaZ8W+Hm7zDRiItekYkREpBAZv3AnS3fG4e3hxie9WxBSytfqSCL5TsWIiEghMTvqEB+v3A/AOw81oWloKWsDiRQQFSMiIoXAn/tP8/L32wAY2rEWXZqEWJxIpOCoGBERsdiBU0k88XU06Q6DLk1CGHJ7LasjiRQoFSMiIhaKv5DGgOlRnEtOo0loKd5+sDE2m66ckeJFxYiIiEXS7Q4Gz9jAvpNJBAf68EmvcHw83a2OJVLgVIyIiFjk9Z+38/ueU/h6uvNpnxZUCPCxOpKIJVSMiIhY4Ms1B/hyzUFsNpj4SFMahARaHUnEMipGREQK2O97TvLaT9sBeCGyLpENKlqcSMRaKkZERArQ3rhEnvpmA3aHwb+aV+aJ9tWtjiRiORUjIiIF5GxSKgOmR3H+Yjotw0rz1gMNdeWMCCpGREQKRGq6gye+jubg6WRCy/gy9dFwvD105YwI5LIYmTx5MmFhYfj4+BAREcG6deuuu/zEiROpU6cOvr6+hIaG8uyzz3Lx4sVcBRYRKWoMw+DVedtYG3OGkt4efNanJWX9va2OJVJo5LgYmT17NsOGDWP06NFs2LCBJk2aEBkZSVxc3DWXnzFjBiNGjGD06NHs2LGDzz77jNmzZ/PSSy/lObyISFHw6e8xzF5/GDcbvN+jGbWDSlodSaRQsRmGYeRkhYiICFq2bMmkSZMAcDgchIaG8vTTTzNixIirlh88eDA7duxg6dKlGY8999xzrF27llWrVmVrnwkJCQQGBhIfH09AQEBO4ooUacmp6SzbGUdqusPqKJJLcedTGL9wJ4YBo+6pT/+bq1kdSaTAZPf92yMnG01NTSU6OpqRI0dmPObm5kbHjh1Zs2bNNddp06YNX3/9NevWraNVq1bs37+f+fPn06tXryz3k5KSQkpKSqYnI1LcJKWk8+DUNew4rr9/V9Ajogr92oZZHUOkUMpRMXLq1CnsdjtBQUGZHg8KCmLnzp3XXKdHjx6cOnWKm2++GcMwSE9P54knnrjuaZqxY8fy2muv5SSaiEtxOAyGzNrEjuMJlCrhSePKpayOJHnQqFIAQzvW1pUzIlnIUTGSG8uXL+ett97iww8/JCIigr179zJkyBDeeOMNXn311WuuM3LkSIYNG5ZxPyEhgdDQ0PyOKlJojF+0kyU7YvHycOPzvi1pXqW01ZFERPJNjoqRcuXK4e7uTmxsbKbHY2NjqVjx2h0EX331VXr16sXAgQMBaNSoEUlJSfz73//m5Zdfxs3t6jG03t7eeHtrpLkUT9+uP8xHK/YD8PaDjVWIiIjLy9HVNF5eXoSHh2cajOpwOFi6dCmtW7e+5jrJyclXFRzu7ua19TkcOyvi8tbFnOGl77cC8PRtNbmvaSWLE4mI5L8cn6YZNmwYffr0oUWLFrRq1YqJEyeSlJREv379AOjduzeVKlVi7NixAHTp0oV3332XZs2aZZymefXVV+nSpUtGUSIicOh0Mo9/tZ40u8HdjSrybMfaVkcSESkQOS5GunXrxsmTJxk1ahQnTpygadOmLFy4MGNQ66FDhzIdCXnllVew2Wy88sorHD16lPLly9OlSxfefPNN5z0LkSIu4WIa/adHcTY5jUaVApnwUFPc3DTYUUSKhxz3GbGC+oyIK0u3O+g/fT0rd58kKMCbHwbdTMVAH6tjiYjkWXbfvzU3jYjF/vPLDlbuPomPpxuf9m6pQkREih0VIyIW+vrPg3zxxwEAJnZrSqPKgdYGEhGxgIoREYus2nOK0T/+BcDzkXW4q2GwxYlERKyhYkTEAvtOJvLUN9HYHQb3N6vEUx1qWB1JRMQyKkZECti55FQGTl9PwsV0wquWZuwDjdQmXESKNRUjIgUoze7gya83EHMqiUqlfPmoVzg+nuq3IyLFm4oRkQJiGAajftjGmv2n8fNy57O+LSjnr2kPRERUjIgUkM9XH2DmusPYbPB+92bUraieOSIioGJEpED8tjOON3/ZDsDLd9fj9npBFicSESk8VIyI5LNdJ87z9MyNOAx4pGUoA26uZnUkEZFCRcWISD46lZjCgOlRJKakE1GtDK/f11BXzoiI/IOKEZF8kpJu54mvojly9gJhZUsw9dFwvDz0Lyci8k96ZRTJB4ZhMHLuVtYfPEtJHw8+7dOS0n5eVscSESmUVIyI5IMPl+/ju41HcXez8WHP5tSs4G91JBGRQkvFiIiTLdx2nLcX7QJgTJf6tKtV3uJEIiKFm4oRESfadjSeZ2dvBqBP66r0ah1mbSARkSJAxYiIk8QmXGTg9PVcSLPTrlY5Xr2nvtWRRESKBBUjIk5wIdXOY1+u50TCRWqU92NSj+Z4uOvfS0QkO/RqKZJHDofB8P9tZsuReEqX8OTzvi0J9PW0OpaISJGhYkQkjyYu3cMvW47j6W5j6qPhVC3rZ3UkEZEiRcWISB78sOko7y/dA8CbXRsRUb2sxYlERIoeFSMiubTx0Fme/98WAP59S3UebhlqcSIRkaJJxYhILhw9d4HHvowmNd1Bx3pBvHhXXasjiYgUWSpGRHIoKSWdgdPXcyoxhboVSzLxkaa4u2nyOxGR3FIxIpIDDofBkFmb2HE8gXL+XnzapwX+3h5WxxIRKdJUjIjkwPhFO1myIxYvDzc+7t2CyqVLWB1JRKTIUzEikk3frj/MRyv2A/D2g41pXqW0xYlERFyDihGRbFi7/zQvfb8VgKdvq8l9TStZnEhExHWoGBG5gUOnk3ni62jS7AZ3N6rIsx1rWx1JRMSlqBgRuY6Ei2n0nx7F2eQ0GlcOZMJDTXHTlTMiIk6lYkQkC+l2B4NnbGRvXCJBAd580rsFvl7uVscSEXE5KkZEsvCfX3awcvdJfDzd+LR3S4ICfKyOJCLiklSMiFzD138e5Is/DgAwsVtTGlUOtDaQiIgLUzEi8g+r9pxi9I9/AfB8ZB3uahhscSIREdemYkTkCvtOJvLUN9HYHQb3N6vEUx1qWB1JRMTlqRgRueRccioDvogi4WI64VVLM/aBRthsunJGRCS/qRgRAdLsDp78egMHTidTqZQvH/UKx8dTV86IiBQEFSNS7BmGwagftrFm/2n8vNz5rG8Lyvl7Wx1LRKTYUDEixd7nqw8wc91hbDZ4v3sz6lYMsDqSiEixomJEirXfdsbx5i/bAXj57nrcXi/I4kQiIsWPihEptnadOM/TMzfiMOCRlqEMuLma1ZFERIolFSNSLJ1KTKH/F1EkpqQTUa0Mr9/XUFfOiIhYRMWIFDsp6XYe/yqao+cuEFa2BFMfDcfLQ/8KIiJW0SuwFCuGYTBy7laiD56lpI8Hn/ZpSWk/L6tjiYgUaypGpFj5cPk+vtt4FHc3Gx/2bE7NCv5WRxIRKfZUjEixsXDbcd5etAuAMV3q065WeYsTiYgIqBiRYmLb0Xienb0ZgD6tq9KrdZi1gURE
JIOKEXF5sQkXGTh9PRfS7LSrVY5X76lvdSQREbmCihFxaRdS7Tz25XpOJFykRnk/JvVojoe7/uxFRAoTvSqLy3I4DIZ/u5ktR+IpXcKTz/u2JNDX0+pYIiLyDypGxGVNXLqHX7Yex9PdxtRHw6la1s/qSCIicg0qRsQl/bDpKO8v3QPAm10bEVG9rMWJREQkKypGxOVsPHSW5/+3BYB/31Kdh1uGWpxIRESuR8WIuJSj5y7w2JfRpKY76FgviBfvqmt1JBERuQEVI+IyklLSGTh9PacSU6hbsSTvPdIUdzdNficiUtipGBGXYHcYDJm1iR3HEyjn78VnfVvi5+1hdSwREcmGXBUjkydPJiwsDB8fHyIiIli3bt11lz937hyDBg0iODgYb29vateuzfz583MVWORa/rtoJ0t2xOLl4cbHvVtQqZSv1ZFERCSbcvzRcfbs2QwbNoypU6cSERHBxIkTiYyMZNeuXVSoUOGq5VNTU7njjjuoUKEC//vf/6hUqRIHDx6kVKlSzsgvwrfrD/PRiv0AvP1gY5pXKW1xIhERyQmbYRhGTlaIiIigZcuWTJo0CQCHw0FoaChPP/00I0aMuGr5qVOn8vbbb7Nz5048PXPXcCohIYHAwEDi4+MJCAjI1TaKq7jYY8Qb/mBzzbET+08lMXjGBtLsBs/cVpNhd9axOpLzXYwHn0CrU0hu2dPAngpe6nNTZF1MAC9/cNPIhpzK7vt3jo6MpKamEh0dzciRIzMec3Nzo2PHjqxZs+aa6/z444+0bt2aQYMG8cMPP1C+fHl69OjBiy++iLu7+zXXSUlJISUlJdOTkZzbvHIeTZb14U97a4akDcJw4SFCdzeqyNCOta2O4VwOB8wdANt/gHvehfC+VieSnEo8CdPuguQz0OcnqNjQ6kSSUzErYcYjUKEu9P4RvP2tTuSSclSMnDp1CrvdTlBQUKbHg4KC2Llz5zXX2b9/P8uWLaNnz57Mnz+fvXv38tRTT5GWlsbo0aOvuc7YsWN57bXXchJNriE1eiYA97qvIdYjhClu3S1OlD9uql6GCQ81xc3VrpxZ9jr89Z15++dhULoaVG9vbSbJvrSLMLsnnN5r3p/5CDy2DPyvPp0thdTpfTC7F6QlwdFo+O7f0O1rHSHJB/l+uYHD4aBChQp8/PHHuLu7Ex4eztGjR3n77bezLEZGjhzJsGHDMu4nJCQQGqrGVTlhOBxUi/8z4/5jxlweu+8uaPywhakk2zbNgFX/Z94OaQbHNsKcXjBwGZSraW02uTHDgJ+egcNrzVNsvmXgbAzM6mkeIfH0sTqh3MiFszDjYbh4DirUNwuTXb/A0tfgDn1YdrYclXflypXD3d2d2NjYTI/HxsZSsWLFa64THBxM7dq1M52SqVevHidOnCA1NfWa63h7exMQEJDpS3Jm/1/rKMc5kg1v0ls9YT74w2A4fP0rn6QQOLgGfnzGvN3uOei3ECq3MseOzHjYPOQvhdvvE2DLbLC5w0PToef/wKcUHFkHPw42ixUpvOxpMKePeVQroDL0mgf3meMkWT0RNn5jZTqXlKNixMvLi/DwcJYuXZrxmMPhYOnSpbRu3fqa67Rt25a9e/ficDgyHtu9ezfBwcF4eXnlMrbcSNzGXwDYXaIZHneNhTqdwZ4Cs3rAuUMWp5MsnYkxD+070qBeF7j1FfNT9CPfQGAonNkH3/YxXyylcNr+Ayx7w7x999tQ41bzaNbDX4KbB2z9Fla+Y21GyZphwIIXIGYFePpBj1lQMsg8qnzL8+YyPw2Bg39Ym9PF5PjE17Bhw/jkk0+YPn06O3bs4MknnyQpKYl+/foB0Lt370wDXJ988knOnDnDkCFD2L17N7/88gtvvfUWgwYNct6zkKsEHFkOQErYreb5zQc+hqBGkHTSHIyVct7agHK1iwnmuILk0xDcBO7/6O9z0/4VoPssc0R/zEqYP1yfrgujY5vgu8fN260eh5YD/v5e9fZmcQLw23/gr3kFnU6yY93HsP5zwAb/+hQqNvr7ex1egvr3mR8WZvU0PzyIU+S4GOnWrRvvvPMOo0aNomnTpmzatImFCxdmDGo9dOgQx48fz1g+NDSURYsWERUVRePGjXnmmWcYMmTINS8DFudITDhLrZS/AKgUfo/5oLc/dJ8JfhUg7i+YOxAcdgtTSib2dPhffzi5E/wrXio8/nEpaMWG5osjNoj+AtZOtSKpZCXhuFlMpl+AGrdD5FtXL9OiP0Q8ad7+/gk4uqFgM8r17VkCCy+9N93xGtS9O/P33dyg61QIbgoXzpi/74vxBR7TFeW4z4gV1GckZzYtnkHT1U9yxFaRyqN3Zf7mkfUw7W7zlE3rwRD5pjUhJbMFI2DtFPDwhX7zoVLzrJf94wP49RWwuUGPOVDrjoLLKdeWmgzTOsHxTVC+Lgz4NeveMA47zOgGexdDyWDzCpuAkAKNK9cQtxM+uwNSEqDpo+YYkaz6MyUcg09ug/PHoWZH6D4b3DX9xLVk9/1b1ye5oJRdiwE4WrbN1d+s3AK6fmjeXjMJNnxZgMnkmtZ/bhYiAPdPuX4hAmYR2awXGA74th/E7cj/jJI1hwPmPWEWIr5lzKNa12tS5+YOD35uFi3nLx1NSU0usLhyDUmnzcHhKQlQtS3c83/XbxQZEGIeafbwhb1L4NeXCy6ri1Ix4oIqn14NgHfdO6+9QKMHof2L5u2fn4UDqwoomVxl/wqYf2lQ3K0vQ4P7b7yOzQad34WqN0PqefNTdtKp/M0pWVs+1hy06uZpDjQuU+3G6/gEQI/ZUKIsHN8M3z9uFjVS8NJTYPajcO4glA6Dh78Cj2xcXBHSDO6/dKp07VSI+ixfY7o6FSMu5sjebVQyYkk13KkV0SnrBduPMN/4HOnmP+LpfQUXUkyn9pq9Qxzp0Oihv0fqZ4eHF3T7ymyEdu6gOZguPeXG64lzbfkWVv7XvN3lPah6jaORWSkdBt2+AXcv2PEj/KZTpgXOMMwPZIf+AO8A83SLX9nsr9+gK9z2inl7/vOwf3l+pCwWVIy4mKPrfwJgj3dD/EqWynpBNzfoOgVCmpvNfWY+AhfOFUhG4YqGSvFQqQXce53z01kpUcYcM+IdCIf/NC83LPxDwFzH4XXww6WrAtsOgWY9c76Nqq3NIgbg93dg82zn5ZMbW/0ebPrGHH/10DSz5XtOtRsOjR4Gww5zesOpPc7PWQyoGHExPgd/A+B85Wy0Dff0Nc97lgyBU7vh277mVR2Sv+xp5ovWmX1mQ6VHZuS+I2f52uaLqM0dNs80GzJJ/jt3yOzZY0+BOnfD7dfuJp0tTXtA26Hm7R/VmLDA7PwFlowxb981zhyImhs2G9z7wRWNCbupMWEuqBhxISkXk6mVvAmA8s06Z2+lkhXNpj6eJWD/b39f1ib5wzDMw7kxKy81VJptNlTKi5q3Q6fx5u0lr8GOn/OeU7KWch5mdjd79gQ1ggc+MQel5sXto6HuPebsvmpMmP+Ob4G5jwEGtBgArf6dt+2pMWGeqRhxIXuiFlPClsIpSlG9QavsrxjcxGy
KBhD1Caz7JH8CijnQLXoaYIMHP3PeLK6tHoOWl15cv3vMHBQpzuewm29isdvMnj3dZzpnFlc3N7PJXcXLjQm7qTFhfjkfaxaTaUlQvYNZyOf0FOm1qDFhnqgYcSGJfy0CICbwJmw5nVWyXpe/DzUveBH2Lr3+8pJzexbDopfM23e8DnWuM8A4N+4aBzVug7Rk88X2/Annbl9gyWjYvQDcvc1CpJQTJ/D09jffzPyDIG47/G+AGhM6W9oFmNUdEo5A2Vrw0Bfg7um87f+zMeGfU5y3bRenYsSFBMWZl/Taaufy3OfNz0KT7uZArG/7wcldN15Hsiduh/kzNRxmQ6U2Tzt/H+4e8OA0KFcbEo5e+vR3wfn7Ka42fGU2nAOzV0/lFs7fR2BleGQmePjAnkWweJTz91FcGYY54PhotDlpYY/Z4Fva+fup0wnuvDQ30a8vw+5fnb8PF6RixEWcPHaAao4DOAwbNSO65G4jNps5sj/0JkjRQCynSTpl/ixTz2evoVJe+JYyP137loZjG2DeUzpU7AwHVpmXgILZo6fRg/m3r8rhakyYH1b8F7bNNScr7PY1lK2Rf/u6sjHh//pD7Pb825eLUDHiImLW/gjAXs9alCpXMfcb8vA2B2KVqgJnY8weJOmpTkpZDOW2oVJelK1hvti6ecJf38Hycfm7P1d3Zr/5O3Skmb152hfAIO+G/4IOlyYc/flZiPk9//fpyrZ9B8svzRXU+V2o1i5/9/fPxoQzu0HiyfzdZxGnYsRFuO8zx3icDr4l7xvzK2c2//EqCQdXwy/P6tN1bhgG/DQUDq0xGyr1mJOzhkp5EXYz3POueXvFONj6v4LZr6u5fKnmhbNmT56uU/6eSTm/tX/RLEoc6WZzPDUmzJ2j0TDv0uSErQdDeJ+C2W+mxoSHLn2wU2PCrKgYcQH29HRqJkYBULrRXc7ZaFB9c/4Mmxts/No8XCw5s3oibJ7xd0Ol8nUKdv/Ne5svvmCeKz8SXbD7L+rs6WbvnVO7zV483WeavXkKis0G902GSuFqTJhb8UdhZg9Ivwi1Is2B4wVJjQmzTcWIC9izaQWBJJGAHzWbZaPZWXbVvhPuvNSi+tdXYdcC523b1e342ez5AXDX+Nw3VMqrO16H2neZL8YzH4H4I9bkKIoWjYR9y8wePD1mmT15Cpqnr9kUL6CSGhPmVGqS+TefeAIq1DevcslrP5jcKF8bHv7i78aEq/6v4DMUASpGXMDZLQsB2Osfjoenk8cj3PQkhPcFDJg7EE5sc+72XdHxLWavDwxoORAi8thQKS/c3M0X4QoNICkOZjwCKYnW5Skq1n0C6y713nngY7MXj1VKVjQHJWc0JnzRuixFhcMB3/0bTmyBEuUuzaSc9fT1+a7GbX83Jlz6Guz4yboshZSKERdQ5thKANKr3e78jdtscPc7UO0WSE289Ekjzvn7cRXnT5g/o7Rks6HSXYVg8Kh3SfOTvV95iN1qvkhrhtis7Vtm9toBuH2U2YPHasGNzU6v2CDqUzUmvJFlb8DOn81JCB/5BkpXtTrRFY0JMf8H1ZgwExUjRVz86Vhqppn9QKrm9pLeG3H3hIemQ5kaEH/YnCE27WL+7KsoS7tgtvJOOHqpodJ05zZUyotSVczD/e7esOsX89OZXO3kbpjT1+y106Q73DzM6kR/q3cPdFRjwhvaNBNWXRq8fe8kqHKTtXmupMaEWVIxUsTtXfsz7jaDA25VCKqcj9fNXx6I5RMIR9aZE3ppINbfDMPs6XE02uzx0WO22fOjMAltBfddGoi8eiJs/MbSOIVO8hlzJuWUeLPXTpf38q8fTG61HQpNelxqTNhXjQn/6eAa+OkZ83a756BJN2vz/JMaE2ZJxUgRZ9+9BIAT5dvm/87K1YSHvzQHYm391pzyXEwrxps9Pdw8zF4i+dlQKS8aPwy3PG/e/mmI+eItZi+d2b3M3jqlqpiH9j28rU51NZsNukyEKq0hJcEsntSY0HT2IMzuaU42WK8L3PqK1Ymu7arGhE/qtCkqRoo0w+Eg7NyfAPg1iCyYnVbvAJ0vFSHL/gN/zSuY/RZm2+bC8rHm7YJoqJRXHV6C+veZTbxm94QzMVYnspZhwPzn4OAqs7dO99lmr53CysPbbGpXqiqcPaDGhAAXEy51jD5tDja+/6OC6weTG5kaE35vfpgp5grxb0tu5MCOKCpwhguGF7Va3llwO27RHyKeMG9//wQc3VBw+y5sjkSbp2egYBsq5YWbG3SdCsFNzRfvmY+Yzb2KqzWTzZbrNjezt05QfasT3ZhfOfNUoHeAGhM67DB3AJzcAf6Xrjzy8rM61Y2pMWEmKkaKsNiN8wHY7dsEH98C/ue7802zd0b65UGbxwp2/4VB/BFzBtD0i2Yvj4JuqJQXXiXMJl4lg+HkTnP+jOLYv2LXQvj10uH8O980e+sUFRXqmeMPLjcmvDyJX3Hz6yuw51fw8DX/pgNCrE6UfVc2Jpz3FBxZb20eC6kYKcL8Dy8H4ELVWwt+5+4e5qfI8nXh/HHz03VqcsHnsErK5cucY61tqJQXAZe6inr4wt4lf78pFxexf5mfqDHMXjo3PWl1opyr1REiL50iXDyq+DUmXD8N/rw0qeD9U6BSc2vz5MblxoT2FHNAazFtTKhipIhKToyn9kWzAVlIi3usCeETaB4SLVHWvGb++8eLx0Ash8N8rie2/t1Qybuk1alyJ6QZ3D/VvL12Cqz/3No8BSXxpNkALjURwtqZvXQK25Uz2RXxOIT3o9g1JoxZCfOHm7dvfdmcxLAoutyYMKhhsW5MqGKkiNqzbgFetnSO2SoQWqORdUHKVPt7INaOH+G3N63LUlCWvX5FQ6UZhaOhUl406Aq3XToq8stw2L/cyjT5L+2ieWox/pDZO+fhLwtPP5jcsNng7rehWvvi05jw1F7z6idHOjR66O8rxIoq75LmUcpi3JhQxUgRdXH7rwAcLtMGm9Wjxqu2gXvfN2///g5smWNtnvy0acbfc0vcOwmqRFibx1naDYdGD5v9K+b0Nl/sXZFhmH0ojqwzj+z1mG320Cnq3D3h4elQtualxoQ9XLcx4YWzMLMbXDwHlVqY/4dF9ajWla5qTDjG6kQFSsVIERVy2uwP4VXnDouTXNK0h9mQCeCHwXB4naVx8sXBNfBjIW6olBc2G9z7AVRuZV5Z46r9K36fAFtmm71yHpoO5WpZnch5fEublyX7lIIjUa7ZmNCeBnP6wOm9EFDZfPP29LE6lfNkakz4XrFqTKhipAg6un8HocYx0gx3akbcbXWcv90+Gup0NgdizeoB5w5Znch5zh4we3I40qDevYW3oVJeePqYzb4CQ+HMPvi2j/ni7yq2/2DOWQLmaY0aFgz8zm+XGxO6eZiNCVe6UGNCw4AFL0DMCvD0M49qlQyyOpXzXdWY8A9r8xQQFSNF0JH15oyPe7zrUzKwEB1idnMzZzgNagRJJ80mRCnnrU6VdxcTzEFlGQ2Vphbuhkp54V/BfJH38v97gKArfLo+thG+e9y8HfEEtBxgbZ78VL29OS
AX4Lf/mE21XMHajy4NsLbBg59BxYZWJ8o/VzYmnFU8GhO66Cuqa/M68BsA8ZVusTjJNXj7mzPE+gdB3HZzdL/DbnWq3LOnmz04ilpDpbwIagD/+gywQfQXsHaq1YnyJuG4eclk+gWzN86dxWCQdYt+cNOlZnzfP1n0GxPuWQKLRpq373gd6nSyNk9+u7Ix4YUzxaIxoYqRIiY15SK1k8wXlvJNO1ucJguBleGRmeDhA7sXmv0PiqrFr8LexUWzoVJe1LkL7rx0SmPRS7BnsbV5cis12XwhP3/c7Inz4Odmj5zi4M7/QM07in5jwrid8L9+YDig6aPQ5mmrExWMYtaYUMVIEbNn/VL8bBc5TSDVGxaiqbH/qXI43DfZvL1mktluu6jJ1FBpatFsqJQXrQdDs17mm8C3/SB2u9WJcsbhgHlPwPFNZi+c7rPMK2iKCzf3S40J613RmDDJ6lQ5k3Tq0kzKCVC1Ldzzf65x5Ux2XdWY8GWrE+UbFSNFTMK2hQDsD4zAzb2Qd/xs9CC0H2He/vlZiPnd2jw5sX/FFQ2VXjF7cRQ3Nps58V/VmyH1vHk5ZdIpq1Nl3/Kx5qBVN0+zF06ZalYnKng+AeZp04zGhE8Unf4V6SnmJIDnDkLpMHM2bA8vq1MVvEyNCadC1GfW5sknKkaKmApxqwCw1bjd4iTZ1GEENHjAbE40pxec3md1ohs7tdfMmtFQabjViazj4QXdvoLS1cyro2b1NN8kCrst38LK/5q3u7xn9sIprkqHQbdvzCZ9RaUxoWGYH2AOrTEnA+wxB/zKWp3KOlc2Jpz/POz7zdI4+UHFSBFy6sQhatj34zBsVL+pi9Vxssdmg64fQkjzS82KHoEL56xOlbULZ83DwhfjoXJL12molBclyphvBt6BcPhP83LDwnyFzeF18MMg83bbIdCsp7V5CoOqraHLFY0JN8+2Ns+NrH4PNn1jTgL40DQoX8fqRNa7sjHht33g1B6rEzmVipEiJOZP85LefR41KFOhksVpcsDz8uDPSnBqN3zbt3AOxLKnmd1Hz+wze224WkOlvChfGx7+wmwWtnnm311oC5tzh8zBmvYUs+fN7WOsTlR4NO0ONz9r3v5xMBxaa22erOz4GZaMMW/fNd68Akqu0Ziwm0s1JlQxUoTY9i0F4FTFdhYnyYWSFc2CxLME7P8NFo6wOlFmhmEe/oxZafbY6D7L7Lkhf6txG3Qab95e+hrs+MnaPP+Uct7sB5N00ux188DHrtsPJrduGwV17wF7auFsTHh8izkvCwa0HAgR/7Y6UeHyz8aEc3q7TGNC/acWEfb0dGqcN1usBzYqotfYBzeBBz4xb0d9Aus+sTbPldZ+BNHTAJs5g6YrN1TKi1aPQatLbxDf/dscFFkYOOww9zGI+wv8KpiDNr39rU5V+FxuTFixESSfKlyNCc/Hmv1g0pKgegfzqIhc7crGhAd+d5nGhCpGioh9W1ZTmvOcN3yp1byD1XFyr949Ztt4gAUvwt6l1uYBs4dGcWqolFeRY82jJGnJ5pvH+RNWJ4Ilo2H3AnOSse4zzV43cm1efuYcNpcbE/5vgPWNCdMuwKzukHAEytYy5w0qLv1gcuOfjQn/nGJ1ojxTMVJEnN48H4A9/i3w9PK2OE0e3fwsNOl+aSBWPzi5y7oscTvMDIYDmhWjhkp54e4BD06DcrUh4eilT7MXrMuz4Sv44wPzdtcPoXIL67IUFYGVLvWv8IE9i6xtTGgY5oDjo9HmZH89ZoNvKevyFBVXNib89WXY/au1efJIxUgRUerYSgDSwlxgci+bzbzcMvQmSLFwIFbSpcPUqefNhkqdi1lDpbzwLXXpTaM0HNsA856y5lDxgVXmJaBg9rRp9GDBZyiqKoVD10ufqK1sTLjiv7Btrjm538NfQdka1uQoiq5sTPi//kWvMeEVVIwUAfFnT1ErdScAVVrdY3EaJ/HwNgdilaoCZ2PM5kbpqQW3fzVUyrsy1c1mYm6e8Nd3sHxcwe7/zH7zd+hIgwb3Q/sXC3b/rqDhA9Dh0ilKKxoTbvsOlr9l3u78LlQrgoPzrXStxoSJJ61OlSsqRoqAfWt/xsPm4KBbZYKrutD19n7lzP4VXiXh4Gr45dmC+XRtGPDT0EsNlQLVUCkvwm42W3QDrBgHW/9XMPu9cM48qnXhrNnDpusUXTmTW+1fhIb/KvjGhEeiYd6T5u3WgyG8T8Hs19X8szHh7EeLRmPCf9B/bxGQvsucpOx4ubYWJ8kHFeqZTY1sbrDxa/NwcX5bPRE2zzB7ZqihUt417/X3WJt5T8GR9fm7P3u6OXHaqd1m75ruM81eNpI7Nps5j1SlFpea/nXL/8aE8UfNAavpF6H2XebAccm9fzYm/PGZIneFjYqRQs5wOKh6dg0AvvUjLU6TT2rdAZGXDtX++irsWpB/+9rxMyx5zbzdaTzULCJt9Qu7jq9B7U5ms7GZ3SH+SP7ta9FI2LfM7FnTfabZw0byxtPXbPIXUBlO78nfxoSpSWYn5sRYqFDfvJTerZDPs1UUXNmYcMuswtuYMAsqRgq5Q7s2EsRpLhqe1GnlosUIQMQTEN4XMGDuQDixzfn7OL4FvnuMjIZKrR5z/j6KKzd3+NcnENQQkuLM5mMpic7fz7pPYN3H5u0HPjZ714hzlAy6dJTJ71JjwnwYg+NwmP1pTmyBEuXM5oLeJZ2/n+KqsDcmvA4VI4Xc8Q0/A7Dbtwk+JVy4iZPNBne/A9VugdTES5+c4py3/fMnzG2mJUP1W9VQKT94lzTfzPzKQ+xW803HmTPE7ltm9qYBs1dNvSIyP1NREtzYLCqxQdSnzm9MuOwN2PmzOWnfIzOgdFXnbl/MD1ktL33QKkyNCW9AxUghV+LwCgCSQ9tbnKQAuHuazY7K1ID4w+YMsWkX877dtAtm6+uEo2ZvjIe+UEOl/FKqCjwy02w+tusX89OZM5zcDXP6mr1pmlwxx4o4X93O0HGMeduZjQk3zYRV75q375sMVSKcs1252l3j/m5MOOORwtGY8AZUjBRiF5LOU+fCFgCCw13kkt4buTwQyycQjqwzJ/TKy0CsfzZU6j5LDZXyW2hL880GzMHCG7/J2/aSz5gzKafEm71purynfjD5re0QaNrzUmPCvnlvTHhwDfz0jHm73XBo/HCeI8p1XNmY8Pwx6xsTZoOKkUJsz7pFeNvSOEE5qtRuanWcglOuJjz8pTkQa+u3sPKd3G9rxXg1VLJC44fglufN2z8NgYN/5G476akwu5fZi6ZUFbM3jUcR70BcFNhs5iXbVdpASoJZDCadzt22zh6A2T3Nyfnq3Qu3vuzUqJIF31KXPnxdbkz4pHNPmzqZipFCLHnHIgAOlWmNrbj1UKjeATpfKkJ++w/8NS/n29g2F5aPNW/f839qqFTQOrwE9e8zm5LN6glnYnK2vmHAL8Pg4CqzF02POWZvGikYHt5mU7tSVc2CYk6vnDcmvJhgniZIPm0ONr5/qvrBFKSyNa5oTPi92QuokNJfRSEWcmo1AB6177A4iUVa9IeIS02Rvn8Cj
m7I/rpHos2eF2A2VGre2/n55Prc3KDrVAhuChfOmAOIL8Znf/01k2HjV2YPmoemmT1ppGD5lb3UvyIg540JHXaYOwBO7gD/iuandC+//M0rV8vUmHB8wTUmzCEVI4XUsQO7qOI4SrrhRo2bisl4kWu58z9QsyOkXx6EeuzG68QfUUOlwsLrci+QYDi505w/Izv9K3YthF9fMW/f+abZi0asUaGuOf7gcmPCy5MS3sivr8CeX8HD1/wbCAjJ35yStYJuTJgLKkYKqcNR5vXhe7zqEViqGLcqd/eABz+H8nXh/HHz03VqctbLpyRe0VCpgRoqFQYBIZdmiPWFvUv+LjKyEvuX+Ykaw+w9c9OTBZFSrqdWR4i8dMpz8agbNyZcPw3+/NC8ff9UqNQ8f/PJjXV8zfxwdrkx4bnDVifKRMVIIeV14DcAzoVonAM+geYMsSXKmtfMf//4tQdiORzm905sNXtd9FBDpUIjpJnZpAxg7RRY//m1l0s8aY4xSE00e87c/Y6unCksIh43T53eqDFhzEqYP9y8fesr0KBrQSWU63FzNz+cXW5MOLN7/jQmzCUVI4VQWmoKtRKjASjXtLPFaQqJ0mHQ7RtzINaOH+G3N69eZtnrmRsqlapS4DHlOurfC7e9at7+ZTjsX575+2kXzVNx8YfMXjMPTTd7z0jhYLNBp/9CtfZZNyY8tde8+smRDo0egluGW5NVri2/GxPmQa6KkcmTJxMWFoaPjw8RERGsW7cuW+vNmjULm81G165dc7PbYmPPht/wt13gLAHUaOyCk+PlVtXWcO/75u3f34HNs//+3qYZf8/FcN9kCG1V8Pnkxto9B427mf0r5vSGU3vMxw0Dfnza7C3jc2km5RJlrM0qV3P3hIenQ9malxoT9vi7MeGFs+YU9hfPQeWWcO8kHdUqjEpVMT+sZTQmHGN1IiAXxcjs2bMZNmwYo0ePZsOGDTRp0oTIyEji4q7fuvvAgQMMHz6cdu102uFG4rea52P3BbTCzV3jHTJp2gPaDjVv/zgYDq8zGyr9qIZKRYLNBl3eh9AI88qaGd3Mpma/T4Ctc8zeMg9/afaakcLJt/SlxoSl4EiU+X9oT4M5feD0XggMNd/sPH2sTipZCW0F912aIX31e3lvTOgEOS5G3n33XR577DH69etH/fr1mTp1KiVKlODzz7M4BwzY7XZ69uzJa6+9RvXq1fMUuDgof+J3AIzqt1mcpJC6fTTUvcdsojSrh9lQyZFm9rRQQ6XCz9PHPOUWWAXO7INpncw5S8DsLVO9g6XxJBvK1oBuX5nNBLd+Cx/dAjErwMvfvITXv4LVCeVGGj/snMaETpKjYiQ1NZXo6Gg6duz49wbc3OjYsSNr1qzJcr3XX3+dChUqMGDAgGztJyUlhYSEhExfxcXp2CPUtO8DoNpNmgjsmtzc4P6PoGIjSDp5qaFSU7OnhRoqFQ3+lwYYe/mbl/yC2VOmRX9rc0n2VbsFOk8wb8dtB2zmAMmKDS2NJTmQ18aETpSjV+5Tp05ht9sJCgrK9HhQUBAnTlx7Ip5Vq1bx2Wef8ckn2Z/9cezYsQQGBmZ8hYaG5iRmkRaz1rykd697DcpV1ADMLHlf+gRWuhqUqW4OyvIqYXUqyYmgBuZl217+ZpvwO/9jdSLJqfC+cPMwc9D4XWOhTierE0lOXNmYMO3C32O4LJCvU5eeP3+eXr168cknn1CuXPbbOI8cOZJhw4Zl3E9ISCg+BcmlGTJPBrVFZ81vILAyDI4CbJqFt6iqHQnP79P4gqKs42ho/6J+h0WVVwnzg93545b2g8nRK3i5cuVwd3cnNjY20+OxsbFUrFjxquX37dvHgQMH6NLl79MNjkuXEXl4eLBr1y5q1Lh64jJvb2+8vYvfZFgOu53qCWsBCGioTxjZoks/iz69iRV9+h0WbQHB5peFcnSaxsvLi/DwcJYuXZrxmMPhYOnSpbRu3fqq5evWrcvWrVvZtGlTxte9997LrbfeyqZNm4rP0Y5s2r9tDWVIIMnwoVa4Bq+KiEjxkONj28OGDaNPnz60aNGCVq1aMXHiRJKSkujXrx8AvXv3plKlSowdOxYfHx8aNsw8mKlUqVIAVz0ucGrjfGoCu/2a08xbnzRERKR4yHEx0q1bN06ePMmoUaM4ceIETZs2ZeHChRmDWg8dOoSbrmjIlYCjKwBIDbvV4iQiIiIFx2YY2Z0P2joJCQkEBgYSHx9PQECA1XHyRcK50/j+Xy08bXaO9v6TStU1XbqIiBRt2X3/1iGMQmLv2vl42uwctoWoEBERkWJFxUghkbZ7MQDHyrWxOImIiEjBUjFSCBgOB1VOm614fepFWpxGRESkYKkYKQQO791CMCdJNTyo1UrFiIiIFC8qRgqBY+t/BmCXTyNK+AdanEZERKRgqRgpBEocWg5AUmh7a4OIiIhYQMWIxS4mJ1LrwmYAgprfY3EaERGRgqdixGJ7on7F15ZKHGUIqxtudRwREZECp2LEYknbfwXgQKmbsKlzrYiIFEN697NYxZOrAHCvfafFSURERKyhYsRCJw7vJcxxGLtho+ZNGi8iIiLFk4oRCx1a9xMAezzrElimvMVpRERErKFixEIe+5cBcDakncVJRERErKNixCLpaanUTFoPQJnGnSxOIyIiYh0VIxbZu2E5ASRzDn9qNr3F6jgiIiKWUTFikXNbFwKwr2RL3D08LE4jIiJiHRUjFilz4ncA7NVvtziJiIiItVSMWODsyePUTNsDQLWILhanERERsZaKEQvsW/sTbjaD/W5hlA8JszqOiIiIpVSMWMDYswSAuAptLU4iIiJiPRUjBcxht1Mtfi0A/g3vsjiNiIiI9VSMFLCYv9ZSjnMkG97UatHR6jgiIiKWUzFSwOI2LQBgd4lmePuUsDiNiIiI9VSMFLCAI8sBSAm71dogIiIihYSKkQKUmHCW2il/AVApXLP0ioiIgIqRArV37QI8bXaO2CpSuWZDq+OIiIgUCipGClDKzkUAHC3bxuIkIiIihYeKkQJiOByEnlkDgHfdOy1OIyIiUnioGCkgR/b/RYgRS6rhTq2ITlbHERERKTRUjBSQY+t/BmCPd0P8SpayNoyIiEghomKkgPgc/A2A85XbW5xERESkcFExUgBSLiZTK3kTAOWbdbY2jIiISCGjYqQA7IlaTAlbCqcoRfUGrayOIyIiUqioGCkAiX+Zl/TGBN6EzU0/chERkSvpnbEABMWtBsBWWxPjiYiI/JOKkXwWdzSGao4DOAwbNSO6WB1HRESk0FExks8OrPsJgL2etShVrqLFaURERAofFSP5zH3fUgBOB99icRIREZHCScVIPrKnp1MzMQqA0o3usjiNiIhI4aRiJB/t2bSCQJJIwI+azdTsTERE5FpUjOSjs5sXALDXPxwPTy+L04iIiBROKkbyUZnjvwOQXu12i5OIiIgUXipG8kn86Vhqpu0CoKou6RUREcmSipF8snftz7jbDA64VSGocg2r44iIiBRaKkbyiX33EgBOlG9rcRIREZHCTcVIPjAcDsLO/QmAX4NI
i9OIiIgUbipG8sGBHVFU4AwXDC9qtbzT6jgiIiKFmoqRfBC74RcAdvs2wcfXz+I0IiIihZuKkXzgf2QFABeq3mpxEhERkcJPxYiTJSfGU/viNgAqtbjH4jQiIiKFn4oRJ9uzbgFetnSO2SpQuUYjq+OIiIgUeipGnOzi9l8BOFymDTY3/XhFRERuRO+WTlbp9B8AeNW5w+IkIiIiRYOKESc6uv8vKhvHSTPcqRlxt9VxREREigQVI050JOpnAPZ416dkYBmL04iIiBQNKkacyOvgcgDiK91ibRAREZEiJFfFyOTJkwkLC8PHx4eIiAjWrVuX5bKffPIJ7dq1o3Tp0pQuXZqOHTted/miKjXlIrWTNgBQvmlni9OIiIgUHTkuRmbPns2wYcMYPXo0GzZsoEmTJkRGRhIXF3fN5ZcvX0737t357bffWLNmDaGhodx5550cPXo0z+ELkz3rl+Jnu8hpAqne8Car44iIiBQZNsMwjJysEBERQcuWLZk0aRIADoeD0NBQnn76aUaMGHHD9e12O6VLl2bSpEn07t07W/tMSEggMDCQ+Ph4AgICchK3wKz56GlaH/+SqMA7afnst1bHERERsVx2379zdGQkNTWV6OhoOnbs+PcG3Nzo2LEja9asydY2kpOTSUtLo0yZrAd4pqSkkJCQkOmrsKsQ9zsAthq3W5xERESkaMlRMXLq1CnsdjtBQUGZHg8KCuLEiRPZ2saLL75ISEhIpoLmn8aOHUtgYGDGV2hoaE5iFrhTxw5Swx6Dw7BR/aYuVscREREpUgr0appx48Yxa9Ysvv/+e3x8fLJcbuTIkcTHx2d8HT58uABT5lzMup8A2OdRgzIVKlmcRkREpGjxyMnC5cqVw93dndjY2EyPx8bGUrFixeuu+8477zBu3DiWLFlC48aNr7ust7c33t7eOYlmKdu+ZQCcqtiOWhZnERERKWpydGTEy8uL8PBwli5dmvGYw+Fg6dKltG7dOsv1/vvf//LGG2+wcOFCWrRokfu0hZA9PZ0a581LlQMbdbI4jYiISNGToyMjAMOGDaNPnz60aNGCVq1aMXHiRJKSkujXrx8AvXv3plKlSowdOxaA8ePHM2rUKGbMmEFYWFjG2BJ/f3/8/f2d+FSssW/LKmpznvOGL7Wad7A6joiISJGT42KkW7dunDx5klGjRnHixAmaNm3KwoULMwa1Hjp0CLcrZqudMmUKqampPPjgg5m2M3r0aMaMGZO39IXA6c0LANjj34LmXkXn1JKIiEhhkeM+I1YozH1Gdr7Zmrpp21nbYBQRDz1ndRwREZFCI1/6jEhm8WdOUit1BwBVWt1jcRoREZGiScVIHuxb+zPuNoODbpUJrlrH6jgiIiJFkoqRPEjfvQSA4+XaWpxERESk6FIxkkuGw0HVs2YLfN/6kRanERERKbpUjOTSwV0bCOI0Fw1P6rRSMSIiIpJbKkZy6cSGXwDY7dsEnxJFv1+KiIiIVVSM5JLfoeUAJIe2tzaIiIhIEadiJBcuJJ2n9sWtAASH65JeERGRvFAxkgu71y3E25bGCcpRpXZTq+OIiIgUaSpGcuHCjl8BOFSmNTY3/QhFRETyQu+kuRByajUAHrU7WpxERESk6FMxkkPHDuyiiuMo6YYbNSI0XkRERCSvVIzk0OGonwDY41WPwNLlLE4jIiJS9KkYySGvmGUAnAtpZ3ESERER16BiJAfSUlOolbQBgHJNO1ucRkRExDWoGMmBPdHL8Ldd4CwB1GisyfFEREScQcVIDsRvWwjAvoBWuLm7W5xGRETENagYyYHyJ34HwKh+m8VJREREXIeKkWw6deIwNe37AKh2UxeL04iIiLgOFSPZdGDdzwDsda9BuYpVLE4jIiLiOlSMZNfeJQCcDNLAVREREWdSMZINDrud6gnrAAho2MniNCIiIq5FxUg27Nv6B2VIIMnwoVa4Bq+KiIg4k4qRbDi9aQEAu/2a4+XtY3EaERER16JiJBsCjq4AIDXsVouTiIiIuB4VIzeQcO40tVO3A1C5hS7pFRERcTYVIzewd+18PGwODttCqFS9ntVxREREXI6KkRtI2/UrAMfKtbE4iYiIiGtSMXIdhsNBlTNrAPCpF2lxGhEREdekYuQ6Du3ZQjAnSTU8qNVKxYiIiEh+UDFyHcejzRbwu3waUcI/0OI0IiIirknFyHWUOLQcgKTQ9tYGERERcWEqRrJwMTmR2hc2ARDU/B5rw4iIiLgwFSNZ2BP1Kz62NOIoQ1jdcKvjiIiIuCwVI1lI2r4IgAOlbsLmph+TiIhIftG7bBYqnlwNgHvtOy1OIiIi4tpUjFzDicN7CXMcxm7YqHmTxouIiIjkJxUj13Bo3U8A7PGsS2CZ8hanERERcW0qRq7BY/8yAM6GtLM4iYiIiOtTMfIP6Wmp1ExaD0CZxp0sTiMiIuL6VIz8w94NywkgmXP4U7PpLVbHERERcXkqRv7h7NYFAOwr2RJ3Dw+L04iIiLg+FSP/UPbEKgDs1W+3OImIiEjxoGLkCmdPHqdm2h4AqkV0sTiNiIhI8aBi5Ar71v6Em81gv1sY5UPCrI4jIiJSLKgYuYKxZwkAcRXaWpxERESk+FAxconDbqda/FoA/BveZXEaERGR4kPFyCUxf62lHOdINryp1aKj1XFERESKDRUjl8Rt/AWA3SWa4e1TwuI0IiIixYeKkUsCjq4EICXsVouTiIiIFC8qRoDEhLPUTvkLgErhmqVXRESkIKkYAfb8+QueNjtHbBWpXLOh1XFERESKFRUjQOquxQAcLdvG4iQiIiLFT7EvRgyHg9AzawDwrnunxWlERESKn2JfjBzZ/xchRiyphju1IjpZHUdERKTYKfbFyNGonwDY490Qv5KlrA0jIiJSDOWqGJk8eTJhYWH4+PgQERHBunXrrrv8t99+S926dfHx8aFRo0bMnz8/V2Hzg++h5QCcr9ze2iAiIiLFVI6LkdmzZzNs2DBGjx7Nhg0baNKkCZGRkcTFxV1z+T/++IPu3bszYMAANm7cSNeuXenatSvbtm3Lc/i8unghiVrJmwAo36yztWFERESKKZthGEZOVoiIiKBly5ZMmjQJAIfDQWhoKE8//TQjRoy4avlu3bqRlJTEzz//nPHYTTfdRNOmTZk6dWq29pmQkEBgYCDx8fEEBATkJO51bV35A42W9eYUpSg7KgabW7E/ayUiIuI02X3/ztG7b2pqKtHR0XTs+PfcLW5ubnTs2JE1a9Zcc501a9ZkWh4gMjIyy+UBUlJSSEhIyPSVH5K2LwIgJvAmFSIiIiIWydE78KlTp7Db7QQFBWV6PCgoiBMnTlxznRMnTuRoeYCxY8cSGBiY8RUaGpqTmNkWFLcKAFttTYwnIiJilUJ5OGDkyJHEx8dnfB0+fNjp+zAcDs6EP0NU4F3UjOji9O2LiIhI9njkZOFy5crh7u5ObGxspsdjY2OpWLHiNdepWLFijpYH8Pb2xtvbOyfRcszm5kZ454HQeWC+7kdERESuL0dHRry8vAgPD2fp0qUZjzkcDpYuXUrr1q2vuU7r1q0
zLQ+wePHiLJcXERGR4iVHR0YAhg0bRp8+fWjRogWtWrVi4sSJJCUl0a9fPwB69+5NpUqVGDt2LABDhgyhffv2TJgwgc6dOzNr1izWr1/Pxx9/7NxnIiIiIkVSjouRbt26cfLkSUaNGsWJEydo2rQpCxcuzBikeujQIdyuuDKlTZs2zJgxg1deeYWXXnqJWrVqMW/ePBo21Oy4IiIikos+I1bIrz4jIiIikn/ypc+IiIiIiLOpGBERERFLqRgRERERS6kYEREREUupGBERERFLqRgRERERS6kYEREREUupGBERERFLqRgRERERS+W4HbwVLjeJTUhIsDiJiIiIZNfl9+0bNXsvEsXI+fPnAQgNDbU4iYiIiOTU+fPnCQwMzPL7RWJuGofDwbFjxyhZsiQ2m81p201ISCA0NJTDhw+77Jw3rv4c9fyKPld/jnp+RZ+rP8f8fH6GYXD+/HlCQkIyTaL7T0XiyIibmxuVK1fOt+0HBAS45B/YlVz9Oer5FX2u/hz1/Io+V3+O+fX8rndE5DINYBURERFLqRgRERERSxXrYsTb25vRo0fj7e1tdZR84+rPUc+v6HP156jnV/S5+nMsDM+vSAxgFREREddVrI+MiIiIiPVUjIiIiIilVIyIiIiIpVSMiIiIiKWKdTEyefJkwsLC8PHxISIignXr1lkdyWlWrlxJly5dCAkJwWazMW/ePKsjOdXYsWNp2bIlJUuWpEKFCnTt2pVdu3ZZHctppkyZQuPGjTOaELVu3ZoFCxZYHSvfjBs3DpvNxtChQ62O4jRjxozBZrNl+qpbt67VsZzq6NGjPProo5QtWxZfX18aNWrE+vXrrY7lNGFhYVf9Dm02G4MGDbI6mlPY7XZeffVVqlWrhq+vLzVq1OCNN9644Twy+aHYFiOzZ89m2LBhjB49mg0bNtCkSRMiIyOJi4uzOppTJCUl0aRJEyZPnmx1lHyxYsUKBg0axJ9//snixYtJS0vjzjvvJCkpyepoTlG5cmXGjRtHdHQ069ev57bbbuO+++7jr7/+sjqa00VFRfHRRx/RuHFjq6M4XYMGDTh+/HjG16pVq6yO5DRnz56lbdu2eHp6smDBArZv386ECRMoXbq01dGcJioqKtPvb/HixQA89NBDFidzjvHjxzNlyhQmTZrEjh07GD9+PP/973/54IMPCj6MUUy1atXKGDRoUMZ9u91uhISEGGPHjrUwVf4AjO+//97qGPkqLi7OAIwVK1ZYHSXflC5d2vj000+tjuFU58+fN2rVqmUsXrzYaN++vTFkyBCrIznN6NGjjSZNmlgdI9+8+OKLxs0332x1jAI1ZMgQo0aNGobD4bA6ilN07tzZ6N+/f6bHHnjgAaNnz54FnqVYHhlJTU0lOjqajh07Zjzm5uZGx44dWbNmjYXJJLfi4+MBKFOmjMVJnM9utzNr1iySkpJo3bq11XGcatCgQXTu3DnT/6Ir2bNnDyEhIVSvXp2ePXty6NAhqyM5zY8//kiLFi146KGHqFChAs2aNeOTTz6xOla+SU1N5euvv6Z///5OnbDVSm3atGHp0qXs3r0bgM2bN7Nq1So6depU4FmKxER5znbq1CnsdjtBQUGZHg8KCmLnzp0WpZLccjgcDB06lLZt29KwYUOr4zjN1q1bad26NRcvXsTf35/vv/+e+vXrWx3LaWbNmsWGDRuIioqyOkq+iIiI4IsvvqBOnTocP36c1157jXbt2rFt2zZKlixpdbw8279/P1OmTGHYsGG89NJLREVF8cwzz+Dl5UWfPn2sjud08+bN49y5c/Tt29fqKE4zYsQIEhISqFu3Lu7u7tjtdt5880169uxZ4FmKZTEirmXQoEFs27bNpc7HA9SpU4dNmzYRHx/P//73P/r06cOKFStcoiA5fPgwQ4YMYfHixfj4+FgdJ19c+emycePGREREULVqVebMmcOAAQMsTOYcDoeDFi1a8NZbbwHQrFkztm3bxtSpU12yGPnss8/o1KkTISEhVkdxmjlz5vDNN98wY8YMGjRowKZNmxg6dCghISEF/jsslsVIuXLlcHd3JzY2NtPjsbGxVKxY0aJUkhuDBw/m559/ZuXKlVSuXNnqOE7l5eVFzZo1AQgPDycqKor33nuPjz76yOJkeRcdHU1cXBzNmzfPeMxut7Ny5UomTZpESkoK7u7uFiZ0vlKlSlG7dm327t1rdRSnCA4OvqowrlevHnPnzrUoUf45ePAgS5Ys4bvvvrM6ilM9//zzjBgxgkceeQSARo0acfDgQcaOHVvgxUixHDPi5eVFeHg4S5cuzXjM4XCwdOlSlzsn76oMw2Dw4MF8//33LFu2jGrVqlkdKd85HA5SUlKsjuEUt99+O1u3bmXTpk0ZXy1atKBnz55s2rTJ5QoRgMTERPbt20dwcLDVUZyibdu2V11Ov3v3bqpWrWpRovwzbdo0KlSoQOfOna2O4lTJycm4uWUuA9zd3XE4HAWepVgeGQEYNmwYffr0oUWLFrRq1YqJEyeSlJREv379rI7mFImJiZk+gcXExLBp0ybKlClDlSpVLEzmHIMGDWLGjBn88MMPlCxZkhMnTgAQGBiIr6+vxenybuTIkXTq1IkqVapw/vx5ZsyYwfLly1m0aJHV0ZyiZMmSV43v8fPzo2zZsi4z7mf48OF06dKFqlWrcuzYMUaPHo27uzvdu3e3OppTPPvss7Rp04a33nqLhx9+mHXr1vHxxx/z8ccfWx3NqRwOB9OmTaNPnz54eLjWW2aXLl148803qVKlCg0aNGDjxo28++679O/fv+DDFPj1O4XIBx98YFSpUsXw8vIyWrVqZfz5559WR3Ka3377zQCu+urTp4/V0ZziWs8NMKZNm2Z1NKfo37+/UbVqVcPLy8soX768cfvttxu//vqr1bHylatd2tutWzcjODjY8PLyMipVqmR069bN2Lt3r9WxnOqnn34yGjZsaHh7ext169Y1Pv74Y6sjOd2iRYsMwNi1a5fVUZwuISHBGDJkiFGlShXDx8fHqF69uvHyyy8bKSkpBZ7FZhgWtFoTERERuaRYjhkRERGRwkPFiIiIiFhKxYiIiIhYSsWIiIiIWErFiIiIiFhKxYiIiIhYSsWIiIiIWErFiIiIiFhKxYiIiIhYSsWIiIiIWErFiIiIiFhKxYiIiIhY6v8BJbu4xxr8bjEAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# note matplotlib is not a dependency of langchain so you need to install to plot\n", + "\n", + "# from matplotlib import pyplot as plt\n", + "# chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", + "# random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", + "# plt.legend()\n", + "\n", + "print(f\"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}\")\n", + "print(f\"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is a bit of randomness involved in the rl_chain's selection since the chain explores the selection space in order to learn the world as best as it can (see details of default exploration algorithm used [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB)), but overall, default chain policy should be doing better than random as it learns" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Advanced options\n", + "\n", + "The rl chain is highly configurable in order to be able to adjust to various selection scenarios. If you want to learn more about the ML library that powers it please take a look at tutorials [here](https://vowpalwabbit.org/)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "| Section | Description | Example / Usage |\n", + "|---------|-------------|-----------------|\n", + "| [**Set Chain Logging Level**](#set-chain-logging-level) | Set up the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", + "| [**Featurization**](#featurization) | Adjusts the input to the RL chain. Can set auto-embeddings ON for more complex embeddings. | `chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])` |\n", + "| [**Learned Policy to Learn Asynchronously**](#learned-policy-to-learn-asynchronously) | Score asynchronously if user input is needed for scoring. | `chain.update_with_delayed_score(score=, chain_response=response)` |\n", + "| [**Store Progress of Learned Policy**](#store-progress-of-learned-policy) | Option to store the progress of the variable injection learned policy. | `chain.save_progress()` |\n", + "| [**Stop Learning of Learned Policy**](#stop-learning-of-learned-policy) | Toggle the RL chain's learned policy updates ON/OFF. | `chain.deactivate_selection_scorer()` |\n", + "| [**Set a Different Policy**](#set-a-different-policy) | Choose between different policies: default, random, or custom. | Custom policy creation at chain creation time. |\n", + "| [**Different Exploration Algorithms for Default Learned Policy**](#different-exploration-algorithms-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. | `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", + "| [**Learn Policy's Data Logs**](#learn-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", + "| [**Other Advanced Featurization Options**](#other-advanced-featurization-options) | Specify advanced featurization options for the RL chain. 
+ "| [**More Info on Auto or Custom SelectionScorer**](#more-info-on-auto-or-custom-selectionscorer) | Dive deeper into how selection scoring is determined. | `selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template)` |\n",
+ "\n",
+ "### set chain logging level\n",
+ "\n",
+ "```\n",
+ "import logging\n",
+ "logger = logging.getLogger(\"rl_chain\")\n",
+ "logger.setLevel(logging.INFO)\n",
+ "```\n",
+ "\n",
+ "### featurization\n",
+ "\n",
+ "By default the input to the rl chain (`ToSelectFrom`, `BasedOn`) is not tampered with. This might not be sufficient featurization, so, depending on how complex the scenario is, you can turn auto-embeddings ON:\n",
+ "\n",
+ "`chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])`\n",
+ "\n",
+ "This will produce more complex embeddings and featurizations of the inputs, likely accelerating RL chain learning, albeit at the cost of increased runtime.\n",
+ "\n",
+ "By default, [sbert.net's sentence_transformers](https://www.sbert.net/docs/pretrained_models.html#model-overview) `all-mpnet-base-v2` model will be used for these embeddings, but you can set a different model by initializing the chain with it, or set an entirely different encoding object, as long as it has an `encode` function that returns a list of the encodings:\n",
+ "\n",
+ "```\n",
+ "from sentence_transformers import SentenceTransformer\n",
+ "\n",
+ "chain = rl_chain.PickBest.from_llm(\n",
+ "    [...]\n",
+ "    feature_embedder=rl_chain.PickBestFeatureEmbedder(\n",
+ "        auto_embed=True,\n",
+ "        model=SentenceTransformer(\"all-mpnet-base-v2\")\n",
+ "    )\n",
+ ")\n",
+ "```\n",
+ "\n",
+ "Another option is to specify manually which inputs should be embedded:\n",
+ "- `auto_embed = False`\n",
+ "- Can wrap individual variables in `rl_chain.Embed()` or `rl_chain.EmbedAndKeep()` e.g. `user = rl_chain.BasedOn(rl_chain.Embed(\"Tom\"))`\n",
+ "\n",
+ "The final option is to define and set your own feature embedder that returns a valid input for the learned policy.\n",
+ "\n",
+ "### learned policy to learn asynchronously\n",
+ "\n",
+ "If you need input from the user in order to score the result (e.g. your application showed Tom the selected meal and Tom clicked on it, but Anna did not), then the scoring can be done asynchronously. The way to do that is:\n",
+ "\n",
+ "- set `selection_scorer=None` on chain creation OR call `chain.deactivate_selection_scorer()`\n",
+ "- call the chain for a specific input\n",
+ "- keep the chain's response (`response = chain.run([...])`)\n",
+ "- once you have determined the score of the response/chain selection, call the chain with it: `chain.update_with_delayed_score(score=, chain_response=response)`\n",
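+ "\n",
+ "A minimal sketch of this flow follows; the concrete score value used here is a stand-in for whatever feedback signal (e.g. a click) your application actually collects:\n",
+ "\n",
+ "```\n",
+ "chain = rl_chain.PickBest.from_llm(\n",
+ "    llm=llm,\n",
+ "    prompt=PROMPT,\n",
+ "    selection_scorer=None,  # don't score at selection time\n",
+ ")\n",
+ "\n",
+ "response = chain.run(\n",
+ "    meal = rl_chain.ToSelectFrom(meals),\n",
+ "    user = rl_chain.BasedOn(\"Tom\"),\n",
+ "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+ "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
+ ")\n",
+ "\n",
+ "# ... later, once the feedback for this selection is known ...\n",
+ "score = 1.0  # hypothetical: Tom clicked on the selected meal\n",
+ "chain.update_with_delayed_score(score=score, chain_response=response)\n",
+ "```\n",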
+ "\n",
+ "### store progress of learned policy\n",
+ "\n",
+ "Since the variable injection learned policy evolves over time, there is the option to store its progress and continue learning. This can be done by calling:\n",
+ "\n",
+ "`chain.save_progress()`\n",
+ "\n",
+ "which will store the rl chain's learned policy in a file called `latest.vw`. It will also store it in a file with a timestamp. That way, if `save_progress()` is called more than once, multiple checkpoints will be created, but the latest one will always be in `latest.vw`.\n",
+ "\n",
+ "The next time the chain is loaded, it will look for a file called `latest.vw`; if the file exists, it will be loaded into the chain and learning will continue from there.\n",
+ "\n",
+ "By default the rl chain's model checkpoints will be stored in the current directory, but you can specify the save/load location at chain creation time:\n",
+ "\n",
+ "`chain = rl_chain.PickBest.from_llm(model_save_dir=, [...])`\n",
+ "\n",
+ "### stop learning of learned policy\n",
+ "\n",
+ "If you want the rl chain's learned policy to stop updating, you can turn it off and back on:\n",
+ "\n",
+ "`chain.deactivate_selection_scorer()` and `chain.activate_selection_scorer()`\n",
+ "\n",
+ "### set a different policy\n",
+ "\n",
+ "Two policies are currently available, and a custom one can also be supplied:\n",
+ "\n",
+ "- default policy: `VwPolicy`, which learns a [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) [Contextual Bandit](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms) model\n",
+ "\n",
+ "- random policy: `RandomPolicy`, which doesn't learn anything and just selects a value randomly. This policy can be used to compare other policies against a random baseline.\n",
+ "\n",
+ "- custom policies: a custom policy can be created and set at chain creation time\n",
+ "\n",
+ "### different exploration algorithms for the default learned policy\n",
+ "\n",
+ "The default `VwPolicy` is initialized with some default arguments. The default exploration algorithm is [SquareCB](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB), but other Contextual Bandit exploration algorithms can be set, and other hyperparameters can be tuned as well:\n",
+ "\n",
+ "`vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]`\n",
+ "\n",
+ "`chain = rl_chain.PickBest.from_llm(vw_cmd = vw_cmd, [...])`\n",
+ "\n",
+ "### learned policy's data logs\n",
+ "\n",
+ "The `VwPolicy`'s data files can be stored and examined, or used to do [off-policy evaluation](https://vowpalwabbit.org/docs/vowpal_wabbit/python/latest/tutorials/off_policy_evaluation.html) for hyperparameter tuning.\n",
+ "\n",
+ "The way to do this is to set a log file path with `vw_logs` on chain creation:\n",
+ "\n",
+ "`chain = rl_chain.PickBest.from_llm(vw_logs=, [...])`\n",
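+ "\n",
+ "For example, a minimal sketch (the log file path here is just an illustrative choice; any writable location works):\n",
+ "\n",
+ "```\n",
+ "chain = rl_chain.PickBest.from_llm(\n",
+ "    llm=llm,\n",
+ "    prompt=PROMPT,\n",
+ "    vw_logs=\"./logs/rl_chain_data.txt\",  # hypothetical log file path\n",
+ ")\n",
+ "```\n",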
+ "\n",
+ "### other advanced featurization options\n",
+ "\n",
+ "Numerical features can be provided explicitly, with a colon separator:\n",
+ "`age = rl_chain.BasedOn(\"age:32\")`\n",
+ "\n",
+ "`ToSelectFrom` can be a bit more complex if the scenario demands it. Instead of being a list of strings, it can be:\n",
+ "- a list of lists of strings:\n",
+ "    ```\n",
+ "    meal = rl_chain.ToSelectFrom([\n",
+ "        [\"meal 1 name\", \"meal 1 description\"],\n",
+ "        [\"meal 2 name\", \"meal 2 description\"]\n",
+ "    ])\n",
+ "    ```\n",
+ "- a list of dictionaries:\n",
+ "    ```\n",
+ "    meal = rl_chain.ToSelectFrom([\n",
+ "        {\"name\": \"meal 1 name\", \"description\": \"meal 1 description\"},\n",
+ "        {\"name\": \"meal 2 name\", \"description\": \"meal 2 description\"}\n",
+ "    ])\n",
+ "    ```\n",
+ "- a list of dictionaries containing lists:\n",
+ "    ```\n",
+ "    meal = rl_chain.ToSelectFrom([\n",
+ "        {\"name\": [\"meal 1\", \"complex name\"], \"description\": \"meal 1 description\"},\n",
+ "        {\"name\": [\"meal 2\", \"complex name\"], \"description\": \"meal 2 description\"}\n",
+ "    ])\n",
+ "    ```\n",
+ "\n",
+ "`BasedOn` can also take a list of strings:\n",
+ "```\n",
+ "user = rl_chain.BasedOn([\"Tom Joe\", \"age:32\", \"state of california\"])\n",
+ "```\n",
+ "\n",
+ "There is no dictionary variant for `BasedOn`, since multiple variables can be supplied, each wrapped in `BasedOn`.\n",
+ "\n",
+ "Storing the data logs in a file makes it possible to examine how different inputs affect the data format.\n",
+ "\n",
+ "### More info on Auto or Custom SelectionScorer\n",
+ "\n",
+ "It is very important to get the selection scorer right, since the policy uses it to learn. The scorer determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n",
+ "\n",
+ "The general advice is to keep the score between [0, 1], 0 being the worst selection and 1 being the best selection from the available `ToSelectFrom` variables, based on the `BasedOn` variables, but this can be adjusted if the need arises.\n",
+ "\n",
+ "In the examples provided above, the AutoSelectionScorer is used mostly to get users started, but in real-world scenarios it will most likely not be an adequate scorer function.\n",
+ "\n",
+ "The example also provided the option to change part of the scoring prompt template that the AutoSelectionScorer uses to determine whether a selection was good or not:\n",
+ "\n",
+ "```\n",
+ "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n",
+ "chain = rl_chain.PickBest.from_llm(\n",
+ "    llm=llm,\n",
+ "    prompt=PROMPT,\n",
+ "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n",
+ ")\n",
+ "```\n",
+ "\n",
+ "Internally, the AutoSelectionScorer adjusts the scoring prompt to make sure that the LLM scoring returns a single float.\n",
+ "\n",
+ "However, if needed, a full scoring prompt can also be provided:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest] Entering Chain run with input:\n",
+ "\u001b[0m[inputs]\n",
+ "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] Entering Chain run with input:\n",
+ "\u001b[0m[inputs]\n",
+ "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] Entering LLM run with input:\n",
+ "\u001b[0m{\n",
+ "  \"prompts\": [\n",
+ "    \"Here is the description of a meal: \\\"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\\\".\\n\\nEmbed the meal into the given text: \\\"This is the weeks specialty dish, our master chefs believe you will love it!\\\".\\n\\nPrepend a personalized message including the user's name Tom and their preference ['Vegetarian', 'regular dairy is ok'].\\n\\nMake it sound good.\"\n",
+ "  ]\n",
+ "}\n",
+ "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] [1.63s] Exiting LLM run with output:\n",
+ "\u001b[0m{\n",
+ "  \"generations\": [\n",
+ "    [\n",
+ "      {\n",
+ "        \"text\": \"\\nHey Tom, we have a special treat this week! Our master chefs have created a Mexican-Greek fusion dish of Beef Enchiladas with Feta cheese - perfect for those who enjoy vegetarian options and can enjoy regular dairy. 
We know you're going to love it!\",\n", + " \"generation_info\": {\n", + " \"finish_reason\": \"stop\",\n", + " \"logprobs\": null\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llm_output\": {\n", + " \"token_usage\": {\n", + " \"prompt_tokens\": 89,\n", + " \"total_tokens\": 145,\n", + " \"completion_tokens\": 56\n", + " },\n", + " \"model_name\": \"text-davinci-003\"\n", + " },\n", + " \"run\": null\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] [1.63s] Exiting Chain run with output:\n", + "\u001b[0m{\n", + " \"text\": \"\\nHey Tom, we have a special treat this week! Our master chefs have created a Mexican-Greek fusion dish of Beef Enchiladas with Feta cheese - perfect for those who enjoy vegetarian options and can enjoy regular dairy. We know you're going to love it!\"\n", + "}\n", + "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:LLMChain] Entering Chain run with input:\n", + "\u001b[0m[inputs]\n", + "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:LLMChain > 2:llm:OpenAI] Entering LLM run with input:\n", + "\u001b[0m{\n", + " \"prompts\": [\n", + " \"Given ['Vegetarian', 'regular dairy is ok'] rank how good or bad this selection is ['Beef Enchiladas with Feta cheese. Mexican-Greek fusion', 'Chicken Flatbreads with red sauce. Italian-Mexican fusion', 'Veggie sweet potato quesadillas with vegan cheese', 'One-Pan Tortelonni bake with peppers and onions'], IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\n", + " ]\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:LLMChain > 2:llm:OpenAI] [487ms] Exiting LLM run with output:\n", + "\u001b[0m{\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"\\n\\n0.5\",\n", + " \"generation_info\": {\n", + " \"finish_reason\": \"stop\",\n", + " \"logprobs\": null\n", + " }\n", + " }\n", + " ]\n", + " ],\n", + " \"llm_output\": {\n", + " \"token_usage\": {\n", + " \"prompt_tokens\": 104,\n", + " \"total_tokens\": 109,\n", + " \"completion_tokens\": 5\n", + " },\n", + " \"model_name\": \"text-davinci-003\"\n", + " },\n", + " \"run\": null\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:LLMChain] [488ms] Exiting Chain run with output:\n", + "\u001b[0m{\n", + " \"text\": \"\\n\\n0.5\"\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:PickBest] [2.13s] Exiting Chain run with output:\n", + "\u001b[0m[outputs]\n" + ] + }, + { + "data": { + "text/plain": [ + "{'response': \"Hey Tom, we have a special treat this week! Our master chefs have created a Mexican-Greek fusion dish of Beef Enchiladas with Feta cheese - perfect for those who enjoy vegetarian options and can enjoy regular dairy. 
We know you're going to love it!\",\n", + " 'selection_metadata': }" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from langchain.prompts.prompt import PromptTemplate\n", + "import langchain\n", + "langchain.debug = True\n", + "\n", + "REWARD_PROMPT_TEMPLATE = \"\"\"Given {preference} rank how good or bad this selection is {meal}, IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\"\"\n", + "\n", + "\n", + "REWARD_PROMPT = PromptTemplate(\n", + " input_variables=[\"preference\", \"meal\"],\n", + " template=REWARD_PROMPT_TEMPLATE,\n", + ")\n", + "\n", + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n", + ")\n", + "\n", + "chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb b/docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb deleted file mode 100644 index 6b01620336..0000000000 --- a/docs/extras/modules/chains/reinforcement_learning/rl_chain.ipynb +++ /dev/null @@ -1,646 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Learned prompt variable injection via rl chain\n", - "\n", - "The rl_chain is used primarily for prompt variable injection: when we want to enhance a prompt with a value but we are not sure which of the available variable values will make the prompt achieve what we want.\n", - "\n", - "It provides a way to learn a specific prompt engineering policy without fine tuning the underlying foundational model.\n", - "\n", - "The example layed out below is trivial and a strong llm could make a good variable selection and injection without the intervention of this chain, but it is perfect for showcasing the chain's usage. Advanced options and explanations are provided at the end.\n", - "\n", - "The goal of the below scenario is for the chain to select a meal based on the user declared preferences, and inject the meal into the prompt template. The final prompt will then be sent to the llm of choice and the llm output will be returned to the user." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# four meals defined, some vegetarian some not\n", - "\n", - "meals = [\n", - " \"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\",\n", - " \"Chicken Flatbreads with red sauce. 
Italian-Mexican fusion\",\n", - " \"Veggie sweet potato quesadillas with vegan cheese\",\n", - " \"One-Pan Tortelonni bake with peppers and onions\",\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pick and configure the LLM of your choice\n", - "\n", - "from langchain.llms import OpenAI\n", - "llm = OpenAI(engine=\"text-davinci-003\")\n", - "\n", - "llm.predict(\"are you ready?\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "##### Intialize the rl chain with provided defaults\n", - "\n", - "The prompt template which will be used to query the LLM needs to be defined.\n", - "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above, the rl chain will try to pick and inject the best meal\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts import PromptTemplate\n", - "\n", - "# here I am using the variable meal which will be replaced by one of the meals above\n", - "# and some variables like user, preference, and text_to_personalize which I will provide at chain run time\n", - "\n", - "PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: \"{meal}\".\n", - "\n", - "Embed the meal into the given text: \"{text_to_personalize}\".\n", - "\n", - "Prepend a personalized message including the user's name {user} and their preference {preference}.\n", - "\n", - "Make it sound good.\n", - "\"\"\"\n", - "\n", - "PROMPT = PromptTemplate(\n", - " input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], template=PROMPT_TEMPLATE\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next the rl chain's PickBest chain is being initialized. We must provide the llm of choice and the defined prompt. As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import langchain.chains.rl_chain as rl_chain\n", - "\n", - "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once the chain is setup I am going to call it with the meals I want to be selected from, and some context based on which the chain will select a meal." 
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = chain.run(\n",
-    "    meal = rl_chain.ToSelectFrom(meals),\n",
-    "    user = rl_chain.BasedOn(\"Tom\"),\n",
-    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
-    "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "print(response[\"response\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## What is the chain doing\n",
-    "\n",
-    "What is happening behind the scenes here is that the rl chain will\n",
-    "\n",
-    "- take the meals\n",
-    "- take the user and their preference\n",
-    "- based on the user and their preference (context) it will select a meal\n",
-    "- it will auto-evaluate if that meal selection was good or bad\n",
-    "- it will finally inject the meal into the prompt and query the llm\n",
-    "- the user will get the llm response back\n",
-    "\n",
-    "Now, the way the chain is doing this is that it is learning a contextual bandit rl model that is trained to make good selections (specifically the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library is being used).\n",
-    "\n",
-    "Since this rl model will be untrained when we first start, it might make a random selection that doesn't fit the user and their preferences. But if we give it time to learn the user and their preferences, it should start to make better selections (or quickly learn a good one and just pick that!)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "for _ in range(5):\n",
-    "    try:\n",
-    "        response = chain.run(\n",
-    "            meal = rl_chain.ToSelectFrom(meals),\n",
-    "            user = rl_chain.BasedOn(\"Tom\"),\n",
-    "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
-    "            text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
-    "        )\n",
-    "    except Exception as e:\n",
-    "        print(e)\n",
-    "    print(response[\"response\"])"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## How is the chain learning\n",
-    "\n",
-    "The way the chain is learning that Tom prefers vegetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in (`BasedOn`).\n",
-    "\n",
-    "You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself."
-   ]
-  },
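-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "For example, turning this debug output on is a one-liner; it is the same switch that the advanced examples at the end of this notebook use:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import langchain\n",
-    "\n",
-    "# logs every chain call in full, including the AutoSelectionScorer's\n",
-    "# scoring prompt and the score it returns\n",
-    "langchain.debug = True"
-   ]
-  },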
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n",
-    "\n",
-    "chain = rl_chain.PickBest.from_llm(\n",
-    "    llm=llm,\n",
-    "    prompt=PROMPT,\n",
-    "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "If you want to examine the score and other selection metadata, you can do so by examining the metadata object returned by the chain"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = chain.run(\n",
-    "    meal = rl_chain.ToSelectFrom(meals),\n",
-    "    user = rl_chain.BasedOn(\"Tom\"),\n",
-    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
-    "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
-    ")\n",
-    "print(response[\"response\"])\n",
-    "selection_metadata = response[\"selection_metadata\"]\n",
-    "print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "In a more realistic scenario it is likely that you have a well-defined scoring function for what was selected. For example, you might be doing few-shot prompting and want to select prompt examples for a natural language to SQL translation task. In that case the scorer could be: did the SQL that was generated run in an SQL engine? In that case you want to plug in a scoring function (one is sketched right below). In the example after that I will just check if the meal picked was vegetarian or not."
-   ]
-  },
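-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To make the SQL idea concrete, here is a minimal sketch of such a plug-in scorer (hypothetical, not part of the chain). It assumes the generated SQL should be validated against a local scratch SQLite database (`db_path` is a placeholder), and it reuses the same `score_response` interface as the meal scorer below:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import sqlite3\n",
-    "\n",
-    "class SQLValidityScorer(rl_chain.SelectionScorer):\n",
-    "    # hypothetical scorer: reward 1.0 if the generated SQL executes, 0.0 if it errors\n",
-    "    db_path: str = \"example.db\"  # placeholder path to a scratch SQLite database\n",
-    "\n",
-    "    def score_response(\n",
-    "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
-    "\n",
-    "        conn = sqlite3.connect(self.db_path)\n",
-    "        try:\n",
-    "            conn.execute(llm_response)  # try to run the SQL the llm generated\n",
-    "            return 1.0\n",
-    "        except sqlite3.Error:\n",
-    "            return 0.0\n",
-    "        finally:\n",
-    "            conn.close()"
-   ]
-  },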
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "class CustomSelectionScorer(rl_chain.SelectionScorer):\n",
-    "    def score_response(\n",
-    "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
-    "\n",
-    "        print(event.based_on)\n",
-    "        print(event.to_select_from)\n",
-    "\n",
-    "        # you can build a complex scoring function here\n",
-    "        # it is preferable that the score ranges between 0 and 1 but it is not enforced\n",
-    "\n",
-    "        selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n",
-    "        print(f\"selected meal: {selected_meal}\")\n",
-    "\n",
-    "        if \"Tom\" in event.based_on[\"user\"]:\n",
-    "            if \"Vegetarian\" in event.based_on[\"preference\"]:\n",
-    "                if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n",
-    "                    return 0.0\n",
-    "                else:\n",
-    "                    return 1.0\n",
-    "            else:\n",
-    "                if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n",
-    "                    return 1.0\n",
-    "                else:\n",
-    "                    return 0.0\n",
-    "        else:\n",
-    "            raise NotImplementedError(\"I don't know how to score this user\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "chain = rl_chain.PickBest.from_llm(\n",
-    "    llm=llm,\n",
-    "    prompt=PROMPT,\n",
-    "    selection_scorer=CustomSelectionScorer(),\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "response = chain.run(\n",
-    "    meal = rl_chain.ToSelectFrom(meals),\n",
-    "    user = rl_chain.BasedOn(\"Tom\"),\n",
-    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
-    "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## How can I track the chain's progress\n",
-    "\n",
-    "You can track the chain's progress by using the metrics mechanism provided. I am going to expand the users to Tom and Anna, and extend the scoring function. I am going to initialize two chains, one with the default learning policy and one with a built-in random policy (i.e. selects a meal randomly), and plot their scoring progress."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", - " def score_preference(self, preference, selected_meal):\n", - " if \"Vegetarian\" in preference:\n", - " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", - " return 0.0\n", - " else:\n", - " return 1.0\n", - " else:\n", - " if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", - " return 1.0\n", - " else:\n", - " return 0.0\n", - " def score_response(\n", - " self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", - "\n", - " selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", - "\n", - " if \"Tom\" in event.based_on[\"user\"]:\n", - " return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", - " elif \"Anna\" in event.based_on[\"user\"]:\n", - " return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", - " else:\n", - " raise NotImplementedError(\"I don't know how to score this user\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=CustomSelectionScorer(),\n", - " metrics_step=5,\n", - " metrics_window_size=5, # rolling window average\n", - ")\n", - "\n", - "random_chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=CustomSelectionScorer(),\n", - " metrics_step=5,\n", - " metrics_window_size=5, # rolling window average\n", - " policy=rl_chain.PickBestRandomPolicy # set the random policy instead of default\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(40):\n", - " try:\n", - " if i % 2:\n", - " chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " random_chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " else:\n", - " chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Anna\"),\n", - " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " random_chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Anna\"),\n", - " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " except Exception as e:\n", - " print(e)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# note matplotlib is not a dependency of langchain so you need to install it\n", - "\n", - "from matplotlib import pyplot as plt\n", - "chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", 
- "random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", - "plt.legend()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There is a bit of randomness involved in the rl_chain's selection since the chain explores the selection space in order to learn the world as best as it can (see details of default exploration algorithm used [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB)), but overall, default chain policy should be doing better than random as it learns" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Advanced options\n", - "\n", - "The rl chain is highly configurable in order to be able to adjust to various selection scenarios. If you want to learn more about the ML library that powers it please take a look at tutorials [here](https://vowpalwabbit.org/)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "| Section | Description | Example / Usage |\n", - "|---------|-------------|-----------------|\n", - "| [**Set Chain Logging Level**](#set-chain-logging-level) | Set up the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", - "| [**Featurization**](#featurization) | Adjusts the input to the RL chain. Can set auto-embeddings ON for more complex embeddings. | `chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])` |\n", - "| [**Learned Policy to Learn Asynchronously**](#learned-policy-to-learn-asynchronously) | Score asynchronously if user input is needed for scoring. | `chain.update_with_delayed_score(score=, chain_response=response)` |\n", - "| [**Store Progress of Learned Policy**](#store-progress-of-learned-policy) | Option to store the progress of the variable injection learned policy. | `chain.save_progress()` |\n", - "| [**Stop Learning of Learned Policy**](#stop-learning-of-learned-policy) | Toggle the RL chain's learned policy updates ON/OFF. | `chain.deactivate_selection_scorer()` |\n", - "| [**Set a Different Policy**](#set-a-different-policy) | Choose between different policies: default, random, or custom. | Custom policy creation at chain creation time. |\n", - "| [**Different Exploration Algorithms for Default Learned Policy**](#different-exploration-algorithms-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. | `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", - "| [**Learn Policy's Data Logs**](#learn-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", - "| [**Other Advanced Featurization Options**](#other-advanced-featurization-options) | Specify advanced featurization options for the RL chain. | `age = rl_chain.BasedOn(\"age:32\")` |\n", - "| [**More Info on Auto or Custom SelectionScorer**](#more-info-on-auto-or-custom-selectionscorer) | Dive deeper into how selection scoring is determined. | `selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template)` |\n", - "\n", - "\n", - "### set chain logging level\n", - "\n", - "```\n", - "import logging\n", - "logger = logging.getLogger(\"rl_chain\")\n", - "logger.setLevel(logging.INFO)\n", - "```\n", - "\n", - "### featurization\n", - "\n", - "By default the input to the rl chain (`ToSelectFrom`, `BasedOn`) is not tampered with. 
This might not be sufficient featurization, so based on how complex the scenario is you can set auto-embeddings to ON\n", - "\n", - "`chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])`\n", - "\n", - "This will produce more complex embeddings and featurizations of the inputs, likely accelerating RL chain learning, albeit at the cost of increased runtime..\n", - "\n", - "By default, [sbert.net's sentence_transformers's ](https://www.sbert.net/docs/pretrained_models.html#model-overview) `all-mpnet-base-v2` model will be used for these embeddings but you can set a different model by initializing the chain with it, or set an entirely different encoding object as long as it has an `encode` function that returns a list of the encodings:\n", - "\n", - "```\n", - "from sentence_transformers import SentenceTransformer\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " [...]\n", - " feature_embedder=rl_chain.PickBestFeatureEmbedder(\n", - " auto_embed=True,\n", - " model=SentenceTransformer(\"all-mpnet-base-v2\")\n", - " )\n", - ")\n", - "```\n", - "\n", - "Another option is to define what inputs you think should be embedded manually:\n", - "- `auto_embed = False`\n", - "- Can wrap individual variables in `rl_chain.Embed()` or `rl_chain.EmbedAndKeep()` e.g. `user = rl_chain.BasedOn(rl_chain.Embed(\"Tom\"))`\n", - "\n", - "Final option is to define and set your own feature embedder that returns a valid input for the learned policy.\n", - "\n", - "## learned policy to learn asynchronously\n", - "\n", - "If to score the result you need input from the user (e.g. my application showed Tom the selected meal and Tom clicked on it, but Anna did not), then the scoring can be done asynchronously. The way to do that is:\n", - "\n", - "- set `selection_scorer=None` on the chain creation OR call `chain.deactivate_selection_scorer()`\n", - "- call the chain for a specific input\n", - "- keep the chain's response (`response = chain.run([...])`)\n", - "- once you have determined the score of the response/chain selection call the chain with it: `chain.update_with_delayed_score(score=, chain_response=response)`\n", - "\n", - "### store progress of learned policy\n", - "\n", - "Since the variable injection learned policy evolves over time, there is the option to store its progress and continue learning. This can be done by calling:\n", - "\n", - "`chain.save_progress()`\n", - "\n", - "which will store the rl chain's learned policy in a file called `latest.vw`. It will also store it in a file with a timestamp. 
That way, if `save_progress()` is called more than once, multiple checkpoints will be created, but the latest one will always be in `latest.vw`\n",
-    "\n",
-    "Next time the chain is loaded, the chain will look for a file called `latest.vw` and if the file exists it will be loaded into the chain and the learning will continue from there.\n",
-    "\n",
-    "By default the rl chain model checkpoints will be stored in the current directory but you can specify the save/load location at chain creation time:\n",
-    "\n",
-    "`chain = rl_chain.PickBest.from_llm(model_save_dir=, [...])`\n",
-    "\n",
-    "### stop learning of learned policy\n",
-    "\n",
-    "If you want the rl chain's learned policy to stop updating you can turn it off/on:\n",
-    "\n",
-    "`chain.deactivate_selection_scorer()` and `chain.activate_selection_scorer()`\n",
-    "\n",
-    "### set a different policy\n",
-    "\n",
-    "There are two built-in policies, and custom policies can also be provided:\n",
-    "\n",
-    "- default policy: `VwPolicy` which learns a [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) [Contextual Bandit](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms) model\n",
-    "\n",
-    "- random policy: `RandomPolicy` which doesn't learn anything and just selects a value randomly. This policy can be used to compare other policies with a random baseline one.\n",
-    "\n",
-    "- custom policies: a custom policy could be created and set at chain creation time (a sketch follows below)\n",
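-    "\n",
-    "To make the custom policy option concrete, here is a toy sketch (all names hypothetical). It subclasses the built-in `PickBestRandomPolicy`, and it assumes that the chain instantiates the policy class with its feature embedder (as the `policy=rl_chain.PickBestRandomPolicy` usage above suggests) and that `learn()` runs after the selection has been scored. It keys only on the position of the selected item and ignores the context, so treat it as an illustration of the `predict`/`learn`/`log` interface rather than a useful policy:\n",
-    "\n",
-    "```\n",
-    "from collections import defaultdict\n",
-    "from typing import Any, List, Tuple\n",
-    "\n",
-    "class MeanScoreGreedyPolicy(rl_chain.PickBestRandomPolicy):\n",
-    "    # epsilon-greedy on the mean score observed per position\n",
-    "    def __init__(self, feature_embedder, epsilon: float = 0.2, **kwargs: Any):\n",
-    "        super().__init__(feature_embedder, **kwargs)\n",
-    "        self.epsilon = epsilon\n",
-    "        self.score_sums = defaultdict(float)\n",
-    "        self.counts = defaultdict(int)\n",
-    "\n",
-    "    def predict(self, event) -> List[Tuple[int, float]]:\n",
-    "        num_items = len(event.to_select_from)\n",
-    "        # mean score per position, 0.0 for positions never selected yet\n",
-    "        means = [\n",
-    "            self.score_sums[i] / self.counts[i] if self.counts[i] else 0.0\n",
-    "            for i in range(num_items)\n",
-    "        ]\n",
-    "        best = max(range(num_items), key=lambda i: means[i])\n",
-    "        # spread epsilon uniformly, put the remaining mass on the current best\n",
-    "        probs = [self.epsilon / num_items] * num_items\n",
-    "        probs[best] += 1.0 - self.epsilon\n",
-    "        return list(enumerate(probs))\n",
-    "\n",
-    "    def learn(self, event) -> None:\n",
-    "        # assumes learn() is called once the selection has a score attached\n",
-    "        selected = getattr(event, \"selected\", None)\n",
-    "        if selected is not None and selected.score is not None:\n",
-    "            self.score_sums[selected.index] += selected.score\n",
-    "            self.counts[selected.index] += 1\n",
-    "```\n",
-    "\n",
-    "It would then be set at chain creation time just like the random policy: `chain = rl_chain.PickBest.from_llm(policy=MeanScoreGreedyPolicy, [...])`\n",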
])\n", - " ```\n", - "\n", - "`BasedOn` can also take a list of strings:\n", - "```\n", - "user = rl_chain.BasedOn([\"Tom Joe\", \"age:32\", \"state of california\"])\n", - "```\n", - "\n", - "there is no dictionary provided since multiple variables can be supplied wrapped in `BasedOn`\n", - "\n", - "Storing the data logs into a file allows the examination of what different inputs do to the data format.\n", - "\n", - "### More info on Auto or Custom SelectionScorer\n", - "\n", - "The selection scorer is very important to get right since the policy uses it to learn. It determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n", - "\n", - "The general advice is to keep the score between [0, 1], 0 being the worst selection, 1 being the best selection from the available `ToSelectFrom` variables, based on the `BasedOn` variables, but should be adjusted if the need arises.\n", - "\n", - "In the examples provided above, the AutoSelectionScorer is set mostly to get users started but in real world scenarios it will most likely not be an adequate scorer function.\n", - "\n", - "The example also provided the option to change part of the scoring prompt template that the AutoSelectionScorer used to determine whether a selection was good or not:\n", - "\n", - "```\n", - "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", - ")\n", - "\n", - "```\n", - "\n", - "Internally the AutoSelectionScorer adjusted the scoring prompt to make sure that the llm scoring retured a single float.\n", - "\n", - "However, if needed, a FULL scoring prompt can also be provided:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", - "import langchain\n", - "langchain.debug = True\n", - "\n", - "REWARD_PROMPT_TEMPLATE = \"\"\"Given {preference} rank how good or bad this selection is {meal}, IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\"\"\n", - "\n", - "\n", - "REWARD_PROMPT = PromptTemplate(\n", - " input_variables=[\"preference\", \"meal\"],\n", - " template=REWARD_PROMPT_TEMPLATE,\n", - ")\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n", - ")\n", - "\n", - "chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - ")\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.16" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 5b1812f95b226dba2e88279ac334b4bf247a250a Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 18:35:59 -0400 
Subject: [PATCH 42/65] fix linting checks --- .../langchain/chains/rl_chain/pick_best_chain.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index 31f3cece49..f048e30132 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -224,17 +224,17 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): class PickBestRandomPolicy(base.Policy): - def __init__(self, feature_embedder: base.Embedder, **kwargs): + def __init__(self, feature_embedder: base.Embedder, **kwargs: Any): self.feature_embedder = feature_embedder - def predict(self, event: PickBestEvent): + def predict(self, event: PickBestEvent) -> List[Tuple[int, float]]: num_items = len(event.to_select_from) return [(i, 1.0 / num_items) for i in range(num_items)] - def learn(self, event): + def learn(self, event: PickBestEvent) -> None: pass - def log(self, event): + def log(self, event: PickBestEvent) -> None: pass From 736e0dd46ef1f46474884dafce407d0b36b150ec Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 18:40:53 -0400 Subject: [PATCH 43/65] fix --- libs/langchain/langchain/chains/rl_chain/pick_best_chain.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py index f048e30132..0da0780313 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py @@ -223,7 +223,7 @@ class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): return self.format_auto_embed_off(event) -class PickBestRandomPolicy(base.Policy): +class PickBestRandomPolicy(base.Policy[PickBestEvent]): def __init__(self, feature_embedder: base.Embedder, **kwargs: Any): self.feature_embedder = feature_embedder From 5c2069890f1466da037baef5f29aa1375074eab9 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 18:46:45 -0400 Subject: [PATCH 44/65] policy fixes --- .../how_to/learned_prompt_optimization.ipynb | 21 +------------------ .../langchain/chains/rl_chain/base.py | 2 +- 2 files changed, 2 insertions(+), 21 deletions(-) diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb index 87bb3613c4..9d9a055c9e 100644 --- a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb +++ b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb @@ -36,16 +36,6 @@ "execution_count": 2, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/olgavrou/langchain/.testlcvenv/lib/python3.8/site-packages/langchain/utils/utils.py:155: UserWarning: WARNING! engine is not default parameter.\n", - " engine was transferred to model_kwargs.\n", - " Please confirm that engine is what you intended.\n", - " warnings.warn(\n" - ] - }, { "data": { "text/plain": [ @@ -112,16 +102,7 @@ "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/olgavrou/langchain/.testlcvenv/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ "import langchain.chains.rl_chain as rl_chain\n", "\n", diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/langchain/langchain/chains/rl_chain/base.py index 4b5ac572f9..26ac9a43e1 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/langchain/langchain/chains/rl_chain/base.py @@ -166,7 +166,7 @@ class Event(Generic[TSelected], ABC): TEvent = TypeVar("TEvent", bound=Event) -class Policy(ABC): +class Policy(Generic[TEvent], ABC): def __init__(self, **kwargs: Any): pass From 5b6ebbc82556c95efff28b1a863029e692f9ebd6 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 19:42:43 -0400 Subject: [PATCH 45/65] fixes in notebook --- .../how_to/learned_prompt_optimization.ipynb | 83 ++++++++++++------- 1 file changed, 52 insertions(+), 31 deletions(-) diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb index 9d9a055c9e..5ff4a95dcc 100644 --- a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb +++ b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb @@ -12,12 +12,12 @@ "\n", "The example layed out below is trivial and a strong llm could make a good variable selection and injection without the intervention of this chain, but it is perfect for showcasing the chain's usage. Advanced options and explanations are provided at the end.\n", "\n", - "The goal of the below scenario is for the chain to select a meal based on the user declared preferences, and inject the meal into the prompt template. The final prompt will then be sent to the llm of choice and the llm output will be returned to the user." + "The goal of this example scenario is for the chain to select a meal based on the user declared preferences, and inject the meal into the prompt template. The final prompt will then be sent to the llm of choice and the llm output will be returned to the user." 
] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -33,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 37, "metadata": {}, "outputs": [ { @@ -42,7 +42,7 @@ "\"\\n\\nYes, I'm ready.\"" ] }, - "execution_count": 2, + "execution_count": 37, "metadata": {}, "output_type": "execute_result" } @@ -68,7 +68,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -81,13 +81,15 @@ "\n", "Embed the meal into the given text: \"{text_to_personalize}\".\n", "\n", - "Prepend a personalized message including the user's name {user} and their preference {preference}.\n", + "Prepend a personalized message including the user's name \"{user}\" \n", + " and their preference \"{preference}\".\n", "\n", "Make it sound good.\n", "\"\"\"\n", "\n", "PROMPT = PromptTemplate(\n", - " input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], template=PROMPT_TEMPLATE\n", + " input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], \n", + " template=PROMPT_TEMPLATE\n", ")" ] }, @@ -100,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ @@ -118,7 +120,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 40, "metadata": {}, "outputs": [], "source": [ @@ -126,20 +128,21 @@ " meal = rl_chain.ToSelectFrom(meals),\n", " user = rl_chain.BasedOn(\"Tom\"),\n", " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs \\\n", + " believe you will love it!\",\n", ")" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 41, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Hey Tom! Our chefs have put together something special for you this week! We know you're a vegetarian who is ok with regular dairy, so they've crafted a delicious and unique Italian-Mexican fusion dish: Chicken Flatbreads with red sauce. We think you'll absolutely love it!\n" + "Hey Tom! We have an amazing special dish for you this week - veggie sweet potato quesadillas with vegan cheese, which we're sure you'll love as a vegetarian who's ok with regular dairy. Enjoy!\n" ] } ], @@ -169,18 +172,23 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Tom, our chefs have crafted a delicious fusion dish that we think you'll love - Beef Enchiladas with Feta cheese - a Mexican-Greek fusion, and as a vegetarian who can tolerate regular dairy, it's the perfect treat for you!\n", - "Hey Tom! Our master chefs have outdone themselves this week with an amazing dish that you're sure to love. Our specialty dish is a Mexican-Greek fusion of Beef Enchiladas with Feta cheese, and it's perfectly suited for your Vegetarian preferences with regular dairy being ok. Enjoy!\n", - "Hey Tom, we have a special treat for you this week - veggie sweet potato quesadillas with vegan cheese! Our master chefs have put together something delicious and just perfect for your vegetarian preferences, with regular dairy ok as well. We hope you love it!\n", - "Hey Tom, we've got something special for you this week! 
Our master chefs have crafted delicious veggie sweet potato quesadillas with vegan cheese for our vegetarian friends, but regular dairy is ok too! Enjoy!\n", - "Hey Tom, we've got something special for you this week! Our master chefs have created a delicious veggie sweet potato quesadilla with vegan cheese - perfect for your vegetarian diet, with regular dairy also OK. Enjoy!\n" + "\"Hey Tom, our master chefs have prepared something special for you this week - a Mexican-Greek fusion of Beef Enchiladas with Feta cheese that is sure to tantalize your taste buds. Don't worry, we've got you covered with a vegetarian option and regular dairy is ok - so you can enjoy the delicious flavors without any worries!\"\n", + "\n", + "\"Hey Tom! Our master chefs have created a truly unique dish this week, perfect for you! Beef Enchiladas with Feta cheese - a delicious Mexican-Greek fusion - and made with vegetarian ingredients and regular dairy. We know you'll love it!\"\n", + "\n", + "Hey Tom, we have something special for you this week - our veggie sweet potato quesadillas with vegan cheese! We know you like vegetarian dishes and don't mind regular dairy, so we think you'll love this delicious meal.\n", + "\n", + "Hey Tom, we have the perfect dish for you this week! Our master chefs have crafted delicious veggie sweet potato quesadillas with vegan cheese, perfect for vegetarians and those who are okay with regular dairy. We guarantee that you will love it!\n", + "\n", + "Hey Tom! Our master chefs have crafted a delicious Veggie Sweet Potato Quesadillas with vegan cheese, specially designed with your Vegetarian preference in mind - they're sure you will love it! Enjoy this weeks specialty dish!\n", + "\n" ] } ], @@ -195,7 +203,8 @@ " )\n", " except Exception as e:\n", " print(e)\n", - " print(response[\"response\"])" + " print(response[\"response\"])\n", + " print()" ] }, { @@ -495,18 +504,18 @@ "source": [ "| Section | Description | Example / Usage |\n", "|---------|-------------|-----------------|\n", - "| [**Set Chain Logging Level**](#set-chain-logging-level) | Set up the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", + "| [**Change Chain Logging Level**](#change-chain-logging-level) | Change the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", "| [**Featurization**](#featurization) | Adjusts the input to the RL chain. Can set auto-embeddings ON for more complex embeddings. | `chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])` |\n", "| [**Learned Policy to Learn Asynchronously**](#learned-policy-to-learn-asynchronously) | Score asynchronously if user input is needed for scoring. | `chain.update_with_delayed_score(score=, chain_response=response)` |\n", "| [**Store Progress of Learned Policy**](#store-progress-of-learned-policy) | Option to store the progress of the variable injection learned policy. | `chain.save_progress()` |\n", "| [**Stop Learning of Learned Policy**](#stop-learning-of-learned-policy) | Toggle the RL chain's learned policy updates ON/OFF. | `chain.deactivate_selection_scorer()` |\n", "| [**Set a Different Policy**](#set-a-different-policy) | Choose between different policies: default, random, or custom. | Custom policy creation at chain creation time. |\n", - "| [**Different Exploration Algorithms for Default Learned Policy**](#different-exploration-algorithms-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. 
| `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", - "| [**Learn Policy's Data Logs**](#learn-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", + "| [**Different Exploration Algorithms and Options for Default Learned Policy**](#different-exploration-algorithms-and-options-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. | `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", + "| [**Learn Policy's Data Logs**](#learned-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", "| [**Other Advanced Featurization Options**](#other-advanced-featurization-options) | Specify advanced featurization options for the RL chain. | `age = rl_chain.BasedOn(\"age:32\")` |\n", "| [**More Info on Auto or Custom SelectionScorer**](#more-info-on-auto-or-custom-selectionscorer) | Dive deeper into how selection scoring is determined. | `selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template)` |\n", "\n", - "### set chain logging level\n", + "### change chain logging level\n", "\n", "```\n", "import logging\n", @@ -516,13 +525,15 @@ "\n", "### featurization\n", "\n", + "#### auto_embed\n", + "\n", "By default the input to the rl chain (`ToSelectFrom`, `BasedOn`) is not tampered with. This might not be sufficient featurization, so based on how complex the scenario is you can set auto-embeddings to ON\n", "\n", "`chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])`\n", "\n", - "This will produce more complex embeddings and featurizations of the inputs, likely accelerating RL chain learning, albeit at the cost of increased runtime..\n", + "This will produce more complex embeddings and featurizations of the inputs, likely accelerating RL chain learning, albeit at the cost of increased runtime.\n", "\n", - "By default, [sbert.net's sentence_transformers's ](https://www.sbert.net/docs/pretrained_models.html#model-overview) `all-mpnet-base-v2` model will be used for these embeddings but you can set a different model by initializing the chain with it, or set an entirely different encoding object as long as it has an `encode` function that returns a list of the encodings:\n", + "By default, [sbert.net's sentence_transformers's ](https://www.sbert.net/docs/pretrained_models.html#model-overview) `all-mpnet-base-v2` model will be used for these embeddings but you can set a different embeddings model by initializing the chain with it as shown in this example. You could also set an entirely different embeddings encoding object, as long as it has an `encode()` function that returns a list of the encodings.\n", "\n", "```\n", "from sentence_transformers import SentenceTransformer\n", @@ -536,11 +547,15 @@ ")\n", "```\n", "\n", + "#### explicitly defined embeddings\n", + "\n", "Another option is to define what inputs you think should be embedded manually:\n", "- `auto_embed = False`\n", "- Can wrap individual variables in `rl_chain.Embed()` or `rl_chain.EmbedAndKeep()` e.g. 
`user = rl_chain.BasedOn(rl_chain.Embed(\"Tom\"))`\n", "\n", - "Final option is to define and set your own feature embedder that returns a valid input for the learned policy.\n", + "#### custom featurization\n", + "\n", + "Another final option is to define and set a custom featurization/embedder class that returns a valid input for the learned policy.\n", "\n", "## learned policy to learn asynchronously\n", "\n", @@ -581,15 +596,15 @@ "\n", "- custom policies: a custom policy could be created and set at chain creation time\n", "\n", - "### different exploration algorithms for the default learned policy\n", + "### different exploration algorithms and options for the default learned policy\n", "\n", - "The default `VwPolicy` is initialized with some default arguments. The default exploration algorithm is [SquareCB](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB) but other Contextual Bandit exploration algorithms can be set, and other hyper parameters can be set also:\n", + "The default `VwPolicy` is initialized with some default arguments. The default exploration algorithm is [SquareCB](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB) but other Contextual Bandit exploration algorithms can be set, and other hyper parameters can be tuned (see [here](https://vowpalwabbit.org/docs/vowpal_wabbit/python/9.6.0/command_line_args.html) for available options).\n", "\n", "`vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]`\n", "\n", "`chain = rl_chain.PickBest.from_llm(vw_cmd = vw_cmd, [...])`\n", "\n", - "### learn policy's data logs\n", + "### learned policy's data logs\n", "\n", "The `VwPolicy`'s data files can be stored and examined or used to do [off policy evaluation](https://vowpalwabbit.org/docs/vowpal_wabbit/python/latest/tutorials/off_policy_evaluation.html) for hyper parameter tuning.\n", "\n", @@ -636,7 +651,7 @@ "\n", "### More info on Auto or Custom SelectionScorer\n", "\n", - "The selection scorer is very important to get right since the policy uses it to learn. It determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n", + "It is very important to get the selection scorer right since the policy uses it to learn. 
It determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n", "\n", "The general advice is to keep the score between [0, 1], 0 being the worst selection, 1 being the best selection from the available `ToSelectFrom` variables, based on the `BasedOn` variables, but should be adjusted if the need arises.\n", "\n", @@ -761,7 +776,13 @@ "import langchain\n", "langchain.debug = True\n", "\n", - "REWARD_PROMPT_TEMPLATE = \"\"\"Given {preference} rank how good or bad this selection is {meal}, IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\"\"\n", + "REWARD_PROMPT_TEMPLATE = \"\"\"\n", + "\n", + "Given {preference} rank how good or bad this selection is {meal}\n", + "\n", + "IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\n", + "\n", + "\"\"\"\n", "\n", "\n", "REWARD_PROMPT = PromptTemplate(\n", From 8dc5365ee2d28d52c23551efad5bdc5b30b1033e Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 20:50:25 -0400 Subject: [PATCH 46/65] no cache key --- .github/workflows/langchain_ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 06d04b2f47..e02d2edbd1 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -71,7 +71,7 @@ jobs: python-version: ${{ matrix.python-version }} poetry-version: ${{ env.POETRY_VERSION }} working-directory: libs/langchain - cache-key: extended + # cache-key: extended - name: Install dependencies shell: bash From f22fcb8bcd955d1ec4d22a980bcdd71ccfd516b0 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 20:52:18 -0400 Subject: [PATCH 47/65] no cache --- .github/workflows/_lint.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/_lint.yml b/.github/workflows/_lint.yml index 1a01b225a0..01ca17fbd5 100644 --- a/.github/workflows/_lint.yml +++ b/.github/workflows/_lint.yml @@ -87,7 +87,7 @@ jobs: python-version: ${{ matrix.python-version }} poetry-version: ${{ env.POETRY_VERSION }} working-directory: ${{ inputs.working-directory }} - cache-key: lint + # cache-key: lint - name: Check Poetry File shell: bash From 9c45d5a27ecd111747aa336590b8003592dcf96b Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 20:58:05 -0400 Subject: [PATCH 48/65] restore hash keys --- .github/workflows/_lint.yml | 2 +- .github/workflows/langchain_ci.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/_lint.yml b/.github/workflows/_lint.yml index 01ca17fbd5..1a01b225a0 100644 --- a/.github/workflows/_lint.yml +++ b/.github/workflows/_lint.yml @@ -87,7 +87,7 @@ jobs: python-version: ${{ matrix.python-version }} poetry-version: ${{ env.POETRY_VERSION }} working-directory: ${{ inputs.working-directory }} - # cache-key: lint + cache-key: lint - name: Check Poetry File shell: bash diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index e02d2edbd1..06d04b2f47 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -71,7 +71,7 @@ jobs: python-version: ${{ matrix.python-version }} poetry-version: ${{ env.POETRY_VERSION }} working-directory: libs/langchain - # cache-key: extended + cache-key: extended - name: Install dependencies shell: bash From 8d3a8fbefee0fdbe8205c9aa184712da66b5a957 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 22:31:15 -0400 
Subject: [PATCH 49/65] fixes --- .github/workflows/langchain_ci.yml | 2 +- libs/langchain/pyproject.toml | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/langchain_ci.yml b/.github/workflows/langchain_ci.yml index 06d04b2f47..8f1fc5d874 100644 --- a/.github/workflows/langchain_ci.yml +++ b/.github/workflows/langchain_ci.yml @@ -60,7 +60,7 @@ jobs: - "3.8" - "3.9" - "3.10" - # - "3.11" + - "3.11" name: Python ${{ matrix.python-version }} extended tests steps: - uses: actions/checkout@v3 diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 198836e418..a92b62249c 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -125,11 +125,11 @@ newspaper3k = {version = "^0.2.8", optional = true} amazon-textract-caller = {version = "<2", optional = true} xata = {version = "^1.0.0a7", optional = true} xmltodict = {version = "^0.13.0", optional = true} -vowpal-wabbit-next = {version = "0.6.0", optional = true} markdownify = {version = "^0.11.6", optional = true} assemblyai = {version = "^0.17.0", optional = true} dashvector = {version = "^1.0.1", optional = true} sqlite-vss = {version = "^0.1.2", optional = true} +vowpal-wabbit-next = {version = "0.6.0", optional = true} [tool.poetry.group.test.dependencies] @@ -295,7 +295,6 @@ all = [ "amadeus", "librosa", "python-arango", - "vowpal-wabbit-next", ] # An extra used to be able to add extended testing. @@ -344,11 +343,11 @@ extended_testing = [ "xmltodict", "faiss-cpu", "openapi-schema-pydantic", - "vowpal-wabbit-next", "sentence-transformers", "markdownify", "dashvector", "sqlite-vss", + "vowpal-wabbit-next", ] [tool.ruff] From fdb611cc4289503bf2bb202341faf7708668f969 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 22:45:50 -0400 Subject: [PATCH 50/65] update poetry --- libs/langchain/poetry.lock | 488 ++----------------------------------- 1 file changed, 21 insertions(+), 467 deletions(-) diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index 75aeba8d08..b3ad397a7d 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -1,10 +1,9 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. [[package]] name = "absl-py" version = "1.4.0" description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -16,7 +15,6 @@ files = [ name = "aioboto3" version = "11.3.0" description = "Async boto3 wrapper" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -35,7 +33,6 @@ s3cse = ["cryptography (>=2.3.1)"] name = "aiobotocore" version = "2.6.0" description = "Async client for aws services using botocore and aiohttp" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -58,7 +55,6 @@ boto3 = ["boto3 (>=1.28.17,<1.28.18)"] name = "aiodns" version = "3.0.0" description = "Simple DNS resolver for asyncio" -category = "main" optional = true python-versions = "*" files = [ @@ -73,7 +69,6 @@ pycares = ">=4.0.0" name = "aiofiles" version = "23.2.1" description = "File support for asyncio." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -85,7 +80,6 @@ files = [ name = "aiohttp" version = "3.8.5" description = "Async http client/server framework (asyncio)" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -194,7 +188,6 @@ speedups = ["Brotli", "aiodns", "cchardet"] name = "aiohttp-retry" version = "2.8.3" description = "Simple retry client for aiohttp" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -209,7 +202,6 @@ aiohttp = "*" name = "aioitertools" version = "0.11.0" description = "itertools and builtins for AsyncIO and mixed iterables" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -224,7 +216,6 @@ typing_extensions = {version = ">=4.0", markers = "python_version < \"3.10\""} name = "aiosignal" version = "1.3.1" description = "aiosignal: a list of registered asynchronous callbacks" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -239,7 +230,6 @@ frozenlist = ">=1.1.0" name = "aleph-alpha-client" version = "2.17.0" description = "python client to interact with Aleph Alpha api endpoints" -category = "main" optional = true python-versions = "*" files = [ @@ -267,7 +257,6 @@ types = ["mypy", "types-Pillow", "types-requests"] name = "altair" version = "4.2.2" description = "Altair: A declarative statistical visualization library for Python." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -290,7 +279,6 @@ dev = ["black", "docutils", "flake8", "ipython", "m2r", "mistune (<2.0.0)", "pyt name = "amadeus" version = "8.1.0" description = "Python module for the Amadeus travel APIs" -category = "main" optional = true python-versions = ">=3.4.8" files = [ @@ -301,7 +289,6 @@ files = [ name = "amazon-textract-caller" version = "0.0.29" description = "Amazon Textract Caller tools" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -321,7 +308,6 @@ testing = ["amazon-textract-response-parser", "pytest"] name = "amazon-textract-response-parser" version = "1.0.0" description = "Easily parse JSON returned by Amazon Textract." 
-category = "main" optional = true python-versions = ">=3.8" files = [ @@ -337,7 +323,6 @@ marshmallow = ">=3.14,<4" name = "anyio" version = "3.7.1" description = "High level compatibility layer for multiple asynchronous event loop implementations" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -359,7 +344,6 @@ trio = ["trio (<0.22)"] name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 10.9" -category = "dev" optional = false python-versions = "*" files = [ @@ -371,7 +355,6 @@ files = [ name = "argon2-cffi" version = "23.1.0" description = "Argon2 for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -392,7 +375,6 @@ typing = ["mypy"] name = "argon2-cffi-bindings" version = "21.2.0" description = "Low-level CFFI bindings for Argon2" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -430,7 +412,6 @@ tests = ["pytest"] name = "arrow" version = "1.2.3" description = "Better dates & times for Python" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -445,7 +426,6 @@ python-dateutil = ">=2.7.0" name = "arxiv" version = "1.4.8" description = "Python wrapper for the arXiv API: http://arxiv.org/help/api/" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -460,7 +440,6 @@ feedparser = "*" name = "assemblyai" version = "0.17.0" description = "AssemblyAI Python SDK" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -481,7 +460,6 @@ extras = ["pyaudio (>=0.2.13)"] name = "asttokens" version = "2.2.1" description = "Annotate AST trees with source code positions" -category = "dev" optional = false python-versions = "*" files = [ @@ -499,7 +477,6 @@ test = ["astroid", "pytest"] name = "astunparse" version = "1.6.3" description = "An AST unparser for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -515,7 +492,6 @@ wheel = ">=0.23.0,<1.0" name = "async-lru" version = "2.0.4" description = "Simple LRU cache for asyncio" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -530,7 +506,6 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} name = "async-timeout" version = "4.0.3" description = "Timeout context manager for asyncio programs" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -542,7 +517,6 @@ files = [ name = "atlassian-python-api" version = "3.41.0" description = "Python Atlassian REST API Wrapper" -category = "main" optional = true python-versions = "*" files = [ @@ -564,7 +538,6 @@ kerberos = ["requests-kerberos"] name = "attr" version = "0.3.2" description = "Simple decorator to set attributes of target function or class in a DRY way." -category = "main" optional = true python-versions = "*" files = [ @@ -576,7 +549,6 @@ files = [ name = "attrs" version = "23.1.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -595,7 +567,6 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte name = "audioread" version = "3.0.0" description = "multi-library, cross-platform audio decoding" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -606,7 +577,6 @@ files = [ name = "authlib" version = "1.2.1" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
-category = "main" optional = true python-versions = "*" files = [ @@ -621,7 +591,6 @@ cryptography = ">=3.2" name = "awadb" version = "0.3.10" description = "AI Native database for embedding vectors" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -648,7 +617,6 @@ test = ["pytest (>=6.0)"] name = "azure-ai-formrecognizer" version = "3.3.0" description = "Microsoft Azure Form Recognizer Client Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -666,7 +634,6 @@ typing-extensions = ">=4.0.1" name = "azure-ai-vision" version = "0.11.1b1" description = "Microsoft Azure AI Vision SDK for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -678,7 +645,6 @@ files = [ name = "azure-cognitiveservices-speech" version = "1.31.0" description = "Microsoft Cognitive Services Speech SDK for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -694,7 +660,6 @@ files = [ name = "azure-common" version = "1.1.28" description = "Microsoft Azure Client Library for Python (Common)" -category = "main" optional = true python-versions = "*" files = [ @@ -706,7 +671,6 @@ files = [ name = "azure-core" version = "1.29.1" description = "Microsoft Azure Core Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -726,7 +690,6 @@ aio = ["aiohttp (>=3.0)"] name = "azure-cosmos" version = "4.5.0" description = "Microsoft Azure Cosmos Client Library for Python" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -741,7 +704,6 @@ azure-core = ">=1.23.0,<2.0.0" name = "azure-identity" version = "1.14.0" description = "Microsoft Azure Identity Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -759,7 +721,6 @@ msal-extensions = ">=0.3.0,<2.0.0" name = "azure-search-documents" version = "11.4.0b8" description = "Microsoft Azure Cognitive Search Client Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -776,7 +737,6 @@ isodate = ">=0.6.0" name = "babel" version = "2.12.1" description = "Internationalization utilities" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -791,7 +751,6 @@ pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""} name = "backcall" version = "0.2.0" description = "Specifications for callback functions passed in to an API" -category = "dev" optional = false python-versions = "*" files = [ @@ -803,7 +762,6 @@ files = [ name = "backoff" version = "2.2.1" description = "Function decoration for backoff and retry" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -815,7 +773,6 @@ files = [ name = "backports-zoneinfo" version = "0.2.1" description = "Backport of the standard library zoneinfo module" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -844,7 +801,6 @@ tzdata = ["tzdata"] name = "beautifulsoup4" version = "4.12.2" description = "Screen-scraping library" -category = "main" optional = false python-versions = ">=3.6.0" files = [ @@ -863,7 +819,6 @@ lxml = ["lxml"] name = "bibtexparser" version = "1.4.0" description = "Bibtex parser for python 3" -category = "main" optional = true python-versions = "*" files = [ @@ -877,7 +832,6 @@ pyparsing = ">=2.0.3" name = "black" version = "23.7.0" description = "The uncompromising code formatter." 
-category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -924,7 +878,6 @@ uvloop = ["uvloop (>=0.15.2)"] name = "bleach" version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -943,7 +896,6 @@ css = ["tinycss2 (>=1.1.0,<1.2)"] name = "blinker" version = "1.6.2" description = "Fast, simple object-to-object and broadcast signaling" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -955,7 +907,6 @@ files = [ name = "boto3" version = "1.28.17" description = "The AWS SDK for Python" -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -975,7 +926,6 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] name = "botocore" version = "1.31.17" description = "Low-level, data-driven core of boto 3." -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -995,7 +945,6 @@ crt = ["awscrt (==0.16.26)"] name = "brotli" version = "1.0.9" description = "Python bindings for the Brotli compression library" -category = "main" optional = true python-versions = "*" files = [ @@ -1087,7 +1036,6 @@ files = [ name = "brotlicffi" version = "1.0.9.2" description = "Python CFFI bindings to the Brotli library" -category = "main" optional = true python-versions = "*" files = [ @@ -1130,7 +1078,6 @@ cffi = ">=1.0.0" name = "build" version = "0.10.0" description = "A simple, correct Python build frontend" -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -1154,7 +1101,6 @@ virtualenv = ["virtualenv (>=20.0.35)"] name = "cachetools" version = "5.3.1" description = "Extensible memoizing collections and decorators" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1166,7 +1112,6 @@ files = [ name = "cassandra-driver" version = "3.28.0" description = "DataStax Driver for Apache Cassandra" -category = "main" optional = false python-versions = "*" files = [ @@ -1218,7 +1163,6 @@ graph = ["gremlinpython (==3.4.6)"] name = "cassio" version = "0.1.0" description = "A framework-agnostic Python library to seamlessly integrate Apache Cassandra(R) with ML/LLM/genAI workloads." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -1234,7 +1178,6 @@ numpy = ">=1.0" name = "certifi" version = "2023.7.22" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1246,7 +1189,6 @@ files = [ name = "cffi" version = "1.15.1" description = "Foreign Function Interface for Python calling C code." -category = "main" optional = false python-versions = "*" files = [ @@ -1323,7 +1265,6 @@ pycparser = "*" name = "chardet" version = "5.2.0" description = "Universal encoding detector for Python 3" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1335,7 +1276,6 @@ files = [ name = "charset-normalizer" version = "3.2.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-category = "main" optional = false python-versions = ">=3.7.0" files = [ @@ -1420,7 +1360,6 @@ files = [ name = "clarifai" version = "9.7.1" description = "Clarifai Python Utilities" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -1437,7 +1376,6 @@ tritonclient = "2.34.0" name = "clarifai-grpc" version = "9.7.3" description = "Clarifai gRPC API Client" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -1455,7 +1393,6 @@ requests = ">=2.25.1" name = "click" version = "8.1.7" description = "Composable command line interface toolkit" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1470,7 +1407,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "click-plugins" version = "1.1.1" description = "An extension module for click to enable registering CLI commands via setuptools entry-points." -category = "main" optional = true python-versions = "*" files = [ @@ -1488,7 +1424,6 @@ dev = ["coveralls", "pytest (>=3.6)", "pytest-cov", "wheel"] name = "clickhouse-connect" version = "0.5.25" description = "ClickHouse core driver, SqlAlchemy, and Superset libraries" -category = "main" optional = true python-versions = "~=3.7" files = [ @@ -1578,7 +1513,6 @@ superset = ["apache-superset (>=1.4.1)"] name = "cligj" version = "0.7.2" description = "Click params for commmand line interfaces to GeoJSON" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4" files = [ @@ -1596,7 +1530,6 @@ test = ["pytest-cov"] name = "codespell" version = "2.2.5" description = "Codespell" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1614,7 +1547,6 @@ types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency name = "cohere" version = "4.21" description = "" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -1634,7 +1566,6 @@ urllib3 = ">=1.26,<3" name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" files = [ @@ -1646,7 +1577,6 @@ files = [ name = "colored" version = "1.4.4" description = "Simple library for color and formatting to terminal" -category = "dev" optional = false python-versions = "*" files = [ @@ -1657,7 +1587,6 @@ files = [ name = "comm" version = "0.1.4" description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -1677,7 +1606,6 @@ typing = ["mypy (>=0.990)"] name = "coverage" version = "7.3.0" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -1745,7 +1673,6 @@ toml = ["tomli"] name = "cryptography" version = "41.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -1791,7 +1718,6 @@ test-randomorder = ["pytest-randomly"] name = "cssselect" version = "1.2.0" description = "cssselect parses CSS3 Selectors and translates them to XPath 1.0" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1803,7 +1729,6 @@ files = [ name = "dashvector" version = "1.0.1" description = "DashVector Client Python Sdk Library" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -1823,7 +1748,6 @@ protobuf = ">=3.8.0,<4.0.0" name = "dataclasses-json" version = "0.5.9" description = "Easily serialize dataclasses to and from JSON" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -1843,7 +1767,6 @@ dev = ["flake8", "hypothesis", "ipython", "mypy (>=0.710)", "portray", "pytest ( name = "debugpy" version = "1.6.7.post1" description = "An implementation of the Debug Adapter Protocol for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -1871,7 +1794,6 @@ files = [ name = "decorator" version = "5.1.1" description = "Decorators for Humans" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -1883,7 +1805,6 @@ files = [ name = "deeplake" version = "3.6.19" description = "Activeloop Deep Lake" -category = "main" optional = true python-versions = "*" files = [ @@ -1921,7 +1842,6 @@ visualizer = ["IPython", "flask"] name = "defusedxml" version = "0.7.1" description = "XML bomb protection for Python stdlib modules" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -1933,7 +1853,6 @@ files = [ name = "deprecated" version = "1.2.14" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -1951,7 +1870,6 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] name = "deprecation" version = "2.1.0" description = "A library to handle automated deprecations" -category = "main" optional = true python-versions = "*" files = [ @@ -1966,7 +1884,6 @@ packaging = "*" name = "dill" version = "0.3.7" description = "serialize all of Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -1981,7 +1898,6 @@ graph = ["objgraph (>=1.7.2)"] name = "dnspython" version = "2.4.2" description = "DNS toolkit" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -2001,7 +1917,6 @@ wmi = ["wmi (>=1.5.1,<2.0.0)"] name = "docarray" version = "0.32.1" description = "The data structure for multimodal data" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -2040,7 +1955,6 @@ web = ["fastapi (>=0.87.0)"] name = "docker" version = "6.1.3" description = "A Python library for the Docker Engine API." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2062,7 +1976,6 @@ ssh = ["paramiko (>=2.4.3)"] name = "docopt" version = "0.6.2" description = "Pythonic argument parser, that will make you smile" -category = "main" optional = true python-versions = "*" files = [ @@ -2073,7 +1986,6 @@ files = [ name = "duckdb" version = "0.8.1" description = "DuckDB embedded database" -category = "dev" optional = false python-versions = "*" files = [ @@ -2135,7 +2047,6 @@ files = [ name = "duckdb-engine" version = "0.7.3" description = "SQLAlchemy driver for duckdb" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -2152,7 +2063,6 @@ sqlalchemy = ">=1.3.22" name = "duckduckgo-search" version = "3.8.5" description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2170,7 +2080,6 @@ lxml = ">=4.9.2" name = "elastic-transport" version = "8.4.0" description = "Transport classes and utilities shared among Python Elastic client libraries" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2189,7 +2098,6 @@ develop = ["aiohttp", "mock", "pytest", "pytest-asyncio", "pytest-cov", "pytest- name = "elasticsearch" version = "8.9.0" description = "Python client for Elasticsearch" -category = "main" optional = true python-versions = ">=3.6, <4" files = [ @@ -2208,7 +2116,6 @@ requests = ["requests (>=2.4.0,<3.0.0)"] name = "entrypoints" version = "0.4" description = "Discover and load entry points from installed packages." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2220,7 +2127,6 @@ files = [ name = "esprima" version = "4.0.1" description = "ECMAScript parsing infrastructure for multipurpose analysis in Python" -category = "main" optional = true python-versions = "*" files = [ @@ -2231,7 +2137,6 @@ files = [ name = "exceptiongroup" version = "1.1.3" description = "Backport of PEP 654 (exception groups)" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -2246,7 +2151,6 @@ test = ["pytest (>=6)"] name = "executing" version = "1.2.0" description = "Get the currently executing AST node of a frame, and other information" -category = "dev" optional = false python-versions = "*" files = [ @@ -2261,7 +2165,6 @@ tests = ["asttokens", "littleutils", "pytest", "rich"] name = "faiss-cpu" version = "1.7.4" description = "A library for efficient similarity search and clustering of dense vectors." -category = "main" optional = true python-versions = "*" files = [ @@ -2296,7 +2199,6 @@ files = [ name = "fastavro" version = "1.8.2" description = "Fast read/write of AVRO files" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -2337,7 +2239,6 @@ zstandard = ["zstandard"] name = "fastjsonschema" version = "2.18.0" description = "Fastest Python implementation of JSON schema" -category = "dev" optional = false python-versions = "*" files = [ @@ -2352,7 +2253,6 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc name = "feedfinder2" version = "0.0.4" description = "Find the feed URLs for a website." 
-category = "main" optional = true python-versions = "*" files = [ @@ -2368,7 +2268,6 @@ six = "*" name = "feedparser" version = "6.0.10" description = "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2383,7 +2282,6 @@ sgmllib3k = "*" name = "filelock" version = "3.12.2" description = "A platform independent file lock." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2399,7 +2297,6 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "p name = "fiona" version = "1.9.4.post1" description = "Fiona reads and writes spatial data files" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2444,7 +2341,6 @@ test = ["Fiona[s3]", "pytest (>=7)", "pytest-cov", "pytz"] name = "flatbuffers" version = "23.5.26" description = "The FlatBuffers serialization format for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -2456,7 +2352,6 @@ files = [ name = "fqdn" version = "1.5.1" description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" -category = "dev" optional = false python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" files = [ @@ -2468,7 +2363,6 @@ files = [ name = "freezegun" version = "1.2.2" description = "Let your Python tests travel through time" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -2483,7 +2377,6 @@ python-dateutil = ">=2.7" name = "frozenlist" version = "1.4.0" description = "A list-like structure which implements collections.abc.MutableSequence" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -2554,7 +2447,6 @@ files = [ name = "fsspec" version = "2023.6.0" description = "File-system specification" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -2590,7 +2482,6 @@ tqdm = ["tqdm"] name = "future" version = "0.18.3" description = "Clean single-source support for Python 3 and 2" -category = "main" optional = true python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -2601,7 +2492,6 @@ files = [ name = "gast" version = "0.4.0" description = "Python AST that abstracts the underlying Python version" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -2613,7 +2503,6 @@ files = [ name = "geojson" version = "2.5.0" description = "Python bindings and utilities for GeoJSON" -category = "main" optional = true python-versions = "*" files = [ @@ -2625,7 +2514,6 @@ files = [ name = "geomet" version = "0.2.1.post1" description = "GeoJSON <-> WKT/WKB conversion utilities" -category = "main" optional = false python-versions = ">2.6, !=3.3.*, <4" files = [ @@ -2641,7 +2529,6 @@ six = "*" name = "geopandas" version = "0.13.2" description = "Geographic pandas extensions" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -2660,7 +2547,6 @@ shapely = ">=1.7.1" name = "gitdb" version = "4.0.10" description = "Git Object Database" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2675,7 +2561,6 @@ smmap = ">=3.0.1,<6" name = "gitpython" version = "3.1.32" description = "GitPython is a Python library used to interact with Git repositories" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2690,7 +2575,6 @@ gitdb = ">=4.0.1,<5" name = "google-api-core" version = "2.11.1" description = "Google 
API client core library" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2713,7 +2597,6 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] name = "google-api-python-client" version = "2.70.0" description = "Google API Client Library for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2722,7 +2605,7 @@ files = [ ] [package.dependencies] -google-api-core = ">=1.31.5,<2.0.0 || >2.3.0,<3.0.0dev" +google-api-core = ">=1.31.5,<2.0.dev0 || >2.3.0,<3.0.0dev" google-auth = ">=1.19.0,<3.0.0dev" google-auth-httplib2 = ">=0.1.0" httplib2 = ">=0.15.0,<1dev" @@ -2732,7 +2615,6 @@ uritemplate = ">=3.0.1,<5" name = "google-auth" version = "2.22.0" description = "Google Authentication Library" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2758,7 +2640,6 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] name = "google-auth-httplib2" version = "0.1.0" description = "Google Authentication Library: httplib2 transport" -category = "main" optional = true python-versions = "*" files = [ @@ -2775,7 +2656,6 @@ six = "*" name = "google-auth-oauthlib" version = "1.0.0" description = "Google Authentication Library" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -2794,7 +2674,6 @@ tool = ["click (>=6.0.0)"] name = "google-pasta" version = "0.2.0" description = "pasta is an AST-based Python refactoring library" -category = "main" optional = true python-versions = "*" files = [ @@ -2810,7 +2689,6 @@ six = "*" name = "google-search-results" version = "2.4.2" description = "Scrape and search localized results from Google, Bing, Baidu, Yahoo, Yandex, Ebay, Homedepot, youtube at scale using SerpApi.com" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -2824,7 +2702,6 @@ requests = "*" name = "googleapis-common-protos" version = "1.60.0" description = "Common protobufs used in Google APIs" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -2842,7 +2719,6 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] name = "gptcache" version = "0.1.39.1" description = "GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, similar to how Redis works for traditional applications." -category = "main" optional = true python-versions = ">=3.8.1" files = [ @@ -2859,7 +2735,6 @@ requests = "*" name = "gql" version = "3.4.1" description = "GraphQL client for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -2886,7 +2761,6 @@ websockets = ["websockets (>=10,<11)", "websockets (>=9,<10)"] name = "graphql-core" version = "3.2.3" description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL." 
-category = "main" optional = true python-versions = ">=3.6,<4" files = [ @@ -2898,7 +2772,6 @@ files = [ name = "greenlet" version = "2.0.2" description = "Lightweight in-process concurrent programming" -category = "main" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" files = [ @@ -2972,7 +2845,6 @@ test = ["objgraph", "psutil"] name = "grpcio" version = "1.57.0" description = "HTTP/2-based RPC framework" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3030,7 +2902,6 @@ protobuf = ["grpcio-tools (>=1.57.0)"] name = "grpcio-tools" version = "1.48.2" description = "Protobuf code generator for gRPC" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -3091,7 +2962,6 @@ setuptools = "*" name = "h11" version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3103,7 +2973,6 @@ files = [ name = "h2" version = "4.1.0" description = "HTTP/2 State-Machine based protocol implementation" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -3119,7 +2988,6 @@ hyperframe = ">=6.0,<7" name = "h5py" version = "3.9.0" description = "Read and write HDF5 files from Python" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -3153,7 +3021,6 @@ numpy = ">=1.17.3" name = "hnswlib" version = "0.7.0" description = "hnswlib" -category = "main" optional = true python-versions = "*" files = [ @@ -3167,7 +3034,6 @@ numpy = "*" name = "hpack" version = "4.0.0" description = "Pure-Python HPACK header compression" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -3179,7 +3045,6 @@ files = [ name = "html2text" version = "2020.1.16" description = "Turn HTML into equivalent Markdown-structured text." -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -3191,7 +3056,6 @@ files = [ name = "httpcore" version = "0.17.3" description = "A minimal low-level HTTP client." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3203,17 +3067,16 @@ files = [ anyio = ">=3.0,<5.0" certifi = "*" h11 = ">=0.13,<0.15" -sniffio = ">=1.0.0,<2.0.0" +sniffio = "==1.*" [package.extras] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "httplib2" version = "0.22.0" description = "A comprehensive HTTP client library." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -3228,7 +3091,6 @@ pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0 name = "httpx" version = "0.24.1" description = "The next generation HTTP client." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3244,19 +3106,18 @@ h2 = {version = ">=3,<5", optional = true, markers = "extra == \"http2\""} httpcore = ">=0.15.0,<0.18.0" idna = "*" sniffio = "*" -socksio = {version = ">=1.0.0,<2.0.0", optional = true, markers = "extra == \"socks\""} +socksio = {version = "==1.*", optional = true, markers = "extra == \"socks\""} [package.extras] brotli = ["brotli", "brotlicffi"] -cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] -socks = ["socksio (>=1.0.0,<2.0.0)"] +socks = ["socksio (==1.*)"] [[package]] name = "huggingface-hub" version = "0.16.4" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -3289,7 +3150,6 @@ typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "typ name = "humbug" version = "0.3.2" description = "Humbug: Do you build developer tools? Humbug helps you know your users." -category = "main" optional = true python-versions = "*" files = [ @@ -3309,7 +3169,6 @@ profile = ["GPUtil", "psutil", "types-psutil"] name = "hyperframe" version = "6.0.1" description = "HTTP/2 framing layer for Python" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -3321,7 +3180,6 @@ files = [ name = "idna" version = "3.4" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -3333,7 +3191,6 @@ files = [ name = "importlib-metadata" version = "6.8.0" description = "Read metadata from Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -3353,7 +3210,6 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs name = "importlib-resources" version = "6.0.1" description = "Read resources from Python packages" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -3372,7 +3228,6 @@ testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3384,7 +3239,6 @@ files = [ name = "ipykernel" version = "6.25.1" description = "IPython Kernel for Jupyter" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3398,7 +3252,7 @@ comm = ">=0.1.1" debugpy = ">=1.6.5" ipython = ">=7.23.1" jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" matplotlib-inline = ">=0.1" nest-asyncio = "*" packaging = "*" @@ -3418,7 +3272,6 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio" name = "ipython" version = "8.12.2" description = "IPython: Productive Interactive Computing" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3458,7 +3311,6 @@ test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pa name = "ipython-genutils" version = "0.2.0" description = "Vestigial utilities from IPython" -category = "dev" optional = false python-versions = "*" files = [ @@ -3470,7 +3322,6 @@ files = [ name = "ipywidgets" version = "8.1.0" description = "Jupyter interactive widgets" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ 
-3492,7 +3343,6 @@ test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] name = "isodate" version = "0.6.1" description = "An ISO 8601 date/time/duration parser and formatter" -category = "main" optional = true python-versions = "*" files = [ @@ -3507,7 +3357,6 @@ six = "*" name = "isoduration" version = "20.11.0" description = "Operations with ISO 8601 durations" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3522,7 +3371,6 @@ arrow = ">=0.15.0" name = "jaraco-context" version = "4.3.0" description = "Context managers by jaraco" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3538,7 +3386,6 @@ testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-chec name = "jedi" version = "0.19.0" description = "An autocompletion tool for Python that can be used for text editors." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -3558,7 +3405,6 @@ testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] name = "jieba3k" version = "0.35.1" description = "Chinese Words Segementation Utilities" -category = "main" optional = true python-versions = "*" files = [ @@ -3569,7 +3415,6 @@ files = [ name = "jinja2" version = "3.1.2" description = "A very fast and expressive template engine." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -3587,7 +3432,6 @@ i18n = ["Babel (>=2.7)"] name = "jmespath" version = "1.0.1" description = "JSON Matching Expressions" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3599,7 +3443,6 @@ files = [ name = "joblib" version = "1.3.2" description = "Lightweight pipelining with Python functions" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -3611,7 +3454,6 @@ files = [ name = "jq" version = "1.4.1" description = "jq is a lightweight and flexible JSON processor." -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -3676,7 +3518,6 @@ files = [ name = "json5" version = "0.9.14" description = "A Python implementation of the JSON5 data format." -category = "dev" optional = false python-versions = "*" files = [ @@ -3691,7 +3532,6 @@ dev = ["hypothesis"] name = "jsonable" version = "0.3.1" description = "An abstract class that supports jsonserialization/deserialization." 
-category = "main" optional = true python-versions = "*" files = [ @@ -3703,7 +3543,6 @@ files = [ name = "jsonlines" version = "3.1.0" description = "Library with helpers for the jsonlines file format" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -3718,7 +3557,6 @@ attrs = ">=19.2.0" name = "jsonpointer" version = "2.4" description = "Identify specific nodes in a JSON document (RFC 6901)" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ @@ -3730,7 +3568,6 @@ files = [ name = "jsonschema" version = "4.19.0" description = "An implementation of JSON Schema validation for Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -3762,7 +3599,6 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "jsonschema-specifications" version = "2023.7.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -3778,7 +3614,6 @@ referencing = ">=0.28.0" name = "jupyter" version = "1.0.0" description = "Jupyter metapackage. Install all the Jupyter components in one go." -category = "dev" optional = false python-versions = "*" files = [ @@ -3799,7 +3634,6 @@ qtconsole = "*" name = "jupyter-client" version = "8.3.0" description = "Jupyter protocol implementation and client libraries" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3809,7 +3643,7 @@ files = [ [package.dependencies] importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" python-dateutil = ">=2.8.2" pyzmq = ">=23.0" tornado = ">=6.2" @@ -3823,7 +3657,6 @@ test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pyt name = "jupyter-console" version = "6.6.3" description = "Jupyter terminal console" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -3835,7 +3668,7 @@ files = [ ipykernel = ">=6.14" ipython = "*" jupyter-client = ">=7.0.0" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" prompt-toolkit = ">=3.0.30" pygments = "*" pyzmq = ">=17" @@ -3848,7 +3681,6 @@ test = ["flaky", "pexpect", "pytest"] name = "jupyter-core" version = "5.3.1" description = "Jupyter core package. A base package on which Jupyter projects rely." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3869,7 +3701,6 @@ test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] name = "jupyter-events" version = "0.7.0" description = "Jupyter Event System library" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3895,7 +3726,6 @@ test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "p name = "jupyter-lsp" version = "2.2.0" description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3911,7 +3741,6 @@ jupyter-server = ">=1.1.2" name = "jupyter-server" version = "2.7.2" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." 
-category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3924,7 +3753,7 @@ anyio = ">=3.1.0" argon2-cffi = "*" jinja2 = "*" jupyter-client = ">=7.4.4" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" jupyter-events = ">=0.6.0" jupyter-server-terminals = "*" nbconvert = ">=6.4.4" @@ -3948,7 +3777,6 @@ test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-sc name = "jupyter-server-terminals" version = "0.4.4" description = "A Jupyter Server Extension Providing Terminals." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -3968,7 +3796,6 @@ test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", name = "jupyterlab" version = "4.0.5" description = "JupyterLab computational environment" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -4002,7 +3829,6 @@ test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-cons name = "jupyterlab-pygments" version = "0.2.2" description = "Pygments theme using JupyterLab CSS variables" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -4014,7 +3840,6 @@ files = [ name = "jupyterlab-server" version = "2.24.0" description = "A set of server components for JupyterLab and JupyterLab like applications." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -4041,7 +3866,6 @@ test = ["hatch", "ipykernel", "jupyterlab-server[openapi]", "openapi-spec-valida name = "jupyterlab-widgets" version = "3.0.8" description = "Jupyter interactive widgets for JupyterLab" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -4053,7 +3877,6 @@ files = [ name = "keras" version = "2.13.1" description = "Deep learning for humans." -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -4065,7 +3888,6 @@ files = [ name = "lancedb" version = "0.1.16" description = "lancedb" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -4092,7 +3914,6 @@ tests = ["pandas (>=1.4)", "pytest", "pytest-asyncio", "pytest-mock"] name = "langkit" version = "0.0.15" description = "A collection of text metric udfs for whylogs profiling and monitoring in WhyLabs" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -4112,7 +3933,6 @@ all = ["datasets (>=2.12.0,<3.0.0)", "evaluate (>=0.4.0,<0.5.0)", "nltk (>=3.8.1 name = "langsmith" version = "0.0.25" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." -category = "main" optional = false python-versions = ">=3.8.1,<4.0" files = [ @@ -4128,7 +3948,6 @@ requests = ">=2,<3" name = "lark" version = "1.1.7" description = "a modern parsing library" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -4146,7 +3965,6 @@ regex = ["regex"] name = "lazy-loader" version = "0.3" description = "lazy_loader" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4162,10 +3980,11 @@ test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] name = "libclang" version = "16.0.6" description = "Clang Python Bindings, mirrored from the official LLVM repo: https://github.com/llvm/llvm-project/tree/main/clang/bindings/python, to make the installation process easier." 
-category = "main" optional = true python-versions = "*" files = [ + {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, + {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, @@ -4181,7 +4000,6 @@ files = [ name = "libdeeplake" version = "0.0.60" description = "C++ backend for Deep Lake" -category = "main" optional = true python-versions = "*" files = [ @@ -4214,7 +4032,6 @@ numpy = "*" name = "librosa" version = "0.10.1" description = "Python module for audio and music processing" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4246,7 +4063,6 @@ tests = ["matplotlib (>=3.3.0)", "packaging (>=20.0)", "pytest", "pytest-cov", " name = "llvmlite" version = "0.40.1" description = "lightweight wrapper around basic LLVM functionality" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -4280,7 +4096,6 @@ files = [ name = "loguru" version = "0.7.0" description = "Python logging made (stupidly) simple" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -4299,7 +4114,6 @@ dev = ["Sphinx (==5.3.0)", "colorama (==0.4.5)", "colorama (==0.4.6)", "freezegu name = "lxml" version = "4.9.3" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" files = [ @@ -4407,7 +4221,6 @@ source = ["Cython (>=0.29.35)"] name = "lz4" version = "4.3.2" description = "LZ4 Bindings for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4457,7 +4270,6 @@ tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"] name = "manifest-ml" version = "0.0.1" description = "Manifest for Prompt Programming Foundation Models." -category = "main" optional = true python-versions = ">=3.8.0" files = [ @@ -4481,7 +4293,6 @@ dev = ["autopep8 (>=1.6.0)", "black (>=22.3.0)", "docformatter (>=1.4)", "flake8 name = "markdown" version = "3.4.4" description = "Python implementation of John Gruber's Markdown." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4497,7 +4308,6 @@ testing = ["coverage", "pyyaml"] name = "markdown-it-py" version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -4522,7 +4332,6 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] name = "markdownify" version = "0.11.6" description = "Convert HTML to markdown." -category = "main" optional = true python-versions = "*" files = [ @@ -4538,7 +4347,6 @@ six = ">=1.15,<2" name = "markupsafe" version = "2.1.3" description = "Safely add untrusted strings to HTML/XML markup." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -4598,7 +4406,6 @@ files = [ name = "marqo" version = "1.2.4" description = "Tensor search for humans" -category = "main" optional = true python-versions = ">=3" files = [ @@ -4617,7 +4424,6 @@ urllib3 = "*" name = "marshmallow" version = "3.20.1" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -4638,7 +4444,6 @@ tests = ["pytest", "pytz", "simplejson"] name = "marshmallow-enum" version = "1.5.1" description = "Enum field for Marshmallow" -category = "main" optional = false python-versions = "*" files = [ @@ -4653,7 +4458,6 @@ marshmallow = ">=2.0.0" name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" -category = "dev" optional = false python-versions = ">=3.5" files = [ @@ -4668,7 +4472,6 @@ traitlets = "*" name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -4680,7 +4483,6 @@ files = [ name = "mistune" version = "3.0.1" description = "A sane and fast Markdown parser with useful plugins and renderers" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -4692,7 +4494,6 @@ files = [ name = "mmh3" version = "3.1.0" description = "Python wrapper for MurmurHash (MurmurHash3), a set of fast and robust hash functions." -category = "main" optional = true python-versions = "*" files = [ @@ -4737,7 +4538,6 @@ files = [ name = "momento" version = "1.7.1" description = "SDK for Momento" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -4754,7 +4554,6 @@ pyjwt = ">=2.4.0,<3.0.0" name = "momento-wire-types" version = "0.67.0" description = "Momento Client Proto Generated Files" -category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -4770,7 +4569,6 @@ protobuf = ">=3,<5" name = "more-itertools" version = "10.1.0" description = "More routines for operating on iterables, beyond itertools" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -4782,7 +4580,6 @@ files = [ name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" -category = "main" optional = true python-versions = "*" files = [ @@ -4800,7 +4597,6 @@ tests = ["pytest (>=4.6)"] name = "msal" version = "1.23.0" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." -category = "main" optional = true python-versions = "*" files = [ @@ -4820,7 +4616,6 @@ broker = ["pymsalruntime (>=0.13.2,<0.14)"] name = "msal-extensions" version = "1.0.0" description = "Microsoft Authentication Library extensions (MSAL EX) provides a persistence API that can save your data on disk, encrypted on Windows, macOS and Linux. Concurrent data access will be coordinated by a file lock mechanism." 
-category = "main" optional = true python-versions = "*" files = [ @@ -4839,7 +4634,6 @@ portalocker = [ name = "msgpack" version = "1.0.5" description = "MessagePack serializer" -category = "main" optional = true python-versions = "*" files = [ @@ -4912,7 +4706,6 @@ files = [ name = "msrest" version = "0.7.1" description = "AutoRest swagger generator Python client runtime." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -4934,7 +4727,6 @@ async = ["aiodns", "aiohttp (>=3.0)"] name = "multidict" version = "6.0.4" description = "multidict implementation" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -5018,7 +4810,6 @@ files = [ name = "multiprocess" version = "0.70.15" description = "better multiprocessing and multithreading in Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5047,7 +4838,6 @@ dill = ">=0.3.7" name = "mwcli" version = "0.0.3" description = "Utilities for processing MediaWiki on the command line." -category = "main" optional = true python-versions = "*" files = [ @@ -5064,7 +4854,6 @@ para = "*" name = "mwparserfromhell" version = "0.6.4" description = "MWParserFromHell is a parser for MediaWiki wikicode." -category = "main" optional = true python-versions = ">= 3.6" files = [ @@ -5102,7 +4891,6 @@ files = [ name = "mwtypes" version = "0.3.2" description = "A set of types for processing MediaWiki data." -category = "main" optional = true python-versions = "*" files = [ @@ -5117,7 +4905,6 @@ jsonable = ">=0.3.0" name = "mwxml" version = "0.3.3" description = "A set of utilities for processing MediaWiki XML dump data." -category = "main" optional = true python-versions = "*" files = [ @@ -5135,7 +4922,6 @@ para = ">=0.0.1" name = "mypy" version = "0.991" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5186,7 +4972,6 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -5198,7 +4983,6 @@ files = [ name = "mypy-protobuf" version = "3.3.0" description = "Generate mypy stub files from protobuf specs" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5214,7 +4998,6 @@ types-protobuf = ">=3.19.12" name = "nbclient" version = "0.8.0" description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
-category = "dev" optional = false python-versions = ">=3.8.0" files = [ @@ -5224,7 +5007,7 @@ files = [ [package.dependencies] jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" nbformat = ">=5.1" traitlets = ">=5.4" @@ -5237,7 +5020,6 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>= name = "nbconvert" version = "7.7.4" description = "Converting Jupyter Notebooks" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -5276,7 +5058,6 @@ webpdf = ["playwright"] name = "nbformat" version = "5.9.2" description = "The Jupyter Notebook format" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -5298,7 +5079,6 @@ test = ["pep440", "pre-commit", "pytest", "testpath"] name = "nebula3-python" version = "3.4.0" description = "Python client for NebulaGraph V3.4" -category = "main" optional = true python-versions = "*" files = [ @@ -5316,7 +5096,6 @@ six = ">=1.16.0" name = "neo4j" version = "5.11.0" description = "Neo4j Bolt driver for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5334,7 +5113,6 @@ pandas = ["numpy (>=1.7.0,<2.0.0)", "pandas (>=1.1.0,<3.0.0)"] name = "nest-asyncio" version = "1.5.7" description = "Patch asyncio to allow nested event loops" -category = "main" optional = false python-versions = ">=3.5" files = [ @@ -5346,7 +5124,6 @@ files = [ name = "networkx" version = "2.8.8" description = "Python package for creating and manipulating graphs and networks" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -5365,7 +5142,6 @@ test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] name = "newspaper3k" version = "0.2.8" description = "Simplified python article discovery & extraction." -category = "main" optional = true python-versions = "*" files = [ @@ -5392,7 +5168,6 @@ tldextract = ">=2.0.1" name = "nlpcloud" version = "1.1.44" description = "Python client for the NLP Cloud API" -category = "main" optional = true python-versions = "*" files = [ @@ -5407,7 +5182,6 @@ requests = "*" name = "nltk" version = "3.8.1" description = "Natural Language Toolkit" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5433,7 +5207,6 @@ twitter = ["twython"] name = "nomic" version = "1.1.14" description = "The offical Nomic python client." 
-category = "main" optional = true python-versions = "*" files = [ @@ -5461,7 +5234,6 @@ gpt4all = ["peft (==0.3.0.dev0)", "sentencepiece", "torch", "transformers (==4.2 name = "notebook" version = "7.0.2" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -5486,7 +5258,6 @@ test = ["ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[tes name = "notebook-shim" version = "0.2.3" description = "A shim layer for notebook traits and config" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -5504,7 +5275,6 @@ test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync" name = "numba" version = "0.57.1" description = "compiling Python code using LLVM" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -5536,14 +5306,13 @@ files = [ [package.dependencies] importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} -llvmlite = ">=0.40.0dev0,<0.41" +llvmlite = "==0.40.*" numpy = ">=1.21,<1.25" [[package]] name = "numcodecs" version = "0.11.0" description = "A Python package providing buffer compression and transformation codecs for use" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -5576,7 +5345,6 @@ zfpy = ["zfpy (>=1.0.0)"] name = "numexpr" version = "2.8.5" description = "Fast numerical expression evaluator for NumPy" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -5619,7 +5387,6 @@ numpy = ">=1.13.3" name = "numpy" version = "1.24.3" description = "Fundamental package for array computing in Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -5657,7 +5424,6 @@ files = [ name = "nvidia-cublas-cu11" version = "11.10.3.66" description = "CUBLAS native runtime libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5673,7 +5439,6 @@ wheel = "*" name = "nvidia-cuda-nvrtc-cu11" version = "11.7.99" description = "NVRTC native runtime libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5690,7 +5455,6 @@ wheel = "*" name = "nvidia-cuda-runtime-cu11" version = "11.7.99" description = "CUDA Runtime native Libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5706,7 +5470,6 @@ wheel = "*" name = "nvidia-cudnn-cu11" version = "8.5.0.96" description = "cuDNN runtime libraries" -category = "main" optional = true python-versions = ">=3" files = [ @@ -5722,7 +5485,6 @@ wheel = "*" name = "o365" version = "2.0.27" description = "Microsoft Graph and Office 365 API made easy" -category = "main" optional = true python-versions = ">=3.4" files = [ @@ -5743,7 +5505,6 @@ tzlocal = ">=4.0,<5.0" name = "oauthlib" version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -5760,7 +5521,6 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] name = "openai" version = "0.27.8" description = "Python client library for the OpenAI API" -category = "main" optional = false python-versions = ">=3.7.1" files = [ @@ -5775,7 +5535,7 @@ tqdm = "*" [package.extras] datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -dev = ["black (>=21.6b0,<22.0)", "pytest (>=6.0.0,<7.0.0)", "pytest-asyncio", "pytest-mock"] +dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", 
"pytest-asyncio", "pytest-mock"] embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"] wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"] @@ -5783,7 +5543,6 @@ wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1 name = "openapi-schema-pydantic" version = "1.2.4" description = "OpenAPI (v3) specification schema as pydantic class" -category = "main" optional = true python-versions = ">=3.6.1" files = [ @@ -5798,7 +5557,6 @@ pydantic = ">=1.8.2" name = "openlm" version = "0.0.5" description = "Drop-in OpenAI-compatible that can call LLMs from other providers" -category = "main" optional = true python-versions = ">=3.8.1,<4.0" files = [ @@ -5813,7 +5571,6 @@ requests = ">=2,<3" name = "opensearch-py" version = "2.3.1" description = "Python client for OpenSearch" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" files = [ @@ -5838,7 +5595,6 @@ kerberos = ["requests-kerberos"] name = "opt-einsum" version = "3.3.0" description = "Optimizing numpys einsum function" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -5857,7 +5613,6 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"] name = "orjson" version = "3.9.5" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -5927,7 +5682,6 @@ files = [ name = "overrides" version = "7.4.0" description = "A decorator to automatically detect mismatch when overriding a method." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -5939,7 +5693,6 @@ files = [ name = "packaging" version = "23.1" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -5951,7 +5704,6 @@ files = [ name = "pandas" version = "2.0.3" description = "Powerful data structures for data analysis, time series, and statistics" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -6019,7 +5771,6 @@ xml = ["lxml (>=4.6.3)"] name = "pandocfilters" version = "1.5.0" description = "Utilities for writing pandoc filters in python" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -6031,7 +5782,6 @@ files = [ name = "para" version = "0.0.8" description = "a set utilities that ake advantage of python's 'multiprocessing' module to distribute CPU-intensive tasks" -category = "main" optional = true python-versions = "*" files = [ @@ -6043,7 +5793,6 @@ files = [ name = "parso" version = "0.8.3" description = "A Python Parser" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -6059,7 +5808,6 @@ testing = ["docopt", "pytest (<6.0.0)"] name = "pathos" version = "0.3.1" description = "parallel graph management and execution in heterogeneous computing" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6077,7 +5825,6 @@ ppft = ">=1.7.6.7" name = "pathspec" version = "0.11.2" description = "Utility library for gitignore style pattern matching of file paths." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -6089,7 +5836,6 @@ files = [ name = "pdfminer-six" version = "20221105" description = "PDF parser and analyzer" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -6110,7 +5856,6 @@ image = ["Pillow"] name = "pexpect" version = "4.8.0" description = "Pexpect allows easy control of interactive console applications." -category = "main" optional = false python-versions = "*" files = [ @@ -6125,7 +5870,6 @@ ptyprocess = ">=0.5" name = "pgvector" version = "0.1.8" description = "pgvector support for Python" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -6139,7 +5883,6 @@ numpy = "*" name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" -category = "dev" optional = false python-versions = "*" files = [ @@ -6151,7 +5894,6 @@ files = [ name = "pillow" version = "10.0.0" description = "Python Imaging Library (Fork)" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -6221,7 +5963,6 @@ tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "pa name = "pinecone-client" version = "2.2.2" description = "Pinecone client and SDK" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -6247,7 +5988,6 @@ grpc = ["googleapis-common-protos (>=1.53.0)", "grpc-gateway-protoc-gen-openapiv name = "pinecone-text" version = "0.4.2" description = "Text utilities library by Pinecone.io" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -6267,7 +6007,6 @@ wget = ">=3.2,<4.0" name = "pkgutil-resolve-name" version = "1.3.10" description = "Resolve a name to an object." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -6279,7 +6018,6 @@ files = [ name = "platformdirs" version = "3.10.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
-category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6295,7 +6033,6 @@ test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-co name = "playwright" version = "1.37.0" description = "A high-level API to automate web browsers" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -6317,7 +6054,6 @@ typing-extensions = {version = "*", markers = "python_version <= \"3.8\""} name = "pluggy" version = "1.2.0" description = "plugin and hook calling mechanisms for python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -6333,7 +6069,6 @@ testing = ["pytest", "pytest-benchmark"] name = "pooch" version = "1.7.0" description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6355,7 +6090,6 @@ xxhash = ["xxhash (>=1.4.3)"] name = "portalocker" version = "2.7.0" description = "Wraps the portalocker recipe for easy usage" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -6375,7 +6109,6 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p name = "pox" version = "0.3.3" description = "utilities for filesystem exploration and automated builds" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6387,7 +6120,6 @@ files = [ name = "ppft" version = "1.7.6.7" description = "distributed and parallel Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6402,7 +6134,6 @@ dill = ["dill (>=0.3.7)"] name = "prometheus-client" version = "0.17.1" description = "Python client for the Prometheus monitoring system." -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -6417,7 +6148,6 @@ twisted = ["twisted"] name = "prompt-toolkit" version = "3.0.39" description = "Library for building powerful interactive command lines in Python" -category = "dev" optional = false python-versions = ">=3.7.0" files = [ @@ -6432,7 +6162,6 @@ wcwidth = "*" name = "protobuf" version = "3.20.3" description = "Protocol Buffers" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6464,7 +6193,6 @@ files = [ name = "psutil" version = "5.9.5" description = "Cross-platform lib for process and system monitoring in Python." -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -6491,7 +6219,6 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] name = "psychicapi" version = "0.8.4" description = "Psychic.dev is an open-source data integration platform for LLMs. 
This is the Python client for Psychic" -category = "main" optional = true python-versions = "*" files = [ @@ -6506,7 +6233,6 @@ requests = "*" name = "psycopg2-binary" version = "2.9.7" description = "psycopg2 - Python-PostgreSQL Database Adapter" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -6576,7 +6302,6 @@ files = [ name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -category = "main" optional = false python-versions = "*" files = [ @@ -6588,7 +6313,6 @@ files = [ name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" -category = "dev" optional = false python-versions = "*" files = [ @@ -6603,7 +6327,6 @@ tests = ["pytest"] name = "py" version = "1.11.0" description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -6615,7 +6338,6 @@ files = [ name = "py-trello" version = "0.19.0" description = "Python wrapper around the Trello API" -category = "main" optional = true python-versions = "*" files = [ @@ -6632,7 +6354,6 @@ requests-oauthlib = ">=0.4.1" name = "py4j" version = "0.10.9.7" description = "Enables Python programs to dynamically access arbitrary Java objects" -category = "main" optional = true python-versions = "*" files = [ @@ -6644,7 +6365,6 @@ files = [ name = "pyaes" version = "1.6.1" description = "Pure-Python Implementation of the AES block-cipher and common modes of operation" -category = "main" optional = true python-versions = "*" files = [ @@ -6655,7 +6375,6 @@ files = [ name = "pyarrow" version = "12.0.1" description = "Python library for Apache Arrow" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6693,7 +6412,6 @@ numpy = ">=1.16.6" name = "pyasn1" version = "0.5.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -6705,7 +6423,6 @@ files = [ name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -6720,7 +6437,6 @@ pyasn1 = ">=0.4.6,<0.6.0" name = "pycares" version = "4.3.0" description = "Python interface for c-ares" -category = "main" optional = true python-versions = "*" files = [ @@ -6788,7 +6504,6 @@ idna = ["idna (>=2.1)"] name = "pycparser" version = "2.21" description = "C parser in Python" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -6800,7 +6515,6 @@ files = [ name = "pydantic" version = "1.10.12" description = "Data validation and settings management using python type hints" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6853,7 +6567,6 @@ email = ["email-validator (>=1.0.3)"] name = "pydeck" version = "0.8.0" description = "Widget for deck.gl maps" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6873,7 +6586,6 @@ jupyter = ["ipykernel (>=5.1.2)", "ipython (>=5.8.0)", "ipywidgets (>=7,<8)", "t name = "pyee" version = "9.0.4" description = "A port of node.js's EventEmitter to python." 
-category = "dev" optional = false python-versions = "*" files = [ @@ -6888,7 +6600,6 @@ typing-extensions = "*" name = "pygments" version = "2.16.1" description = "Pygments is a syntax highlighting package written in Python." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -6903,7 +6614,6 @@ plugins = ["importlib-metadata"] name = "pyjwt" version = "2.8.0" description = "JSON Web Token implementation in Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -6924,7 +6634,6 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] name = "pylance" version = "0.5.10" description = "python wrapper for lance-rs" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -6946,7 +6655,6 @@ tests = ["duckdb", "ml_dtypes", "pandas (>=1.4)", "polars[pandas,pyarrow]", "pyt name = "pymongo" version = "4.5.0" description = "Python driver for MongoDB " -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7048,7 +6756,6 @@ zstd = ["zstandard"] name = "pympler" version = "1.0.1" description = "A development tool to measure, monitor and analyze the memory behavior of Python objects." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7060,7 +6767,6 @@ files = [ name = "pymupdf" version = "1.22.5" description = "Python bindings for the PDF toolkit and renderer MuPDF" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7100,7 +6806,6 @@ files = [ name = "pyowm" version = "3.3.0" description = "A Python wrapper around OpenWeatherMap web APIs" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7120,7 +6825,6 @@ requests = [ name = "pyparsing" version = "3.1.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "main" optional = true python-versions = ">=3.6.8" files = [ @@ -7135,7 +6839,6 @@ diagrams = ["jinja2", "railroad-diagrams"] name = "pypdf" version = "3.15.2" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7157,7 +6860,6 @@ image = ["Pillow (>=8.0.0)"] name = "pypdfium2" version = "4.18.0" description = "Python bindings to PDFium" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7179,7 +6881,6 @@ files = [ name = "pyphen" version = "0.14.0" description = "Pure Python module to hyphenate text" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7195,7 +6896,6 @@ test = ["flake8", "isort", "pytest"] name = "pyproj" version = "3.5.0" description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -7243,7 +6943,6 @@ certifi = "*" name = "pyproject-hooks" version = "1.0.0" description = "Wrappers to call pyproject.toml-based build backend hooks." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7258,7 +6957,6 @@ tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} name = "pysocks" version = "1.7.1" description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." 
-category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -7271,7 +6969,6 @@ files = [ name = "pyspark" version = "3.4.1" description = "Apache Spark Python API" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7292,7 +6989,6 @@ sql = ["numpy (>=1.15)", "pandas (>=1.0.5)", "pyarrow (>=1.0.0)"] name = "pytesseract" version = "0.3.10" description = "Python-tesseract is a python wrapper for Google's Tesseract-OCR" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -7308,7 +7004,6 @@ Pillow = ">=8.0.0" name = "pytest" version = "7.4.0" description = "pytest: simple powerful testing with Python" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7331,7 +7026,6 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "pytest-asyncio" version = "0.20.3" description = "Pytest support for asyncio" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7350,7 +7044,6 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy name = "pytest-cov" version = "4.1.0" description = "Pytest plugin for measuring coverage." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7369,7 +7062,6 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytest-dotenv" version = "0.5.2" description = "A py.test plugin that parses environment files before running tests" -category = "dev" optional = false python-versions = "*" files = [ @@ -7385,7 +7077,6 @@ python-dotenv = ">=0.9.1" name = "pytest-mock" version = "3.11.1" description = "Thin-wrapper around the mock package for easier use with pytest" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7403,7 +7094,6 @@ dev = ["pre-commit", "pytest-asyncio", "tox"] name = "pytest-socket" version = "0.6.0" description = "Pytest Plugin to disable socket calls during tests" -category = "dev" optional = false python-versions = ">=3.7,<4.0" files = [ @@ -7418,7 +7108,6 @@ pytest = ">=3.6.3" name = "pytest-vcr" version = "1.0.2" description = "Plugin for managing VCR.py cassettes" -category = "dev" optional = false python-versions = "*" files = [ @@ -7434,7 +7123,6 @@ vcrpy = "*" name = "pytest-watcher" version = "0.2.6" description = "Continiously runs pytest on changes in *.py files" -category = "dev" optional = false python-versions = ">=3.7.0,<4.0.0" files = [ @@ -7449,7 +7137,6 @@ watchdog = ">=2.0.0" name = "python-arango" version = "7.6.0" description = "Python Driver for ArangoDB" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -7473,7 +7160,6 @@ dev = ["black (>=22.3.0)", "flake8 (>=4.0.1)", "isort (>=5.10.1)", "mock", "mypy name = "python-dateutil" version = "2.8.2" description = "Extensions to the standard Python datetime module" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ @@ -7488,7 +7174,6 @@ six = ">=1.5" name = "python-dotenv" version = "1.0.0" description = "Read key-value pairs from a .env file and set them as environment variables" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -7503,7 +7188,6 @@ cli = ["click (>=5.0)"] name = "python-json-logger" version = "2.0.7" description = "A python library adding a json log formatter" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -7515,7 +7199,6 @@ files = [ name = "python-rapidjson" version = "1.10" 
description = "Python wrapper around rapidjson" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7581,7 +7264,6 @@ files = [ name = "pytz" version = "2023.3" description = "World timezone definitions, modern and historical" -category = "main" optional = false python-versions = "*" files = [ @@ -7593,7 +7275,6 @@ files = [ name = "pytz-deprecation-shim" version = "0.1.0.post0" description = "Shims to make deprecation of pytz easier" -category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ @@ -7609,7 +7290,6 @@ tzdata = {version = "*", markers = "python_version >= \"3.6\""} name = "pyvespa" version = "0.33.0" description = "Python API for vespa.ai" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -7634,7 +7314,6 @@ ml = ["keras-tuner", "tensorflow", "tensorflow-ranking", "torch (<1.13)", "trans name = "pywin32" version = "306" description = "Python for Window Extensions" -category = "main" optional = false python-versions = "*" files = [ @@ -7658,7 +7337,6 @@ files = [ name = "pywinpty" version = "2.0.11" description = "Pseudo terminal support for Windows from Python." -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -7673,7 +7351,6 @@ files = [ name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -7723,7 +7400,6 @@ files = [ name = "pyzmq" version = "25.1.1" description = "Python bindings for 0MQ" -category = "dev" optional = false python-versions = ">=3.6" files = [ @@ -7829,7 +7505,6 @@ cffi = {version = "*", markers = "implementation_name == \"pypy\""} name = "qdrant-client" version = "1.4.0" description = "Client library for the Qdrant vector search engine" -category = "main" optional = true python-versions = ">=3.7,<3.12" files = [ @@ -7850,7 +7525,6 @@ urllib3 = ">=1.26.14,<2.0.0" name = "qtconsole" version = "5.4.3" description = "Jupyter Qt console" -category = "dev" optional = false python-versions = ">= 3.7" files = [ @@ -7877,7 +7551,6 @@ test = ["flaky", "pytest", "pytest-qt"] name = "qtpy" version = "2.3.1" description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -7895,7 +7568,6 @@ test = ["pytest (>=6,!=7.0.0,!=7.0.1)", "pytest-cov (>=3.0.0)", "pytest-qt"] name = "rank-bm25" version = "0.2.2" description = "Various BM25 algorithms for document ranking" -category = "main" optional = true python-versions = "*" files = [ @@ -7913,7 +7585,6 @@ dev = ["pytest"] name = "rapidfuzz" version = "3.2.0" description = "rapid fuzzy string matching" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8018,7 +7689,6 @@ full = ["numpy"] name = "ratelimiter" version = "1.2.0.post0" description = "Simple python rate limiting object" -category = "main" optional = true python-versions = "*" files = [ @@ -8033,7 +7703,6 @@ test = ["pytest (>=3.0)", "pytest-asyncio"] name = "rdflib" version = "6.3.2" description = "RDFLib is a Python library for working with RDF, a simple yet powerful language for representing information." 
-category = "main" optional = true python-versions = ">=3.7,<4.0" files = [ @@ -8055,7 +7724,6 @@ networkx = ["networkx (>=2.0.0,<3.0.0)"] name = "redis" version = "4.6.0" description = "Python client for Redis database and key-value store" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8074,7 +7742,6 @@ ocsp = ["cryptography (>=36.0.1)", "pyopenssl (==20.0.1)", "requests (>=2.26.0)" name = "referencing" version = "0.30.2" description = "JSON Referencing + Python" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -8090,7 +7757,6 @@ rpds-py = ">=0.7.0" name = "regex" version = "2023.8.8" description = "Alternative regular expression module, to replace re." -category = "main" optional = false python-versions = ">=3.6" files = [ @@ -8188,7 +7854,6 @@ files = [ name = "requests" version = "2.31.0" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8211,7 +7876,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "requests-file" version = "1.5.1" description = "File transport adapter for Requests" -category = "main" optional = true python-versions = "*" files = [ @@ -8227,7 +7891,6 @@ six = "*" name = "requests-oauthlib" version = "1.3.1" description = "OAuthlib authentication support for Requests." -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -8246,7 +7909,6 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] name = "requests-toolbelt" version = "1.0.0" description = "A utility belt for advanced users of python-requests" -category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ @@ -8261,7 +7923,6 @@ requests = ">=2.0.1,<3.0.0" name = "responses" version = "0.22.0" description = "A utility library for mocking out the `requests` Python library." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -8282,7 +7943,6 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy name = "retry" version = "0.9.2" description = "Easy to use retry decorator." -category = "main" optional = true python-versions = "*" files = [ @@ -8298,7 +7958,6 @@ py = ">=1.4.26,<2.0.0" name = "rfc3339-validator" version = "0.1.4" description = "A pure python RFC3339 validator" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -8313,7 +7972,6 @@ six = "*" name = "rfc3986-validator" version = "0.1.1" description = "Pure python rfc3986 validator" -category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ @@ -8325,7 +7983,6 @@ files = [ name = "rich" version = "13.5.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -8345,7 +8002,6 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] name = "rpds-py" version = "0.9.2" description = "Python bindings to Rust's persistent data structures (rpds)" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -8452,7 +8108,6 @@ files = [ name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" -category = "main" optional = true python-versions = ">=3.6,<4" files = [ @@ -8467,7 +8122,6 @@ pyasn1 = ">=0.1.3" name = "ruff" version = "0.0.249" description = "An extremely fast Python linter, written in Rust." 
-category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -8494,7 +8148,6 @@ files = [ name = "s3transfer" version = "0.6.2" description = "An Amazon S3 Transfer Manager" -category = "main" optional = true python-versions = ">= 3.7" files = [ @@ -8512,7 +8165,6 @@ crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] name = "safetensors" version = "0.3.2" description = "Fast and Safe Tensor serialization" -category = "main" optional = true python-versions = "*" files = [ @@ -8586,7 +8238,6 @@ torch = ["torch (>=1.10)"] name = "scikit-learn" version = "1.3.0" description = "A set of python modules for machine learning and data mining" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -8629,7 +8280,6 @@ tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc ( name = "scipy" version = "1.9.3" description = "Fundamental algorithms for scientific computing in Python" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -8668,7 +8318,6 @@ test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "sciki name = "semver" version = "3.0.1" description = "Python helper for Semantic Versioning (https://semver.org)" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8680,7 +8329,6 @@ files = [ name = "send2trash" version = "1.8.2" description = "Send file to trash natively under Mac OS X, Windows and Linux" -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -8697,7 +8345,6 @@ win32 = ["pywin32"] name = "sentence-transformers" version = "2.2.2" description = "Multilingual text embeddings" -category = "main" optional = true python-versions = ">=3.6.0" files = [ @@ -8720,7 +8367,6 @@ transformers = ">=4.6.0,<5.0.0" name = "sentencepiece" version = "0.1.99" description = "SentencePiece python wrapper" -category = "main" optional = true python-versions = "*" files = [ @@ -8775,7 +8421,6 @@ files = [ name = "setuptools" version = "67.8.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8792,7 +8437,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs ( name = "sgmllib3k" version = "1.0.0" description = "Py3k port of sgmllib." 
-category = "main" optional = true python-versions = "*" files = [ @@ -8803,7 +8447,6 @@ files = [ name = "shapely" version = "2.0.1" description = "Manipulation and analysis of geometric objects" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -8851,14 +8494,13 @@ files = [ numpy = ">=1.14" [package.extras] -docs = ["matplotlib", "numpydoc (>=1.1.0,<1.2.0)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] +docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] test = ["pytest", "pytest-cov"] [[package]] name = "singlestoredb" version = "0.7.1" description = "Interface to the SingleStore database and cluster management APIs" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8891,7 +8533,6 @@ sqlalchemy = ["sqlalchemy-singlestoredb"] name = "six" version = "1.16.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -8903,7 +8544,6 @@ files = [ name = "smmap" version = "5.0.0" description = "A pure Python implementation of a sliding window memory map manager" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8915,7 +8555,6 @@ files = [ name = "sniffio" version = "1.3.0" description = "Sniff out which async library your code is running under" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8927,7 +8566,6 @@ files = [ name = "socksio" version = "1.0.0" description = "Sans-I/O implementation of SOCKS4, SOCKS4A, and SOCKS5." -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -8939,7 +8577,6 @@ files = [ name = "soundfile" version = "0.12.1" description = "An audio library based on libsndfile, CFFI and NumPy" -category = "main" optional = true python-versions = "*" files = [ @@ -8963,7 +8600,6 @@ numpy = ["numpy"] name = "soupsieve" version = "2.4.1" description = "A modern CSS selector implementation for Beautiful Soup." -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -8975,7 +8611,6 @@ files = [ name = "soxr" version = "0.3.6" description = "High quality, one-dimensional sample-rate conversion library" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -9017,7 +8652,6 @@ test = ["pytest"] name = "sqlalchemy" version = "2.0.20" description = "Database Abstraction Library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9065,7 +8699,7 @@ files = [ ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\""} typing-extensions = ">=4.2.0" [package.extras] @@ -9096,7 +8730,6 @@ sqlcipher = ["sqlcipher3-binary"] name = "sqlite-vss" version = "0.1.2" description = "" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9112,7 +8745,6 @@ test = ["pytest"] name = "sqlitedict" version = "2.1.0" description = "Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe." 
-category = "main" optional = true python-versions = "*" files = [ @@ -9123,7 +8755,6 @@ files = [ name = "sqlparams" version = "5.1.0" description = "Convert between various DB API 2.0 parameter styles." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9135,7 +8766,6 @@ files = [ name = "stack-data" version = "0.6.2" description = "Extract data from python stack frames and tracebacks for informative displays" -category = "dev" optional = false python-versions = "*" files = [ @@ -9155,7 +8785,6 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "streamlit" version = "1.22.0" description = "A faster way to build and share data apps" -category = "main" optional = true python-versions = ">=3.7, !=3.9.7" files = [ @@ -9196,7 +8825,6 @@ snowflake = ["snowflake-snowpark-python"] name = "stringcase" version = "1.2.0" description = "String case converter." -category = "main" optional = true python-versions = "*" files = [ @@ -9207,7 +8835,6 @@ files = [ name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -9222,7 +8849,6 @@ mpmath = ">=0.19" name = "syrupy" version = "4.2.1" description = "Pytest Snapshot Test Utility" -category = "dev" optional = false python-versions = ">=3.8.1,<4" files = [ @@ -9238,7 +8864,6 @@ pytest = ">=7.0.0,<8.0.0" name = "telethon" version = "1.29.3" description = "Full-featured Telegram client library for Python 3" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -9256,7 +8881,6 @@ cryptg = ["cryptg"] name = "tenacity" version = "8.2.3" description = "Retry code until it succeeds" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9271,7 +8895,6 @@ doc = ["reno", "sphinx", "tornado (>=4.5)"] name = "tensorboard" version = "2.13.0" description = "TensorBoard lets you watch Tensors Flow" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -9296,7 +8919,6 @@ wheel = ">=0.26" name = "tensorboard-data-server" version = "0.7.1" description = "Fast data loading for TensorBoard" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9309,7 +8931,6 @@ files = [ name = "tensorflow" version = "2.13.0" description = "TensorFlow is an open source machine learning framework for everyone." -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -9362,7 +8983,6 @@ wrapt = ">=1.11.0" name = "tensorflow-estimator" version = "2.13.0" description = "TensorFlow Estimator." -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9373,7 +8993,6 @@ files = [ name = "tensorflow-hub" version = "0.14.0" description = "TensorFlow Hub is a library to foster the publication, discovery, and consumption of reusable parts of machine learning models." -category = "main" optional = true python-versions = "*" files = [ @@ -9388,7 +9007,6 @@ protobuf = ">=3.19.6" name = "tensorflow-io-gcs-filesystem" version = "0.33.0" description = "TensorFlow IO" -category = "main" optional = true python-versions = ">=3.7, <3.12" files = [ @@ -9419,7 +9037,6 @@ tensorflow-rocm = ["tensorflow-rocm (>=2.13.0,<2.14.0)"] name = "tensorflow-macos" version = "2.13.0" description = "TensorFlow is an open source machine learning framework for everyone." 
-category = "main" optional = true python-versions = ">=3.8" files = [ @@ -9455,7 +9072,6 @@ wrapt = ">=1.11.0" name = "tensorflow-text" version = "2.13.0" description = "TF.Text is a TensorFlow library of text related ops, modules, and subgraphs." -category = "main" optional = true python-versions = "*" files = [ @@ -9480,7 +9096,6 @@ tests = ["absl-py", "pytest", "tensorflow-datasets (>=3.2.0)"] name = "termcolor" version = "2.3.0" description = "ANSI color formatting for output in terminal" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9495,7 +9110,6 @@ tests = ["pytest", "pytest-cov"] name = "terminado" version = "0.17.1" description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9516,7 +9130,6 @@ test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] name = "textstat" version = "0.7.3" description = "Calculate statistical features from text" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -9531,7 +9144,6 @@ pyphen = "*" name = "threadpoolctl" version = "3.2.0" description = "threadpoolctl" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -9543,7 +9155,6 @@ files = [ name = "tigrisdb" version = "1.0.0b6" description = "Python SDK for Tigris " -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -9559,7 +9170,6 @@ protobuf = ">=3.19.6" name = "tiktoken" version = "0.3.3" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -9605,7 +9215,6 @@ blobfile = ["blobfile (>=2)"] name = "tinycss2" version = "1.2.1" description = "A tiny CSS parser" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9624,7 +9233,6 @@ test = ["flake8", "isort", "pytest"] name = "tinysegmenter" version = "0.3" description = "Very compact Japanese tokenizer" -category = "main" optional = true python-versions = "*" files = [ @@ -9635,7 +9243,6 @@ files = [ name = "tldextract" version = "3.4.4" description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well." 
-category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9653,7 +9260,6 @@ requests-file = ">=1.4" name = "tokenizers" version = "0.13.3" description = "Fast and Customizable Tokenizers" -category = "main" optional = true python-versions = "*" files = [ @@ -9708,7 +9314,6 @@ testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] name = "toml" version = "0.10.2" description = "Python Library for Tom's Obvious, Minimal Language" -category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" files = [ @@ -9720,7 +9325,6 @@ files = [ name = "tomli" version = "2.0.1" description = "A lil' TOML parser" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9732,7 +9336,6 @@ files = [ name = "toolz" version = "0.12.0" description = "List processing tools and functional utilities" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -9744,7 +9347,6 @@ files = [ name = "torch" version = "1.13.1" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -category = "main" optional = true python-versions = ">=3.7.0" files = [ @@ -9785,7 +9387,6 @@ opt-einsum = ["opt-einsum (>=3.3)"] name = "torchvision" version = "0.14.1" description = "image and video datasets and models for torch deep learning" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -9812,7 +9413,7 @@ files = [ [package.dependencies] numpy = "*" -pillow = ">=5.3.0,<8.3.0 || >=8.4.0" +pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" requests = "*" torch = "1.13.1" typing-extensions = "*" @@ -9824,7 +9425,6 @@ scipy = ["scipy"] name = "tornado" version = "6.3.3" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-category = "main" optional = false python-versions = ">= 3.8" files = [ @@ -9845,7 +9445,6 @@ files = [ name = "tqdm" version = "4.66.1" description = "Fast, Extensible Progress Meter" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -9866,7 +9465,6 @@ telegram = ["requests"] name = "traitlets" version = "5.9.0" description = "Traitlets Python configuration system" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -9882,7 +9480,6 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] name = "transformers" version = "4.32.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" -category = "main" optional = true python-versions = ">=3.8.0" files = [ @@ -9952,7 +9549,6 @@ vision = ["Pillow (<10.0.0)"] name = "tritonclient" version = "2.34.0" description = "Python client library and utilities for communicating with Triton Inference Server" -category = "main" optional = true python-versions = "*" files = [ @@ -9974,7 +9570,6 @@ http = ["aiohttp (>=3.8.1,<4.0.0)", "geventhttpclient (>=1.4.4,<=2.0.2)", "numpy name = "types-chardet" version = "5.0.4.6" description = "Typing stubs for chardet" -category = "dev" optional = false python-versions = "*" files = [ @@ -9986,7 +9581,6 @@ files = [ name = "types-protobuf" version = "4.24.0.1" description = "Typing stubs for protobuf" -category = "dev" optional = false python-versions = "*" files = [ @@ -9998,7 +9592,6 @@ files = [ name = "types-pyopenssl" version = "23.2.0.2" description = "Typing stubs for pyOpenSSL" -category = "dev" optional = false python-versions = "*" files = [ @@ -10013,7 +9606,6 @@ cryptography = ">=35.0.0" name = "types-pytz" version = "2023.3.0.1" description = "Typing stubs for pytz" -category = "dev" optional = false python-versions = "*" files = [ @@ -10025,7 +9617,6 @@ files = [ name = "types-pyyaml" version = "6.0.12.11" description = "Typing stubs for PyYAML" -category = "dev" optional = false python-versions = "*" files = [ @@ -10037,7 +9628,6 @@ files = [ name = "types-redis" version = "4.6.0.5" description = "Typing stubs for redis" -category = "dev" optional = false python-versions = "*" files = [ @@ -10053,7 +9643,6 @@ types-pyOpenSSL = "*" name = "types-requests" version = "2.31.0.2" description = "Typing stubs for requests" -category = "main" optional = false python-versions = "*" files = [ @@ -10068,7 +9657,6 @@ types-urllib3 = "*" name = "types-toml" version = "0.10.8.7" description = "Typing stubs for toml" -category = "dev" optional = false python-versions = "*" files = [ @@ -10080,7 +9668,6 @@ files = [ name = "types-urllib3" version = "1.26.25.14" description = "Typing stubs for urllib3" -category = "main" optional = false python-versions = "*" files = [ @@ -10092,7 +9679,6 @@ files = [ name = "typing-extensions" version = "4.5.0" description = "Backported and Experimental Type Hints for Python 3.7+" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -10104,7 +9690,6 @@ files = [ name = "typing-inspect" version = "0.9.0" description = "Runtime inspection utilities for typing module." 
-category = "main" optional = false python-versions = "*" files = [ @@ -10120,7 +9705,6 @@ typing-extensions = ">=3.7.4" name = "tzdata" version = "2023.3" description = "Provider of IANA time zone data" -category = "main" optional = false python-versions = ">=2" files = [ @@ -10132,7 +9716,6 @@ files = [ name = "tzlocal" version = "4.3.1" description = "tzinfo object for the local timezone" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -10152,7 +9735,6 @@ devenv = ["black", "check-manifest", "flake8", "pyroma", "pytest (>=4.3)", "pyte name = "uri-template" version = "1.3.0" description = "RFC 6570 URI Template Processor" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -10167,7 +9749,6 @@ dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake name = "uritemplate" version = "4.1.1" description = "Implementation of RFC 6570 URI Templates" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10179,7 +9760,6 @@ files = [ name = "urllib3" version = "1.26.16" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ @@ -10196,7 +9776,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] name = "validators" version = "0.21.0" description = "Python Data Validation for Humans™" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -10208,7 +9787,6 @@ files = [ name = "vcrpy" version = "5.1.0" description = "Automatically mock your HTTP interactions to simplify and speed up testing" -category = "dev" optional = false python-versions = ">=3.8" files = [ @@ -10253,7 +9831,6 @@ numpy = "*" name = "watchdog" version = "3.0.0" description = "Filesystem events monitoring" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -10293,7 +9870,6 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wcwidth" version = "0.2.6" description = "Measures the displayed width of unicode strings in a terminal" -category = "dev" optional = false python-versions = "*" files = [ @@ -10305,7 +9881,6 @@ files = [ name = "weaviate-client" version = "3.23.0" description = "A python native Weaviate client" -category = "main" optional = true python-versions = ">=3.8" files = [ @@ -10326,7 +9901,6 @@ grpc = ["grpcio", "grpcio-tools"] name = "webcolors" version = "1.13" description = "A library for working with the color formats defined by HTML and CSS." -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -10342,7 +9916,6 @@ tests = ["pytest", "pytest-cov"] name = "webencodings" version = "0.5.1" description = "Character encoding aliases for legacy web content" -category = "dev" optional = false python-versions = "*" files = [ @@ -10354,7 +9927,6 @@ files = [ name = "websocket-client" version = "1.6.2" description = "WebSocket client for Python with low level API options" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -10371,7 +9943,6 @@ test = ["websockets"] name = "websockets" version = "11.0.3" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -10451,7 +10022,6 @@ files = [ name = "werkzeug" version = "2.3.7" description = "The comprehensive WSGI web application library." 
-category = "main" optional = true python-versions = ">=3.8" files = [ @@ -10469,7 +10039,6 @@ watchdog = ["watchdog (>=2.3)"] name = "wget" version = "3.2" description = "pure python download utility" -category = "main" optional = true python-versions = "*" files = [ @@ -10480,7 +10049,6 @@ files = [ name = "wheel" version = "0.41.2" description = "A built-package format for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -10495,7 +10063,6 @@ test = ["pytest (>=6.0.0)", "setuptools (>=65)"] name = "whylabs-client" version = "0.5.4" description = "WhyLabs API client" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10511,7 +10078,6 @@ urllib3 = ">=1.25.3" name = "whylogs" version = "1.2.6" description = "Profile and monitor your ML data pipeline end-to-end" -category = "main" optional = true python-versions = ">=3.7.1,<4" files = [ @@ -10545,7 +10111,6 @@ viz = ["Pillow (>=9.2.0,<10.0.0)", "ipython", "numpy", "numpy (>=1.23.2)", "pyba name = "whylogs-sketching" version = "3.4.1.dev3" description = "sketching library of whylogs" -category = "main" optional = true python-versions = "*" files = [ @@ -10586,7 +10151,6 @@ files = [ name = "widgetsnbextension" version = "4.0.8" description = "Jupyter interactive widgets for Jupyter Notebook" -category = "dev" optional = false python-versions = ">=3.7" files = [ @@ -10598,7 +10162,6 @@ files = [ name = "wikipedia" version = "1.4.0" description = "Wikipedia API for Python" -category = "main" optional = true python-versions = "*" files = [ @@ -10613,7 +10176,6 @@ requests = ">=2.0.0,<3.0.0" name = "win32-setctime" version = "1.1.0" description = "A small Python utility to set file creation time on Windows" -category = "main" optional = true python-versions = ">=3.5" files = [ @@ -10628,7 +10190,6 @@ dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] name = "wolframalpha" version = "5.0.0" description = "Wolfram|Alpha 2.0 API client" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10649,7 +10210,6 @@ testing = ["keyring", "pmxbot", "pytest (>=3.5,!=3.7.3)", "pytest-black (>=0.3.7 name = "wonderwords" version = "2.2.0" description = "A python package for random words and sentences in the english language" -category = "main" optional = true python-versions = ">=3.6" files = [ @@ -10664,7 +10224,6 @@ cli = ["rich (==9.10.0)"] name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." 
-category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ @@ -10749,7 +10308,6 @@ files = [ name = "xata" version = "1.0.0b0" description = "Python client for Xata.io" -category = "main" optional = true python-versions = ">=3.8,<4.0" files = [ @@ -10767,7 +10325,6 @@ requests = ">=2.28.1,<3.0.0" name = "xmltodict" version = "0.13.0" description = "Makes working with XML feel like you are working with JSON" -category = "main" optional = true python-versions = ">=3.4" files = [ @@ -10779,7 +10336,6 @@ files = [ name = "yarl" version = "1.9.2" description = "Yet another URL library" -category = "main" optional = false python-versions = ">=3.7" files = [ @@ -10867,7 +10423,6 @@ multidict = ">=4.0" name = "zipp" version = "3.16.2" description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" optional = false python-versions = ">=3.8" files = [ @@ -10883,7 +10438,6 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p name = "zstandard" version = "0.21.0" description = "Zstandard bindings for Python" -category = "main" optional = true python-versions = ">=3.7" files = [ @@ -10939,7 +10493,7 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [extras] -all = ["O365", "aleph-alpha-client", "amadeus", "arxiv", "atlassian-python-api", "awadb", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clarifai", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "esprima", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jinja2", "jq", "lancedb", "langkit", "lark", "libdeeplake", "librosa", "lxml", "manifest-ml", "marqo", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", "pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "python-arango", "pyvespa", "qdrant-client", "rdflib", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "vowpal-wabbit-next", "weaviate-client", "wikipedia", "wolframalpha"] +all = ["O365", "aleph-alpha-client", "amadeus", "arxiv", "atlassian-python-api", "awadb", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clarifai", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "esprima", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jinja2", "jq", "lancedb", "langkit", "lark", "libdeeplake", "librosa", "lxml", "manifest-ml", "marqo", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", "pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "python-arango", "pyvespa", "qdrant-client", "rdflib", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "weaviate-client", "wikipedia", "wolframalpha"] azure = ["azure-ai-formrecognizer", "azure-ai-vision", 
"azure-cognitiveservices-speech", "azure-core", "azure-cosmos", "azure-identity", "azure-search-documents", "openai"] clarifai = ["clarifai"] cohere = ["cohere"] @@ -10955,4 +10509,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "7bffde1b8d57bad4b5a48d73250cb8276eb7e40dfe19f8490d5f4a25cb15322d" +content-hash = "3707ffe51bf8aca6cca6512f9d3c358facbbc36a17a00ee9c7d513c557eddddb" From 7a4387c60d3ee57b7a7ced82199a60bbed3c8f0c Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 23:46:04 -0400 Subject: [PATCH 51/65] notebook fix --- .../how_to/learned_prompt_optimization.ipynb | 117 +++++++++--------- 1 file changed, 56 insertions(+), 61 deletions(-) diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb index 5ff4a95dcc..2bf81f0f06 100644 --- a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb +++ b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb @@ -4,15 +4,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Learned prompt variable injection via rl chain\n", + "# Learned Prompt Variable Injection via RL Chain\n", "\n", - "The rl_chain (reinforcement learning chain) is used primarily for prompt variable injection: when we want to enhance a prompt with a value but we are not sure which of the available variable values will make the prompt achieve what we want.\n", + "LLM prompts can be enhanced by injecting specific terms into template sentences. Selecting the right terms is crucial for obtaining high-quality responses. This notebook introduces automated prompt engineering through term injection using Reinforcement Learning with VowpalWabbit.\n", "\n", - "It provides a way to learn a specific prompt engineering policy without fine tuning the underlying foundational model.\n", + "The rl_chain (reinforcement learning chain) provides a way to automatically determine the best terms to inject without the need for fine-tuning the underlying foundational model.\n", "\n", - "The example layed out below is trivial and a strong llm could make a good variable selection and injection without the intervention of this chain, but it is perfect for showcasing the chain's usage. Advanced options and explanations are provided at the end.\n", + "For illustration, consider the scenario of a meal delivery service. We use LangChain to ask customers, like Tom, about their dietary preferences and recommend suitable meals from our extensive menu. The rl_chain selects a meal based on user preferences, injects it into a prompt template, and forwards the prompt to an LLM. The LLM's response, which is a personalized recommendation, is then returned to the user.\n", "\n", - "The goal of this example scenario is for the chain to select a meal based on the user declared preferences, and inject the meal into the prompt template. The final prompt will then be sent to the llm of choice and the llm output will be returned to the user." + "The example laid out below is a toy example to demonstrate the applicability of the concept. Advanced options and explanations are provided at the end." 
] }, { @@ -35,25 +35,12 @@ "cell_type": "code", "execution_count": 37, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "\"\\n\\nYes, I'm ready.\"" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# pick and configure the LLM of your choice\n", "\n", "from langchain.llms import OpenAI\n", - "llm = OpenAI(engine=\"text-davinci-003\")\n", - "\n", - "llm.predict(\"are you ready?\")" + "llm = OpenAI(engine=\"text-davinci-003\")\n" ] }, { @@ -156,18 +143,18 @@ "source": [ "## What is the chain doing\n", "\n", - "What is happening behind the scenes here is that the rl chain will\n", + "Here's a step-by-step breakdown of the RL chain's operations:\n", "\n", - "- take the meals\n", - "- take the user and their preference\n", - "- based on the user and their preference (context) it will select a meal\n", - "- it will auto-evaluate if that meal selection was good or bad\n", - "- it will finally inject the meal into the prompt and query the llm\n", - "- the user will get the llm response back\n", + "1. Accept the list of meals.\n", + "2. Consider the user and their dietary preferences.\n", + "3. Based on this context, select an appropriate meal.\n", + "4. Automatically evaluate the appropriateness of the meal choice.\n", + "5. Inject the selected meal into the prompt and submit it to the LLM.\n", + "6. Return the LLM's response to the user.\n", "\n", - "Now, the way the chain is doing this is that it is learning a contextual bandit rl model that is trained to make good selections (specifially the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library is being used).\n", + "Technically, the chain achieves this by employing a contextual bandit reinforcement learning model, specifically utilizing the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library.\n", "\n", - "Since this rl model will be untrained when we first start, it might make a random selection that doesn't fit the user and their preferences. But if we give it time to learn the user and their preferences, it should start to make better selections (or quickly learn a good one and just pick that!)." + "Initially, since the RL model is untrained, it might opt for random selections that don't necessarily align with a user's preferences. However, as it gains more exposure to the user's choices and feedback, it should start to make better selections (or quickly learn a good one and just pick that!).\n" ] }, { @@ -213,6 +200,8 @@ "source": [ "## How is the chain learning\n", "\n", + "It's important to note that while the RL model can make sophisticated selections, it doesn't inherently recognize concepts like \"vegetarian\" or understand that \"beef enchiladas\" aren't vegetarian-friendly. Instead, it leverages the LLM to ground its choices in common sense.\n", + "\n", "The way the chain is learning that Tom prefers vegetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in (`BasedOn`).\n", "\n", "You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself."
@@ -275,7 +264,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ @@ -309,7 +298,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -355,7 +344,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 45, "metadata": {}, "outputs": [], "source": [ @@ -386,7 +375,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 46, "metadata": {}, "outputs": [], "source": [ @@ -410,42 +399,48 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 47, "metadata": {}, "outputs": [], "source": [ - "for i in range(40):\n", + "for _ in range(20):\n", " try:\n", - " if i % 2:\n", - " chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " random_chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " else:\n", - " chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Anna\"),\n", - " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", - " random_chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Anna\"),\n", - " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - " )\n", + " chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " random_chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Tom\"),\n", + " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " \n", + " chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Anna\"),\n", + " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", + " random_chain.run(\n", + " meal = rl_chain.ToSelectFrom(meals),\n", + " user = rl_chain.BasedOn(\"Anna\"),\n", + " preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", + " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", + " )\n", " except Exception as e:\n", " print(e)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The RL chain converges to the fact that Anna prefers beef and Tom is vegetarian. 
The random chain picks at random, and so will send beef to vegetarians half the time." ] }, { "cell_type": "code", "execution_count": 25, From d6320cc2c0e38d6e22fe589d5074dfaf313c5a00 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 4 Sep 2023 23:47:26 -0400 Subject: [PATCH 52/65] .. --- .../chains/how_to/learned_prompt_optimization.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb index 2bf81f0f06..45a02af45c 100644 --- a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb +++ b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb @@ -47,10 +47,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "##### Intialize the rl chain with provided defaults\n", + "##### Initialize the RL chain with provided defaults\n", "\n", "The prompt template which will be used to query the LLM needs to be defined.\n", - "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above, the rl chain will try to pick and inject the best meal\n" + "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above; the RL chain will try to pick and inject the best meal\n" ] }, { @@ -84,7 +84,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next the rl chain's PickBest chain is being initialized. We must provide the llm of choice and the defined prompt. As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria. " + "Next the RL chain's PickBest chain is being initialized. We must provide the LLM of choice and the defined prompt. As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria. " ] }, { @@ -490,7 +490,7 @@ "source": [ "## Advanced options\n", "\n", - "The rl chain is highly configurable in order to be able to adjust to various selection scenarios. 
If you want to learn more about the ML library that powers it please take a look at tutorials [here](https://vowpalwabbit.org/)\n" ] }, { From 11f20cded1e8c49799be931be9d853772e913433 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 12:16:08 -0400 Subject: [PATCH 54/65] move everything into experimental --- .../how_to/learned_prompt_optimization.ipynb | 30 +- .../rl_chain/__init__.py | 4 +- .../langchain_experimental}/rl_chain/base.py | 8 +- .../rl_chain/metrics.py | 0 .../rl_chain/model_repository.py | 0 .../rl_chain/pick_best_chain.py | 3 +- .../rl_chain/vw_logger.py | 0 libs/experimental/poetry.lock | 1101 ++++++++++++++--- libs/experimental/pyproject.toml | 5 + .../rl_chain/test_pick_best_chain_call.py | 23 +- .../rl_chain/test_pick_best_text_embedder.py | 4 +- .../rl_chain/test_rl_chain_base_embedder.py | 2 +- .../tests/unit_tests}/rl_chain/test_utils.py | 0 libs/langchain/poetry.lock | 29 +- libs/langchain/pyproject.toml | 3 - 15 files changed, 987 insertions(+), 225 deletions(-) rename libs/{langchain/langchain/chains => experimental/langchain_experimental}/rl_chain/__init__.py (89%) rename libs/{langchain/langchain/chains => experimental/langchain_experimental}/rl_chain/base.py (98%) rename libs/{langchain/langchain/chains => experimental/langchain_experimental}/rl_chain/metrics.py (100%) rename libs/{langchain/langchain/chains => experimental/langchain_experimental}/rl_chain/model_repository.py (100%) rename libs/{langchain/langchain/chains => experimental/langchain_experimental}/rl_chain/pick_best_chain.py (99%) rename libs/{langchain/langchain/chains => experimental/langchain_experimental}/rl_chain/vw_logger.py (100%) rename libs/{langchain/tests/unit_tests/chains => experimental/tests/unit_tests}/rl_chain/test_pick_best_chain_call.py (96%) rename libs/{langchain/tests/unit_tests/chains => experimental/tests/unit_tests}/rl_chain/test_pick_best_text_embedder.py (99%) rename libs/{langchain/tests/unit_tests/chains => experimental/tests/unit_tests}/rl_chain/test_rl_chain_base_embedder.py (99%) rename libs/{langchain/tests/unit_tests/chains => experimental/tests/unit_tests}/rl_chain/test_utils.py (100%) diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb index 45a02af45c..3e0702b4f1 100644 --- a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb +++ b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb @@ -17,7 +17,17 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Install necessary packages\n", + "# ! 
pip install langchain langchain-experimental matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -33,14 +43,14 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# pick and configure the LLM of your choice\n", "\n", "from langchain.llms import OpenAI\n", - "llm = OpenAI(engine=\"text-davinci-003\")\n" + "llm = OpenAI(engine=\"text-davinci-003\")" ] }, { @@ -93,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "import langchain.chains.rl_chain as rl_chain\n", + "import langchain_experimental.rl_chain as rl_chain\n", "\n", "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n" ] @@ -466,12 +476,10 @@ } ], "source": [ - "# note matplotlib is not a dependency of langchain so you need to install to plot\n", - "\n", - "# from matplotlib import pyplot as plt\n", - "# chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", - "# random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", - "# plt.legend()\n", + "from matplotlib import pyplot as plt\n", + "chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", + "random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", + "plt.legend()\n", "\n", "print(f\"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}\")\n", "print(f\"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}\")" @@ -816,7 +824,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.16" + "version": "3.9.17" }, "orig_nbformat": 4 }, diff --git a/libs/langchain/langchain/chains/rl_chain/__init__.py b/libs/experimental/langchain_experimental/rl_chain/__init__.py similarity index 89% rename from libs/langchain/langchain/chains/rl_chain/__init__.py rename to libs/experimental/langchain_experimental/rl_chain/__init__.py index f112dcea09..ca558dd6f3 100644 --- a/libs/langchain/langchain/chains/rl_chain/__init__.py +++ b/libs/experimental/langchain_experimental/rl_chain/__init__.py @@ -1,6 +1,6 @@ import logging -from langchain.chains.rl_chain.base import ( +from langchain_experimental.rl_chain.base import ( AutoSelectionScorer, BasedOn, Embed, @@ -12,7 +12,7 @@ from langchain.chains.rl_chain.base import ( embed, stringify_embedding, ) -from langchain.chains.rl_chain.pick_best_chain import ( +from langchain_experimental.rl_chain.pick_best_chain import ( PickBest, PickBestEvent, PickBestFeatureEmbedder, diff --git a/libs/langchain/langchain/chains/rl_chain/base.py b/libs/experimental/langchain_experimental/rl_chain/base.py similarity index 98% rename from libs/langchain/langchain/chains/rl_chain/base.py rename to libs/experimental/langchain_experimental/rl_chain/base.py index 26ac9a43e1..9b3d7e018a 100644 --- a/libs/langchain/langchain/chains/rl_chain/base.py +++ b/libs/experimental/langchain_experimental/rl_chain/base.py @@ -19,19 +19,19 @@ from typing import ( from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain -from langchain.chains.rl_chain.metrics import ( +from langchain_experimental.rl_chain.metrics import ( MetricsTrackerAverage, MetricsTrackerRollingWindow, ) -from langchain.chains.rl_chain.model_repository import 
ModelRepository -from langchain.chains.rl_chain.vw_logger import VwLogger +from langchain_experimental.rl_chain.model_repository import ModelRepository +from langchain_experimental.rl_chain.vw_logger import VwLogger from langchain.prompts import ( BasePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) -from langchain.pydantic_v1 import BaseModel, Extra, root_validator +from langchain_experimental.pydantic_v1 import BaseModel, Extra, root_validator if TYPE_CHECKING: import vowpal_wabbit_next as vw diff --git a/libs/langchain/langchain/chains/rl_chain/metrics.py b/libs/experimental/langchain_experimental/rl_chain/metrics.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/metrics.py rename to libs/experimental/langchain_experimental/rl_chain/metrics.py diff --git a/libs/langchain/langchain/chains/rl_chain/model_repository.py b/libs/experimental/langchain_experimental/rl_chain/model_repository.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/model_repository.py rename to libs/experimental/langchain_experimental/rl_chain/model_repository.py diff --git a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py similarity index 99% rename from libs/langchain/langchain/chains/rl_chain/pick_best_chain.py rename to libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py index 0da0780313..090db9d863 100644 --- a/libs/langchain/langchain/chains/rl_chain/pick_best_chain.py +++ b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py @@ -3,7 +3,7 @@ from __future__ import annotations import logging from typing import Any, Dict, List, Optional, Tuple, Type, Union -import langchain.chains.rl_chain.base as base +import langchain_experimental.rl_chain.base as base from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.llm import LLMChain @@ -307,6 +307,7 @@ class PickBest(base.RLChain[PickBestEvent]): ] kwargs["vw_cmd"] = vw_cmd + logger.info(f"vw_cmd: {vw_cmd}") super().__init__(*args, **kwargs) diff --git a/libs/langchain/langchain/chains/rl_chain/vw_logger.py b/libs/experimental/langchain_experimental/rl_chain/vw_logger.py similarity index 100% rename from libs/langchain/langchain/chains/rl_chain/vw_logger.py rename to libs/experimental/langchain_experimental/rl_chain/vw_logger.py diff --git a/libs/experimental/poetry.lock b/libs/experimental/poetry.lock index b0d5b9139a..fc8d7a61b7 100644 --- a/libs/experimental/poetry.lock +++ b/libs/experimental/poetry.lock @@ -124,24 +124,24 @@ frozenlist = ">=1.1.0" [[package]] name = "anyio" -version = "3.7.1" +version = "4.0.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, - {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, + {file = "anyio-4.0.0-py3-none-any.whl", hash = "sha256:cfdb2b588b9fc25ede96d8db56ed50848b0b649dca3dd1df0b11f683bb9e0b5f"}, + {file = "anyio-4.0.0.tar.gz", hash = "sha256:f7ed51751b2c2add651e5747c891b47e26d2a21be5d32d9311dfe9692f3e5d7a"}, ] [package.dependencies] -exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} 
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" [package.extras] -doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] -test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (<0.22)"] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.22)"] [[package]] name = "appnope" @@ -227,17 +227,17 @@ python-dateutil = ">=2.7.0" [[package]] name = "asttokens" -version = "2.2.1" +version = "2.4.0" description = "Annotate AST trees with source code positions" optional = false python-versions = "*" files = [ - {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"}, - {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"}, + {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"}, + {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"}, ] [package.dependencies] -six = "*" +six = ">=1.12.0" [package.extras] test = ["astroid", "pytest"] @@ -734,29 +734,33 @@ dev = ["flake8", "hypothesis", "ipython", "mypy (>=0.710)", "portray", "pytest ( [[package]] name = "debugpy" -version = "1.6.7.post1" +version = "1.7.0" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.7" files = [ - {file = "debugpy-1.6.7.post1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:903bd61d5eb433b6c25b48eae5e23821d4c1a19e25c9610205f5aeaccae64e32"}, - {file = "debugpy-1.6.7.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16882030860081e7dd5aa619f30dec3c2f9a421e69861125f83cc372c94e57d"}, - {file = "debugpy-1.6.7.post1-cp310-cp310-win32.whl", hash = "sha256:eea8d8cfb9965ac41b99a61f8e755a8f50e9a20330938ad8271530210f54e09c"}, - {file = "debugpy-1.6.7.post1-cp310-cp310-win_amd64.whl", hash = "sha256:85969d864c45f70c3996067cfa76a319bae749b04171f2cdeceebe4add316155"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:890f7ab9a683886a0f185786ffbda3b46495c4b929dab083b8c79d6825832a52"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4ac7a4dba28801d184b7fc0e024da2635ca87d8b0a825c6087bb5168e3c0d28"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-win32.whl", hash = "sha256:3370ef1b9951d15799ef7af41f8174194f3482ee689988379763ef61a5456426"}, - {file = "debugpy-1.6.7.post1-cp37-cp37m-win_amd64.whl", hash = "sha256:65b28435a17cba4c09e739621173ff90c515f7b9e8ea469b92e3c28ef8e5cdfb"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:92b6dae8bfbd497c90596bbb69089acf7954164aea3228a99d7e43e5267f5b36"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72f5d2ecead8125cf669e62784ef1e6300f4067b0f14d9f95ee00ae06fc7c4f7"}, - {file = "debugpy-1.6.7.post1-cp38-cp38-win32.whl", hash = "sha256:f0851403030f3975d6e2eaa4abf73232ab90b98f041e3c09ba33be2beda43fcf"}, - 
{file = "debugpy-1.6.7.post1-cp38-cp38-win_amd64.whl", hash = "sha256:3de5d0f97c425dc49bce4293df6a04494309eedadd2b52c22e58d95107e178d9"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:38651c3639a4e8bbf0ca7e52d799f6abd07d622a193c406be375da4d510d968d"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:038c51268367c9c935905a90b1c2d2dbfe304037c27ba9d19fe7409f8cdc710c"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-win32.whl", hash = "sha256:4b9eba71c290852f959d2cf8a03af28afd3ca639ad374d393d53d367f7f685b2"}, - {file = "debugpy-1.6.7.post1-cp39-cp39-win_amd64.whl", hash = "sha256:973a97ed3b434eab0f792719a484566c35328196540676685c975651266fccf9"}, - {file = "debugpy-1.6.7.post1-py2.py3-none-any.whl", hash = "sha256:1093a5c541af079c13ac8c70ab8b24d1d35c8cacb676306cf11e57f699c02926"}, - {file = "debugpy-1.6.7.post1.zip", hash = "sha256:fe87ec0182ef624855d05e6ed7e0b7cb1359d2ffa2a925f8ec2d22e98b75d0ca"}, + {file = "debugpy-1.7.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:17ad9a681aca1704c55b9a5edcb495fa8f599e4655c9872b7f9cf3dc25890d48"}, + {file = "debugpy-1.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1285920a3f9a75f5d1acf59ab1b9da9ae6eb9a05884cd7674f95170c9cafa4de"}, + {file = "debugpy-1.7.0-cp310-cp310-win32.whl", hash = "sha256:a6f43a681c5025db1f1c0568069d1d1bad306a02e7c36144912b26d9c90e4724"}, + {file = "debugpy-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e9571d831ad3c75b5fb6f3efcb71c471cf2a74ba84af6ac1c79ce00683bed4b"}, + {file = "debugpy-1.7.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:538765a41198aa88cc089295b39c7322dd598f9ef1d52eaae12145c63bf9430a"}, + {file = "debugpy-1.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7e8cf91f8f3f9b5fad844dd88427b85d398bda1e2a0cd65d5a21312fcbc0c6f"}, + {file = "debugpy-1.7.0-cp311-cp311-win32.whl", hash = "sha256:18a69f8e142a716310dd0af6d7db08992aed99e2606108732efde101e7c65e2a"}, + {file = "debugpy-1.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:7515a5ba5ee9bfe956685909c5f28734c1cecd4ee813523363acfe3ca824883a"}, + {file = "debugpy-1.7.0-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:bc8da67ade39d9e75608cdb8601d07e63a4e85966e0572c981f14e2cf42bcdef"}, + {file = "debugpy-1.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5036e918c6ba8fc4c4f1fd0207d81db634431a02f0dc2ba51b12fd793c8c9de"}, + {file = "debugpy-1.7.0-cp37-cp37m-win32.whl", hash = "sha256:d5be95b3946a4d7b388e45068c7b75036ac5a610f41014aee6cafcd5506423ad"}, + {file = "debugpy-1.7.0-cp37-cp37m-win_amd64.whl", hash = "sha256:0e90314a078d4e3f009520c8387aba8f74c3034645daa7a332a3d1bb81335756"}, + {file = "debugpy-1.7.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:1565fd904f9571c430adca597771255cff4f92171486fced6f765dcbdfc8ec8d"}, + {file = "debugpy-1.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6516f36a2e95b3be27f171f12b641e443863f4ad5255d0fdcea6ae0be29bb912"}, + {file = "debugpy-1.7.0-cp38-cp38-win32.whl", hash = "sha256:2b0e489613bc066051439df04c56777ec184b957d6810cb65f235083aef7a0dc"}, + {file = "debugpy-1.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:7bf0b4bbd841b2397b6a8de15da9227f1164f6d43ceee971c50194eaed930a9d"}, + {file = "debugpy-1.7.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:ad22e1095b9977af432465c1e09132ba176e18df3834b1efcab1a449346b350b"}, + {file = "debugpy-1.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f625e427f21423e5874139db529e18cb2966bdfcc1cb87a195538c5b34d163d1"}, + {file = "debugpy-1.7.0-cp39-cp39-win32.whl", hash = "sha256:18bca8429d6632e2d3435055416d2d88f0309cc39709f4f6355c8d412cc61f24"}, + {file = "debugpy-1.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:dc8a12ac8b97ef3d6973c6679a093138c7c9b03eb685f0e253269a195f651559"}, + {file = "debugpy-1.7.0-py2.py3-none-any.whl", hash = "sha256:f6de2e6f24f62969e0f0ef682d78c98161c4dca29e9fb05df4d2989005005502"}, + {file = "debugpy-1.7.0.zip", hash = "sha256:676911c710e85567b17172db934a71319ed9d995104610ce23fd74a07f66e6f6"}, ] [[package]] @@ -937,6 +941,41 @@ files = [ {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, ] +[[package]] +name = "fsspec" +version = "2023.9.0" +description = "File-system specification" +optional = true +python-versions = ">=3.8" +files = [ + {file = "fsspec-2023.9.0-py3-none-any.whl", hash = "sha256:d55b9ab2a4c1f2b759888ae9f93e40c2aa72c0808132e87e282b549f9e6c4254"}, + {file = "fsspec-2023.9.0.tar.gz", hash = "sha256:4dbf0fefee035b7c6d3bbbe6bc99b2f201f40d4dca95b67c2b719be77bcd917f"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + [[package]] name = "greenlet" version = "2.0.2" @@ -1010,6 +1049,39 @@ files = [ docs = ["Sphinx", "docutils (<0.18)"] test = ["objgraph", "psutil"] +[[package]] +name = "huggingface-hub" +version = "0.17.0" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.17.0-py3-none-any.whl", hash = "sha256:8111ef89ebf5514154b4e929662f57fc43818d06c95dabdfa4c77f9087383172"}, + {file = "huggingface_hub-0.17.0.tar.gz", hash = "sha256:a048c64e0f651c32afe41a1818bf2cd47de902ff65dfba395ff71b999d9d4655"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +docs 
= ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)", "watchdog"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +inference = ["aiohttp", "pydantic (<2.0)"] +quality = ["black (==23.7)", "mypy (==1.5.1)", "ruff (>=0.0.241)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["torch"] +typing = ["pydantic (<2.0)", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] + [[package]] name = "idna" version = "3.4" @@ -1071,13 +1143,13 @@ files = [ [[package]] name = "ipykernel" -version = "6.25.1" +version = "6.25.2" description = "IPython Kernel for Jupyter" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.25.1-py3-none-any.whl", hash = "sha256:c8a2430b357073b37c76c21c52184db42f6b4b0e438e1eb7df3c4440d120497c"}, - {file = "ipykernel-6.25.1.tar.gz", hash = "sha256:050391364c0977e768e354bdb60cbbfbee7cbb943b1af1618382021136ffd42f"}, + {file = "ipykernel-6.25.2-py3-none-any.whl", hash = "sha256:2e2ee359baba19f10251b99415bb39de1e97d04e1fab385646f24f0596510b77"}, + {file = "ipykernel-6.25.2.tar.gz", hash = "sha256:f468ddd1f17acb48c8ce67fcfa49ba6d46d4f9ac0438c1f441be7c3d1372230b"}, ] [package.dependencies] @@ -1223,6 +1295,17 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "joblib" +version = "1.3.2" +description = "Lightweight pipelining with Python functions" +optional = true +python-versions = ">=3.7" +files = [ + {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"}, + {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"}, +] + [[package]] name = "json5" version = "0.9.14" @@ -1245,6 +1328,7 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -1315,13 +1399,13 @@ qtconsole = "*" [[package]] name = "jupyter-client" -version = "8.3.0" +version = "8.3.1" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.3.0-py3-none-any.whl", hash = "sha256:7441af0c0672edc5d28035e92ba5e32fadcfa8a4e608a434c228836a89df6158"}, - {file = "jupyter_client-8.3.0.tar.gz", hash = "sha256:3af69921fe99617be1670399a0b857ad67275eefcfa291e2c81a160b7b650f5f"}, + {file = "jupyter_client-8.3.1-py3-none-any.whl", hash = "sha256:5eb9f55eb0650e81de6b7e34308d8b92d04fe4ec41cd8193a913979e33d8e1a5"}, + {file = "jupyter_client-8.3.1.tar.gz", hash = "sha256:60294b2d5b869356c893f57b1a877ea6510d60d45cf4b38057f1672d85699ac9"}, ] [package.dependencies] @@ -1422,13 +1506,13 @@ jupyter-server = ">=1.1.2" 
[[package]] name = "jupyter-server" -version = "2.7.2" +version = "2.7.3" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_server-2.7.2-py3-none-any.whl", hash = "sha256:98a375347b580e837e7016007c24680a4261ed8ad7cd35196ac087d229f48e5a"}, - {file = "jupyter_server-2.7.2.tar.gz", hash = "sha256:d64fb4e593907290e5df916e3c9399c15ab2cd7bdb71cbcd1d36452dbfb30523"}, + {file = "jupyter_server-2.7.3-py3-none-any.whl", hash = "sha256:8e4b90380b59d7a1e31086c4692231f2a2ea4cb269f5516e60aba72ce8317fc9"}, + {file = "jupyter_server-2.7.3.tar.gz", hash = "sha256:d4916c8581c4ebbc534cebdaa8eca2478d9f3bfdd88eae29fcab0120eac57649"}, ] [package.dependencies] @@ -1742,6 +1826,23 @@ files = [ {file = "mistune-3.0.1.tar.gz", hash = "sha256:e912116c13aa0944f9dc530db38eb88f6a77087ab128f49f84a48f4c05ea163c"}, ] +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = true +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + [[package]] name = "multidict" version = "6.0.4" @@ -1947,13 +2048,13 @@ test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>= [[package]] name = "nbconvert" -version = "7.7.4" +version = "7.8.0" description = "Converting Jupyter Notebooks" optional = false python-versions = ">=3.8" files = [ - {file = "nbconvert-7.7.4-py3-none-any.whl", hash = "sha256:ace26f4386d08eb5c55833596a942048c5502a95e05590cb523826a749a40a37"}, - {file = "nbconvert-7.7.4.tar.gz", hash = "sha256:1113d039fa3fc3a846ffa5a3b0a019e85aaa94c566a09fa0c400fb7638e46087"}, + {file = "nbconvert-7.8.0-py3-none-any.whl", hash = "sha256:aec605e051fa682ccc7934ccc338ba1e8b626cfadbab0db592106b630f63f0f2"}, + {file = "nbconvert-7.8.0.tar.gz", hash = "sha256:f5bc15a1247e14dd41ceef0c0a3bc70020e016576eb0578da62f1c5b4f950479"}, ] [package.dependencies] @@ -2015,19 +2116,61 @@ files = [ {file = "nest_asyncio-1.5.7.tar.gz", hash = "sha256:6a80f7b98f24d9083ed24608977c09dd608d83f91cccc24c9d2cba6d10e01c10"}, ] +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +optional = true +python-versions = ">=3.8" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "nltk" +version = "3.8.1" +description = "Natural Language Toolkit" +optional = true +python-versions 
= ">=3.7" +files = [ + {file = "nltk-3.8.1-py3-none-any.whl", hash = "sha256:fd5c9109f976fa86bcadba8f91e47f5e9293bd034474752e92a520f81c93dda5"}, + {file = "nltk-3.8.1.zip", hash = "sha256:1834da3d0682cba4f2cede2f9aad6b0fafb6461ba451db0efb6f9c39798d64d3"}, +] + +[package.dependencies] +click = "*" +joblib = "*" +regex = ">=2021.8.3" +tqdm = "*" + +[package.extras] +all = ["matplotlib", "numpy", "pyparsing", "python-crfsuite", "requests", "scikit-learn", "scipy", "twython"] +corenlp = ["requests"] +machine-learning = ["numpy", "python-crfsuite", "scikit-learn", "scipy"] +plot = ["matplotlib"] +tgrep = ["pyparsing"] +twitter = ["twython"] + [[package]] name = "notebook" -version = "7.0.2" +version = "7.0.3" description = "Jupyter Notebook - A web-based notebook environment for interactive computing" optional = false python-versions = ">=3.8" files = [ - {file = "notebook-7.0.2-py3-none-any.whl", hash = "sha256:c77b1499dc9b07ce4f4f26990dcb25b2107b434f2536766b51a72a4228d9a4b6"}, - {file = "notebook-7.0.2.tar.gz", hash = "sha256:d70d6a07418c829bd5f54337ce993b7105261d9026f9d3fe68e9b8aa1a20da9a"}, + {file = "notebook-7.0.3-py3-none-any.whl", hash = "sha256:786ab2e3287c068667adce3029b540dd18fc5d23f49181b4b4ee4f6b48a7ca81"}, + {file = "notebook-7.0.3.tar.gz", hash = "sha256:07f3c5062fd0e6e69864437a0347abc485d991aae87a92c47d659699f571b729"}, ] [package.dependencies] -importlib-resources = {version = ">=5.0", markers = "python_version < \"3.9\""} jupyter-server = ">=2.4.0,<3" jupyterlab = ">=4.0.2,<5" jupyterlab-server = ">=2.22.1,<3" @@ -2037,7 +2180,7 @@ tornado = ">=6.2.0" [package.extras] dev = ["hatch", "pre-commit"] docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.22.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] +test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.22.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] [[package]] name = "notebook-shim" @@ -2171,6 +2314,73 @@ files = [ {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, ] +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", 
"pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + [[package]] name = "pandocfilters" version = "1.5.0" @@ -2266,6 +2476,75 @@ files = [ {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, ] +[[package]] +name = "pillow" +version = "10.0.0" +description = "Python Imaging Library (Fork)" +optional = true +python-versions = ">=3.8" +files = [ + {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, + {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, + {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, + {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, + {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, + {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, + {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, + {file = 
"Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, + {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, + {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, + {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + [[package]] name = "pkgutil-resolve-name" version = "1.3.10" @@ -2829,13 +3108,13 @@ cffi = {version = "*", markers = 
"implementation_name == \"pypy\""} [[package]] name = "qtconsole" -version = "5.4.3" +version = "5.4.4" description = "Jupyter Qt console" optional = false python-versions = ">= 3.7" files = [ - {file = "qtconsole-5.4.3-py3-none-any.whl", hash = "sha256:35fd6e87b1f6d1fd41801b07e69339f8982e76afd4fa8ef35595bc6036717189"}, - {file = "qtconsole-5.4.3.tar.gz", hash = "sha256:5e4082a86a201796b2a5cfd4298352d22b158b51b57736531824715fc2a979dd"}, + {file = "qtconsole-5.4.4-py3-none-any.whl", hash = "sha256:a3b69b868e041c2c698bdc75b0602f42e130ffb256d6efa48f9aa756c97672aa"}, + {file = "qtconsole-5.4.4.tar.gz", hash = "sha256:b7ffb53d74f23cee29f4cdb55dd6fabc8ec312d94f3c46ba38e1dde458693dfb"}, ] [package.dependencies] @@ -2846,7 +3125,7 @@ jupyter-core = "*" packaging = "*" pygments = "*" pyzmq = ">=17.1" -qtpy = ">=2.0.1" +qtpy = ">=2.4.0" traitlets = "<5.2.1 || >5.2.1,<5.2.2 || >5.2.2" [package.extras] @@ -2855,13 +3134,13 @@ test = ["flaky", "pytest", "pytest-qt"] [[package]] name = "qtpy" -version = "2.3.1" +version = "2.4.0" description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." optional = false python-versions = ">=3.7" files = [ - {file = "QtPy-2.3.1-py3-none-any.whl", hash = "sha256:5193d20e0b16e4d9d3bc2c642d04d9f4e2c892590bd1b9c92bfe38a95d5a2e12"}, - {file = "QtPy-2.3.1.tar.gz", hash = "sha256:a8c74982d6d172ce124d80cafd39653df78989683f760f2281ba91a6e7b9de8b"}, + {file = "QtPy-2.4.0-py3-none-any.whl", hash = "sha256:4d4f045a41e09ac9fa57fcb47ef05781aa5af294a0a646acc1b729d14225e741"}, + {file = "QtPy-2.4.0.tar.gz", hash = "sha256:db2d508167aa6106781565c8da5c6f1487debacba33519cedc35fa8997d424d4"}, ] [package.dependencies] @@ -3045,108 +3324,108 @@ files = [ [[package]] name = "rpds-py" -version = "0.9.2" +version = "0.10.2" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.8" files = [ - {file = "rpds_py-0.9.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:ab6919a09c055c9b092798ce18c6c4adf49d24d4d9e43a92b257e3f2548231e7"}, - {file = "rpds_py-0.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d55777a80f78dd09410bd84ff8c95ee05519f41113b2df90a69622f5540c4f8b"}, - {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a216b26e5af0a8e265d4efd65d3bcec5fba6b26909014effe20cd302fd1138fa"}, - {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:29cd8bfb2d716366a035913ced99188a79b623a3512292963d84d3e06e63b496"}, - {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44659b1f326214950a8204a248ca6199535e73a694be8d3e0e869f820767f12f"}, - {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:745f5a43fdd7d6d25a53ab1a99979e7f8ea419dfefebcab0a5a1e9095490ee5e"}, - {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a987578ac5214f18b99d1f2a3851cba5b09f4a689818a106c23dbad0dfeb760f"}, - {file = "rpds_py-0.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf4151acb541b6e895354f6ff9ac06995ad9e4175cbc6d30aaed08856558201f"}, - {file = "rpds_py-0.9.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:03421628f0dc10a4119d714a17f646e2837126a25ac7a256bdf7c3943400f67f"}, - {file = "rpds_py-0.9.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:13b602dc3e8dff3063734f02dcf05111e887f301fdda74151a93dbbc249930fe"}, - {file = 
"rpds_py-0.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fae5cb554b604b3f9e2c608241b5d8d303e410d7dfb6d397c335f983495ce7f6"}, - {file = "rpds_py-0.9.2-cp310-none-win32.whl", hash = "sha256:47c5f58a8e0c2c920cc7783113df2fc4ff12bf3a411d985012f145e9242a2764"}, - {file = "rpds_py-0.9.2-cp310-none-win_amd64.whl", hash = "sha256:4ea6b73c22d8182dff91155af018b11aac9ff7eca085750455c5990cb1cfae6e"}, - {file = "rpds_py-0.9.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:e564d2238512c5ef5e9d79338ab77f1cbbda6c2d541ad41b2af445fb200385e3"}, - {file = "rpds_py-0.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f411330a6376fb50e5b7a3e66894e4a39e60ca2e17dce258d53768fea06a37bd"}, - {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e7521f5af0233e89939ad626b15278c71b69dc1dfccaa7b97bd4cdf96536bb7"}, - {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8d3335c03100a073883857e91db9f2e0ef8a1cf42dc0369cbb9151c149dbbc1b"}, - {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d25b1c1096ef0447355f7293fbe9ad740f7c47ae032c2884113f8e87660d8f6e"}, - {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a5d3fbd02efd9cf6a8ffc2f17b53a33542f6b154e88dd7b42ef4a4c0700fdad"}, - {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5934e2833afeaf36bd1eadb57256239785f5af0220ed8d21c2896ec4d3a765f"}, - {file = "rpds_py-0.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:095b460e117685867d45548fbd8598a8d9999227e9061ee7f012d9d264e6048d"}, - {file = "rpds_py-0.9.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:91378d9f4151adc223d584489591dbb79f78814c0734a7c3bfa9c9e09978121c"}, - {file = "rpds_py-0.9.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:24a81c177379300220e907e9b864107614b144f6c2a15ed5c3450e19cf536fae"}, - {file = "rpds_py-0.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:de0b6eceb46141984671802d412568d22c6bacc9b230174f9e55fc72ef4f57de"}, - {file = "rpds_py-0.9.2-cp311-none-win32.whl", hash = "sha256:700375326ed641f3d9d32060a91513ad668bcb7e2cffb18415c399acb25de2ab"}, - {file = "rpds_py-0.9.2-cp311-none-win_amd64.whl", hash = "sha256:0766babfcf941db8607bdaf82569ec38107dbb03c7f0b72604a0b346b6eb3298"}, - {file = "rpds_py-0.9.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:b1440c291db3f98a914e1afd9d6541e8fc60b4c3aab1a9008d03da4651e67386"}, - {file = "rpds_py-0.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0f2996fbac8e0b77fd67102becb9229986396e051f33dbceada3debaacc7033f"}, - {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f30d205755566a25f2ae0382944fcae2f350500ae4df4e795efa9e850821d82"}, - {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:159fba751a1e6b1c69244e23ba6c28f879a8758a3e992ed056d86d74a194a0f3"}, - {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1f044792e1adcea82468a72310c66a7f08728d72a244730d14880cd1dabe36b"}, - {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9251eb8aa82e6cf88510530b29eef4fac825a2b709baf5b94a6094894f252387"}, - {file = "rpds_py-0.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01899794b654e616c8625b194ddd1e5b51ef5b60ed61baa7a2d9c2ad7b2a4238"}, - {file = 
"rpds_py-0.9.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0c43f8ae8f6be1d605b0465671124aa8d6a0e40f1fb81dcea28b7e3d87ca1e1"}, - {file = "rpds_py-0.9.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:207f57c402d1f8712618f737356e4b6f35253b6d20a324d9a47cb9f38ee43a6b"}, - {file = "rpds_py-0.9.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b52e7c5ae35b00566d244ffefba0f46bb6bec749a50412acf42b1c3f402e2c90"}, - {file = "rpds_py-0.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:978fa96dbb005d599ec4fd9ed301b1cc45f1a8f7982d4793faf20b404b56677d"}, - {file = "rpds_py-0.9.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:6aa8326a4a608e1c28da191edd7c924dff445251b94653988efb059b16577a4d"}, - {file = "rpds_py-0.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aad51239bee6bff6823bbbdc8ad85136c6125542bbc609e035ab98ca1e32a192"}, - {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd4dc3602370679c2dfb818d9c97b1137d4dd412230cfecd3c66a1bf388a196"}, - {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd9da77c6ec1f258387957b754f0df60766ac23ed698b61941ba9acccd3284d1"}, - {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:190ca6f55042ea4649ed19c9093a9be9d63cd8a97880106747d7147f88a49d18"}, - {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:876bf9ed62323bc7dcfc261dbc5572c996ef26fe6406b0ff985cbcf460fc8a4c"}, - {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa2818759aba55df50592ecbc95ebcdc99917fa7b55cc6796235b04193eb3c55"}, - {file = "rpds_py-0.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ea4d00850ef1e917815e59b078ecb338f6a8efda23369677c54a5825dbebb55"}, - {file = "rpds_py-0.9.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5855c85eb8b8a968a74dc7fb014c9166a05e7e7a8377fb91d78512900aadd13d"}, - {file = "rpds_py-0.9.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:14c408e9d1a80dcb45c05a5149e5961aadb912fff42ca1dd9b68c0044904eb32"}, - {file = "rpds_py-0.9.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:65a0583c43d9f22cb2130c7b110e695fff834fd5e832a776a107197e59a1898e"}, - {file = "rpds_py-0.9.2-cp38-none-win32.whl", hash = "sha256:71f2f7715935a61fa3e4ae91d91b67e571aeb5cb5d10331ab681256bda2ad920"}, - {file = "rpds_py-0.9.2-cp38-none-win_amd64.whl", hash = "sha256:674c704605092e3ebbbd13687b09c9f78c362a4bc710343efe37a91457123044"}, - {file = "rpds_py-0.9.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:07e2c54bef6838fa44c48dfbc8234e8e2466d851124b551fc4e07a1cfeb37260"}, - {file = "rpds_py-0.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f7fdf55283ad38c33e35e2855565361f4bf0abd02470b8ab28d499c663bc5d7c"}, - {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:890ba852c16ace6ed9f90e8670f2c1c178d96510a21b06d2fa12d8783a905193"}, - {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50025635ba8b629a86d9d5474e650da304cb46bbb4d18690532dd79341467846"}, - {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:517cbf6e67ae3623c5127206489d69eb2bdb27239a3c3cc559350ef52a3bbf0b"}, - {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0836d71ca19071090d524739420a61580f3f894618d10b666cf3d9a1688355b1"}, - {file = 
"rpds_py-0.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c439fd54b2b9053717cca3de9583be6584b384d88d045f97d409f0ca867d80f"}, - {file = "rpds_py-0.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f68996a3b3dc9335037f82754f9cdbe3a95db42bde571d8c3be26cc6245f2324"}, - {file = "rpds_py-0.9.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7d68dc8acded354c972116f59b5eb2e5864432948e098c19fe6994926d8e15c3"}, - {file = "rpds_py-0.9.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f963c6b1218b96db85fc37a9f0851eaf8b9040aa46dec112611697a7023da535"}, - {file = "rpds_py-0.9.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5a46859d7f947061b4010e554ccd1791467d1b1759f2dc2ec9055fa239f1bc26"}, - {file = "rpds_py-0.9.2-cp39-none-win32.whl", hash = "sha256:e07e5dbf8a83c66783a9fe2d4566968ea8c161199680e8ad38d53e075df5f0d0"}, - {file = "rpds_py-0.9.2-cp39-none-win_amd64.whl", hash = "sha256:682726178138ea45a0766907957b60f3a1bf3acdf212436be9733f28b6c5af3c"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:196cb208825a8b9c8fc360dc0f87993b8b260038615230242bf18ec84447c08d"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:c7671d45530fcb6d5e22fd40c97e1e1e01965fc298cbda523bb640f3d923b387"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83b32f0940adec65099f3b1c215ef7f1d025d13ff947975a055989cb7fd019a4"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f67da97f5b9eac838b6980fc6da268622e91f8960e083a34533ca710bec8611"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03975db5f103997904c37e804e5f340c8fdabbb5883f26ee50a255d664eed58c"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:987b06d1cdb28f88a42e4fb8a87f094e43f3c435ed8e486533aea0bf2e53d931"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c861a7e4aef15ff91233751619ce3a3d2b9e5877e0fcd76f9ea4f6847183aa16"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02938432352359805b6da099c9c95c8a0547fe4b274ce8f1a91677401bb9a45f"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ef1f08f2a924837e112cba2953e15aacfccbbfcd773b4b9b4723f8f2ddded08e"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:35da5cc5cb37c04c4ee03128ad59b8c3941a1e5cd398d78c37f716f32a9b7f67"}, - {file = "rpds_py-0.9.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:141acb9d4ccc04e704e5992d35472f78c35af047fa0cfae2923835d153f091be"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79f594919d2c1a0cc17d1988a6adaf9a2f000d2e1048f71f298b056b1018e872"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:a06418fe1155e72e16dddc68bb3780ae44cebb2912fbd8bb6ff9161de56e1798"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2eb034c94b0b96d5eddb290b7b5198460e2d5d0c421751713953a9c4e47d10"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b08605d248b974eb02f40bdcd1a35d3924c83a2a5e8f5d0fa5af852c4d960af"}, - {file = 
"rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a0805911caedfe2736935250be5008b261f10a729a303f676d3d5fea6900c96a"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab2299e3f92aa5417d5e16bb45bb4586171c1327568f638e8453c9f8d9e0f020"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c8d7594e38cf98d8a7df25b440f684b510cf4627fe038c297a87496d10a174f"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8b9ec12ad5f0a4625db34db7e0005be2632c1013b253a4a60e8302ad4d462afd"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1fcdee18fea97238ed17ab6478c66b2095e4ae7177e35fb71fbe561a27adf620"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:933a7d5cd4b84f959aedeb84f2030f0a01d63ae6cf256629af3081cf3e3426e8"}, - {file = "rpds_py-0.9.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:686ba516e02db6d6f8c279d1641f7067ebb5dc58b1d0536c4aaebb7bf01cdc5d"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:0173c0444bec0a3d7d848eaeca2d8bd32a1b43f3d3fde6617aac3731fa4be05f"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d576c3ef8c7b2d560e301eb33891d1944d965a4d7a2eacb6332eee8a71827db6"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed89861ee8c8c47d6beb742a602f912b1bb64f598b1e2f3d758948721d44d468"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1054a08e818f8e18910f1bee731583fe8f899b0a0a5044c6e680ceea34f93876"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99e7c4bb27ff1aab90dcc3e9d37ee5af0231ed98d99cb6f5250de28889a3d502"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c545d9d14d47be716495076b659db179206e3fd997769bc01e2d550eeb685596"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9039a11bca3c41be5a58282ed81ae422fa680409022b996032a43badef2a3752"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fb39aca7a64ad0c9490adfa719dbeeb87d13be137ca189d2564e596f8ba32c07"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:2d8b3b3a2ce0eaa00c5bbbb60b6713e94e7e0becab7b3db6c5c77f979e8ed1f1"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:99b1c16f732b3a9971406fbfe18468592c5a3529585a45a35adbc1389a529a03"}, - {file = "rpds_py-0.9.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:c27ee01a6c3223025f4badd533bea5e87c988cb0ba2811b690395dfe16088cfe"}, - {file = "rpds_py-0.9.2.tar.gz", hash = "sha256:8d70e8f14900f2657c249ea4def963bed86a29b81f81f5b76b5a9215680de945"}, + {file = "rpds_py-0.10.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:9f00d54b18dd837f1431d66b076737deb7c29ce3ebb8412ceaf44d5e1954ac0c"}, + {file = "rpds_py-0.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f4d561f4728f825e3b793a53064b606ca0b6fc264f67d09e54af452aafc5b82"}, + {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:013d6c784150d10236a74b4094a79d96a256b814457e388fc5a4ba9efe24c402"}, + {file = 
"rpds_py-0.10.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd1142d22fdb183a0fff66d79134bf644401437fed874f81066d314c67ee193c"}, + {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a0536ed2b9297c75104e1a3da330828ba1b2639fa53b38d396f98bf7e3c68df"}, + {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:41bd430b7b63aa802c02964e331ac0b177148fef5f807d2c90d05ce71a52b4d4"}, + {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e8474f7233fe1949ce4e03bea698a600c2d5d6b51dab6d6e6336dbe69acf23e"}, + {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d9d7efaad48b859053b90dedd69bc92f2095084251e732e4c57ac9726bcb1e64"}, + {file = "rpds_py-0.10.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5612b0b1de8d5114520094bd5fc3d04eb8af6f3e10d48ef05b7c8e77c1fd9545"}, + {file = "rpds_py-0.10.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5d5eaf988951f6ecb6854ca3300b87123599c711183c83da7ce39717a7cbdbce"}, + {file = "rpds_py-0.10.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:75c8766734ac0053e1d683567e65e85306c4ec62631b0591caeb287ac8f72e08"}, + {file = "rpds_py-0.10.2-cp310-none-win32.whl", hash = "sha256:8de9b88f0cbac73cfed34220d13c57849e62a7099a714b929142425e926d223a"}, + {file = "rpds_py-0.10.2-cp310-none-win_amd64.whl", hash = "sha256:2275f1a022e2383da5d2d101fe11ccdcbae799148c4b83260a4b9309fa3e1fc2"}, + {file = "rpds_py-0.10.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dd91a7d7a9ce7f4983097c91ce211f3e5569cc21caa16f2692298a07e396f82b"}, + {file = "rpds_py-0.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e82b4a70cc67094f3f3fd77579702f48fcf1de7bdc67d79b8f1e24d089a6162c"}, + {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e281b71922208e00886e4b7ffbfcf27874486364f177418ab676f102130e7ec9"}, + {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3eb1a0d2b6d232d1bcdfc3fcc5f7b004ab3fbd9203011a3172f051d4527c0b6"}, + {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02945ae38fd78efc40900f509890de84cfd5ffe2cd2939eeb3a8800dc68b87cb"}, + {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccfb77f6dc8abffa6f1c7e3975ed9070a41ce5fcc11154d2bead8c1baa940f09"}, + {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af52078719209bef33e38131486fd784832dd8d1dc9b85f00a44f6e7437dd021"}, + {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56ba7c1100ed079527f2b995bf5486a2e557e6d5b733c52e8947476338815b69"}, + {file = "rpds_py-0.10.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:899b03a3be785a7e1ff84b237da71f0efa2f021512f147dd34ffdf7aa82cb678"}, + {file = "rpds_py-0.10.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:22e6de18f00583f06928cc8d0993104ecc62f7c6da6478db2255de89a30e45d1"}, + {file = "rpds_py-0.10.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:edd74b760a6bb950397e7a7bd2f38e6700f6525062650b1d77c6d851b82f02c2"}, + {file = "rpds_py-0.10.2-cp311-none-win32.whl", hash = "sha256:18909093944727e068ebfc92e2e6ed1c4fa44135507c1c0555213ce211c53214"}, + {file = "rpds_py-0.10.2-cp311-none-win_amd64.whl", hash = 
"sha256:9568764e72d85cf7855ca78b48e07ed1be47bf230e2cea8dabda3c95f660b0ff"}, + {file = "rpds_py-0.10.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:0fc625059b83695fbb4fc8b7a8b66fa94ff9c7b78c84fb9986cd53ff88a28d80"}, + {file = "rpds_py-0.10.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c86231c66e4f422e7c13ea6200bb4048b3016c8bfd11b4fd0dabd04d2c8e3501"}, + {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56777c57246e048908b550af9b81b0ec9cf804fd47cb7502ccd93238bd6025c2"}, + {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a4cb372e22e9c879bd9a9cc9b20b7c1fbf30a605ac953da45ecec05d8a6e1c77"}, + {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa3b3a43dabc4cc57a7800f526cbe03f71c69121e21b863fdf497b59b462b163"}, + {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d222086daa55421d599609b32d0ebe544e57654c4a0a1490c54a7ebaa67561"}, + {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:529aab727f54a937085184e7436e1d0e19975cf10115eda12d37a683e4ee5342"}, + {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e9b1531d6a898bdf086acb75c41265c7ec4331267d7619148d407efc72bd24"}, + {file = "rpds_py-0.10.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c2772bb95062e3f9774140205cd65d8997e39620715486cf5f843cf4ad8f744c"}, + {file = "rpds_py-0.10.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ba1b28e44f611f3f2b436bd8290050a61db4b59a8e24be4465f44897936b3824"}, + {file = "rpds_py-0.10.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5aba767e64b494483ad60c4873bec78d16205a21f8247c99749bd990d9c846c2"}, + {file = "rpds_py-0.10.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e1954f4b239d1a92081647eecfd51cbfd08ea16eb743b8af1cd0113258feea14"}, + {file = "rpds_py-0.10.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:de4a2fd524993578fe093044f291b4b24aab134390030b3b9b5f87fd41ab7e75"}, + {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e69737bd56006a86fd5a78b2b85447580a6138c930a75eb9ef39fe03d90782b1"}, + {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f40abbcc0a7d9a8a80870af839d317e6932533f98682aabd977add6c53beeb23"}, + {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29ec8507664f94cc08457d98cfc41c3cdbddfa8952438e644177a29b04937876"}, + {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bcde80aefe7054fad6277762fb7e9d35c72ea479a485ae1bb14629c640987b30"}, + {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a65de5c02884760a14a58304fb6303f9ddfc582e630f385daea871e1bdb18686"}, + {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e92e5817eb6bfed23aa5e45bfe30647b83602bdd6f9e25d63524d4e6258458b0"}, + {file = "rpds_py-0.10.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2c8fc6c841ada60a86d29c9ebe2e8757c47eda6553f3596c560e59ca6e9b6fa1"}, + {file = "rpds_py-0.10.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:8557c807388e6617161fe51b1a4747ea8d1133f2d2ad8e79583439abebe58fbd"}, + {file = "rpds_py-0.10.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:00e97d43a36811b78fa9ad9d3329bf34f76a31e891a7031a2ac01450c9b168ab"}, + 
{file = "rpds_py-0.10.2-cp38-none-win32.whl", hash = "sha256:1ed3d5385d14be894e12a9033be989e012214a9811e7194849c94032ad69682a"}, + {file = "rpds_py-0.10.2-cp38-none-win_amd64.whl", hash = "sha256:02b4a2e28eb24dac4ef43dda4f6a6f7766e355179b143f7d0c76a1c5488a307b"}, + {file = "rpds_py-0.10.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:2a55631b93e47956fbc97d69ba2054a8c6a4016f9a3064ec4e031f5f1030cb90"}, + {file = "rpds_py-0.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ffbf1b38c88d0466de542e91b08225d51782282512f8e2b11715126c41fda48"}, + {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213f9ef5c02ec2f883c1075d25a873149daadbaea50d18d622e9db55ec9849c2"}, + {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b00150a9a3fd0a8efaa90bc2696c105b04039d50763dd1c95a34c88c5966cb57"}, + {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab0f7aabdbce4a202e013083eeab71afdb85efa405dc4a06fea98cde81204675"}, + {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cd0c9fb5d40887500b4ed818770c68ab4fa6e0395d286f9704be6751b1b7d98"}, + {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8578fc6c8bdd0201327503720fa581000b4bd3934abbf07e2628d1ad3de157d"}, + {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d27d08056fcd61ff47a0cd8407eff4d3e816c82cb6b9c6f0ce9a0ad49225f81"}, + {file = "rpds_py-0.10.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c8f6526df47953b07c45b95c4d1da6b9a0861c0e5da0271db96bb1d807825412"}, + {file = "rpds_py-0.10.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:177c033e467a66a054dd3a9534167234a3d0b2e41445807b13b626e01da25d92"}, + {file = "rpds_py-0.10.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c74cbee9e532dc34371127f7686d6953e5153a1f22beab7f953d95ee4a0fe09"}, + {file = "rpds_py-0.10.2-cp39-none-win32.whl", hash = "sha256:05a1382905026bdd560f806c8c7c16e0f3e3fb359ba8868203ca6e5799884968"}, + {file = "rpds_py-0.10.2-cp39-none-win_amd64.whl", hash = "sha256:3fd503c27e7b7034128e30847ecdb4bff4ca5e60f29ad022a9f66ae8940d54ac"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4a96147791e49e84207dd1530109aa0e9eeaf1c8b7a59f150047fc0fcdf9bb64"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:203eb1532d51591d32e8dfafd60b5d31347ea7278c8da02b4b550287f6abe28b"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2f416cdfe92f5fbb77177f5f3f7830059d1582db05f2c7119bf80069d1ab69b"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2660000e1a113869c86eb5cc07f3343467490f3cd9d0299f81da9ddae7137b7"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1adb04e4b4e41bf30aaa77eeb169c1b9ba9e5010e2e6ce8d6c17e1446edc9b68"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bca97521ee786087f0c5ef318fef3eef0266a9c3deff88205523cf353af7394"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4969592e3cdeefa4cbb15a26cec102cbd4a1d6e5b695fac9fa026e19741138c8"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:df61f818edf7c8626bfa392f825860fb670b5f8336e238eb0ec7e2a5689cdded"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:b589d93a60e78fe55d5bc76ee8c2bf945dbdbb7cd16044c53e0307604e448de1"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:73da69e1f612c3e682e34dcb971272d90d6f27b2c99acff444ca455a89978574"}, + {file = "rpds_py-0.10.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:89438e8885a186c69fe31f7ef98bb2bf29688c466c3caf9060f404c0be89ae80"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c4ecc4e9a5d73a816cae36ee6b5d8b7a0c72013cae1e101406e832887c3dc2d8"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:907b214da5d2fcff0b6ddb83de1333890ca92abaf4bbf8d9c61dc1b95c87fd6e"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb44644371eaa29a3aba7b69b1862d0d56f073bb7585baa32e4271a71a91ee82"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80c3cf46511653f94dfe07c7c79ab105c4164d6e1dfcb35b7214fb9af53eaef4"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaba0613c759ebf95988a84f766ca6b7432d55ce399194f95dde588ad1be0878"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0527c97dcd8bb983822ee31d3760187083fd3ba18ac4dd22cf5347c89d5628f4"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cdfd649011ce2d90cb0dd304c5aba1190fac0c266d19a9e2b96b81cfd150a09"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:75eea40355a8690459c7291ce6c8ce39c27bd223675c7da6619f510c728feb97"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1b804cfad04f862d6a84af9d1ad941b06f671878f0f7ecad6c92007d423de6"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:bf77f9017fcfa1232f98598a637406e6c33982ccba8a5922339575c3e2b90ea5"}, + {file = "rpds_py-0.10.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:46c4c550bf59ce05d6bff2c98053822549aaf9fbaf81103edea325e03350bca1"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:46af4a742b90c7460e94214f923452c2c1d050a9da1d2b8d4c70cbc045e692b7"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2a86d246a160d98d820ee7d02dc18c923c228de095be362e57b9fd8970b2c4a1"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae141c9017f8f473a6ee07a9425da021816a9f8c0683c2e5442f0ccf56b0fc62"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1147bc3d0dd1e549d991110d0a09557ec9f925dbc1ca62871fcdab2ec9d716b"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fce7a8ee8d0f682c953c0188735d823f0fcb62779bf92cd6ba473a8e730e26ad"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4c7f9d70f99e1fbcbf57c75328b80e1c0a7f6cad43e75efa90a97221be5efe15"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b309908b6ff5ffbf6394818cb73b5a2a74073acee2c57fe8719046389aeff0d"}, + {file = 
"rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ff1f585a0fdc1415bd733b804f33d386064a308672249b14828130dd43e7c31"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0188b580c490bccb031e9b67e9e8c695a3c44ac5e06218b152361eca847317c3"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:abe081453166e206e3a8c6d8ace57214c17b6d9477d7601ac14a365344dbc1f4"}, + {file = "rpds_py-0.10.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9118de88c16947eaf5b92f749e65b0501ea69e7c2be7bd6aefc12551622360e1"}, + {file = "rpds_py-0.10.2.tar.gz", hash = "sha256:289073f68452b96e70990085324be7223944c7409973d13ddfe0eea1c1b5663b"}, ] [[package]] @@ -3175,6 +3454,165 @@ files = [ {file = "ruff-0.0.249.tar.gz", hash = "sha256:b590689f08ecef971c45555cbda6854cdf48f3828fc326802828e851b1a14b3d"}, ] +[[package]] +name = "safetensors" +version = "0.3.3" +description = "Fast and Safe Tensor serialization" +optional = true +python-versions = "*" +files = [ + {file = "safetensors-0.3.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:92e4d0c8b2836120fddd134474c5bda8963f322333941f8b9f643e5b24f041eb"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3dcadb6153c42addc9c625a622ebde9293fabe1973f9ef31ba10fb42c16e8536"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:08f26b61e1b0a14dc959aa9d568776bd038805f611caef1de04a80c468d4a7a4"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:17f41344d9a075f2f21b289a49a62e98baff54b5754240ba896063bce31626bf"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:f1045f798e1a16a6ced98d6a42ec72936d367a2eec81dc5fade6ed54638cd7d2"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_13_0_x86_64.whl", hash = "sha256:eaf0e4bc91da13f21ac846a39429eb3f3b7ed06295a32321fa3eb1a59b5c70f3"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25149180d4dc8ca48bac2ac3852a9424b466e36336a39659b35b21b2116f96fc"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e943bf78c39de8865398a71818315e7d5d1af93c7b30d4da3fc852e62ad9bc"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cccfcac04a010354e87c7a2fe16a1ff004fc4f6e7ef8efc966ed30122ce00bc7"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07121f427e646a50d18c1be0fa1a2cbf6398624c31149cd7e6b35486d72189e"}, + {file = "safetensors-0.3.3-cp310-cp310-win32.whl", hash = "sha256:a85e29cbfddfea86453cc0f4889b4bcc6b9c155be9a60e27be479a34e199e7ef"}, + {file = "safetensors-0.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:e13adad4a3e591378f71068d14e92343e626cf698ff805f61cdb946e684a218e"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:cbc3312f134baf07334dd517341a4b470b2931f090bd9284888acb7dfaf4606f"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d15030af39d5d30c22bcbc6d180c65405b7ea4c05b7bab14a570eac7d7d43722"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:f84a74cbe9859b28e3d6d7715ac1dd3097bebf8d772694098f6d42435245860c"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:10d637423d98ab2e6a4ad96abf4534eb26fcaf8ca3115623e64c00759374e90d"}, + {file = 
"safetensors-0.3.3-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:3b46f5de8b44084aff2e480874c550c399c730c84b2e8ad1bddb062c94aa14e9"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76da691a82dfaf752854fa6d17c8eba0c8466370c5ad8cf1bfdf832d3c7ee17"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4e342fd54e66aa9512dd13e410f791e47aa4feeb5f4c9a20882c72f3d272f29"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:178fd30b5dc73bce14a39187d948cedd0e5698e2f055b7ea16b5a96c9b17438e"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e8fdf7407dba44587ed5e79d5de3533d242648e1f2041760b21474bd5ea5c8c"}, + {file = "safetensors-0.3.3-cp311-cp311-win32.whl", hash = "sha256:7d3b744cee8d7a46ffa68db1a2ff1a1a432488e3f7a5a97856fe69e22139d50c"}, + {file = "safetensors-0.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f579877d30feec9b6ba409d05fa174633a4fc095675a4a82971d831a8bb60b97"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:2fff5b19a1b462c17322998b2f4b8bce43c16fe208968174d2f3a1446284ceed"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:41adb1d39e8aad04b16879e3e0cbcb849315999fad73bc992091a01e379cb058"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_12_0_x86_64.whl", hash = "sha256:0f2b404250b3b877b11d34afcc30d80e7035714a1116a3df56acaca6b6c00096"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_13_0_x86_64.whl", hash = "sha256:b43956ef20e9f4f2e648818a9e7b3499edd6b753a0f5526d4f6a6826fbee8446"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d61a99b34169981f088ccfbb2c91170843efc869a0a0532f422db7211bf4f474"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0008aab36cd20e9a051a68563c6f80d40f238c2611811d7faa5a18bf3fd3984"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93d54166072b143084fdcd214a080a088050c1bb1651016b55942701b31334e4"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c32ee08f61cea56a5d62bbf94af95df6040c8ab574afffaeb7b44ae5da1e9e3"}, + {file = "safetensors-0.3.3-cp37-cp37m-win32.whl", hash = "sha256:351600f367badd59f7bfe86d317bb768dd8c59c1561c6fac43cafbd9c1af7827"}, + {file = "safetensors-0.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:034717e297849dae1af0a7027a14b8647bd2e272c24106dced64d83e10d468d1"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8530399666748634bc0b301a6a5523756931b0c2680d188e743d16304afe917a"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:9d741c1f1621e489ba10aa3d135b54202684f6e205df52e219d5eecd673a80c9"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:0c345fd85b4d2093a5109596ff4cd9dfc2e84992e881b4857fbc4a93a3b89ddb"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:69ccee8d05f55cdf76f7e6c87d2bdfb648c16778ef8acfd2ecc495e273e9233e"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_13_0_arm64.whl", hash = "sha256:c08a9a4b7a4ca389232fa8d097aebc20bbd4f61e477abc7065b5c18b8202dede"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_13_0_x86_64.whl", hash = "sha256:a002868d2e3f49bbe81bee2655a411c24fa1f8e68b703dec6629cb989d6ae42e"}, + 
{file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bd2704cb41faa44d3ec23e8b97330346da0395aec87f8eaf9c9e2c086cdbf13"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b2951bf3f0ad63df5e6a95263652bd6c194a6eb36fd4f2d29421cd63424c883"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07114cec116253ca2e7230fdea30acf76828f21614afd596d7b5438a2f719bd8"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ab43aeeb9eadbb6b460df3568a662e6f1911ecc39387f8752afcb6a7d96c087"}, + {file = "safetensors-0.3.3-cp38-cp38-win32.whl", hash = "sha256:f2f59fce31dd3429daca7269a6b06f65e6547a0c248f5116976c3f1e9b73f251"}, + {file = "safetensors-0.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c31ca0d8610f57799925bf08616856b39518ab772c65093ef1516762e796fde4"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:59a596b3225c96d59af412385981f17dd95314e3fffdf359c7e3f5bb97730a19"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:82a16e92210a6221edd75ab17acdd468dd958ef5023d9c6c1289606cc30d1479"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:98a929e763a581f516373ef31983ed1257d2d0da912a8e05d5cd12e9e441c93a"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:12b83f1986cd16ea0454c636c37b11e819d60dd952c26978310a0835133480b7"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_13_0_arm64.whl", hash = "sha256:f439175c827c2f1bbd54df42789c5204a10983a30bc4242bc7deaf854a24f3f0"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_13_0_x86_64.whl", hash = "sha256:0085be33b8cbcb13079b3a8e131656e05b0bc5e6970530d4c24150f7afd76d70"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3ec70c87b1e910769034206ad5efc051069b105aac1687f6edcd02526767f4"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f490132383e5e490e710608f4acffcb98ed37f91b885c7217d3f9f10aaff9048"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79d1b6c7ed5596baf79c80fbce5198c3cdcc521ae6a157699f427aba1a90082d"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad3cc8006e7a86ee7c88bd2813ec59cd7cc75b03e6fa4af89b9c7b235b438d68"}, + {file = "safetensors-0.3.3-cp39-cp39-win32.whl", hash = "sha256:ab29f54c6b8c301ca05fa014728996bd83aac6e21528f893aaf8945c71f42b6d"}, + {file = "safetensors-0.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:0fa82004eae1a71e2aa29843ef99de9350e459a0fc2f65fc6ee0da9690933d2d"}, + {file = "safetensors-0.3.3.tar.gz", hash = "sha256:edb7072d788c4f929d0f5735d3a2fb51e5a27f833587828583b7f5747af1a2b8"}, +] + +[package.extras] +all = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (==2.11.0)", "torch (>=1.10)"] +dev = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", 
"pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (==2.11.0)", "torch (>=1.10)"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)"] +pinned-tf = ["tensorflow (==2.11.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["numpy (>=1.21.6)", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "numpy (>=1.21.6)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)"] +torch = ["numpy (>=1.21.6)", "torch (>=1.10)"] + +[[package]] +name = "scikit-learn" +version = "1.3.0" +description = "A set of python modules for machine learning and data mining" +optional = true +python-versions = ">=3.8" +files = [ + {file = "scikit-learn-1.3.0.tar.gz", hash = "sha256:8be549886f5eda46436b6e555b0e4873b4f10aa21c07df45c4bc1735afbccd7a"}, + {file = "scikit_learn-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:981287869e576d42c682cf7ca96af0c6ac544ed9316328fd0d9292795c742cf5"}, + {file = "scikit_learn-1.3.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:436aaaae2c916ad16631142488e4c82f4296af2404f480e031d866863425d2a2"}, + {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7e28d8fa47a0b30ae1bd7a079519dd852764e31708a7804da6cb6f8b36e3630"}, + {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae80c08834a473d08a204d966982a62e11c976228d306a2648c575e3ead12111"}, + {file = "scikit_learn-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:552fd1b6ee22900cf1780d7386a554bb96949e9a359999177cf30211e6b20df6"}, + {file = "scikit_learn-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79970a6d759eb00a62266a31e2637d07d2d28446fca8079cf9afa7c07b0427f8"}, + {file = "scikit_learn-1.3.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:850a00b559e636b23901aabbe79b73dc604b4e4248ba9e2d6e72f95063765603"}, + {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee04835fb016e8062ee9fe9074aef9b82e430504e420bff51e3e5fffe72750ca"}, + {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d953531f5d9f00c90c34fa3b7d7cfb43ecff4c605dac9e4255a20b114a27369"}, + {file = "scikit_learn-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:151ac2bf65ccf363664a689b8beafc9e6aae36263db114b4ca06fbbbf827444a"}, + {file = "scikit_learn-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a885a9edc9c0a341cab27ec4f8a6c58b35f3d449c9d2503a6fd23e06bbd4f6a"}, + {file = "scikit_learn-1.3.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9877af9c6d1b15486e18a94101b742e9d0d2f343d35a634e337411ddb57783f3"}, + {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c470f53cea065ff3d588050955c492793bb50c19a92923490d18fcb637f6383a"}, + {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd6e2d7389542eae01077a1ee0318c4fec20c66c957f45c7aac0c6eb0fe3c612"}, + {file = "scikit_learn-1.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:3a11936adbc379a6061ea32fa03338d4ca7248d86dd507c81e13af428a5bc1db"}, + {file = "scikit_learn-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:998d38fcec96584deee1e79cd127469b3ad6fefd1ea6c2dfc54e8db367eb396b"}, + {file = 
"scikit_learn-1.3.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ded35e810438a527e17623ac6deae3b360134345b7c598175ab7741720d7ffa7"}, + {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e8102d5036e28d08ab47166b48c8d5e5810704daecf3a476a4282d562be9a28"}, + {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7617164951c422747e7c32be4afa15d75ad8044f42e7d70d3e2e0429a50e6718"}, + {file = "scikit_learn-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d54fb9e6038284548072df22fd34777e434153f7ffac72c8596f2d6987110dd"}, +] + +[package.dependencies] +joblib = ">=1.1.1" +numpy = ">=1.17.3" +scipy = ">=1.5.0" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] + +[[package]] +name = "scipy" +version = "1.9.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, 
+ {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[package.dependencies] +numpy = ">=1.18.5,<1.26.0" + +[package.extras] +dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] +test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "send2trash" version = "1.8.2" @@ -3191,6 +3629,82 @@ nativelib = ["pyobjc-framework-Cocoa", "pywin32"] objc = ["pyobjc-framework-Cocoa"] win32 = ["pywin32"] +[[package]] +name = "sentence-transformers" +version = "2.2.2" +description = "Multilingual text embeddings" +optional = true +python-versions = ">=3.6.0" +files = [ + {file = "sentence-transformers-2.2.2.tar.gz", hash = "sha256:dbc60163b27de21076c9a30d24b5b7b6fa05141d68cf2553fa9a77bf79a29136"}, +] + +[package.dependencies] +huggingface-hub = ">=0.4.0" +nltk = "*" +numpy = "*" +scikit-learn = "*" +scipy = "*" +sentencepiece = "*" +torch = ">=1.6.0" +torchvision = "*" +tqdm = "*" +transformers = ">=4.6.0,<5.0.0" + +[[package]] +name = "sentencepiece" +version = "0.1.99" +description = "SentencePiece python wrapper" +optional = true +python-versions = "*" +files = [ + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73"}, + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7"}, + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588"}, + {file = 
"sentencepiece-0.1.99-cp310-cp310-win32.whl", hash = "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc"}, + {file = "sentencepiece-0.1.99-cp310-cp310-win_amd64.whl", hash = "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b"}, + {file = "sentencepiece-0.1.99-cp311-cp311-win32.whl", hash = "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0"}, + {file = "sentencepiece-0.1.99-cp311-cp311-win_amd64.whl", hash = "sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-win32.whl", hash = "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-win_amd64.whl", hash = "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-win32.whl", hash = "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-win_amd64.whl", hash = "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc"}, + {file = "sentencepiece-0.1.99-cp38-cp38-win32.whl", hash = "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be"}, + {file = "sentencepiece-0.1.99-cp38-cp38-win_amd64.whl", hash = "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280"}, + {file = "sentencepiece-0.1.99-cp39-cp39-win32.whl", hash = "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8"}, + {file = "sentencepiece-0.1.99-cp39-cp39-win_amd64.whl", hash = "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd"}, + {file = "sentencepiece-0.1.99.tar.gz", hash = "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, +] + [[package]] name = "setuptools" version = "67.8.0" @@ -3252,13 +3766,13 @@ files = [ [[package]] name = "soupsieve" -version = "2.4.1" +version = "2.5" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, - {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, + {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, + {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, ] [[package]] @@ -3507,6 +4021,20 @@ pure-eval = "*" [package.extras] tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + [[package]] name = "tenacity" version = "8.2.3" @@ -3619,6 +4147,17 @@ mxnet = ["mxnet (>=1.5.1,<1.6.0)"] tensorflow = ["tensorflow (>=2.0.0,<2.6.0)"] torch = ["torch (>=1.6.0)"] +[[package]] +name = "threadpoolctl" +version = "3.2.0" +description = "threadpoolctl" +optional = true +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = "sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"}, + {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"}, +] + [[package]] name = "tinycss2" version = "1.2.1" @@ -3654,6 +4193,60 @@ idna = "*" requests = ">=2.1.0" requests-file = ">=1.4" +[[package]] +name = "tokenizers" +version = "0.13.3" +description = "Fast and Customizable Tokenizers" +optional = true +python-versions = "*" +files = [ + {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, + {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, + {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, + {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, + {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, + {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = 
"sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, + {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, + {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, + {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, + {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, + {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, + {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, + {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, + {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, + {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, + {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = 
"sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, + {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, + {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, + {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, + {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, +] + +[package.extras] +dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] + [[package]] name = "tomli" version = "2.0.1" @@ -3665,6 +4258,83 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "torch" +version = "2.0.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"}, + {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"}, + {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"}, + {file = "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"}, + {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"}, + {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"}, + {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"}, + {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"}, + {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"}, + {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"}, + {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"}, + {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = 
"sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"}, + {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"}, + {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"}, + {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"}, + {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"}, + {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"}, + {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"}, + {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"}, + {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"}, +] + +[package.dependencies] +filelock = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[[package]] +name = "torchvision" +version = "0.15.2" +description = "image and video datasets and models for torch deep learning" +optional = true +python-versions = ">=3.8" +files = [ + {file = "torchvision-0.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7754088774e810c5672b142a45dcf20b1bd986a5a7da90f8660c43dc43fb850c"}, + {file = "torchvision-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37eb138e13f6212537a3009ac218695483a635c404b6cc1d8e0d0d978026a86d"}, + {file = "torchvision-0.15.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:54143f7cc0797d199b98a53b7d21c3f97615762d4dd17ad45a41c7e80d880e73"}, + {file = "torchvision-0.15.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:1eefebf5fbd01a95fe8f003d623d941601c94b5cec547b420da89cb369d9cf96"}, + {file = "torchvision-0.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:96fae30c5ca8423f4b9790df0f0d929748e32718d88709b7b567d2f630c042e3"}, + {file = "torchvision-0.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5f35f6bd5bcc4568e6522e4137fa60fcc72f4fa3e615321c26cd87e855acd398"}, + {file = "torchvision-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:757505a0ab2be7096cb9d2bf4723202c971cceddb72c7952a7e877f773de0f8a"}, + {file = "torchvision-0.15.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:012ad25cfd9019ff9b0714a168727e3845029be1af82296ff1e1482931fa4b80"}, + {file = "torchvision-0.15.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b02a7ffeaa61448737f39a4210b8ee60234bda0515a0c0d8562f884454105b0f"}, + {file = "torchvision-0.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:10be76ceded48329d0a0355ac33da131ee3993ff6c125e4a02ab34b5baa2472c"}, + {file = "torchvision-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8f12415b686dba884fb086f53ac803f692be5a5cdd8a758f50812b30fffea2e4"}, + {file = "torchvision-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31211c01f8b8ec33b8a638327b5463212e79a03e43c895f88049f97af1bd12fd"}, + {file = "torchvision-0.15.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c55f9889e436f14b4f84a9c00ebad0d31f5b4626f10cf8018e6c676f92a6d199"}, + {file = "torchvision-0.15.2-cp38-cp38-manylinux2014_aarch64.whl", hash = 
"sha256:9a192f2aa979438f23c20e883980b23d13268ab9f819498774a6d2eb021802c2"}, + {file = "torchvision-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:c07071bc8d02aa8fcdfe139ab6a1ef57d3b64c9e30e84d12d45c9f4d89fb6536"}, + {file = "torchvision-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4790260fcf478a41c7ecc60a6d5200a88159fdd8d756e9f29f0f8c59c4a67a68"}, + {file = "torchvision-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:987ab62225b4151a11e53fd06150c5258ced24ac9d7c547e0e4ab6fbca92a5ce"}, + {file = "torchvision-0.15.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:63df26673e66cba3f17e07c327a8cafa3cce98265dbc3da329f1951d45966838"}, + {file = "torchvision-0.15.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b85f98d4cc2f72452f6792ab4463a3541bc5678a8cdd3da0e139ba2fe8b56d42"}, + {file = "torchvision-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:07c462524cc1bba5190c16a9d47eac1fca024d60595a310f23c00b4ffff18b30"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" +requests = "*" +torch = "2.0.1" + +[package.extras] +scipy = ["scipy"] + [[package]] name = "tornado" version = "6.3.3" @@ -3720,6 +4390,75 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] +[[package]] +name = "transformers" +version = "4.33.1" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "transformers-4.33.1-py3-none-any.whl", hash = "sha256:0630c2d26448d7c6cb78435e6c43910c89e99387badea6be1f565ffa3f093f1d"}, + {file = "transformers-4.33.1.tar.gz", hash = "sha256:744265e9f0724d22c229938f28376af54abce730ef647f35bd1685abf49912a4"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.15.1,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.3.1" +tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.20.3)"] +agents = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.10,!=1.12.0)"] +all = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.20.3)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] +dev 
= ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", 
"ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] +docs-specific = ["hf-doc-builder"] +fairscale = ["fairscale (>0.3)"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune]", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (>=0.0.241,<=0.0.259)", "urllib3 (<2.0.0)"] +ray = ["ray[tune]"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] +tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +timm = ["timm"] +tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"] +torch = ["accelerate (>=0.20.3)", "torch (>=1.10,!=1.12.0)"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (<10.0.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.15.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = ["Pillow (<10.0.0)"] + [[package]] name = "typer" version = "0.9.0" @@ -3778,6 +4517,17 @@ files = [ mypy-extensions = ">=0.3.0" typing-extensions = ">=3.7.4" +[[package]] +name = "tzdata" +version = "2023.3" +description = "Provider of IANA time zone data" +optional = true +python-versions = ">=2" +files = [ + {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, + {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, +] + [[package]] name = "uri-template" version = "1.3.0" @@ 
-3809,6 +4559,33 @@ secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17. socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "vowpal-wabbit-next" +version = "0.6.0" +description = "Experimental python bindings for VowpalWabbit" +optional = true +python-versions = ">=3.7" +files = [ + {file = "vowpal-wabbit-next-0.6.0.tar.gz", hash = "sha256:f0381614d99fac6a0f52e995ee0bfc7b681054f397bea7ff08b8a523d5315a54"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:cfbb831cfe9eb81185aff7cdca437ae17c6d9aca8d74e26c326e3ef4ee8e81e7"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d31829778f9c600f5c121f614516ca1bc9ede5d1bc77b1eb3b59b32d9138db9"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:714347606ab302a2f72870b6ae6dce58de4bec1b489f4bd65d80a8e326e1db8a"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-macosx_10_13_universal2.whl", hash = "sha256:3a8482d5c0b9357fdb36b62d659e6b74e93aeab165b910292572a98e91d7a014"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e4349099b938102f51fb6fedf035bc1deacb2971cd2a48641ca7d45186efda0"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:c8f58cdc49f270b1bed6f0fdd7520c8ba1b328de5cd8a2760c0ec70a630de92e"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b7052ce7212fd1cae8ffd966e240c814f3c1df08fd612437d48f0f23e7694c"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d24d9c380d0e9b41151337c7f9e2a33ec5bfd738fdee9f65c1a40e486234aca3"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-macosx_10_13_universal2.whl", hash = "sha256:0d77a8c55249ec9a7f404939ecc6948db0527e522e8a7ae149ec7cd29b3ade04"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa2f52f1267fbc26c7757335f9c76a0f00b112971e04c85b8a9bc9e82300597"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d04f91200ecae73196d9f5601853d63afce8c1c8a0d310a608e8ddfa3b190cb"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-macosx_10_13_universal2.whl", hash = "sha256:2df4a652729c0db34afd8fb4fc49b0090d6f061e2d49899e5f092fd4c3d23253"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c289a260ab759f04903b441701cff66ea74d6c061d966caaba0c65ac12d05528"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d022cab07274f227df159a81bccf034def7dd54ad70392ee98743ffa4953072"}, +] + +[package.dependencies] +numpy = "*" + [[package]] name = "wasabi" version = "1.1.2" @@ -3862,13 +4639,13 @@ files = [ [[package]] name = "websocket-client" -version = "1.6.2" +version = "1.6.3" description = "WebSocket client for Python with low level API options" optional = false python-versions = ">=3.8" files = [ - {file = "websocket-client-1.6.2.tar.gz", hash = "sha256:53e95c826bf800c4c465f50093a8c4ff091c7327023b10bfaff40cf1ef170eaa"}, - {file = "websocket_client-1.6.2-py3-none-any.whl", hash = "sha256:ce54f419dfae71f4bdba69ebe65bf7f0a93fe71bc009ad3a010aacc3eebad537"}, + {file = "websocket-client-1.6.3.tar.gz", hash = "sha256:3aad25d31284266bcfcfd1fd8a743f63282305a364b8d0948a43bd606acc652f"}, + {file = "websocket_client-1.6.3-py3-none-any.whl", hash = 
"sha256:6cfc30d051ebabb73a5fa246efdcc14c8fbebbd0330f8984ac3bb6d9edd2ad03"}, ] [package.extras] @@ -3990,9 +4767,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer"] +extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer", "sentence-transformers", "vowpal-wabbit-next"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "66ac482bd05eb74414210ac28fc1e8dae1a9928a4a1314e1326fada3551aa8ad" +content-hash = "a392728e7880f0fc679885888dbc69838f6de94607803fec40b4640ae63d02d8" diff --git a/libs/experimental/pyproject.toml b/libs/experimental/pyproject.toml index 8e876c392c..0b7124e460 100644 --- a/libs/experimental/pyproject.toml +++ b/libs/experimental/pyproject.toml @@ -14,6 +14,9 @@ langchain = ">=0.0.239" presidio-anonymizer = {version = "^2.2.33", optional = true} presidio-analyzer = {version = "^2.2.33", optional = true} faker = {version = "^19.3.1", optional = true} +vowpal-wabbit-next = {version = "0.6.0", optional = true} +sentence-transformers = {version = "^2", optional = true} +pandas = {version = "^2.0.1", optional = true} [tool.poetry.group.lint.dependencies] @@ -42,6 +45,8 @@ extended_testing = [ "presidio-anonymizer", "presidio-analyzer", "faker", + "vowpal-wabbit-next", + "sentence-transformers", ] [tool.ruff] diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py similarity index 96% rename from libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py rename to libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py index 7eb7ca2aea..6bb0437678 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py @@ -3,8 +3,8 @@ from typing import Any, Dict import pytest from test_utils import MockEncoder, MockEncoderReturnsList -import langchain.chains.rl_chain.base as rl_chain -import langchain.chains.rl_chain.pick_best_chain as pick_best_chain +import langchain_experimental.rl_chain.base as rl_chain +import langchain_experimental.rl_chain.pick_best_chain as pick_best_chain from langchain.chat_models import FakeListChatModel from langchain.prompts.prompt import PromptTemplate @@ -332,7 +332,7 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_default_no_scorer_specified() -> None: _, PROMPT = setup() - chain_llm = FakeListChatModel(responses=[100]) + chain_llm = FakeListChatModel(responses=["hey", "100"]) chain = pick_best_chain.PickBest.from_llm( llm=chain_llm, prompt=PROMPT, @@ -345,7 +345,7 @@ def test_default_no_scorer_specified() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "100" + assert response["response"] == "hey" selection_metadata = response["selection_metadata"] assert selection_metadata.selected.score == 100.0 @@ -374,7 +374,7 @@ def test_explicitly_no_scorer() -> None: @pytest.mark.requires("vowpal_wabbit_next", 
"sentence_transformers") def test_auto_scorer_with_user_defined_llm() -> None: llm, PROMPT = setup() - scorer_llm = FakeListChatModel(responses=[300]) + scorer_llm = FakeListChatModel(responses=["300"]) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, @@ -418,8 +418,9 @@ def test_calling_chain_w_reserved_inputs_throws() -> None: @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") def test_activate_and_deactivate_scorer() -> None: - llm, PROMPT = setup() - scorer_llm = FakeListChatModel(responses=[300]) + _, PROMPT = setup() + llm = FakeListChatModel(responses=["hey1", "hey2", "hey3"]) + scorer_llm = FakeListChatModel(responses=["300", "400"]) chain = pick_best_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, @@ -433,7 +434,7 @@ def test_activate_and_deactivate_scorer() -> None: action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" + assert response["response"] == "hey1" selection_metadata = response["selection_metadata"] assert selection_metadata.selected.score == 300.0 @@ -442,7 +443,7 @@ def test_activate_and_deactivate_scorer() -> None: User=pick_best_chain.base.BasedOn("Context"), action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) - assert response["response"] == "hey" + assert response["response"] == "hey2" selection_metadata = response["selection_metadata"] assert selection_metadata.selected.score is None @@ -451,6 +452,6 @@ def test_activate_and_deactivate_scorer() -> None: User=pick_best_chain.base.BasedOn("Context"), action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) - assert response["response"] == "hey" + assert response["response"] == "hey3" selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 300.0 + assert selection_metadata.selected.score == 400.0 diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py similarity index 99% rename from libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py rename to libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py index 1fdbdff644..fafa77e9f4 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_pick_best_text_embedder.py +++ b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py @@ -1,8 +1,8 @@ import pytest from test_utils import MockEncoder -import langchain.chains.rl_chain.base as rl_chain -import langchain.chains.rl_chain.pick_best_chain as pick_best_chain +import langchain_experimental.rl_chain.base as rl_chain +import langchain_experimental.rl_chain.pick_best_chain as pick_best_chain encoded_keyword = "[encoded]" diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py similarity index 99% rename from libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py rename to libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py index 1928eb26c6..7e8b23857f 100644 --- a/libs/langchain/tests/unit_tests/chains/rl_chain/test_rl_chain_base_embedder.py +++ b/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py @@ -3,7 +3,7 @@ from typing import List, Union import pytest from test_utils import MockEncoder -import langchain.chains.rl_chain.base as base +import 
langchain_experimental.rl_chain.base as base encoded_keyword = "[encoded]" diff --git a/libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py b/libs/experimental/tests/unit_tests/rl_chain/test_utils.py similarity index 100% rename from libs/langchain/tests/unit_tests/chains/rl_chain/test_utils.py rename to libs/experimental/tests/unit_tests/rl_chain/test_utils.py diff --git a/libs/langchain/poetry.lock b/libs/langchain/poetry.lock index b3ad397a7d..17756f0f68 100644 --- a/libs/langchain/poetry.lock +++ b/libs/langchain/poetry.lock @@ -9800,33 +9800,6 @@ urllib3 = {version = "<2", markers = "python_version < \"3.10\""} wrapt = "*" yarl = "*" -[[package]] -name = "vowpal-wabbit-next" -version = "0.6.0" -description = "Experimental python bindings for VowpalWabbit" -optional = true -python-versions = ">=3.7" -files = [ - {file = "vowpal-wabbit-next-0.6.0.tar.gz", hash = "sha256:f0381614d99fac6a0f52e995ee0bfc7b681054f397bea7ff08b8a523d5315a54"}, - {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:cfbb831cfe9eb81185aff7cdca437ae17c6d9aca8d74e26c326e3ef4ee8e81e7"}, - {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d31829778f9c600f5c121f614516ca1bc9ede5d1bc77b1eb3b59b32d9138db9"}, - {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:714347606ab302a2f72870b6ae6dce58de4bec1b489f4bd65d80a8e326e1db8a"}, - {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-macosx_10_13_universal2.whl", hash = "sha256:3a8482d5c0b9357fdb36b62d659e6b74e93aeab165b910292572a98e91d7a014"}, - {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e4349099b938102f51fb6fedf035bc1deacb2971cd2a48641ca7d45186efda0"}, - {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:c8f58cdc49f270b1bed6f0fdd7520c8ba1b328de5cd8a2760c0ec70a630de92e"}, - {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b7052ce7212fd1cae8ffd966e240c814f3c1df08fd612437d48f0f23e7694c"}, - {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d24d9c380d0e9b41151337c7f9e2a33ec5bfd738fdee9f65c1a40e486234aca3"}, - {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-macosx_10_13_universal2.whl", hash = "sha256:0d77a8c55249ec9a7f404939ecc6948db0527e522e8a7ae149ec7cd29b3ade04"}, - {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa2f52f1267fbc26c7757335f9c76a0f00b112971e04c85b8a9bc9e82300597"}, - {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d04f91200ecae73196d9f5601853d63afce8c1c8a0d310a608e8ddfa3b190cb"}, - {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-macosx_10_13_universal2.whl", hash = "sha256:2df4a652729c0db34afd8fb4fc49b0090d6f061e2d49899e5f092fd4c3d23253"}, - {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c289a260ab759f04903b441701cff66ea74d6c061d966caaba0c65ac12d05528"}, - {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d022cab07274f227df159a81bccf034def7dd54ad70392ee98743ffa4953072"}, -] - -[package.dependencies] -numpy = "*" - [[package]] name = "watchdog" version = "3.0.0" @@ -10509,4 +10482,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "3707ffe51bf8aca6cca6512f9d3c358facbbc36a17a00ee9c7d513c557eddddb" +content-hash = 
"b42a2fe9986973ebfe9804276429dff258c1124ef2df5985fa56ecd0cde7e7e1" diff --git a/libs/langchain/pyproject.toml b/libs/langchain/pyproject.toml index 7b511eb991..e9df12a991 100644 --- a/libs/langchain/pyproject.toml +++ b/libs/langchain/pyproject.toml @@ -129,7 +129,6 @@ markdownify = {version = "^0.11.6", optional = true} assemblyai = {version = "^0.17.0", optional = true} dashvector = {version = "^1.0.1", optional = true} sqlite-vss = {version = "^0.1.2", optional = true} -vowpal-wabbit-next = {version = "0.6.0", optional = true} [tool.poetry.group.test.dependencies] @@ -343,11 +342,9 @@ extended_testing = [ "xmltodict", "faiss-cpu", "openapi-schema-pydantic", - "sentence-transformers", "markdownify", "dashvector", "sqlite-vss", - "vowpal-wabbit-next", ] [tool.ruff] From 2dba4046fadbaa289adfaf80d4e6e895e31a3799 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 12:20:19 -0400 Subject: [PATCH 55/65] update experimental poetry lock --- libs/experimental/poetry.lock | 777 +++++++++++++++++++++++++++++++++- 1 file changed, 775 insertions(+), 2 deletions(-) diff --git a/libs/experimental/poetry.lock b/libs/experimental/poetry.lock index 9e8cf9f1af..a336617ff5 100644 --- a/libs/experimental/poetry.lock +++ b/libs/experimental/poetry.lock @@ -937,6 +937,41 @@ files = [ {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, ] +[[package]] +name = "fsspec" +version = "2023.9.0" +description = "File-system specification" +optional = true +python-versions = ">=3.8" +files = [ + {file = "fsspec-2023.9.0-py3-none-any.whl", hash = "sha256:d55b9ab2a4c1f2b759888ae9f93e40c2aa72c0808132e87e282b549f9e6c4254"}, + {file = "fsspec-2023.9.0.tar.gz", hash = "sha256:4dbf0fefee035b7c6d3bbbe6bc99b2f201f40d4dca95b67c2b719be77bcd917f"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + [[package]] name = "greenlet" version = "2.0.2" @@ -1010,6 +1045,39 @@ files = [ docs = ["Sphinx", "docutils (<0.18)"] test = ["objgraph", "psutil"] +[[package]] +name = "huggingface-hub" +version = "0.17.1" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.17.1-py3-none-any.whl", hash = "sha256:7a9dc262a2e0ecf8c1749c8b9a7510a7a22981849f561af4345942d421822451"}, + {file = "huggingface_hub-0.17.1.tar.gz", hash = "sha256:dd828d2a24ee6af86392042cc1052c482c053eb574864669f0cae4d29620e62c"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", 
"pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)", "watchdog"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +inference = ["aiohttp", "pydantic (<2.0)"] +quality = ["black (==23.7)", "mypy (==1.5.1)", "ruff (>=0.0.241)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["torch"] +typing = ["pydantic (<2.0)", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] + [[package]] name = "idna" version = "3.4" @@ -1223,6 +1291,17 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "joblib" +version = "1.3.2" +description = "Lightweight pipelining with Python functions" +optional = true +python-versions = ">=3.7" +files = [ + {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"}, + {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"}, +] + [[package]] name = "json5" version = "0.9.14" @@ -1743,6 +1822,23 @@ files = [ {file = "mistune-3.0.1.tar.gz", hash = "sha256:e912116c13aa0944f9dc530db38eb88f6a77087ab128f49f84a48f4c05ea163c"}, ] +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = true +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + [[package]] name = "multidict" version = "6.0.4" @@ -2016,6 +2112,49 @@ files = [ {file = "nest_asyncio-1.5.7.tar.gz", hash = "sha256:6a80f7b98f24d9083ed24608977c09dd608d83f91cccc24c9d2cba6d10e01c10"}, ] +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +optional = true +python-versions = ">=3.8" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = 
"networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "nltk" +version = "3.8.1" +description = "Natural Language Toolkit" +optional = true +python-versions = ">=3.7" +files = [ + {file = "nltk-3.8.1-py3-none-any.whl", hash = "sha256:fd5c9109f976fa86bcadba8f91e47f5e9293bd034474752e92a520f81c93dda5"}, + {file = "nltk-3.8.1.zip", hash = "sha256:1834da3d0682cba4f2cede2f9aad6b0fafb6461ba451db0efb6f9c39798d64d3"}, +] + +[package.dependencies] +click = "*" +joblib = "*" +regex = ">=2021.8.3" +tqdm = "*" + +[package.extras] +all = ["matplotlib", "numpy", "pyparsing", "python-crfsuite", "requests", "scikit-learn", "scipy", "twython"] +corenlp = ["requests"] +machine-learning = ["numpy", "python-crfsuite", "scikit-learn", "scipy"] +plot = ["matplotlib"] +tgrep = ["pyparsing"] +twitter = ["twython"] + [[package]] name = "notebook" version = "7.0.2" @@ -2172,6 +2311,73 @@ files = [ {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, ] +[[package]] +name = "pandas" +version = "2.0.3" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, + {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, + {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, + {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, + {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, + {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, + {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, + {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, + {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, + {file = 
"pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, + {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, + {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, + {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, + {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, + {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, + {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, + {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, + {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, + {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", 
"lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + [[package]] name = "pandocfilters" version = "1.5.0" @@ -2267,6 +2473,75 @@ files = [ {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, ] +[[package]] +name = "pillow" +version = "10.0.0" +description = "Python Imaging Library (Fork)" +optional = true +python-versions = ">=3.8" +files = [ + {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, + {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, + {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, + {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, + {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, + {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, + {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, + {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, + {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, + {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, + {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, + {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, + {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, + {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, + {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, + {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, + {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, + {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, + {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, + {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, + {file = 
"Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, + {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, + {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, + {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, + {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, + {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, + {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + [[package]] name = "pkgutil-resolve-name" version = "1.3.10" @@ -3176,6 +3451,165 @@ files = [ {file = "ruff-0.0.249.tar.gz", hash = "sha256:b590689f08ecef971c45555cbda6854cdf48f3828fc326802828e851b1a14b3d"}, ] +[[package]] +name = "safetensors" +version = "0.3.3" +description = "Fast and Safe Tensor serialization" +optional = true +python-versions = "*" +files = [ + {file = "safetensors-0.3.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:92e4d0c8b2836120fddd134474c5bda8963f322333941f8b9f643e5b24f041eb"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3dcadb6153c42addc9c625a622ebde9293fabe1973f9ef31ba10fb42c16e8536"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:08f26b61e1b0a14dc959aa9d568776bd038805f611caef1de04a80c468d4a7a4"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:17f41344d9a075f2f21b289a49a62e98baff54b5754240ba896063bce31626bf"}, + {file = 
"safetensors-0.3.3-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:f1045f798e1a16a6ced98d6a42ec72936d367a2eec81dc5fade6ed54638cd7d2"}, + {file = "safetensors-0.3.3-cp310-cp310-macosx_13_0_x86_64.whl", hash = "sha256:eaf0e4bc91da13f21ac846a39429eb3f3b7ed06295a32321fa3eb1a59b5c70f3"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25149180d4dc8ca48bac2ac3852a9424b466e36336a39659b35b21b2116f96fc"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e943bf78c39de8865398a71818315e7d5d1af93c7b30d4da3fc852e62ad9bc"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cccfcac04a010354e87c7a2fe16a1ff004fc4f6e7ef8efc966ed30122ce00bc7"}, + {file = "safetensors-0.3.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07121f427e646a50d18c1be0fa1a2cbf6398624c31149cd7e6b35486d72189e"}, + {file = "safetensors-0.3.3-cp310-cp310-win32.whl", hash = "sha256:a85e29cbfddfea86453cc0f4889b4bcc6b9c155be9a60e27be479a34e199e7ef"}, + {file = "safetensors-0.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:e13adad4a3e591378f71068d14e92343e626cf698ff805f61cdb946e684a218e"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:cbc3312f134baf07334dd517341a4b470b2931f090bd9284888acb7dfaf4606f"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d15030af39d5d30c22bcbc6d180c65405b7ea4c05b7bab14a570eac7d7d43722"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:f84a74cbe9859b28e3d6d7715ac1dd3097bebf8d772694098f6d42435245860c"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:10d637423d98ab2e6a4ad96abf4534eb26fcaf8ca3115623e64c00759374e90d"}, + {file = "safetensors-0.3.3-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:3b46f5de8b44084aff2e480874c550c399c730c84b2e8ad1bddb062c94aa14e9"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e76da691a82dfaf752854fa6d17c8eba0c8466370c5ad8cf1bfdf832d3c7ee17"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4e342fd54e66aa9512dd13e410f791e47aa4feeb5f4c9a20882c72f3d272f29"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:178fd30b5dc73bce14a39187d948cedd0e5698e2f055b7ea16b5a96c9b17438e"}, + {file = "safetensors-0.3.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e8fdf7407dba44587ed5e79d5de3533d242648e1f2041760b21474bd5ea5c8c"}, + {file = "safetensors-0.3.3-cp311-cp311-win32.whl", hash = "sha256:7d3b744cee8d7a46ffa68db1a2ff1a1a432488e3f7a5a97856fe69e22139d50c"}, + {file = "safetensors-0.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f579877d30feec9b6ba409d05fa174633a4fc095675a4a82971d831a8bb60b97"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:2fff5b19a1b462c17322998b2f4b8bce43c16fe208968174d2f3a1446284ceed"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:41adb1d39e8aad04b16879e3e0cbcb849315999fad73bc992091a01e379cb058"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_12_0_x86_64.whl", hash = "sha256:0f2b404250b3b877b11d34afcc30d80e7035714a1116a3df56acaca6b6c00096"}, + {file = "safetensors-0.3.3-cp37-cp37m-macosx_13_0_x86_64.whl", hash = 
"sha256:b43956ef20e9f4f2e648818a9e7b3499edd6b753a0f5526d4f6a6826fbee8446"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d61a99b34169981f088ccfbb2c91170843efc869a0a0532f422db7211bf4f474"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0008aab36cd20e9a051a68563c6f80d40f238c2611811d7faa5a18bf3fd3984"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93d54166072b143084fdcd214a080a088050c1bb1651016b55942701b31334e4"}, + {file = "safetensors-0.3.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c32ee08f61cea56a5d62bbf94af95df6040c8ab574afffaeb7b44ae5da1e9e3"}, + {file = "safetensors-0.3.3-cp37-cp37m-win32.whl", hash = "sha256:351600f367badd59f7bfe86d317bb768dd8c59c1561c6fac43cafbd9c1af7827"}, + {file = "safetensors-0.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:034717e297849dae1af0a7027a14b8647bd2e272c24106dced64d83e10d468d1"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8530399666748634bc0b301a6a5523756931b0c2680d188e743d16304afe917a"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:9d741c1f1621e489ba10aa3d135b54202684f6e205df52e219d5eecd673a80c9"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:0c345fd85b4d2093a5109596ff4cd9dfc2e84992e881b4857fbc4a93a3b89ddb"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:69ccee8d05f55cdf76f7e6c87d2bdfb648c16778ef8acfd2ecc495e273e9233e"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_13_0_arm64.whl", hash = "sha256:c08a9a4b7a4ca389232fa8d097aebc20bbd4f61e477abc7065b5c18b8202dede"}, + {file = "safetensors-0.3.3-cp38-cp38-macosx_13_0_x86_64.whl", hash = "sha256:a002868d2e3f49bbe81bee2655a411c24fa1f8e68b703dec6629cb989d6ae42e"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3bd2704cb41faa44d3ec23e8b97330346da0395aec87f8eaf9c9e2c086cdbf13"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b2951bf3f0ad63df5e6a95263652bd6c194a6eb36fd4f2d29421cd63424c883"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07114cec116253ca2e7230fdea30acf76828f21614afd596d7b5438a2f719bd8"}, + {file = "safetensors-0.3.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ab43aeeb9eadbb6b460df3568a662e6f1911ecc39387f8752afcb6a7d96c087"}, + {file = "safetensors-0.3.3-cp38-cp38-win32.whl", hash = "sha256:f2f59fce31dd3429daca7269a6b06f65e6547a0c248f5116976c3f1e9b73f251"}, + {file = "safetensors-0.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:c31ca0d8610f57799925bf08616856b39518ab772c65093ef1516762e796fde4"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:59a596b3225c96d59af412385981f17dd95314e3fffdf359c7e3f5bb97730a19"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:82a16e92210a6221edd75ab17acdd468dd958ef5023d9c6c1289606cc30d1479"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:98a929e763a581f516373ef31983ed1257d2d0da912a8e05d5cd12e9e441c93a"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:12b83f1986cd16ea0454c636c37b11e819d60dd952c26978310a0835133480b7"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_13_0_arm64.whl", hash = 
"sha256:f439175c827c2f1bbd54df42789c5204a10983a30bc4242bc7deaf854a24f3f0"}, + {file = "safetensors-0.3.3-cp39-cp39-macosx_13_0_x86_64.whl", hash = "sha256:0085be33b8cbcb13079b3a8e131656e05b0bc5e6970530d4c24150f7afd76d70"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e3ec70c87b1e910769034206ad5efc051069b105aac1687f6edcd02526767f4"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f490132383e5e490e710608f4acffcb98ed37f91b885c7217d3f9f10aaff9048"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:79d1b6c7ed5596baf79c80fbce5198c3cdcc521ae6a157699f427aba1a90082d"}, + {file = "safetensors-0.3.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad3cc8006e7a86ee7c88bd2813ec59cd7cc75b03e6fa4af89b9c7b235b438d68"}, + {file = "safetensors-0.3.3-cp39-cp39-win32.whl", hash = "sha256:ab29f54c6b8c301ca05fa014728996bd83aac6e21528f893aaf8945c71f42b6d"}, + {file = "safetensors-0.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:0fa82004eae1a71e2aa29843ef99de9350e459a0fc2f65fc6ee0da9690933d2d"}, + {file = "safetensors-0.3.3.tar.gz", hash = "sha256:edb7072d788c4f929d0f5735d3a2fb51e5a27f833587828583b7f5747af1a2b8"}, +] + +[package.extras] +all = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (==2.11.0)", "torch (>=1.10)"] +dev = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (==2.11.0)", "torch (>=1.10)"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)"] +pinned-tf = ["tensorflow (==2.11.0)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["numpy (>=1.21.6)", "tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "numpy (>=1.21.6)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)"] +torch = ["numpy (>=1.21.6)", "torch (>=1.10)"] + +[[package]] +name = "scikit-learn" +version = "1.3.0" +description = "A set of python modules for machine learning and data mining" +optional = true +python-versions = ">=3.8" +files = [ + {file = "scikit-learn-1.3.0.tar.gz", hash = "sha256:8be549886f5eda46436b6e555b0e4873b4f10aa21c07df45c4bc1735afbccd7a"}, + {file = "scikit_learn-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:981287869e576d42c682cf7ca96af0c6ac544ed9316328fd0d9292795c742cf5"}, + {file = "scikit_learn-1.3.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:436aaaae2c916ad16631142488e4c82f4296af2404f480e031d866863425d2a2"}, + {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7e28d8fa47a0b30ae1bd7a079519dd852764e31708a7804da6cb6f8b36e3630"}, + {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ae80c08834a473d08a204d966982a62e11c976228d306a2648c575e3ead12111"}, + {file = "scikit_learn-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:552fd1b6ee22900cf1780d7386a554bb96949e9a359999177cf30211e6b20df6"}, + {file = "scikit_learn-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79970a6d759eb00a62266a31e2637d07d2d28446fca8079cf9afa7c07b0427f8"}, + {file = "scikit_learn-1.3.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:850a00b559e636b23901aabbe79b73dc604b4e4248ba9e2d6e72f95063765603"}, + {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee04835fb016e8062ee9fe9074aef9b82e430504e420bff51e3e5fffe72750ca"}, + {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d953531f5d9f00c90c34fa3b7d7cfb43ecff4c605dac9e4255a20b114a27369"}, + {file = "scikit_learn-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:151ac2bf65ccf363664a689b8beafc9e6aae36263db114b4ca06fbbbf827444a"}, + {file = "scikit_learn-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a885a9edc9c0a341cab27ec4f8a6c58b35f3d449c9d2503a6fd23e06bbd4f6a"}, + {file = "scikit_learn-1.3.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9877af9c6d1b15486e18a94101b742e9d0d2f343d35a634e337411ddb57783f3"}, + {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c470f53cea065ff3d588050955c492793bb50c19a92923490d18fcb637f6383a"}, + {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd6e2d7389542eae01077a1ee0318c4fec20c66c957f45c7aac0c6eb0fe3c612"}, + {file = "scikit_learn-1.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:3a11936adbc379a6061ea32fa03338d4ca7248d86dd507c81e13af428a5bc1db"}, + {file = "scikit_learn-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:998d38fcec96584deee1e79cd127469b3ad6fefd1ea6c2dfc54e8db367eb396b"}, + {file = "scikit_learn-1.3.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ded35e810438a527e17623ac6deae3b360134345b7c598175ab7741720d7ffa7"}, + {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e8102d5036e28d08ab47166b48c8d5e5810704daecf3a476a4282d562be9a28"}, + {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7617164951c422747e7c32be4afa15d75ad8044f42e7d70d3e2e0429a50e6718"}, + {file = "scikit_learn-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d54fb9e6038284548072df22fd34777e434153f7ffac72c8596f2d6987110dd"}, +] + +[package.dependencies] +joblib = ">=1.1.1" +numpy = ">=1.17.3" +scipy = ">=1.5.0" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] + +[[package]] +name = 
"scipy" +version = "1.9.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[package.dependencies] +numpy = ">=1.18.5,<1.26.0" + +[package.extras] +dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] +test = 
["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "send2trash" version = "1.8.2" @@ -3192,6 +3626,82 @@ nativelib = ["pyobjc-framework-Cocoa", "pywin32"] objc = ["pyobjc-framework-Cocoa"] win32 = ["pywin32"] +[[package]] +name = "sentence-transformers" +version = "2.2.2" +description = "Multilingual text embeddings" +optional = true +python-versions = ">=3.6.0" +files = [ + {file = "sentence-transformers-2.2.2.tar.gz", hash = "sha256:dbc60163b27de21076c9a30d24b5b7b6fa05141d68cf2553fa9a77bf79a29136"}, +] + +[package.dependencies] +huggingface-hub = ">=0.4.0" +nltk = "*" +numpy = "*" +scikit-learn = "*" +scipy = "*" +sentencepiece = "*" +torch = ">=1.6.0" +torchvision = "*" +tqdm = "*" +transformers = ">=4.6.0,<5.0.0" + +[[package]] +name = "sentencepiece" +version = "0.1.99" +description = "SentencePiece python wrapper" +optional = true +python-versions = "*" +files = [ + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73"}, + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7"}, + {file = "sentencepiece-0.1.99-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1"}, + {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588"}, + {file = "sentencepiece-0.1.99-cp310-cp310-win32.whl", hash = "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc"}, + {file = "sentencepiece-0.1.99-cp310-cp310-win_amd64.whl", hash = "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7"}, + {file = "sentencepiece-0.1.99-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec"}, + {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b"}, + {file = "sentencepiece-0.1.99-cp311-cp311-win32.whl", hash = "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0"}, + {file = "sentencepiece-0.1.99-cp311-cp311-win_amd64.whl", hash = "sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7"}, + {file = 
"sentencepiece-0.1.99-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-win32.whl", hash = "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba"}, + {file = "sentencepiece-0.1.99-cp36-cp36m-win_amd64.whl", hash = "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-win32.whl", hash = "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c"}, + {file = "sentencepiece-0.1.99-cp37-cp37m-win_amd64.whl", hash = "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561"}, + {file = "sentencepiece-0.1.99-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03"}, + {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc"}, + {file = "sentencepiece-0.1.99-cp38-cp38-win32.whl", hash = "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be"}, + {file = "sentencepiece-0.1.99-cp38-cp38-win_amd64.whl", hash = "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c"}, + {file = "sentencepiece-0.1.99-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9"}, + {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280"}, + {file = "sentencepiece-0.1.99-cp39-cp39-win32.whl", hash = "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8"}, + {file = "sentencepiece-0.1.99-cp39-cp39-win_amd64.whl", hash = "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd"}, + {file = "sentencepiece-0.1.99.tar.gz", hash = "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, +] + [[package]] name = "setuptools" version = "67.8.0" @@ -3508,6 +4018,20 @@ pure-eval = "*" [package.extras] tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = true +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + [[package]] name = "tenacity" version = "8.2.3" @@ -3620,6 +4144,17 @@ mxnet = ["mxnet (>=1.5.1,<1.6.0)"] tensorflow = ["tensorflow (>=2.0.0,<2.6.0)"] torch = ["torch (>=1.6.0)"] +[[package]] +name = "threadpoolctl" +version = "3.2.0" +description = "threadpoolctl" +optional = true +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = "sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"}, + {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"}, +] + [[package]] name = "tinycss2" version = "1.2.1" @@ -3655,6 +4190,60 @@ idna = "*" requests = ">=2.1.0" requests-file = ">=1.4" +[[package]] +name = "tokenizers" +version = "0.13.3" +description = "Fast and Customizable Tokenizers" +optional = true +python-versions = "*" +files = [ + {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, + {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, + {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = 
"sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, + {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, + {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, + {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, + {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, + {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, + {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, + {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, + {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, + {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, + {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, + {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, + {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, + {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, + {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, + {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, + {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, + {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, +] + +[package.extras] +dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] + [[package]] name = "tomli" version = "2.0.1" @@ -3666,6 +4255,83 @@ files = [ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +[[package]] +name = "torch" +version = "2.0.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"}, + {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"}, + {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"}, + {file = "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"}, + {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"}, + {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"}, + {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"}, + {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"}, + {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = 
"sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"}, + {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"}, + {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"}, + {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"}, + {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"}, + {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"}, + {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"}, + {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"}, + {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"}, + {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"}, + {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"}, + {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"}, +] + +[package.dependencies] +filelock = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[[package]] +name = "torchvision" +version = "0.15.2" +description = "image and video datasets and models for torch deep learning" +optional = true +python-versions = ">=3.8" +files = [ + {file = "torchvision-0.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7754088774e810c5672b142a45dcf20b1bd986a5a7da90f8660c43dc43fb850c"}, + {file = "torchvision-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37eb138e13f6212537a3009ac218695483a635c404b6cc1d8e0d0d978026a86d"}, + {file = "torchvision-0.15.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:54143f7cc0797d199b98a53b7d21c3f97615762d4dd17ad45a41c7e80d880e73"}, + {file = "torchvision-0.15.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:1eefebf5fbd01a95fe8f003d623d941601c94b5cec547b420da89cb369d9cf96"}, + {file = "torchvision-0.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:96fae30c5ca8423f4b9790df0f0d929748e32718d88709b7b567d2f630c042e3"}, + {file = "torchvision-0.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5f35f6bd5bcc4568e6522e4137fa60fcc72f4fa3e615321c26cd87e855acd398"}, + {file = "torchvision-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:757505a0ab2be7096cb9d2bf4723202c971cceddb72c7952a7e877f773de0f8a"}, + {file = "torchvision-0.15.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:012ad25cfd9019ff9b0714a168727e3845029be1af82296ff1e1482931fa4b80"}, + {file = "torchvision-0.15.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b02a7ffeaa61448737f39a4210b8ee60234bda0515a0c0d8562f884454105b0f"}, + {file = "torchvision-0.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:10be76ceded48329d0a0355ac33da131ee3993ff6c125e4a02ab34b5baa2472c"}, + {file = "torchvision-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:8f12415b686dba884fb086f53ac803f692be5a5cdd8a758f50812b30fffea2e4"}, + {file = "torchvision-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31211c01f8b8ec33b8a638327b5463212e79a03e43c895f88049f97af1bd12fd"}, + {file = "torchvision-0.15.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c55f9889e436f14b4f84a9c00ebad0d31f5b4626f10cf8018e6c676f92a6d199"}, + {file = "torchvision-0.15.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:9a192f2aa979438f23c20e883980b23d13268ab9f819498774a6d2eb021802c2"}, + {file = "torchvision-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:c07071bc8d02aa8fcdfe139ab6a1ef57d3b64c9e30e84d12d45c9f4d89fb6536"}, + {file = "torchvision-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4790260fcf478a41c7ecc60a6d5200a88159fdd8d756e9f29f0f8c59c4a67a68"}, + {file = "torchvision-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:987ab62225b4151a11e53fd06150c5258ced24ac9d7c547e0e4ab6fbca92a5ce"}, + {file = "torchvision-0.15.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:63df26673e66cba3f17e07c327a8cafa3cce98265dbc3da329f1951d45966838"}, + {file = "torchvision-0.15.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b85f98d4cc2f72452f6792ab4463a3541bc5678a8cdd3da0e139ba2fe8b56d42"}, + {file = "torchvision-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:07c462524cc1bba5190c16a9d47eac1fca024d60595a310f23c00b4ffff18b30"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" +requests = "*" +torch = "2.0.1" + +[package.extras] +scipy = ["scipy"] + [[package]] name = "tornado" version = "6.3.3" @@ -3721,6 +4387,75 @@ files = [ docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] +[[package]] +name = "transformers" +version = "4.33.1" +description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" +optional = true +python-versions = ">=3.8.0" +files = [ + {file = "transformers-4.33.1-py3-none-any.whl", hash = "sha256:0630c2d26448d7c6cb78435e6c43910c89e99387badea6be1f565ffa3f093f1d"}, + {file = "transformers-4.33.1.tar.gz", hash = "sha256:744265e9f0724d22c229938f28376af54abce730ef647f35bd1685abf49912a4"}, +] + +[package.dependencies] +filelock = "*" +huggingface-hub = ">=0.15.1,<1.0" +numpy = ">=1.17" +packaging = ">=20.0" +pyyaml = ">=5.1" +regex = "!=2019.12.17" +requests = "*" +safetensors = ">=0.3.1" +tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14" +tqdm = ">=4.27" + +[package.extras] +accelerate = ["accelerate (>=0.20.3)"] +agents = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.10,!=1.12.0)"] +all = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +codecarbon = ["codecarbon (==1.2.0)"] +deepspeed = ["accelerate (>=0.20.3)", "deepspeed (>=0.9.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", 
"black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", 
"torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] +docs-specific = ["hf-doc-builder"] +fairscale = ["fairscale (>0.3)"] +flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +ftfy = ["ftfy"] +integrations = ["optuna", "ray[tune]", "sigopt"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +modelcreation = ["cookiecutter (==1.7.3)"] +natten = ["natten (>=0.14.6)"] +onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] +onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] +optuna = ["optuna"] +quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (>=0.0.241,<=0.0.259)", "urllib3 (<2.0.0)"] +ray = ["ray[tune]"] +retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] +sagemaker = ["sagemaker (>=2.31.0)"] +sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] +serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] +sigopt = ["sigopt"] +sklearn = ["scikit-learn"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"] +tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] +tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] +timm = ["timm"] +tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"] +torch = ["accelerate (>=0.20.3)", "torch (>=1.10,!=1.12.0)"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch-vision = ["Pillow (<10.0.0)", "torchvision"] +torchhub = ["filelock", "huggingface-hub (>=0.15.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.10,!=1.12.0)", "tqdm (>=4.27)"] +video = ["av (==9.2.0)", "decord (==0.6.0)"] +vision = ["Pillow (<10.0.0)"] + [[package]] name = "typer" version = "0.9.0" @@ -3804,6 +4539,17 @@ files = [ mypy-extensions = ">=0.3.0" typing-extensions = ">=3.7.4" +[[package]] 
+name = "tzdata" +version = "2023.3" +description = "Provider of IANA time zone data" +optional = true +python-versions = ">=2" +files = [ + {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, + {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, +] + [[package]] name = "uri-template" version = "1.3.0" @@ -3835,6 +4581,33 @@ secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17. socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "vowpal-wabbit-next" +version = "0.6.0" +description = "Experimental python bindings for VowpalWabbit" +optional = true +python-versions = ">=3.7" +files = [ + {file = "vowpal-wabbit-next-0.6.0.tar.gz", hash = "sha256:f0381614d99fac6a0f52e995ee0bfc7b681054f397bea7ff08b8a523d5315a54"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:cfbb831cfe9eb81185aff7cdca437ae17c6d9aca8d74e26c326e3ef4ee8e81e7"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d31829778f9c600f5c121f614516ca1bc9ede5d1bc77b1eb3b59b32d9138db9"}, + {file = "vowpal_wabbit_next-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:714347606ab302a2f72870b6ae6dce58de4bec1b489f4bd65d80a8e326e1db8a"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-macosx_10_13_universal2.whl", hash = "sha256:3a8482d5c0b9357fdb36b62d659e6b74e93aeab165b910292572a98e91d7a014"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e4349099b938102f51fb6fedf035bc1deacb2971cd2a48641ca7d45186efda0"}, + {file = "vowpal_wabbit_next-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:c8f58cdc49f270b1bed6f0fdd7520c8ba1b328de5cd8a2760c0ec70a630de92e"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8b7052ce7212fd1cae8ffd966e240c814f3c1df08fd612437d48f0f23e7694c"}, + {file = "vowpal_wabbit_next-0.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d24d9c380d0e9b41151337c7f9e2a33ec5bfd738fdee9f65c1a40e486234aca3"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-macosx_10_13_universal2.whl", hash = "sha256:0d77a8c55249ec9a7f404939ecc6948db0527e522e8a7ae149ec7cd29b3ade04"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa2f52f1267fbc26c7757335f9c76a0f00b112971e04c85b8a9bc9e82300597"}, + {file = "vowpal_wabbit_next-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d04f91200ecae73196d9f5601853d63afce8c1c8a0d310a608e8ddfa3b190cb"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-macosx_10_13_universal2.whl", hash = "sha256:2df4a652729c0db34afd8fb4fc49b0090d6f061e2d49899e5f092fd4c3d23253"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c289a260ab759f04903b441701cff66ea74d6c061d966caaba0c65ac12d05528"}, + {file = "vowpal_wabbit_next-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:8d022cab07274f227df159a81bccf034def7dd54ad70392ee98743ffa4953072"}, +] + +[package.dependencies] +numpy = "*" + [[package]] name = "wasabi" version = "1.1.2" @@ -4016,9 +4789,9 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", 
"pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer"] +extended-testing = ["faker", "presidio-analyzer", "presidio-anonymizer", "sentence-transformers", "vowpal-wabbit-next"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "443e88f690572715cf58671e4480a006574c7141a1258dff0a0818b954184901" +content-hash = "0e25f0b8760e893644f6d28e5f2bd6f66a010b3084e82d7b711c90ef34b3b9fa" From a2f29bf5955eabf0daf7f5262f7355db0928e3e5 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 12:45:39 -0400 Subject: [PATCH 56/65] ignore linting --- .../rl_chain/test_pick_best_chain_call.py | 90 +++++++++---------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py index 6bb0437678..add69a9c9e 100644 --- a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py +++ b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py @@ -90,11 +90,11 @@ def test_update_with_delayed_score_with_auto_validator_throws() -> None: User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(actions), ) - assert response["response"] == "hey" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 3.0 + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 3.0 # type: ignore with pytest.raises(RuntimeError): - chain.update_with_delayed_score(chain_response=response, score=100) + chain.update_with_delayed_score(chain_response=response, score=100) # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -115,13 +115,13 @@ def test_update_with_delayed_score_force() -> None: User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(actions), ) - assert response["response"] == "hey" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 3.0 + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 3.0 # type: ignore chain.update_with_delayed_score( - chain_response=response, score=100, force_score=True + chain_response=response, score=100, force_score=True # type: ignore ) - assert selection_metadata.selected.score == 100.0 + assert selection_metadata.selected.score == 100.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -140,11 +140,11 @@ def test_update_with_delayed_score() -> None: User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(actions), ) - assert response["response"] == "hey" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score is None - chain.update_with_delayed_score(chain_response=response, score=100) - assert selection_metadata.selected.score == 100.0 + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score is None # type: ignore + chain.update_with_delayed_score(chain_response=response, score=100) # type: ignore + assert selection_metadata.selected.score == 100.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", 
"sentence_transformers") @@ -174,9 +174,9 @@ def test_user_defined_scorer() -> None: User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(actions), ) - assert response["response"] == "hey" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 200.0 + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 200.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -208,8 +208,8 @@ def test_everything_embedded() -> None: User=rl_chain.EmbedAndKeep(rl_chain.BasedOn(ctx_str_1)), action=rl_chain.EmbedAndKeep(rl_chain.ToSelectFrom(actions)), ) - selection_metadata = response["selection_metadata"] - vw_str = feature_embedder.format(selection_metadata) + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -236,8 +236,8 @@ def test_default_auto_embedder_is_off() -> None: User=pick_best_chain.base.BasedOn(ctx_str_1), action=pick_best_chain.base.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] - vw_str = feature_embedder.format(selection_metadata) + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -264,8 +264,8 @@ def test_default_w_embeddings_off() -> None: User=rl_chain.BasedOn(ctx_str_1), action=rl_chain.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] - vw_str = feature_embedder.format(selection_metadata) + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -292,8 +292,8 @@ def test_default_w_embeddings_on() -> None: User=rl_chain.BasedOn(ctx_str_1), action=rl_chain.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] - vw_str = feature_embedder.format(selection_metadata) + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -324,8 +324,8 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: User2=rl_chain.BasedOn(ctx_str_2), action=rl_chain.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] - vw_str = feature_embedder.format(selection_metadata) + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -345,9 +345,9 @@ def test_default_no_scorer_specified() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 100.0 + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 100.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -366,9 +366,9 @@ def test_explicitly_no_scorer() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" - selection_metadata = 
response["selection_metadata"] - assert selection_metadata.selected.score is None + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score is None # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -388,9 +388,9 @@ def test_auto_scorer_with_user_defined_llm() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 300.0 + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 300.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -434,24 +434,24 @@ def test_activate_and_deactivate_scorer() -> None: action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey1" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 300.0 + assert response["response"] == "hey1" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 300.0 # type: ignore chain.deactivate_selection_scorer() response = chain.run( User=pick_best_chain.base.BasedOn("Context"), action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) - assert response["response"] == "hey2" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score is None + assert response["response"] == "hey2" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score is None # type: ignore chain.activate_selection_scorer() response = chain.run( User=pick_best_chain.base.BasedOn("Context"), action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) - assert response["response"] == "hey3" - selection_metadata = response["selection_metadata"] - assert selection_metadata.selected.score == 400.0 + assert response["response"] == "hey3" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 400.0 # type: ignore From 631289a38d06e3d769f7300686a5ca2b59b85433 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 12:46:24 -0400 Subject: [PATCH 57/65] move unit tests into integration tests --- .../chains}/rl_chain/test_pick_best_chain_call.py | 0 .../chains}/rl_chain/test_pick_best_text_embedder.py | 0 .../chains}/rl_chain/test_rl_chain_base_embedder.py | 0 .../chains}/rl_chain/test_utils.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename libs/experimental/tests/{unit_tests => integration_tests/chains}/rl_chain/test_pick_best_chain_call.py (100%) rename libs/experimental/tests/{unit_tests => integration_tests/chains}/rl_chain/test_pick_best_text_embedder.py (100%) rename libs/experimental/tests/{unit_tests => integration_tests/chains}/rl_chain/test_rl_chain_base_embedder.py (100%) rename libs/experimental/tests/{unit_tests => integration_tests/chains}/rl_chain/test_utils.py (100%) diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py similarity index 100% rename 
from libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py rename to libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py b/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_text_embedder.py similarity index 100% rename from libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py rename to libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_text_embedder.py diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py b/libs/experimental/tests/integration_tests/chains/rl_chain/test_rl_chain_base_embedder.py similarity index 100% rename from libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py rename to libs/experimental/tests/integration_tests/chains/rl_chain/test_rl_chain_base_embedder.py diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_utils.py b/libs/experimental/tests/integration_tests/chains/rl_chain/test_utils.py similarity index 100% rename from libs/experimental/tests/unit_tests/rl_chain/test_utils.py rename to libs/experimental/tests/integration_tests/chains/rl_chain/test_utils.py From 248db75cd60596efd932dd83e767e4b55529f778 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 13:01:18 -0400 Subject: [PATCH 58/65] fix linting errors --- .../langchain_experimental/rl_chain/base.py | 13 ++-- .../rl_chain/pick_best_chain.py | 3 +- .../rl_chain/test_pick_best_chain_call.py | 66 ++++++++++--------- 3 files changed, 43 insertions(+), 39 deletions(-) diff --git a/libs/experimental/langchain_experimental/rl_chain/base.py b/libs/experimental/langchain_experimental/rl_chain/base.py index 9b3d7e018a..facf977450 100644 --- a/libs/experimental/langchain_experimental/rl_chain/base.py +++ b/libs/experimental/langchain_experimental/rl_chain/base.py @@ -19,19 +19,20 @@ from typing import ( from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains.base import Chain from langchain.chains.llm import LLMChain -from langchain_experimental.rl_chain.metrics import ( - MetricsTrackerAverage, - MetricsTrackerRollingWindow, -) -from langchain_experimental.rl_chain.model_repository import ModelRepository -from langchain_experimental.rl_chain.vw_logger import VwLogger from langchain.prompts import ( BasePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) + from langchain_experimental.pydantic_v1 import BaseModel, Extra, root_validator +from langchain_experimental.rl_chain.metrics import ( + MetricsTrackerAverage, + MetricsTrackerRollingWindow, +) +from langchain_experimental.rl_chain.model_repository import ModelRepository +from langchain_experimental.rl_chain.vw_logger import VwLogger if TYPE_CHECKING: import vowpal_wabbit_next as vw diff --git a/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py index 090db9d863..c17a5f8bc2 100644 --- a/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py +++ b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py @@ -3,12 +3,13 @@ from __future__ import annotations import logging from typing import Any, Dict, List, Optional, Tuple, Type, Union -import langchain_experimental.rl_chain.base as base from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import 
CallbackManagerForChainRun from langchain.chains.llm import LLMChain from langchain.prompts import BasePromptTemplate +import langchain_experimental.rl_chain.base as base + logger = logging.getLogger(__name__) # sentinel object used to distinguish between diff --git a/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py index add69a9c9e..765e52e05e 100644 --- a/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py +++ b/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py @@ -1,12 +1,12 @@ from typing import Any, Dict import pytest +from langchain.chat_models import FakeListChatModel +from langchain.prompts.prompt import PromptTemplate from test_utils import MockEncoder, MockEncoderReturnsList import langchain_experimental.rl_chain.base as rl_chain import langchain_experimental.rl_chain.pick_best_chain as pick_best_chain -from langchain.chat_models import FakeListChatModel -from langchain.prompts.prompt import PromptTemplate encoded_keyword = "[encoded]" @@ -90,11 +90,13 @@ def test_update_with_delayed_score_with_auto_validator_throws() -> None: User=rl_chain.BasedOn("Context"), action=rl_chain.ToSelectFrom(actions), ) - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 3.0 # type: ignore + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 3.0 # type: ignore with pytest.raises(RuntimeError): - chain.update_with_delayed_score(chain_response=response, score=100) # type: ignore + chain.update_with_delayed_score( + chain_response=response, score=100 # type: ignore + ) @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -209,7 +211,7 @@ def test_everything_embedded() -> None: action=rl_chain.EmbedAndKeep(rl_chain.ToSelectFrom(actions)), ) selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -237,7 +239,7 @@ def test_default_auto_embedder_is_off() -> None: action=pick_best_chain.base.ToSelectFrom(actions), ) selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -264,8 +266,8 @@ def test_default_w_embeddings_off() -> None: User=rl_chain.BasedOn(ctx_str_1), action=rl_chain.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -292,8 +294,8 @@ def test_default_w_embeddings_on() -> None: User=rl_chain.BasedOn(ctx_str_1), action=rl_chain.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore 
assert vw_str == expected @@ -324,8 +326,8 @@ def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: User2=rl_chain.BasedOn(ctx_str_2), action=rl_chain.ToSelectFrom(actions), ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + vw_str = feature_embedder.format(selection_metadata) # type: ignore assert vw_str == expected @@ -345,9 +347,9 @@ def test_default_no_scorer_specified() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 100.0 # type: ignore + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 100.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -366,9 +368,9 @@ def test_explicitly_no_scorer() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score is None # type: ignore + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score is None # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -388,9 +390,9 @@ def test_auto_scorer_with_user_defined_llm() -> None: action=rl_chain.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 300.0 # type: ignore + assert response["response"] == "hey" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 300.0 # type: ignore @pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") @@ -434,24 +436,24 @@ def test_activate_and_deactivate_scorer() -> None: action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey1" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 300.0 # type: ignore + assert response["response"] == "hey1" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 300.0 # type: ignore chain.deactivate_selection_scorer() response = chain.run( User=pick_best_chain.base.BasedOn("Context"), action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) - assert response["response"] == "hey2" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score is None # type: ignore + assert response["response"] == "hey2" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score is None # type: ignore chain.activate_selection_scorer() response = chain.run( 
User=pick_best_chain.base.BasedOn("Context"), action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), ) - assert response["response"] == "hey3" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 400.0 # type: ignore + assert response["response"] == "hey3" # type: ignore + selection_metadata = response["selection_metadata"] # type: ignore + assert selection_metadata.selected.score == 400.0 # type: ignore From 7185fdc9906294e234f172f5b089546b00784059 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 13:26:41 -0400 Subject: [PATCH 59/65] check if libcublas is available before running extended tests --- libs/experimental/tests/unit_tests/__init__.py | 8 ++++++++ .../experimental/tests/unit_tests/test_data_anonymizer.py | 8 ++++++++ .../tests/unit_tests/test_reversible_data_anonymizer.py | 7 +++++++ 3 files changed, 23 insertions(+) diff --git a/libs/experimental/tests/unit_tests/__init__.py b/libs/experimental/tests/unit_tests/__init__.py index e69de29bb2..48e02109f0 100644 --- a/libs/experimental/tests/unit_tests/__init__.py +++ b/libs/experimental/tests/unit_tests/__init__.py @@ -0,0 +1,8 @@ +import ctypes + +def is_libcublas_available(): + try: + ctypes.CDLL("libcublas.so") + return True + except OSError: + return False \ No newline at end of file diff --git a/libs/experimental/tests/unit_tests/test_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_data_anonymizer.py index 138b60eca8..07f1519488 100644 --- a/libs/experimental/tests/unit_tests/test_data_anonymizer.py +++ b/libs/experimental/tests/unit_tests/test_data_anonymizer.py @@ -1,6 +1,7 @@ from typing import Iterator, List import pytest +from . import is_libcublas_available @pytest.fixture(scope="module", autouse=True) @@ -11,6 +12,13 @@ def check_spacy_model() -> Iterator[None]: pytest.skip(reason="Spacy model 'en_core_web_lg' not installed") yield +@pytest.fixture(scope="module", autouse=True) +def check_libcublas() -> Iterator[None]: + if not is_libcublas_available(): + pytest.skip(reason="libcublas.so is not available") + yield + + @pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") @pytest.mark.parametrize( diff --git a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py index 9484a0e9dc..e2cc1912c2 100644 --- a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py +++ b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py @@ -2,6 +2,7 @@ import os from typing import Iterator, List import pytest +from . 
import is_libcublas_available @pytest.fixture(scope="module", autouse=True) @@ -12,6 +13,12 @@ def check_spacy_model() -> Iterator[None]: pytest.skip(reason="Spacy model 'en_core_web_lg' not installed") yield +@pytest.fixture(scope="module", autouse=True) +def check_libcublas() -> Iterator[None]: + if not is_libcublas_available(): + pytest.skip(reason="libcublas.so is not available") + yield + @pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") @pytest.mark.parametrize( From ccea1e91474b69dc04756c4fa65e71a67d953db4 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 13:31:47 -0400 Subject: [PATCH 60/65] fix linting error --- libs/experimental/tests/unit_tests/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/experimental/tests/unit_tests/__init__.py b/libs/experimental/tests/unit_tests/__init__.py index 48e02109f0..0594a6bd14 100644 --- a/libs/experimental/tests/unit_tests/__init__.py +++ b/libs/experimental/tests/unit_tests/__init__.py @@ -1,6 +1,6 @@ import ctypes -def is_libcublas_available(): +def is_libcublas_available() -> bool: try: ctypes.CDLL("libcublas.so") return True From 42d0d485a990e484c92cd71fc76a7438a32f55a2 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 13:33:43 -0400 Subject: [PATCH 61/65] black formatting --- libs/experimental/tests/unit_tests/__init__.py | 3 ++- libs/experimental/tests/unit_tests/test_data_anonymizer.py | 2 +- .../tests/unit_tests/test_reversible_data_anonymizer.py | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libs/experimental/tests/unit_tests/__init__.py b/libs/experimental/tests/unit_tests/__init__.py index 0594a6bd14..f177990421 100644 --- a/libs/experimental/tests/unit_tests/__init__.py +++ b/libs/experimental/tests/unit_tests/__init__.py @@ -1,8 +1,9 @@ import ctypes + def is_libcublas_available() -> bool: try: ctypes.CDLL("libcublas.so") return True except OSError: - return False \ No newline at end of file + return False diff --git a/libs/experimental/tests/unit_tests/test_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_data_anonymizer.py index 07f1519488..17ea8ff1a5 100644 --- a/libs/experimental/tests/unit_tests/test_data_anonymizer.py +++ b/libs/experimental/tests/unit_tests/test_data_anonymizer.py @@ -12,6 +12,7 @@ def check_spacy_model() -> Iterator[None]: pytest.skip(reason="Spacy model 'en_core_web_lg' not installed") yield + @pytest.fixture(scope="module", autouse=True) def check_libcublas() -> Iterator[None]: if not is_libcublas_available(): @@ -19,7 +20,6 @@ def check_libcublas() -> Iterator[None]: yield - @pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") @pytest.mark.parametrize( "analyzed_fields,should_contain", diff --git a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py index e2cc1912c2..4302cec992 100644 --- a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py +++ b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py @@ -13,6 +13,7 @@ def check_spacy_model() -> Iterator[None]: pytest.skip(reason="Spacy model 'en_core_web_lg' not installed") yield + @pytest.fixture(scope="module", autouse=True) def check_libcublas() -> Iterator[None]: if not is_libcublas_available(): From 30d02e3a349922e01e0dcee52ab5132dc2063f29 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 13:36:01 -0400 Subject: [PATCH 62/65] fix linting --- 
libs/experimental/tests/unit_tests/test_data_anonymizer.py | 1 + .../tests/unit_tests/test_reversible_data_anonymizer.py | 1 + 2 files changed, 2 insertions(+) diff --git a/libs/experimental/tests/unit_tests/test_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_data_anonymizer.py index 17ea8ff1a5..6abd42d754 100644 --- a/libs/experimental/tests/unit_tests/test_data_anonymizer.py +++ b/libs/experimental/tests/unit_tests/test_data_anonymizer.py @@ -1,6 +1,7 @@ from typing import Iterator, List import pytest + from . import is_libcublas_available diff --git a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py index 4302cec992..b3634d7c45 100644 --- a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py +++ b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py @@ -2,6 +2,7 @@ import os from typing import Iterator, List import pytest + from . import is_libcublas_available From 32445de3653c47c854a0578f8800065f8e97df86 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Mon, 11 Sep 2023 13:44:24 -0400 Subject: [PATCH 63/65] remove log line --- .../langchain_experimental/rl_chain/pick_best_chain.py | 1 - 1 file changed, 1 deletion(-) diff --git a/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py index c17a5f8bc2..f9075dc565 100644 --- a/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py +++ b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py @@ -308,7 +308,6 @@ class PickBest(base.RLChain[PickBestEvent]): ] kwargs["vw_cmd"] = vw_cmd - logger.info(f"vw_cmd: {vw_cmd}") super().__init__(*args, **kwargs) From 3b07c0cf3d4640e90b00256c57a25bb51fc8e006 Mon Sep 17 00:00:00 2001 From: olgavrou Date: Fri, 6 Oct 2023 04:07:22 +0300 Subject: [PATCH 64/65] RL Chain with VowpalWabbit (#10242) - Description: This PR adds a new chain, `rl_chain.PickBest`, for learned prompt variable injection; a detailed description and usage can be found in the example notebook added. It essentially adds a [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) layer before the LLM call in order to learn or personalize prompt variable selections. Most of the code exists to keep the API simple and to provide the defaults and data wrangling needed to use Vowpal Wabbit, so that the user of the chain doesn't have to worry about it.
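A minimal sketch of the intended select-then-learn loop, patterned on the unit tests in this patch series; the `FakeListChatModel` stand-in, the toy prompt, the option strings, and the score value are illustrative assumptions, not shipped code:

```python
from langchain.chat_models import FakeListChatModel
from langchain.prompts.prompt import PromptTemplate

import langchain_experimental.rl_chain as rl_chain

# stand-in LLM that always answers "hey"; any real LLM works the same way
llm = FakeListChatModel(responses=["hey"])
prompt = PromptTemplate(
    input_variables=["user", "action"],
    template="User context: {user}. Chosen option: {action}",
)

# selection_scorer=None defers grading until real feedback is available
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=prompt, selection_scorer=None)

# VW picks one `action` based on the `user` context, the pick is injected
# into the prompt, and the LLM is called with the filled-in prompt
response = chain.run(
    user=rl_chain.BasedOn("Context"),
    action=rl_chain.ToSelectFrom(["option 0", "option 1", "option 2"]),
)
print(response["response"])  # the LLM output for the selected option

# once real feedback arrives, report it so the VW policy can learn from it
chain.update_with_delayed_score(chain_response=response, score=1.0)
```

Here `selection_scorer=None` skips the built-in auto-scorer so that the grade can be reported later via `update_with_delayed_score`; with the auto-scorer active, reporting a delayed score raises a `RuntimeError`, as the tests above exercise.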
- Dependencies: [vowpal-wabbit-next](https://pypi.org/project/vowpal-wabbit-next/), - sentence-transformers (already a dep) - numpy (already a dep) - tagging @ataymano who contributed to this chain - Tag maintainer: @baskaryan - Twitter handle: @olgavrou Added example notebook and unit tests --- .../chains => unit_tests}/rl_chain/test_pick_best_chain_call.py | 0 .../rl_chain/test_pick_best_text_embedder.py | 0 .../chains => unit_tests}/rl_chain/test_rl_chain_base_embedder.py | 0 .../chains => unit_tests}/rl_chain/test_utils.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename libs/experimental/tests/{integration_tests/chains => unit_tests}/rl_chain/test_pick_best_chain_call.py (100%) rename libs/experimental/tests/{integration_tests/chains => unit_tests}/rl_chain/test_pick_best_text_embedder.py (100%) rename libs/experimental/tests/{integration_tests/chains => unit_tests}/rl_chain/test_rl_chain_base_embedder.py (100%) rename libs/experimental/tests/{integration_tests/chains => unit_tests}/rl_chain/test_utils.py (100%) diff --git a/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py similarity index 100% rename from libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_chain_call.py rename to libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py diff --git a/libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_text_embedder.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py similarity index 100% rename from libs/experimental/tests/integration_tests/chains/rl_chain/test_pick_best_text_embedder.py rename to libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py diff --git a/libs/experimental/tests/integration_tests/chains/rl_chain/test_rl_chain_base_embedder.py b/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py similarity index 100% rename from libs/experimental/tests/integration_tests/chains/rl_chain/test_rl_chain_base_embedder.py rename to libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py diff --git a/libs/experimental/tests/integration_tests/chains/rl_chain/test_utils.py b/libs/experimental/tests/unit_tests/rl_chain/test_utils.py similarity index 100% rename from libs/experimental/tests/integration_tests/chains/rl_chain/test_utils.py rename to libs/experimental/tests/unit_tests/rl_chain/test_utils.py From a3a2ce623e630bd0d5d6dcc7916a9b7da04e59ee Mon Sep 17 00:00:00 2001 From: Bagatur Date: Thu, 5 Oct 2023 18:18:19 -0700 Subject: [PATCH 65/65] Revise vowpal_wabbit notebook --- .../how_to/learned_prompt_optimization.ipynb | 833 ----------------- .../more/learned_prompt_optimization.ipynb | 834 ++++++++++++++++++ .../rl_chain/model_repository.py | 8 +- 3 files changed, 841 insertions(+), 834 deletions(-) delete mode 100644 docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb create mode 100644 docs/extras/use_cases/more/learned_prompt_optimization.ipynb diff --git a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb b/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb deleted file mode 100644 index 3e0702b4f1..0000000000 --- a/docs/extras/modules/chains/how_to/learned_prompt_optimization.ipynb +++ /dev/null @@ -1,833 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Learned Prompt Variable Injection via RL Chain\n", - "\n", - 
"LLM prompts can be enhanced by injecting specific terms into template sentences. Selecting the right terms is crucial for obtaining high-quality responses. This notebook introduces automated prompt engineering through term injection using Reinforcement Learning with VowpalWabbit.\n", - "\n", - "The rl_chain (reinforcement learning chain) provides a way to automatically determine the best terms to inject without the need for fine-tuning the underlying foundational model.\n", - "\n", - "For illustration, consider the scenario of a meal delivery service. We use LangChain to ask customers, like Tom, about their dietary preferences and recommend suitable meals from our extensive menu. The rl_chain selects a meal based on user preferences, injects it into a prompt template, and forwards the prompt to an LLM. The LLM's response, which is a personalized recommendation, is then returned to the user.\n", - "\n", - "The example laid out below is a toy example to demonstrate the applicability of the concept. Advanced options and explanations are provided at the end." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Install necessary packages\n", - "# ! pip install langchain langchain-experimental matplotlib" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# four meals defined, some vegetarian some not\n", - "\n", - "meals = [\n", - " \"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\",\n", - " \"Chicken Flatbreads with red sauce. Italian-Mexican fusion\",\n", - " \"Veggie sweet potato quesadillas with vegan cheese\",\n", - " \"One-Pan Tortelonni bake with peppers and onions\",\n", - "]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# pick and configure the LLM of your choice\n", - "\n", - "from langchain.llms import OpenAI\n", - "llm = OpenAI(engine=\"text-davinci-003\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "##### Intialize the RL chain with provided defaults\n", - "\n", - "The prompt template which will be used to query the LLM needs to be defined.\n", - "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above, the RL chain will try to pick and inject the best meal\n" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain.prompts import PromptTemplate\n", - "\n", - "# here I am using the variable meal which will be replaced by one of the meals above\n", - "# and some variables like user, preference, and text_to_personalize which I will provide at chain run time\n", - "\n", - "PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: \"{meal}\".\n", - "\n", - "Embed the meal into the given text: \"{text_to_personalize}\".\n", - "\n", - "Prepend a personalized message including the user's name \"{user}\" \n", - " and their preference \"{preference}\".\n", - "\n", - "Make it sound good.\n", - "\"\"\"\n", - "\n", - "PROMPT = PromptTemplate(\n", - " input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], \n", - " template=PROMPT_TEMPLATE\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next the RL chain's PickBest chain is being initialized. We must provide the llm of choice and the defined prompt. 
As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria. " - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [], - "source": [ - "import langchain_experimental.rl_chain as rl_chain\n", - "\n", - "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once the chain is setup I am going to call it with the meals I want to be selected from, and some context based on which the chain will select a meal." - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [], - "source": [ - "response = chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs \\\n", - " believe you will love it!\",\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hey Tom! We have an amazing special dish for you this week - veggie sweet potato quesadillas with vegan cheese, which we're sure you'll love as a vegetarian who's ok with regular dairy. Enjoy!\n" - ] - } - ], - "source": [ - "print(response[\"response\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## What is the chain doing\n", - "\n", - "Here's a step-by-step breakdown of the RL chain's operations:\n", - "\n", - "1. Accept the list of meals.\n", - "2. Consider the user and their dietary preferences.\n", - "3. Based on this context, select an appropriate meal.\n", - "4. Automatically evaluate the appropriateness of the meal choice.\n", - "5. Inject the selected meal into the prompt and submit it to the LLM.\n", - "6. Return the LLM's response to the user.\n", - "\n", - "Technically, the chain achieves this by employing a contextual bandit reinforcement learning model, specifically utilizing the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library.\n", - "\n", - "Initially, since the RL model is untrained, it might opt for random selections that don't necessarily align with a user's preferences. However, as it gains more exposure to the user's choices and feedback, it should start to make better selections (or quickly learn a good one and just pick that!).\n" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\"Hey Tom, our master chefs have prepared something special for you this week - a Mexican-Greek fusion of Beef Enchiladas with Feta cheese that is sure to tantalize your taste buds. Don't worry, we've got you covered with a vegetarian option and regular dairy is ok - so you can enjoy the delicious flavors without any worries!\"\n", - "\n", - "\"Hey Tom! Our master chefs have created a truly unique dish this week, perfect for you! Beef Enchiladas with Feta cheese - a delicious Mexican-Greek fusion - and made with vegetarian ingredients and regular dairy. We know you'll love it!\"\n", - "\n", - "Hey Tom, we have something special for you this week - our veggie sweet potato quesadillas with vegan cheese! 
We know you like vegetarian dishes and don't mind regular dairy, so we think you'll love this delicious meal.\n", - "\n", - "Hey Tom, we have the perfect dish for you this week! Our master chefs have crafted delicious veggie sweet potato quesadillas with vegan cheese, perfect for vegetarians and those who are okay with regular dairy. We guarantee that you will love it!\n", - "\n", - "Hey Tom! Our master chefs have crafted a delicious Veggie Sweet Potato Quesadillas with vegan cheese, specially designed with your Vegetarian preference in mind - they're sure you will love it! Enjoy this weeks specialty dish!\n", - "\n" - ] - } - ], - "source": [ - "for _ in range(5):\n", - "    try:\n", - "        response = chain.run(\n", - "            meal = rl_chain.ToSelectFrom(meals),\n", - "            user = rl_chain.BasedOn(\"Tom\"),\n", - "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - "            text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - "        )\n", - "    except Exception as e:\n", - "        print(e)\n", - "    print(response[\"response\"])\n", - "    print()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## How is the chain learning\n", - "\n", - "It's important to note that while the RL model can make sophisticated selections, it doesn't inherently recognize concepts like \"vegetarian\" or understand that \"beef enchiladas\" aren't vegetarian-friendly. Instead, it leverages the LLM to ground its choices in common sense.\n", - "\n", - "The way the chain is learning that Tom prefers vegetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in (`BasedOn`).\n", - "\n", - "You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - "    llm=llm,\n", - "    prompt=PROMPT,\n", - "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want to examine the score and other selection metadata, you can do so by inspecting the metadata object returned by the chain." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\"Hey Tom! We're so excited for you to try out this week's specialty dish. Our master chefs have put together some delicious veggie sweet potato quesadillas with vegan cheese for you, perfect for vegetarians or anyone who's ok with regular dairy. We can't wait for you to enjoy it!\"\n", - "selected index: 2, score: 0.5\n" - ] - } - ], - "source": [ - "response = chain.run(\n", - "    meal = rl_chain.ToSelectFrom(meals),\n", - "    user = rl_chain.BasedOn(\"Tom\"),\n", - "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - ")\n", - "print(response[\"response\"])\n", - "selection_metadata = response[\"selection_metadata\"]\n", - "print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In a more realistic scenario, it is likely that you have a well-defined scoring function for what was selected. For example, you might be doing few-shot prompting and want to select prompt examples for a natural-language-to-SQL translation task. The scorer could then be: did the generated SQL run in an SQL engine? In such cases you will want to plug in your own scoring function. In the example below I will just check whether the meal picked was vegetarian or not." - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [], - "source": [ - "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", - "    def score_response(\n", - "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", - "\n", - "        print(event.based_on)\n", - "        print(event.to_select_from)\n", - "\n", - "        # you can build a complex scoring function here\n", - "        # it is preferable that the score ranges between 0 and 1 but it is not enforced\n", - "\n", - "        selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", - "        print(f\"selected meal: {selected_meal}\")\n", - "\n", - "        if \"Tom\" in event.based_on[\"user\"]:\n", - "            if \"Vegetarian\" in event.based_on[\"preference\"]:\n", - "                if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", - "                    return 0.0\n", - "                else:\n", - "                    return 1.0\n", - "            else:\n", - "                if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", - "                    return 1.0\n", - "                else:\n", - "                    return 0.0\n", - "        else:\n", - "            raise NotImplementedError(\"I don't know how to score this user\")" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [], - "source": [ - "chain = rl_chain.PickBest.from_llm(\n", - "    llm=llm,\n", - "    prompt=PROMPT,\n", - "    selection_scorer=CustomSelectionScorer(),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'user': ['Tom'], 'preference': ['Vegetarian', 'regular dairy is ok']}\n", - "{'meal': ['Beef Enchiladas with Feta cheese. Mexican-Greek fusion', 'Chicken Flatbreads with red sauce. 
Italian-Mexican fusion', 'Veggie sweet potato quesadillas with vegan cheese', 'One-Pan Tortelonni bake with peppers and onions']}\n", - "selected meal: Veggie sweet potato quesadillas with vegan cheese\n" - ] - } - ], - "source": [ - "response = chain.run(\n", - "    meal = rl_chain.ToSelectFrom(meals),\n", - "    user = rl_chain.BasedOn(\"Tom\"),\n", - "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## How can I track the chain's progress\n", - "\n", - "You can track the chain's progress by using the metrics mechanism provided. I am going to expand the users to Tom and Anna, and extend the scoring function. I am going to initialize two chains, one with the default learning policy and one with a built-in random policy (i.e. it selects a meal randomly), and plot their scoring progress." - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "metadata": {}, - "outputs": [], - "source": [ - "class CustomSelectionScorer(rl_chain.SelectionScorer):\n", - "    def score_preference(self, preference, selected_meal):\n", - "        if \"Vegetarian\" in preference:\n", - "            if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", - "                return 0.0\n", - "            else:\n", - "                return 1.0\n", - "        else:\n", - "            if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n", - "                return 1.0\n", - "            else:\n", - "                return 0.0\n", - "    def score_response(\n", - "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n", - "\n", - "        selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n", - "\n", - "        if \"Tom\" in event.based_on[\"user\"]:\n", - "            return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", - "        elif \"Anna\" in event.based_on[\"user\"]:\n", - "            return self.score_preference(event.based_on[\"preference\"], selected_meal)\n", - "        else:\n", - "            raise NotImplementedError(\"I don't know how to score this user\")" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "metadata": {}, - "outputs": [], - "source": [ - "chain = rl_chain.PickBest.from_llm(\n", - "    llm=llm,\n", - "    prompt=PROMPT,\n", - "    selection_scorer=CustomSelectionScorer(),\n", - "    metrics_step=5,\n", - "    metrics_window_size=5, # rolling window average\n", - ")\n", - "\n", - "random_chain = rl_chain.PickBest.from_llm(\n", - "    llm=llm,\n", - "    prompt=PROMPT,\n", - "    selection_scorer=CustomSelectionScorer(),\n", - "    metrics_step=5,\n", - "    metrics_window_size=5, # rolling window average\n", - "    policy=rl_chain.PickBestRandomPolicy # set the random policy instead of default\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "metadata": {}, - "outputs": [], - "source": [ - "for _ in range(20):\n", - "    try:\n", - "        chain.run(\n", - "            meal = rl_chain.ToSelectFrom(meals),\n", - "            user = rl_chain.BasedOn(\"Tom\"),\n", - "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - "            text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - "        )\n", - "        random_chain.run(\n", - "            meal = rl_chain.ToSelectFrom(meals),\n", - "            user = rl_chain.BasedOn(\"Tom\"),\n", - "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - "            text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - "        )\n", - "    \n", - "        chain.run(\n", - "            meal = rl_chain.ToSelectFrom(meals),\n", - "            user = rl_chain.BasedOn(\"Anna\"),\n", - "            preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", - "            text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - "        )\n", - "        random_chain.run(\n", - "            meal = rl_chain.ToSelectFrom(meals),\n", - "            user = rl_chain.BasedOn(\"Anna\"),\n", - "            preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n", - "            text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - "        )\n", - "    except Exception as e:\n", - "        print(e)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The RL chain converges to the fact that Anna prefers beef and Tom is vegetarian. The random chain picks at random, and so will send beef to vegetarians half the time." - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The final average score for the default policy, calculated over a rolling window, is: 1.0\n", - "The final average score for the random policy, calculated over a rolling window, is: 0.4\n" - ] - }, - { - "data": { - "image/png": "<base64-encoded matplotlib figure omitted: line plot of the rolling-average score per step, with the default learning policy converging to 1.0 and the random policy staying around 0.4>",
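The rendered matplotlib figure does not survive in patch form (see the placeholder above), so here is a short sketch of how the comparison plot can be reproduced. It assumes the chains expose their tracked scores as a pandas DataFrame via `chain.metrics.to_pandas()`; that accessor, like the label strings, is an assumption about the experimental metrics API rather than something shown verbatim in this diff:

```python
# Hypothetical plotting sketch: compare the learned policy to the random baseline.
# Assumes `chain` and `random_chain` from the cells above, built with
# metrics_step/metrics_window_size, and that the metrics tracker exposes
# .to_pandas() (treat that accessor as an assumption).
from matplotlib import pyplot as plt

chain.metrics.to_pandas()["score"].plot(label="default learning policy")
random_chain.metrics.to_pandas()["score"].plot(label="random selection policy")

plt.xlabel("iteration")
plt.ylabel("rolling average score")
plt.legend()
plt.show()
```

Because the custom scorer returns 1.0 for preference-consistent meals and 0.0 otherwise, the default policy's rolling average should climb toward 1.0, while the random baseline hovers near the base rate of compatible meals, consistent with the 1.0 vs. 0.4 final averages printed above.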
Pdfnn3+eiRMnUr9+fd599126dOlCTEwMZcuW5ejRo9x999307duXL7/8kp07d/LYY4/h4+NzzeLvnxwOB506deL8+fN8/fXX1KhRg+3bt+Pu7o6fnx+PPPII06ZNy1SMXL6vQkTy09mkVLp/8icHTydbHUWcoEuTENctRnJj5MiRDBs2LON+QkICoaGh2Vr3Qpqd+qOsGY+y/fVISnjd+EeamJjIZ599xtdff83tt98OmG+slStXzljm0KFDTJs2jUOHDmWcghk+fDgLFy5k2rRpvPXWWxlHD8qUKZNxSqJSpUqZjrI8/fTTLFq0iDlz5uS6GLl8yqBs2bI5OvUxduxYevbsmfHpvlatWrz//vu0b9+eKVOm4OPjQ//+/TOWr169Ou+//z4tW7YkMTERf3//jO+9/vrrVxUZjRs3ZvTo0RnbnjRpEkuXLr1uMTJ8+HA6d+4MwGuvvUaDBg3Yu3cvdevW5YMPPqBTp04ZP7/atWvzxx9/8PPPP9/wuQ4ePJh//etfAEyZMoWFCxfy2Wef8cILL/Dhhx8SGhrKpEmTsNls1K1bl2PHjvHiiy8yatSoG14+u2TJEtatW8eOHTuoXbt2xs/qsoEDB9KmTRuOHz9OcHAwcXFxzJ8/nyVLltwwt0hupaY7eOLraA6eTqZyaV/ua1o8ThW7sqAA63oJ5XsxUrFiRWJjYzM9FhsbS0BAQJZNyry9vfH29s7vaJbZt28fqampREREZDxWpkwZ6tSpk3F/69at2O32jDefy1JSUihbtmyW27bb7bz11lvMmTOHo0ePkpqaSkpKCiVKFHy1u3nzZrZs2cI333yT8ZhhGDgcDmJiYqhXrx7R0dGMGTOGzZs3c/bsWRwOB2AWY/Xr189Yr0WLFldtv3HjxpnuX34jvp4r1wkODgYgLi6OunXrsmvXLu6///5My7dq1SpbxciVp3o8PDxo0aIFO3bsAGDHjh20bt0601Gztm3bZkxjUKVKletue9OmTVSuXPmqv4UrMzZo0IDp06czYsQIvv76a6pWrcott9xyw9wiuWEYBq/O28bamDP4e3vwed+W1A7SUTjJvXwvRlq3bn3VIffFixdnevF2Jl9Pd7a/bs3AWF9Pd6dtKzExEXd3d6Kjo3F3z7zdK48Y/NPbb7/Ne++9x8SJE2nUqBF+fn4MHTqU1NTUjGXc3NyumscnPyYZTExM5PHHH+eZZ5656ntVqlQhKSmJyMhIIiMj+eabbyhfvjyHDh0iMjIyU14gY4zJlTw9PTPdt9lsGcVMVq5c53JxcKN1rJadzsIDBw5k8uTJjBgxgmnTptGvXz81ApR889mqGGavP4ybDT7o0UyFiORZjouRxMRE9u7dm3E/JiaGTZs2UaZMGapUqcLIkSM5evQoX375JQBPPPEEkyZN4oUXXqB///4sW7aMOXPm8MsvvzjvWVzBZrNl61SJlWrUqIGnpydr167N+FR89uxZdu/enTHosVmzZtjtduLi4mjXrl22t7169Wruu+8+Hn30UcB8o929e3emowzly5fn+PHjGfcTEhKIiYnJcpuXx4jY7Tkbi9O8eXO2b99OzZo1r/n9rVu3cvr0acaNG5dxGm79+vU52ocz1alTh6ioqEyP/fN+Vv7888+MIxHp6elER0czePBgwLyCbO7cuRiGkVEgrF69mpIlS2Y6NZeVxo0bc+TIEXbv3p3l0ZFHH32UF154gffff5/t27fTp0+fbOUWyamlO2J5c7551O+VzvW5tU4FixOJK8hxn5H169fTrFmzjCsrhg0bRrNmzRg1ahRg9nY4dOhQxvLVqlXjl19+YfHixTRp0oQJEybw6aefFuvLev39/RkwYADPP/88y5YtY9u2bfTt2zfT2IHatWvTs2dPevfuzXfffUdMTAzr1q1j7Nix1y3katWqxeLFi/njjz/YsWMHjz/++FWnyW677Ta++uorfv/9d7Zu3UqfPn2uOvpypQoVKuDr68vChQuJjY0lPj4+W8/zxRdf5I8//mDw4MFs2rSJPXv28MMPP2S8SVepUgUvLy8++OAD9u/fz48//phvvVKy4+mnn2b+/Pm8++677Nmzh48++ogFCxZk6wjD5MmT+f7779m5cyeDBg3i7NmzGeNhnnrqKQ4fPszTTz/Nzp07+eGHHxg9ejTDhg3LVrv19u3bc8stt/Cvf/2LxYsXExMTw4IFC1i4cGHGMqVLl+aBBx7g+eef584778xWkSOSUzuOJ/DMzI0YBvSIqEK/tmFWRxIXkeNipEOHDhiGcdXX5a6qX3zxxVWXfXbo0IGNGzeSkpLCvn376Nu3rxOiF21vv/027dq1o0uXLnTs2JGbb76Z8PDwTMtMmzaN3r1789xzz1GnTh26du1KVFTUdccYvPLKKzRv3pzIyEg6dOhAxYoV6dq1a6ZlRo4cSfv27bnnnnvo3LkzXbt2pUaNGllu08PDg/fff5+PPvqIkJAQ7rvvvmw9x8aNG7NixQp2795Nu3btMorWywNyy5cvzxdffMG3335L/fr1GTduHO+88062tp0f2rZty9SpU3n33Xdp0qQJCxcu5Nlnn83WBHHjxo1j3LhxNGnShFWrVvHjjz9Srlw5wBxUPH/+fNatW0eTJk144oknGDBgAK+88kq2s82dO5eWLVvSvXt36tevzwsvvHDVkaoBAwaQmpqaaVCwiLOcPJ/CwOnrSUq106ZGWV67t4FOBYrT2Ix/Dh4ohBISEggMDCQ+Pp6AgIBM37t48SIxMTFUq1ZNs4qK0z322GPs3LmT33///Zrfv9xnZOPGjZkuy7bCV199xbPPPsuxY8eyffn1jej/SwAuptnp8cmfbDh0jmrl/Pj+qTaUKuGcvzFxbdd7/75S4R5cIVLA3nnnHe644w78/PxYsGAB06dP58MPP7Q61nUlJydz/Phxxo0bx+OPP+60QkQEzCtnRszdwoZD5wj09eSzPi1UiIjTadZekSusW7eOO+64g0aNGjF16lTef/99Bg4caHWs6/rvf/9L3bp1qVixIiNHjrQ6jriYyb/tZd6mY3i42ZjSsznVy2d9NZ9Ibuk0jYhkSf9fxduCrcd58psNALx5f0N6RlS1OJEUNdk9TaMjIyIicpWtR+J5ds4mAPq1DVMhIvlKxYiIiGRyIv4iA7+M4mKagw51yvNK5/o3XkkkD1SMiIhIhgupdh77cj2xCSnUquDPB92b4e6mS3glf6kYERERABwOg+e+3cTWo/GU8fPi874tKenjeeMVRfJIxYiIiADwf0t2M3/rCbzc3fioV7hl08lL8aNiREREmLfxKB8sM+cde+uBRrQMK2NxIilOVIwUE3379r2qLXxR0KFDB4YOHZqv+1i+fDk2m41z587l636c4YsvvqBUqVIZ98eMGWN551cp+qIPnuWFuVsAeKJ9DR4M19xGUrBUjEixcq3ipk2bNhw/fpzAwEBrQuXB8OHDWbp0qdUxpAg7cjaZx79aT2q6gzvrB/FCZB2rI0kxpHbwhURqaqraeFvEy8uLihUrWh0jV/z9/fH3V0dMyZ3ElHQGTl/PqcR
U6gcH8H/dmuKmK2fEAjoyYpEOHTowePBghg4dSrly5YiMjATg3XffpVGjRvj5+REaGspTTz1FYmJixnqXD9MvWrSIevXq4e/vz1133cXx48czlrHb7QwbNoxSpUpRtmxZXnjhBf7ZaDclJYVnnnmGChUq4OPjw80330xUVFTG9y+fuli0aBHNmjXD19eX2267jbi4OBYsWEC9evUICAigR48eJCcnZ/k8Dx48SJcuXShdujR+fn40aNCA+fPnZ3x/27ZtdOrUCX9/f4KCgujVqxenTp3KcnspKSkMHz6cSpUq4efnR0RExFWzRK9evZoOHTpQokQJSpcuTWRkJGfPnqVv376sWLGC9957D5vNhs1m48CBA9c8TTN37lwaNGiAt7c3YWFhTJgwIdM+wsLCeOutt+jfvz8lS5akSpUqfPzxx1nmhr9/54MHDyYwMJBy5crx6quvZvrdnD17lt69e1O6dGlKlChBp06d2LNnT5bbvNZpms8//zwje3BwMIMHDwagf//+3HPPPZmWTUtLo0KFCnz22WfXzS6ux+4wGDJzIztPnKd8SW8+7dMCP299PhVruF4xYhiQmmTNVw4760+fPh0vLy9Wr17N1KlTAXBzc+P999/nr7/+Yvr06SxbtowXXngh03rJycm88847fPXVV6xcuZJDhw4xfPjwjO9PmDCBL774gs8//5xVq1Zx5swZvv/++0zbeOGFF5g7dy7Tp09nw4YN1KxZk8jISM6cOZNpuTFjxjBp0iT++OMPDh8+zMMPP8zEiROZMWMGv/zyC7/++isffPBBls9x0KBBpKSksHLlSrZu3cr48eMzPsmfO3eO2267jWbNmrF+/XoWLlxIbGwsDz/8cJbbGzx4MGvWrGHWrFls2bKFhx56iLvuuivjDXvTpk3cfvvt1K9fnzVr1rBq1Sq6dOmC3W7nvffeo3Xr1jz22GMcP36c48ePExoaetU+oqOjefjhh3nkkUfYunUrY8aM4dVXX+WLL77ItNyECRNo0aIFGzdu5KmnnuLJJ59k165dWWYH83fu4eHBunXreO+993j33Xf59NNPM77ft29f1q9fz48//siaNWswDIO7776btLS06273silTpjBo0CD+/e9/s3XrVn788Udq1qwJwMCBA1m4cGGmwvXnn38mOTmZbt26ZWv74jrGL9zJ0p1xeHu48UnvFoSU8rU6khRnRhEQHx9vAEZ8fPxV37tw4YKxfft248KFC+YDKYmGMTrAmq+UxGw/p/bt2xvNmjW74XLffvutUbZs2Yz706ZNMwBj7969GY9NnjzZCAoKyrgfHBxs/Pe//824n5aWZlSuXNm47777DMMwjMTERMPT09P45ptvMpZJTU01QkJCMtb77bffDMBYsmRJxjJjx441AGPfvn0Zjz3++ONGZGRklvkbNWpkjBkz5prfe+ONN4w777wz02OHDx82AGPXrl2GYZg/pyFDhhiGYRgHDx403N3djaNHj2Za5/bbbzdGjhxpGIZhdO/e3Wjbtm2Wea7c3mWXn+vZs2cNwzCMHj16GHfccUemZZ5//nmjfv36GferVq1qPProoxn3HQ6HUaFCBWPKlCnX3Xe9evUMh8OR8diLL75o1KtXzzAMw9i9e7cBGKtXr874/qlTpwxfX19jzpw5hmGYv//AwMCM748ePdpo0qRJxv2QkBDj5ZdfzjJD/fr1jfHjx2fc79Kli9G3b98sl7/q/0tcwqx1B42qL/5sVH3xZ+PHTUdvvIJILl3v/ftKrndkpAgJDw+/6rElS5Zw++23U6lSJUqWLEmvXr04ffp0plMhJUqUoEaNGhn3g4ODiYuLAyA+Pp7jx48TERGR8X0PDw9atGiRcX/fvn2kpaXRtm3bjMc8PT1p1aoVO3bsyJSncePGGbeDgoIoUaIE1atXz/TY5X1fyzPPPMN//vMf2rZty+jRo9myZUvG9zZv3sxvv/2WMe7B39+funXrZmT8p61bt2K326ldu3amdVasWJGx/OUjI3mxY8eOTD8bgLZt27Jnzx7sdnvGY1f+bGw2GxUrVrzuzwLgpptuwmb7+5x869atM7a7Y8cOPDw8Mv3uypYtS506da76vVxLXFwcx44du+7zHzhwINOmTQMgNjaWBQsW0L9//xtuW1zHn/tP8/L32wAY2rEWXZqEWJxIxBUHsHqWgJeOWbfvHPDz88t0/8CBA9xzzz08+eSTvPnmm5QpU4ZVq1YxYMAAUlNTKVHC3L6nZ+aOiDab7aoxIc5y5b5sNts19+1wOLJcf+DAgURGRmac0hk7diwTJkzg6aefJjExkS5dujB+/Pir1gsODr7qscTERNzd3YmOjsbd3T3T9y6f+vH1LbhDzTn9WeS37Dz33r17M2LECNasWcMff/xBtWrVaNeuXQGkk8Lg4Okknvg6mnSHwT2Ngxlyey2rI4kArjhmxGYDLz9rvmx5G4UeHR2Nw+FgwoQJ3HTTTdSuXZtjx3JWWAUGBhIcHMzatWszHktPTyc6Ojrjfo0aNTLGqlyWlpZGVFQU9es7f0Ks0NBQnnjiCb777juee+45PvnkEwCaN2/OX3/9RVhYGDVr1sz09c9CDaBZs2bY7Xbi4uKuWv7y1TCNGze+7qWuXl5emY5uXEu9evUy/WzAHBRbu3btq4qgnLry9wLw559/UqtWLdzd3alXrx7p6emZljl9+jS7du3K1u+lZMmShIWFXff5ly1blq5duzJt2jS++OIL+vXrl/snI0VK/IU0+n8RxbnkNJqEluKdh5pkOkonYiXXK0aKsJo1a5KWlsYHH3zA/v37+eqrrzIGtubEkCFDGDduHPPmzWPnzp089dRTma4U8fPz48knn+T5559n4cKFbN++nccee4zk5GQGDBjgxGcEQ4cOZdGiRcTExLBhwwZ+++036tWrB5iDW8+cOUP37t2Jiopi3759LFq0iH79+l2zYKhduzY9e/akd+/efPfdd8TExLBu3TrGjh3LL7/8AsDIkSOJioriqaeeYsuWLezcuZMpU6ZkXKETFhbG2rVrOXDgAKdOnbrmkYznnnuOpUuX8sYbb7B7926mT5/OpEmTMg0Szq1Dhw4xbNgwdu3axcyZM/nggw8YMmQIALVq1eK+++7jscceY9WqVWzevJlHH32USpUqcd9992Vr+2PGjGHChAm8//777Nmzhw0bNlw1wHjgwIFMnz6dHTt20KdPnzw/Jyn80u0OBs/YwL6TSQQH+vBJr3B8PPNWWIs4k4qRQqRJkya8++67jB8/noYNG/LNN98wduzYHG/nueeeo1evXvTp04fWrVtTsmRJ7r///kzLjBs3jn/961/06tWL5s2bs3fvXhYtWkTp0qWd9XQA8zLjQYMGUa9ePe666y5q167Nhx9+CEBISAirV6/Gbrdz55130qhRI4YOHUqpUqVwc7v2n+a0adPo3bs3zz33HHXq1KFr165ERUVRpUoVwCxYfv31VzZv3kyrVq1o3bo1P/zwAx4e5hnJ4cOH4+7uTv369SlfvjyHDh26ah/Nmzdnzpw5zJo1i4YNGzJq1C
hef/11+vbtm+efR+/evblw4QKtWrVi0KBBDBkyhH//+9+Znl94eDj33HMPrVu3xjAM5s+ff9Upoaz06dOHiRMn8uGHH9KgQQPuueeeqy4N7tixI8HBwURGRhISovECxcHrP2/n9z2n8PV059M+LagQ4GN1JJFMbEZ+DTZwooSEBAIDA4mPjycgICDT9y5evEhMTAzVqlXDx0f/YFJ4dejQgaZNmzJx4kRLcyQmJlKpUiWmTZvGAw88cN1l9f9V9H255gCjfvgLmw2mPhpOZIOi2eBPiqbrvX9fyfUGsIrINTkcDk6dOsWECRMoVaoU9957r9WRJJ/9vuckr/20HYAXIuuqEJFCS8WISDFx6NAhqlWrRuXKlfniiy8yTl2Ja9obl8hT32zA7jD4V/PKPNG++o1XErGIXo1ECsg/29YXtLCwsHy7BFwKl7NJqQyYHsX5i+m0DCvNWw801JUzUqhpAKuIiAtJTXfwxNfRHDydTGgZX6Y+Go63h66ckcJNxYiIiIswDINX521jbcwZSnp78FmflpT197Y6lsgNuUwxosPPIs6n/6ui5dPfY5i9/jBuNni/RzNqB5W0OpJIthT5YuRyR8zU1FSLk4i4nstzImW3z4lYZ+mOWN5aYM5h9Ern+txap4LFiUSyr8gPYPXw8KBEiRKcPHkST0/PLJtliUj2GYZBcnIycXFxlCpVKs9t8CV/7TiewDMzN2IY0COiCv3ahlkdSSRHinwxYrPZCA4OJiYmhoMHD1odR8SllCpVKmPeHymcTp5PYeD09SSl2mlToyyv3dtAV85IkVPkixEwJz+rVauWTtWIOJGnp6eOiBRyF9PsPP7Veo6eu0C1cn582LM5nu46OixFj0sUIwBubm5qVy0ixYZhGIyYu4UNh84R6OvJZ31aUKqEl9WxRHJFJbSISBE0+be9zNt0DA83G1N6Nqd6eX+rI4nkmooREZEiZv7W47zz624AXruvAW1qlrM4kUjeqBgRESlCth6JZ9icTQD0axtGz4iq1gYScQIVIyIiRcSJ+IsM/DKKi2kOOtQpzyud61sdScQpVIyIiBQBF1LtPPblemITUqhVwZ8PujfD3U2X8IprUDEiIlLIORwGz327ia1H4ynj58XnfVtS0kddccV1qBgRESnk/m/JbuZvPYGXuxsf9QontEwJqyOJOJWKERGRQmzexqN8sGwvAGMfaETLsDIWJxJxPhUjIiKFVPTBs7wwdwsAT3aowb/CK1ucSCR/qBgRESmEjpxN5vGv1pOa7uDO+kE8f2cdqyOJ5BsVIyIihUxiSjoDp6/nVGIq9YMD+L9uTXHTlTPiwlSMiIgUInaHwZCZG9l54jzlS3rzaZ8W+Hm7zDRiItekYkREpBAZv3AnS3fG4e3hxie9WxBSytfqSCL5TsWIiEghMTvqEB+v3A/AOw81oWloKWsDiRQQFSMiIoXAn/tP8/L32wAY2rEWXZqEWJxIpOCoGBERsdiBU0k88XU06Q6DLk1CGHJ7LasjiRQoFSMiIhaKv5DGgOlRnEtOo0loKd5+sDE2m66ckeJFxYiIiEXS7Q4Gz9jAvpNJBAf68EmvcHw83a2OJVLgVIyIiFjk9Z+38/ueU/h6uvNpnxZUCPCxOpKIJVSMiIhY4Ms1B/hyzUFsNpj4SFMahARaHUnEMipGREQK2O97TvLaT9sBeCGyLpENKlqcSMRaKkZERArQ3rhEnvpmA3aHwb+aV+aJ9tWtjiRiORUjIiIF5GxSKgOmR3H+Yjotw0rz1gMNdeWMCCpGREQKRGq6gye+jubg6WRCy/gy9dFwvD105YwI5LIYmTx5MmFhYfj4+BAREcG6deuuu/zEiROpU6cOvr6+hIaG8uyzz3Lx4sVcBRYRKWoMw+DVedtYG3OGkt4efNanJWX9va2OJVJo5LgYmT17NsOGDWP06NFs2LCBJk2aEBkZSVxc3DWXnzFjBiNGjGD06NHs2LGDzz77jNmzZ/PSSy/lObyISFHw6e8xzF5/GDcbvN+jGbWDSlodSaRQsRmGYeRkhYiICFq2bMmkSZMAcDgchIaG8vTTTzNixIirlh88eDA7duxg6dKlGY8999xzrF27llWrVmVrnwkJCQQGBhIfH09AQEBO4ooUacmp6SzbGUdqusPqKJJLcedTGL9wJ4YBo+6pT/+bq1kdSaTAZPf92yMnG01NTSU6OpqRI0dmPObm5kbHjh1Zs2bNNddp06YNX3/9NevWraNVq1bs37+f+fPn06tXryz3k5KSQkpKSqYnI1LcJKWk8+DUNew4rr9/V9Ajogr92oZZHUOkUMpRMXLq1CnsdjtBQUGZHg8KCmLnzp3XXKdHjx6cOnWKm2++GcMwSE9P54knnrjuaZqxY8fy2muv5SSaiEtxOAyGzNrEjuMJlCrhSePKpayOJHnQqFIAQzvW1pUzIlnIUTGSG8uXL+ett97iww8/JCIigr179zJkyBDeeOMNXn311WuuM3LkSIYNG5ZxPyEhgdDQ0PyOKlJojF+0kyU7YvHycOPzvi1pXqW01ZFERPJNjoqRcuXK4e7uTmxsbKbHY2NjqVjx2h0EX331VXr16sXAgQMBaNSoEUlJSfz73//m5Zdfxs3t6jG03t7eeHtrpLkUT9+uP8xHK/YD8PaDjVWIiIjLy9HVNF5eXoSHh2cajOpwOFi6dCmtW7e+5jrJyclXFRzu7ua19TkcOyvi8tbFnOGl77cC8PRtNbmvaSWLE4mI5L8cn6YZNmwYffr0oUWLFrRq1YqJEyeSlJREv379AOjduzeVKlVi7NixAHTp0oV3332XZs2aZZymefXVV+nSpUtGUSIicOh0Mo9/tZ40u8HdjSrybMfaVkcSESkQOS5GunXrxsmTJxk1ahQnTpygadOmLFy4MGNQ66FDhzIdCXnllVew2Wy88sorHD16lPLly9OlSxfefPNN5z0LkSIu4WIa/adHcTY5jUaVApnwUFPc3DTYUUSKhxz3GbGC+oyIK0u3O+g/fT0rd58kKMCbHwbdTMVAH6tjiYjkWXbfvzU3jYjF/vPLDlbuPomPpxuf9m6pQkREih0VIyIW+vrPg3zxxwEAJnZrSqPKgdYGEhGxgIoREYus2nOK0T/+BcDzkXW4q2GwxYlERKyhYkTEAvtOJvLUN9HYHQb3N6vEUx1qWB1JRMQyKkZECti55FQGTl9PwsV0wquWZuwDjdQmXESKNRUjIgUoze7gya83EHMqiUqlfPmoVzg+nuq3IyLFm4oRkQJiGAajftjGmv2n8fNy57O+LSjnr2kPRERUjIgUkM9XH2DmusPYbPB+92bUraieOSIioGJEpED8tjOON3/ZDsDLd9fj9npBFicSESk8VIyI5LNdJ87z9MyNOAx4pGUoA26uZnUkEZFCRcWISD46lZjCgOlRJKakE1GtDK/f11BXzoiI/IOKEZF8kpJu54mvojly9gJhZUsw9dFwvDz0Lyci8k96ZRTJB4ZhMHLuVtYfPEtJHw8+7dOS0n5eVscSESmUVIyI5IMPl+/ju41HcXez8WHP5tSs4G91JBGRQ
kvFiIiTLdx2nLcX7QJgTJf6tKtV3uJEIiKFm4oRESfadjSeZ2dvBqBP66r0ah1mbSARkSJAxYiIk8QmXGTg9PVcSLPTrlY5Xr2nvtWRRESKBBUjIk5wIdXOY1+u50TCRWqU92NSj+Z4uOvfS0QkO/RqKZJHDofB8P9tZsuReEqX8OTzvi0J9PW0OpaISJGhYkQkjyYu3cMvW47j6W5j6qPhVC3rZ3UkEZEiRcWISB78sOko7y/dA8CbXRsRUb2sxYlERIoeFSMiubTx0Fme/98WAP59S3UebhlqcSIRkaJJxYhILhw9d4HHvowmNd1Bx3pBvHhXXasjiYgUWSpGRHIoKSWdgdPXcyoxhboVSzLxkaa4u2nyOxGR3FIxIpIDDofBkFmb2HE8gXL+XnzapwX+3h5WxxIRKdJUjIjkwPhFO1myIxYvDzc+7t2CyqVLWB1JRKTIUzEikk3frj/MRyv2A/D2g41pXqW0xYlERFyDihGRbFi7/zQvfb8VgKdvq8l9TStZnEhExHWoGBG5gUOnk3ni62jS7AZ3N6rIsx1rWx1JRMSlqBgRuY6Ei2n0nx7F2eQ0GlcOZMJDTXHTlTMiIk6lYkQkC+l2B4NnbGRvXCJBAd580rsFvl7uVscSEXE5KkZEsvCfX3awcvdJfDzd+LR3S4ICfKyOJCLiklSMiFzD138e5Is/DgAwsVtTGlUOtDaQiIgLUzEi8g+r9pxi9I9/AfB8ZB3uahhscSIREdemYkTkCvtOJvLUN9HYHQb3N6vEUx1qWB1JRMTlqRgRueRccioDvogi4WI64VVLM/aBRthsunJGRCS/qRgRAdLsDp78egMHTidTqZQvH/UKx8dTV86IiBQEFSNS7BmGwagftrFm/2n8vNz5rG8Lyvl7Wx1LRKTYUDEixd7nqw8wc91hbDZ4v3sz6lYMsDqSiEixomJEirXfdsbx5i/bAXj57nrcXi/I4kQiIsWPihEptnadOM/TMzfiMOCRlqEMuLma1ZFERIolFSNSLJ1KTKH/F1EkpqQTUa0Mr9/XUFfOiIhYRMWIFDsp6XYe/yqao+cuEFa2BFMfDcfLQ/8KIiJW0SuwFCuGYTBy7laiD56lpI8Hn/ZpSWk/L6tjiYgUaypGpFj5cPk+vtt4FHc3Gx/2bE7NCv5WRxIRKfZUjEixsXDbcd5etAuAMV3q065WeYsTiYgIqBiRYmLb0Xienb0ZgD6tq9KrdZi1gUREJIOKEXF5sQkXGTh9PRfS7LSrVY5X76lvdSQREbmCihFxaRdS7Tz25XpOJFykRnk/JvVojoe7/uxFRAoTvSqLy3I4DIZ/u5ktR+IpXcKTz/u2JNDX0+pYIiLyDypGxGVNXLqHX7Yex9PdxtRHw6la1s/qSCIicg0qRsQl/bDpKO8v3QPAm10bEVG9rMWJREQkKypGxOVsPHSW5/+3BYB/31Kdh1uGWpxIRESuR8WIuJSj5y7w2JfRpKY76FgviBfvqmt1JBERuQEVI+IyklLSGTh9PacSU6hbsSTvPdIUdzdNficiUtipGBGXYHcYDJm1iR3HEyjn78VnfVvi5+1hdSwREcmGXBUjkydPJiwsDB8fHyIiIli3bt11lz937hyDBg0iODgYb29vateuzfz583MVWORa/rtoJ0t2xOLl4cbHvVtQqZSv1ZFERCSbcvzRcfbs2QwbNoypU6cSERHBxIkTiYyMZNeuXVSoUOGq5VNTU7njjjuoUKEC//vf/6hUqRIHDx6kVKlSzsgvwrfrD/PRiv0AvP1gY5pXKW1xIhERyQmbYRhGTlaIiIigZcuWTJo0CQCHw0FoaChPP/00I0aMuGr5qVOn8vbbb7Nz5048PXPXcCohIYHAwEDi4+MJCAjI1TaKq7jYY8Qb/mBzzbET+08lMXjGBtLsBs/cVpNhd9axOpLzXYwHn0CrU0hu2dPAngpe6nNTZF1MAC9/cNPIhpzK7vt3jo6MpKamEh0dzciRIzMec3Nzo2PHjqxZs+aa6/z444+0bt2aQYMG8cMPP1C+fHl69OjBiy++iLu7+zXXSUlJISUlJdOTkZzbvHIeTZb14U97a4akDcJw4SFCdzeqyNCOta2O4VwOB8wdANt/gHvehfC+VieSnEo8CdPuguQz0OcnqNjQ6kSSUzErYcYjUKEu9P4RvP2tTuSSclSMnDp1CrvdTlBQUKbHg4KC2Llz5zXX2b9/P8uWLaNnz57Mnz+fvXv38tRTT5GWlsbo0aOvuc7YsWN57bXXchJNriE1eiYA97qvIdYjhClu3S1OlD9uql6GCQ81xc3VrpxZ9jr89Z15++dhULoaVG9vbSbJvrSLMLsnnN5r3p/5CDy2DPyvPp0thdTpfTC7F6QlwdFo+O7f0O1rHSHJB/l+uYHD4aBChQp8/PHHuLu7Ex4eztGjR3n77bezLEZGjhzJsGHDMu4nJCQQGqrGVTlhOBxUi/8z4/5jxlweu+8uaPywhakk2zbNgFX/Z94OaQbHNsKcXjBwGZSraW02uTHDgJ+egcNrzVNsvmXgbAzM6mkeIfH0sTqh3MiFszDjYbh4DirUNwuTXb/A0tfgDn1YdrYclXflypXD3d2d2NjYTI/HxsZSsWLFa64THBxM7dq1M52SqVevHidOnCA1NfWa63h7exMQEJDpS3Jm/1/rKMc5kg1v0ls9YT74w2A4fP0rn6QQOLgGfnzGvN3uOei3ECq3MseOzHjYPOQvhdvvE2DLbLC5w0PToef/wKcUHFkHPw42ixUpvOxpMKePeVQroDL0mgf3meMkWT0RNn5jZTqXlKNixMvLi/DwcJYuXZrxmMPhYOnSpbRu3fqa67Rt25a9e/ficDgyHtu9ezfBwcF4eXnlMrbcSNzGXwDYXaIZHneNhTqdwZ4Cs3rAuUMWp5MsnYkxD+070qBeF7j1FfNT9CPfQGAonNkH3/YxXyylcNr+Ayx7w7x999tQ41bzaNbDX4KbB2z9Fla+Y21GyZphwIIXIGYFePpBj1lQMsg8qnzL8+YyPw2Bg39Ym9PF5PjE17Bhw/jkk0+YPn06O3bs4MknnyQpKYl+/foB0Lt370wDXJ988knOnDnDkCFD2L17N7/88gtvvfUWgwYNct6zkKsEHFkOQErYreb5zQc+hqBGkHTSHIyVct7agHK1iwnmuILk0xDcBO7/6O9z0/4VoPssc0R/zEqYP1yfrgujY5vgu8fN260eh5YD/v5e9fZmcQLw23/gr3kFnU6yY93HsP5zwAb/+hQqNvr7ex1egvr3mR8WZvU0PzyIU+S4GOnWrRvvvPMOo0aNomnTpmzatImFCxdmDGo9dOgQx48fz1g+NDSURYsWERUVRePGjXnmmWcYMmTINS8DFudITDhLrZS/AKgUfo/5oLc/dJ8JfhUg7i+YOxAcdgtTSib2dPhffzi5E/wrXio8/nEpaMWG5osjNoj+AtZOtSKpZCXhuFlMpl+AGrdD5FtXL9OiP0Q8ad7+/gk4uqFgM8r17VkCCy+9N93xGtS9O/P33dyg61QIbgoXzpi/74vxBR7TFeW4z4gV1Gck
ZzYtnkHT1U9yxFaRyqN3Zf7mkfUw7W7zlE3rwRD5pjUhJbMFI2DtFPDwhX7zoVLzrJf94wP49RWwuUGPOVDrjoLLKdeWmgzTOsHxTVC+Lgz4NeveMA47zOgGexdDyWDzCpuAkAKNK9cQtxM+uwNSEqDpo+YYkaz6MyUcg09ug/PHoWZH6D4b3DX9xLVk9/1b1ye5oJRdiwE4WrbN1d+s3AK6fmjeXjMJNnxZgMnkmtZ/bhYiAPdPuX4hAmYR2awXGA74th/E7cj/jJI1hwPmPWEWIr5lzKNa12tS5+YOD35uFi3nLx1NSU0usLhyDUmnzcHhKQlQtS3c83/XbxQZEGIeafbwhb1L4NeXCy6ri1Ix4oIqn14NgHfdO6+9QKMHof2L5u2fn4UDqwoomVxl/wqYf2lQ3K0vQ4P7b7yOzQad34WqN0PqefNTdtKp/M0pWVs+1hy06uZpDjQuU+3G6/gEQI/ZUKIsHN8M3z9uFjVS8NJTYPajcO4glA6Dh78Cj2xcXBHSDO6/dKp07VSI+ixfY7o6FSMu5sjebVQyYkk13KkV0SnrBduPMN/4HOnmP+LpfQUXUkyn9pq9Qxzp0Oihv0fqZ4eHF3T7ymyEdu6gOZguPeXG64lzbfkWVv7XvN3lPah6jaORWSkdBt2+AXcv2PEj/KZTpgXOMMwPZIf+AO8A83SLX9nsr9+gK9z2inl7/vOwf3l+pCwWVIy4mKPrfwJgj3dD/EqWynpBNzfoOgVCmpvNfWY+AhfOFUhG4YqGSvFQqQXce53z01kpUcYcM+IdCIf/NC83LPxDwFzH4XXww6WrAtsOgWY9c76Nqq3NIgbg93dg82zn5ZMbW/0ebPrGHH/10DSz5XtOtRsOjR4Gww5zesOpPc7PWQyoGHExPgd/A+B85Wy0Dff0Nc97lgyBU7vh277mVR2Sv+xp5ovWmX1mQ6VHZuS+I2f52uaLqM0dNs80GzJJ/jt3yOzZY0+BOnfD7dfuJp0tTXtA26Hm7R/VmLDA7PwFlowxb981zhyImhs2G9z7wRWNCbupMWEuqBhxISkXk6mVvAmA8s06Z2+lkhXNpj6eJWD/b39f1ib5wzDMw7kxKy81VJptNlTKi5q3Q6fx5u0lr8GOn/OeU7KWch5mdjd79gQ1ggc+MQel5sXto6HuPebsvmpMmP+Ob4G5jwEGtBgArf6dt+2pMWGeqRhxIXuiFlPClsIpSlG9QavsrxjcxGyKBhD1Caz7JH8CijnQLXoaYIMHP3PeLK6tHoOWl15cv3vMHBQpzuewm29isdvMnj3dZzpnFlc3N7PJXcXLjQm7qTFhfjkfaxaTaUlQvYNZyOf0FOm1qDFhnqgYcSGJfy0CICbwJmw5nVWyXpe/DzUveBH2Lr3+8pJzexbDopfM23e8DnWuM8A4N+4aBzVug7Rk88X2/Annbl9gyWjYvQDcvc1CpJQTJ/D09jffzPyDIG47/G+AGhM6W9oFmNUdEo5A2Vrw0Bfg7um87f+zMeGfU5y3bRenYsSFBMWZl/Taaufy3OfNz0KT7uZArG/7wcldN15Hsiduh/kzNRxmQ6U2Tzt/H+4e8OA0KFcbEo5e+vR3wfn7Ka42fGU2nAOzV0/lFs7fR2BleGQmePjAnkWweJTz91FcGYY54PhotDlpYY/Z4Fva+fup0wnuvDQ30a8vw+5fnb8PF6RixEWcPHaAao4DOAwbNSO65G4jNps5sj/0JkjRQCynSTpl/ixTz2evoVJe+JYyP137loZjG2DeUzpU7AwHVpmXgILZo6fRg/m3r8rhakyYH1b8F7bNNScr7PY1lK2Rf/u6sjHh//pD7Pb825eLUDHiImLW/gjAXs9alCpXMfcb8vA2B2KVqgJnY8weJOmpTkpZDOW2oVJelK1hvti6ecJf38Hycfm7P1d3Zr/5O3Skmb152hfAIO+G/4IOlyYc/flZiPk9//fpyrZ9B8svzRXU+V2o1i5/9/fPxoQzu0HiyfzdZxGnYsRFuO8zx3icDr4l7xvzK2c2//EqCQdXwy/P6tN1bhgG/DQUDq0xGyr1mJOzhkp5EXYz3POueXvFONj6v4LZr6u5fKnmhbNmT56uU/6eSTm/tX/RLEoc6WZzPDUmzJ2j0TDv0uSErQdDeJ+C2W+mxoSHLn2wU2PCrKgYcQH29HRqJkYBULrRXc7ZaFB9c/4Mmxts/No8XCw5s3oibJ7xd0Ol8nUKdv/Ne5svvmCeKz8SXbD7L+rs6WbvnVO7zV483WeavXkKis0G902GSuFqTJhb8UdhZg9Ivwi1Is2B4wVJjQmzTcWIC9izaQWBJJGAHzWbZaPZWXbVvhPuvNSi+tdXYdcC523b1e342ez5AXDX+Nw3VMqrO16H2neZL8YzH4H4I9bkKIoWjYR9y8wePD1mmT15Cpqnr9kUL6CSGhPmVGqS+TefeAIq1DevcslrP5jcKF8bHv7i78aEq/6v4DMUASpGXMDZLQsB2Osfjoenk8cj3PQkhPcFDJg7EE5sc+72XdHxLWavDwxoORAi8thQKS/c3M0X4QoNICkOZjwCKYnW5Skq1n0C6y713nngY7MXj1VKVjQHJWc0JnzRuixFhcMB3/0bTmyBEuUuzaSc9fT1+a7GbX83Jlz6Guz4yboshZSKERdQ5thKANKr3e78jdtscPc7UO0WSE289Ekjzvn7cRXnT5g/o7Rks6HSXYVg8Kh3SfOTvV95iN1qvkhrhtis7Vtm9toBuH2U2YPHasGNzU6v2CDqUzUmvJFlb8DOn81JCB/5BkpXtTrRFY0JMf8H1ZgwExUjRVz86Vhqppn9QKrm9pLeG3H3hIemQ5kaEH/YnCE27WL+7KsoS7tgtvJOOHqpodJ05zZUyotSVczD/e7esOsX89OZXO3kbpjT1+y106Q73DzM6kR/q3cPdFRjwhvaNBNWXRq8fe8kqHKTtXmupMaEWVIxUsTtXfsz7jaDA25VCKqcj9fNXx6I5RMIR9aZE3ppINbfDMPs6XE02uzx0WO22fOjMAltBfddGoi8eiJs/MbSOIVO8hlzJuWUeLPXTpf38q8fTG61HQpNelxqTNhXjQn/6eAa+OkZ83a756BJN2vz/JMaE2ZJxUgRZ9+9BIAT5dvm/87K1YSHvzQHYm391pzyXEwrxps9Pdw8zF4i+dlQKS8aPwy3PG/e/mmI+eItZi+d2b3M3jqlqpiH9j28rU51NZsNukyEKq0hJcEsntSY0HT2IMzuaU42WK8L3PqK1Ymu7arGhE/qtCkqRoo0w+Eg7NyfAPg1iCyYnVbvAJ0vFSHL/gN/zSuY/RZm2+bC8rHm7YJoqJRXHV6C+veZTbxm94QzMVYnspZhwPzn4OAqs7dO99lmr53CysPbbGpXqiqcPaDGhAAXEy51jD5tDja+/6OC6weTG5kaE35vfpgp5grxb0tu5MCOKCpwhguGF7Va3llwO27RHyKeMG9//wQc3VBw+y5sjkSbp2egYBsq5YWbG3SdCsFNzRfvmY+Yzb2KqzWTzZbrNjezt05QfasT3ZhfOfNUoHeAGhM67DB3AJz
cAf6Xrjzy8rM61Y2pMWEmKkaKsNiN8wHY7dsEH98C/ue7802zd0b65UGbxwp2/4VB/BFzBtD0i2Yvj4JuqJQXXiXMJl4lg+HkTnP+jOLYv2LXQvj10uH8O980e+sUFRXqmeMPLjcmvDyJX3Hz6yuw51fw8DX/pgNCrE6UfVc2Jpz3FBxZb20eC6kYKcL8Dy8H4ELVWwt+5+4e5qfI8nXh/HHz03VqcsHnsErK5cucY61tqJQXAZe6inr4wt4lf78pFxexf5mfqDHMXjo3PWl1opyr1REiL50iXDyq+DUmXD8N/rw0qeD9U6BSc2vz5MblxoT2FHNAazFtTKhipIhKToyn9kWzAVlIi3usCeETaB4SLVHWvGb++8eLx0Ash8N8rie2/t1Qybuk1alyJ6QZ3D/VvL12Cqz/3No8BSXxpNkALjURwtqZvXQK25Uz2RXxOIT3o9g1JoxZCfOHm7dvfdmcxLAoutyYMKhhsW5MqGKkiNqzbgFetnSO2SoQWqORdUHKVPt7INaOH+G3N63LUlCWvX5FQ6UZhaOhUl406Aq3XToq8stw2L/cyjT5L+2ieWox/pDZO+fhLwtPP5jcsNng7rehWvvi05jw1F7z6idHOjR66O8rxIoq75LmUcpi3JhQxUgRdXH7rwAcLtMGm9Wjxqu2gXvfN2///g5smWNtnvy0acbfc0vcOwmqRFibx1naDYdGD5v9K+b0Nl/sXZFhmH0ojqwzj+z1mG320Cnq3D3h4elQtualxoQ9XLcx4YWzMLMbXDwHlVqY/4dF9ajWla5qTDjG6kQFSsVIERVy2uwP4VXnDouTXNK0h9mQCeCHwXB4naVx8sXBNfBjIW6olBc2G9z7AVRuZV5Z46r9K36fAFtmm71yHpoO5WpZnch5fEublyX7lIIjUa7ZmNCeBnP6wOm9EFDZfPP29LE6lfNkakz4XrFqTKhipAg6un8HocYx0gx3akbcbXWcv90+Gup0NgdizeoB5w5Znch5zh4we3I40qDevYW3oVJeePqYzb4CQ+HMPvi2j/ni7yq2/2DOWQLmaY0aFgz8zm+XGxO6eZiNCVe6UGNCw4AFL0DMCvD0M49qlQyyOpXzXdWY8A9r8xQQFSNF0JH15oyPe7zrUzKwEB1idnMzZzgNagRJJ80mRCnnrU6VdxcTzEFlGQ2Vphbuhkp54V/BfJH38v97gKArfLo+thG+e9y8HfEEtBxgbZ78VL29OSAX4Lf/mE21XMHajy4NsLbBg59BxYZWJ8o/VzYmnFU8GhO66Cuqa/M68BsA8ZVusTjJNXj7mzPE+gdB3HZzdL/DbnWq3LOnmz04ilpDpbwIagD/+gywQfQXsHaq1YnyJuG4eclk+gWzN86dxWCQdYt+cNOlZnzfP1n0GxPuWQKLRpq373gd6nSyNk9+u7Ix4YUzxaIxoYqRIiY15SK1k8wXlvJNO1ucJguBleGRmeDhA7sXmv0PiqrFr8LexUWzoVJe1LkL7rx0SmPRS7BnsbV5cis12XwhP3/c7Inz4Odmj5zi4M7/QM07in5jwrid8L9+YDig6aPQ5mmrExWMYtaYUMVIEbNn/VL8bBc5TSDVGxaiqbH/qXI43DfZvL1mktluu6jJ1FBpatFsqJQXrQdDs17mm8C3/SB2u9WJcsbhgHlPwPFNZi+c7rPMK2iKCzf3S40J613RmDDJ6lQ5k3Tq0kzKCVC1Ldzzf65x5Ux2XdWY8GWrE+UbFSNFTMK2hQDsD4zAzb2Qd/xs9CC0H2He/vlZiPnd2jw5sX/FFQ2VXjF7cRQ3Nps58V/VmyH1vHk5ZdIpq1Nl3/Kx5qBVN0+zF06ZalYnKng+AeZp04zGhE8Unf4V6SnmJIDnDkLpMHM2bA8vq1MVvEyNCadC1GfW5sknKkaKmApxqwCw1bjd4iTZ1GEENHjAbE40pxec3md1ohs7tdfMmtFQabjViazj4QXdvoLS1cyro2b1NN8kCrst38LK/5q3u7xn9sIprkqHQbdvzCZ9RaUxoWGYH2AOrTEnA+wxB/zKWp3KOlc2Jpz/POz7zdI4+UHFSBFy6sQhatj34zBsVL+pi9Vxssdmg64fQkjzS82KHoEL56xOlbULZ83DwhfjoXJL12molBclyphvBt6BcPhP83LDwnyFzeF18MMg83bbIdCsp7V5CoOqraHLFY0JN8+2Ns+NrH4PNn1jTgL40DQoX8fqRNa7sjHht33g1B6rEzmVipEiJOZP85LefR41KFOhksVpcsDz8uDPSnBqN3zbt3AOxLKnmd1Hz+wze224WkOlvChfGx7+wmwWtnnm311oC5tzh8zBmvYUs+fN7WOsTlR4NO0ONz9r3v5xMBxaa22erOz4GZaMMW/fNd68Akqu0Ziwm0s1JlQxUoTY9i0F4FTFdhYnyYWSFc2CxLME7P8NFo6wOlFmhmEe/oxZafbY6D7L7Lkhf6txG3Qab95e+hrs+MnaPP+Uct7sB5N00ux188DHrtsPJrduGwV17wF7auFsTHh8izkvCwa0HAgR/7Y6UeHyz8aEc3q7TGNC/acWEfb0dGqcN1usBzYqotfYBzeBBz4xb0d9Aus+sTbPldZ+BNHTAJs5g6YrN1TKi1aPQatLbxDf/dscFFkYOOww9zGI+wv8KpiDNr39rU5V+FxuTFixESSfKlyNCc/Hmv1g0pKgegfzqIhc7crGhAd+d5nGhCpGioh9W1ZTmvOcN3yp1byD1XFyr949Ztt4gAUvwt6l1uYBs4dGcWqolFeRY82jJGnJ5pvH+RNWJ4Ilo2H3AnOSse4zzV43cm1efuYcNpcbE/5vgPWNCdMuwKzukHAEytYy5w0qLv1gcuOfjQn/nGJ1ojxTMVJEnN48H4A9/i3w9PK2OE0e3fwsNOl+aSBWPzi5y7oscTvMDIYDmhWjhkp54e4BD06DcrUh4eilT7MXrMuz4Sv44wPzdtcPoXIL67IUFYGVLvWv8IE9i6xtTGgY5oDjo9HmZH89ZoNvKevyFBVXNib89WXY/au1efJIxUgRUerYSgDSwlxgci+bzbzcMvQmSLFwIFbSpcPUqefNhkqdi1lDpbzwLXXpTaM0HNsA856y5lDxgVXmJaBg9rRp9GDBZyiqKoVD10ufqK1sTLjiv7Btrjm538NfQdka1uQoiq5sTPi//kWvMeEVVIwUAfFnT1ErdScAVVrdY3EaJ/HwNgdilaoCZ2PM5kbpqQW3fzVUyrsy1c1mYm6e8Nd3sHxcwe7/zH7zd+hIgwb3Q/sXC3b/rqDhA9Dh0ilKKxoTbvsOlr9l3u78LlQrgoPzrXStxoSJJ61OlSsqRoqAfWt/xsPm4KBbZYKrutD19n7lzP4VXiXh4Gr45dmC+XRtGPDT0EsNlQLVUCkvwm42W3QDrBgHW/9XMPu9cM48qnXhrNnDpusUXTmTW+1fhIb/KvjGhEeiYd6T5u3WgyG8T8Hs19X8szHh7EeLRmPCf9B/bxGQvsucpOx4ubYWJ8kHFeqZTY1sbrDxa/NwcX5bPRE2zzB7ZqihUt417/X3WJt5T8GR9fm7P3u6OX
[... remainder of base64-encoded PNG data for the metrics plot (default learning policy vs. random selection policy) elided ...]",
-      "text/plain": [
-       "<Figure size 640x480 with 1 Axes>"
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from matplotlib import pyplot as plt\n", - "chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", - "random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", - "plt.legend()\n", - "\n", - "print(f\"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}\")\n", - "print(f\"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "There is a bit of randomness involved in the rl_chain's selection since the chain explores the selection space in order to learn the world as best as it can (see details of default exploration algorithm used [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB)), but overall, default chain policy should be doing better than random as it learns" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Advanced options\n", - "\n", - "The RL chain is highly configurable in order to be able to adjust to various selection scenarios. If you want to learn more about the ML library that powers it please take a look at tutorials [here](https://vowpalwabbit.org/)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "| Section | Description | Example / Usage |\n", - "|---------|-------------|-----------------|\n", - "| [**Change Chain Logging Level**](#change-chain-logging-level) | Change the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", - "| [**Featurization**](#featurization) | Adjusts the input to the RL chain. Can set auto-embeddings ON for more complex embeddings. | `chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])` |\n", - "| [**Learned Policy to Learn Asynchronously**](#learned-policy-to-learn-asynchronously) | Score asynchronously if user input is needed for scoring. | `chain.update_with_delayed_score(score=, chain_response=response)` |\n", - "| [**Store Progress of Learned Policy**](#store-progress-of-learned-policy) | Option to store the progress of the variable injection learned policy. | `chain.save_progress()` |\n", - "| [**Stop Learning of Learned Policy**](#stop-learning-of-learned-policy) | Toggle the RL chain's learned policy updates ON/OFF. | `chain.deactivate_selection_scorer()` |\n", - "| [**Set a Different Policy**](#set-a-different-policy) | Choose between different policies: default, random, or custom. | Custom policy creation at chain creation time. |\n", - "| [**Different Exploration Algorithms and Options for Default Learned Policy**](#different-exploration-algorithms-and-options-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. | `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", - "| [**Learn Policy's Data Logs**](#learned-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", - "| [**Other Advanced Featurization Options**](#other-advanced-featurization-options) | Specify advanced featurization options for the RL chain. 
-    "\n",
-    "#### custom featurization\n",
-    "\n",
-    "A final option is to define and set a custom featurization/embedder class that returns a valid input for the learned policy.\n",
-    "\n",
-    "### learned policy to learn asynchronously\n",
-    "\n",
-    "If, in order to score the result, you need input from the user (e.g. my application showed Tom the selected meal and Tom clicked on it, but Anna did not), then the scoring can be done asynchronously. The way to do that is:\n",
-    "\n",
-    "- set `selection_scorer=None` on chain creation OR call `chain.deactivate_selection_scorer()`\n",
-    "- call the chain for a specific input\n",
-    "- keep the chain's response (`response = chain.run([...])`)\n",
-    "- once you have determined the score of the response/chain selection, call the chain with it: `chain.update_with_delayed_score(score=<score>, chain_response=response)` (a full sketch follows this list)\n",
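-    "\n",
-    "Put together, a minimal sketch of the delayed-scoring flow (assuming the score only becomes known later, e.g. from a user click) could look like:\n",
-    "\n",
-    "```\n",
-    "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, selection_scorer=None)\n",
-    "\n",
-    "response = chain.run(\n",
-    "    meal = rl_chain.ToSelectFrom(meals),\n",
-    "    user = rl_chain.BasedOn(\"Tom\"),\n",
-    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
-    "    text_to_personalize = \"This is our specialty dish this week!\",\n",
-    ")\n",
-    "\n",
-    "# ... later, once the user's feedback has been translated into a score ...\n",
-    "chain.update_with_delayed_score(score=1.0, chain_response=response)\n",
-    "```\n",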
-    "\n",
-    "### store progress of learned policy\n",
-    "\n",
-    "Since the variable injection learned policy evolves over time, there is the option to store its progress and continue learning. This can be done by calling:\n",
-    "\n",
-    "`chain.save_progress()`\n",
-    "\n",
-    "which will store the rl chain's learned policy in a file called `latest.vw`. It will also store it in a file with a timestamp. That way, if `save_progress()` is called more than once, multiple checkpoints will be created, but the latest one will always be in `latest.vw`.\n",
-    "\n",
-    "Next time the chain is loaded, it will look for a file called `latest.vw`, and if the file exists it will be loaded into the chain and the learning will continue from there.\n",
-    "\n",
-    "By default the rl chain model checkpoints will be stored in the current directory, but you can specify the save/load location at chain creation time:\n",
-    "\n",
-    "`chain = rl_chain.PickBest.from_llm(model_save_dir=<path>, [...])`\n",
-    "\n",
-    "### stop learning of learned policy\n",
-    "\n",
-    "If you want the rl chain's learned policy to stop updating you can turn it off/on:\n",
-    "\n",
-    "`chain.deactivate_selection_scorer()` and `chain.activate_selection_scorer()`\n",
-    "\n",
-    "### set a different policy\n",
-    "\n",
-    "Two policies are currently built in, and a custom one can also be supplied:\n",
-    "\n",
-    "- default policy: `VwPolicy`, which learns a [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) [Contextual Bandit](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms) model\n",
-    "\n",
-    "- random policy: `RandomPolicy`, which doesn't learn anything and just selects a value randomly. This policy can be used to compare other policies against a random baseline (see the example below).\n",
-    "\n",
-    "- custom policies: a custom policy could be created and set at chain creation time\n",
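-    "\n",
-    "For example, the built-in random policy, the one used for the `random_chain` comparison earlier in this notebook, can be set when the chain is created:\n",
-    "\n",
-    "```\n",
-    "chain = rl_chain.PickBest.from_llm(\n",
-    "    llm=llm,\n",
-    "    prompt=PROMPT,\n",
-    "    policy=rl_chain.PickBestRandomPolicy,\n",
-    ")\n",
-    "```\n",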
\"meal 1 description\"},\n", - " {\"name\":[\"meal 2\", \"complex name\"], \"description\" : \"meal 2 description\"}\n", - " ])\n", - " ```\n", - "\n", - "`BasedOn` can also take a list of strings:\n", - "```\n", - "user = rl_chain.BasedOn([\"Tom Joe\", \"age:32\", \"state of california\"])\n", - "```\n", - "\n", - "there is no dictionary provided since multiple variables can be supplied wrapped in `BasedOn`\n", - "\n", - "Storing the data logs into a file allows the examination of what different inputs do to the data format.\n", - "\n", - "### More info on Auto or Custom SelectionScorer\n", - "\n", - "It is very important to get the selection scorer right since the policy uses it to learn. It determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n", - "\n", - "The general advice is to keep the score between [0, 1], 0 being the worst selection, 1 being the best selection from the available `ToSelectFrom` variables, based on the `BasedOn` variables, but should be adjusted if the need arises.\n", - "\n", - "In the examples provided above, the AutoSelectionScorer is set mostly to get users started but in real world scenarios it will most likely not be an adequate scorer function.\n", - "\n", - "The example also provided the option to change part of the scoring prompt template that the AutoSelectionScorer used to determine whether a selection was good or not:\n", - "\n", - "```\n", - "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", - ")\n", - "\n", - "```\n", - "\n", - "Internally the AutoSelectionScorer adjusted the scoring prompt to make sure that the llm scoring retured a single float.\n", - "\n", - "However, if needed, a FULL scoring prompt can also be provided:\n" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest] Entering Chain run with input:\n", - "\u001b[0m[inputs]\n", - "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] Entering Chain run with input:\n", - "\u001b[0m[inputs]\n", - "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] Entering LLM run with input:\n", - "\u001b[0m{\n", - " \"prompts\": [\n", - " \"Here is the description of a meal: \\\"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\\\".\\n\\nEmbed the meal into the given text: \\\"This is the weeks specialty dish, our master chefs believe you will love it!\\\".\\n\\nPrepend a personalized message including the user's name Tom and their preference ['Vegetarian', 'regular dairy is ok'].\\n\\nMake it sound good.\"\n", - " ]\n", - "}\n", - "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] [1.63s] Exiting LLM run with output:\n", - "\u001b[0m{\n", - " \"generations\": [\n", - " [\n", - " {\n", - " \"text\": \"\\nHey Tom, we have a special treat this week! Our master chefs have created a Mexican-Greek fusion dish of Beef Enchiladas with Feta cheese - perfect for those who enjoy vegetarian options and can enjoy regular dairy. 
We know you're going to love it!\",\n", - " \"generation_info\": {\n", - " \"finish_reason\": \"stop\",\n", - " \"logprobs\": null\n", - " }\n", - " }\n", - " ]\n", - " ],\n", - " \"llm_output\": {\n", - " \"token_usage\": {\n", - " \"prompt_tokens\": 89,\n", - " \"total_tokens\": 145,\n", - " \"completion_tokens\": 56\n", - " },\n", - " \"model_name\": \"text-davinci-003\"\n", - " },\n", - " \"run\": null\n", - "}\n", - "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] [1.63s] Exiting Chain run with output:\n", - "\u001b[0m{\n", - " \"text\": \"\\nHey Tom, we have a special treat this week! Our master chefs have created a Mexican-Greek fusion dish of Beef Enchiladas with Feta cheese - perfect for those who enjoy vegetarian options and can enjoy regular dairy. We know you're going to love it!\"\n", - "}\n", - "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:LLMChain] Entering Chain run with input:\n", - "\u001b[0m[inputs]\n", - "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:LLMChain > 2:llm:OpenAI] Entering LLM run with input:\n", - "\u001b[0m{\n", - " \"prompts\": [\n", - " \"Given ['Vegetarian', 'regular dairy is ok'] rank how good or bad this selection is ['Beef Enchiladas with Feta cheese. Mexican-Greek fusion', 'Chicken Flatbreads with red sauce. Italian-Mexican fusion', 'Veggie sweet potato quesadillas with vegan cheese', 'One-Pan Tortelonni bake with peppers and onions'], IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\n", - " ]\n", - "}\n", - "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:LLMChain > 2:llm:OpenAI] [487ms] Exiting LLM run with output:\n", - "\u001b[0m{\n", - " \"generations\": [\n", - " [\n", - " {\n", - " \"text\": \"\\n\\n0.5\",\n", - " \"generation_info\": {\n", - " \"finish_reason\": \"stop\",\n", - " \"logprobs\": null\n", - " }\n", - " }\n", - " ]\n", - " ],\n", - " \"llm_output\": {\n", - " \"token_usage\": {\n", - " \"prompt_tokens\": 104,\n", - " \"total_tokens\": 109,\n", - " \"completion_tokens\": 5\n", - " },\n", - " \"model_name\": \"text-davinci-003\"\n", - " },\n", - " \"run\": null\n", - "}\n", - "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:LLMChain] [488ms] Exiting Chain run with output:\n", - "\u001b[0m{\n", - " \"text\": \"\\n\\n0.5\"\n", - "}\n", - "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:PickBest] [2.13s] Exiting Chain run with output:\n", - "\u001b[0m[outputs]\n" - ] - }, - { - "data": { - "text/plain": [ - "{'response': \"Hey Tom, we have a special treat this week! Our master chefs have created a Mexican-Greek fusion dish of Beef Enchiladas with Feta cheese - perfect for those who enjoy vegetarian options and can enjoy regular dairy. 
We know you're going to love it!\",\n", - " 'selection_metadata': }" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langchain.prompts.prompt import PromptTemplate\n", - "import langchain\n", - "langchain.debug = True\n", - "\n", - "REWARD_PROMPT_TEMPLATE = \"\"\"\n", - "\n", - "Given {preference} rank how good or bad this selection is {meal}\n", - "\n", - "IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\n", - "\n", - "\"\"\"\n", - "\n", - "\n", - "REWARD_PROMPT = PromptTemplate(\n", - " input_variables=[\"preference\", \"meal\"],\n", - " template=REWARD_PROMPT_TEMPLATE,\n", - ")\n", - "\n", - "chain = rl_chain.PickBest.from_llm(\n", - " llm=llm,\n", - " prompt=PROMPT,\n", - " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n", - ")\n", - "\n", - "chain.run(\n", - " meal = rl_chain.ToSelectFrom(meals),\n", - " user = rl_chain.BasedOn(\"Tom\"),\n", - " preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n", - " text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.17" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/extras/use_cases/more/learned_prompt_optimization.ipynb b/docs/extras/use_cases/more/learned_prompt_optimization.ipynb new file mode 100644 index 0000000000..adebd66925 --- /dev/null +++ b/docs/extras/use_cases/more/learned_prompt_optimization.ipynb @@ -0,0 +1,834 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Learned Prompt Variable Injection via RL\n", + "\n", + "LLM prompts can be enhanced by injecting specific terms into template sentences. Selecting the right terms is crucial for obtaining high-quality responses. This notebook introduces automated prompt engineering through term injection using Reinforcement Learning with VowpalWabbit.\n", + "\n", + "The rl_chain (reinforcement learning chain) provides a way to automatically determine the best terms to inject without the need for fine-tuning the underlying foundational model.\n", + "\n", + "For illustration, consider the scenario of a meal delivery service. We use LangChain to ask customers, like Tom, about their dietary preferences and recommend suitable meals from our extensive menu. The rl_chain selects a meal based on user preferences, injects it into a prompt template, and forwards the prompt to an LLM. The LLM's response, which is a personalized recommendation, is then returned to the user.\n", + "\n", + "The example laid out below is a toy example to demonstrate the applicability of the concept. Advanced options and explanations are provided at the end." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Install necessary packages\n", + "# ! 
pip install langchain langchain-experimental matplotlib vowpal_wabbit_next sentence-transformers pandas"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# four meals defined, some vegetarian some not\n",
+    "\n",
+    "meals = [\n",
+    "    \"Beef Enchiladas with Feta cheese. Mexican-Greek fusion\",\n",
+    "    \"Chicken Flatbreads with red sauce. Italian-Mexican fusion\",\n",
+    "    \"Veggie sweet potato quesadillas with vegan cheese\",\n",
+    "    \"One-Pan Tortelonni bake with peppers and onions\",\n",
+    "]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# pick and configure the LLM of your choice\n",
+    "\n",
+    "from langchain.llms import OpenAI\n",
+    "llm = OpenAI(model=\"text-davinci-003\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Initialize the RL chain with provided defaults\n",
+    "\n",
+    "The prompt template which will be used to query the LLM needs to be defined.\n",
+    "It can be anything, but here `{meal}` is being used and is going to be replaced by one of the meals above; the RL chain will try to pick and inject the best meal.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.prompts import PromptTemplate\n",
+    "\n",
+    "# here I am using the variable meal which will be replaced by one of the meals above\n",
+    "# and some variables like user, preference, and text_to_personalize which I will provide at chain run time\n",
+    "\n",
+    "PROMPT_TEMPLATE = \"\"\"Here is the description of a meal: \"{meal}\".\n",
+    "\n",
+    "Embed the meal into the given text: \"{text_to_personalize}\".\n",
+    "\n",
+    "Prepend a personalized message including the user's name \"{user}\" \n",
+    "    and their preference \"{preference}\".\n",
+    "\n",
+    "Make it sound good.\n",
+    "\"\"\"\n",
+    "\n",
+    "PROMPT = PromptTemplate(\n",
+    "    input_variables=[\"meal\", \"text_to_personalize\", \"user\", \"preference\"], \n",
+    "    template=PROMPT_TEMPLATE\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, the RL chain's PickBest chain is being initialized. We must provide the LLM of choice and the defined prompt. As the name indicates, the chain's goal is to Pick the Best of the meals that will be provided, based on some criteria. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import langchain_experimental.rl_chain as rl_chain\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Once the chain is set up, I am going to call it with the meals I want it to select from, and some context based on which the chain will select a meal."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "response = chain.run(\n",
+    "    meal = rl_chain.ToSelectFrom(meals),\n",
+    "    user = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "    text_to_personalize = \"This is the week's specialty dish, our master chefs \\\n",
+    "    believe you will love it!\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Hey Tom! We've got a special treat for you this week - our master chefs have cooked up a delicious One-Pan Tortelonni Bake with peppers and onions, perfect for any Vegetarian who is ok with regular dairy! We know you'll love it!\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(response[\"response\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## What is the chain doing\n",
+    "\n",
+    "Here's a step-by-step breakdown of the RL chain's operations:\n",
+    "\n",
+    "1. Accept the list of meals.\n",
+    "2. Consider the user and their dietary preferences.\n",
+    "3. Based on this context, select an appropriate meal.\n",
+    "4. Automatically evaluate the appropriateness of the meal choice.\n",
+    "5. Inject the selected meal into the prompt and submit it to the LLM.\n",
+    "6. Return the LLM's response to the user.\n",
+    "\n",
+    "Technically, the chain achieves this by employing a contextual bandit reinforcement learning model, specifically utilizing the [VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) ML library.\n",
+    "\n",
+    "Initially, since the RL model is untrained, it might opt for random selections that don't necessarily align with a user's preferences. However, as it gains more exposure to the user's choices and feedback, it should start to make better selections (or quickly learn a good one and just pick that!).\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Hey Tom! We know you love vegetarian dishes and that regular dairy is ok, so this week's specialty dish is perfect for you! Our master chefs have created a delicious Chicken Flatbread with red sauce - a unique Italian-Mexican fusion that we know you'll love. Enjoy!\n",
+      "\n",
+      "Hey Tom, this week's specialty dish is a delicious Mexican-Greek fusion of Beef Enchiladas with Feta cheese to suit your preference of 'Vegetarian' with 'regular dairy is ok'. Our master chefs believe you will love it!\n",
+      "\n",
+      "Hey Tom! Our master chefs have cooked up something special this week - a Mexican-Greek fusion of Beef Enchiladas with Feta cheese - and we know you'll love it as a vegetarian-friendly option with regular dairy included. Enjoy!\n",
+      "\n",
+      "Hey Tom! We've got the perfect meal for you this week - our delicious veggie sweet potato quesadillas with vegan cheese, made with the freshest ingredients. Even if you usually opt for regular dairy, we think you'll love this vegetarian dish!\n",
+      "\n",
+      "Hey Tom! Our master chefs have outdone themselves this week with a special dish just for you - Chicken Flatbreads with red sauce. It's an Italian-Mexican fusion that's sure to tantalize your taste buds, and it's totally vegetarian friendly with regular dairy is ok. Enjoy!\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "for _ in range(5):\n",
+    "    try:\n",
+    "        response = chain.run(\n",
+    "            meal = rl_chain.ToSelectFrom(meals),\n",
+    "            user = rl_chain.BasedOn(\"Tom\"),\n",
+    "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "            text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    "        )\n",
+    "    except Exception as e:\n",
+    "        print(e)\n",
+    "    print(response[\"response\"])\n",
+    "    print()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## How is the chain learning\n",
+    "\n",
+    "It's important to note that while the RL model can make sophisticated selections, it doesn't inherently recognize concepts like \"vegetarian\" or understand that \"beef enchiladas\" aren't vegetarian-friendly. Instead, it leverages the LLM to ground its choices in common sense.\n",
+    "\n",
+    "The way the chain is learning that Tom prefers vegetarian meals is via an AutoSelectionScorer that is built into the chain. The scorer will call the LLM again and ask it to evaluate the selection (`ToSelectFrom`) using the information wrapped in (`BasedOn`).\n",
+    "\n",
+    "You can set `langchain.debug=True` if you want to see the details of the auto-scorer, but you can also define the scoring prompt yourself."
+   ]
+  },
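+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a minimal sketch of that first option, flipping LangChain's global debug flag prints every chain and LLM call, including the auto-scorer's scoring call, to stdout:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Turn on verbose tracing to watch the auto-scorer's LLM calls go by.\n",
+    "import langchain\n",
+    "\n",
+    "langchain.debug = True"
+   ]
+  },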
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you want to examine the score and other selection metadata, you can do so by examining the metadata object returned by the chain."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Hey Tom, this week's meal is something special! Our chefs have prepared a delicious One-Pan Tortelonni Bake with peppers and onions - vegetarian friendly and made with regular dairy, so you can enjoy it without worry. We know you'll love it!\n",
+      "selected index: 3, score: 0.5\n"
+     ]
+    }
+   ],
+   "source": [
+    "response = chain.run(\n",
+    "    meal = rl_chain.ToSelectFrom(meals),\n",
+    "    user = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "    text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    ")\n",
+    "print(response[\"response\"])\n",
+    "selection_metadata = response[\"selection_metadata\"]\n",
+    "print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In a more realistic scenario it is likely that you have a well-defined scoring function for what was selected. For example, you might be doing few-shot prompting and want to select prompt examples for a natural language to SQL translation task. In that case the scorer could be: did the SQL that was generated run in a SQL engine? In that case you want to plug in a scoring function (a sketch of that idea follows). In the runnable example after that, I will just check if the meal picked was vegetarian or not."
+   ]
+  },
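+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a rough sketch of the SQL idea (not used in the rest of this notebook): a scorer can try to execute the generated SQL and reward selections whose output actually runs. This assumes the LLM response is the SQL statement itself and uses an in-memory SQLite database; a real setup would first load the relevant schema."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import sqlite3\n",
+    "\n",
+    "class SQLValidityScorer(rl_chain.SelectionScorer):\n",
+    "    def score_response(\n",
+    "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
+    "        # Reward selections whose generated SQL parses and executes; penalize the rest.\n",
+    "        try:\n",
+    "            with sqlite3.connect(\":memory:\") as conn:\n",
+    "                conn.execute(llm_response)\n",
+    "            return 1.0\n",
+    "        except sqlite3.Error:\n",
+    "            return 0.0"
+   ]
+  },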
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class CustomSelectionScorer(rl_chain.SelectionScorer):\n",
+    "    def score_response(\n",
+    "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
+    "\n",
+    "        print(event.based_on)\n",
+    "        print(event.to_select_from)\n",
+    "\n",
+    "        # you can build a complex scoring function here\n",
+    "        # it is preferable that the score ranges between 0 and 1 but it is not enforced\n",
+    "\n",
+    "        selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n",
+    "        print(f\"selected meal: {selected_meal}\")\n",
+    "\n",
+    "        if \"Tom\" in event.based_on[\"user\"]:\n",
+    "            if \"Vegetarian\" in event.based_on[\"preference\"]:\n",
+    "                if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n",
+    "                    return 0.0\n",
+    "                else:\n",
+    "                    return 1.0\n",
+    "            else:\n",
+    "                if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n",
+    "                    return 1.0\n",
+    "                else:\n",
+    "                    return 0.0\n",
+    "        else:\n",
+    "            raise NotImplementedError(\"I don't know how to score this user\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=CustomSelectionScorer(),\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{'user': ['Tom'], 'preference': ['Vegetarian', 'regular dairy is ok']}\n",
+      "{'meal': ['Beef Enchiladas with Feta cheese. Mexican-Greek fusion', 'Chicken Flatbreads with red sauce. Italian-Mexican fusion', 'Veggie sweet potato quesadillas with vegan cheese', 'One-Pan Tortelonni bake with peppers and onions']}\n",
+      "selected meal: Veggie sweet potato quesadillas with vegan cheese\n"
+     ]
+    }
+   ],
+   "source": [
+    "response = chain.run(\n",
+    "    meal = rl_chain.ToSelectFrom(meals),\n",
+    "    user = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "    text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    ")"
+   ]
+  },
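+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The score assigned by the custom scorer is recorded on the selection metadata in the same way as the auto-scorer's score above; a quick sketch reusing the `response` from the previous cell:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "selection_metadata = response[\"selection_metadata\"]\n",
+    "print(f\"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}\")"
+   ]
+  },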
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## How can I track the chain's progress\n",
+    "\n",
+    "You can track the chain's progress by using the metrics mechanism provided. I am going to expand the users to Tom and Anna, and extend the scoring function. I am going to initialize two chains, one with the default learning policy and one with a built-in random policy (i.e. selects a meal randomly), and plot their scoring progress."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class CustomSelectionScorer(rl_chain.SelectionScorer):\n",
+    "    def score_preference(self, preference, selected_meal):\n",
+    "        if \"Vegetarian\" in preference:\n",
+    "            if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n",
+    "                return 0.0\n",
+    "            else:\n",
+    "                return 1.0\n",
+    "        else:\n",
+    "            if \"Chicken\" in selected_meal or \"Beef\" in selected_meal:\n",
+    "                return 1.0\n",
+    "            else:\n",
+    "                return 0.0\n",
+    "    def score_response(\n",
+    "        self, inputs, llm_response: str, event: rl_chain.PickBestEvent) -> float:\n",
+    "\n",
+    "        selected_meal = event.to_select_from[\"meal\"][event.selected.index]\n",
+    "\n",
+    "        if \"Tom\" in event.based_on[\"user\"]:\n",
+    "            return self.score_preference(event.based_on[\"preference\"], selected_meal)\n",
+    "        elif \"Anna\" in event.based_on[\"user\"]:\n",
+    "            return self.score_preference(event.based_on[\"preference\"], selected_meal)\n",
+    "        else:\n",
+    "            raise NotImplementedError(\"I don't know how to score this user\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=CustomSelectionScorer(),\n",
+    "    metrics_step=5,\n",
+    "    metrics_window_size=5, # rolling window average\n",
+    ")\n",
+    "\n",
+    "random_chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=CustomSelectionScorer(),\n",
+    "    metrics_step=5,\n",
+    "    metrics_window_size=5, # rolling window average\n",
+    "    policy=rl_chain.PickBestRandomPolicy # set the random policy instead of default\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for _ in range(20):\n",
+    "    try:\n",
+    "        chain.run(\n",
+    "            meal = rl_chain.ToSelectFrom(meals),\n",
+    "            user = rl_chain.BasedOn(\"Tom\"),\n",
+    "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "            text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    "        )\n",
+    "        random_chain.run(\n",
+    "            meal = rl_chain.ToSelectFrom(meals),\n",
+    "            user = rl_chain.BasedOn(\"Tom\"),\n",
+    "            preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "            text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    "        )\n",
+    "\n",
+    "        chain.run(\n",
+    "            meal = rl_chain.ToSelectFrom(meals),\n",
+    "            user = rl_chain.BasedOn(\"Anna\"),\n",
+    "            preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n",
+    "            text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    "        )\n",
+    "        random_chain.run(\n",
+    "            meal = rl_chain.ToSelectFrom(meals),\n",
+    "            user = rl_chain.BasedOn(\"Anna\"),\n",
+    "            preference = rl_chain.BasedOn([\"Loves meat\", \"especially beef\"]),\n",
+    "            text_to_personalize = \"This is the week's specialty dish, our master chefs believe you will love it!\",\n",
+    "        )\n",
+    "    except Exception as e:\n",
+    "        print(e)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The RL chain converges on the fact that Anna prefers beef and Tom is vegetarian. The random chain picks at random, and so will send beef to vegetarians half the time."
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The final average score for the default policy, calculated over a rolling window, is: 1.0\n", + "The final average score for the random policy, calculated over a rolling window, is: 0.6\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiMAAAGdCAYAAADAAnMpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAACCzElEQVR4nO3dd3xTZfvH8U+S7g2UTgplbwq0lCWCglZFFCeyRcAFiuLER8XxKOhPeFwIisgSBETFAYKIsmR0QBmyZwt0UEr3Ts7vj9NGKqsj6Una6/169UWaJjnftDS5ep/7vm6doigKQgghhBAa0WsdQAghhBB1mxQjQgghhNCUFCNCCCGE0JQUI0IIIYTQlBQjQgghhNCUFCNCCCGE0JQUI0IIIYTQlBQjQgghhNCUg9YBKsJkMnHu3Dk8PT3R6XRaxxFCCCFEBSiKQnZ2NkFBQej1Vx//sIti5Ny5c4SEhGgdQwghhBBVkJiYSKNGja76dbsoRjw9PQH1yXh5eWmcRgghhBAVkZWVRUhIiPl9/GrsohgpOzXj5eUlxYgQQghhZ643xUImsAohhBBCU1KMCCGEEEJTUowIIYQQQlNSjAghhBBCU1KMCCGEEEJTUowIIYQQQlNSjAghhBBCU1KMCCGEEEJTUowIIYQQQlOVLkY2b97MoEGDCAoKQqfTsWrVquveZ+PGjXTt2hVnZ2datGjBggULqhBVCCGEELVRpYuR3NxcwsLCmDVrVoVuf/LkSQYOHMhNN91EfHw8zzzzDOPGjWPdunWVDiuEEEKI2qfSe9Pcfvvt3H777RW+/Zw5c2jatCkzZswAoG3btmzdupX//e9/REVFVfbwQgghhKhlrL5R3vbt2xkwYEC566KionjmmWeuep/CwkIKCwvNn2dlZVkrnrBzP+05x+6Ei1rHsCovF0ce69sMNye72NdS1DHnMvJZvTeJ+8IbUd/dSes4ogq+iztD9Ml03h7cAScHbaaSWv3VLTk5GX9//3LX+fv7k5WVRX5+Pq6urpfdZ9q0abz55pvWjibs3PHzOTz9zW6tY9SYZ29ppXUEIcq5mFvE0Lk7OH0hj5/3nmP5oz1xdTJoHUtUQsypdF7+fi/FRoXOjX0YGtlYkxw2+afWlClTmDx5svnzrKwsQkJCNEwkbNHi7acBaB/kRb/WDTVOYx3nMgr4YfdZvolOYOLNLXA0yAI4YRuKSkw8/nUcpy/kAbD3TCbPr9zDJw91Qa+/9nbxwjYkpufx2OI4io0Kt3cIYEiEdu+zVi9GAgICSElJKXddSkoKXl5eVxwVAXB2dsbZ2dna0YQdyy0s4bu4MwC8eFsb+raqncVIUYmJLUfTSM0uZN3fydzZKUjrSEKgKAqvrdrPzpPpeDg78ModbZn6035W702ieUMPJssons3LLihm7MIY0nOL6BDsxYwHwzQtIq3+Z1bPnj3ZsGFDuevWr19Pz549rX1oUYv9sPss2YUlNPV1p08LX63jWI2Tg55hkepfK4u2ndY4jRCqeVtPsjw2Eb0OPhnWhWHdG/POPR0B+HjDUX6MP6txQnEtJUYTT32zmyMpOfh7OfPlqG6az0mrdDGSk5NDfHw88fHxgLp0Nz4+noSEBEA9xTJq1Cjz7R9//HFOnDjBiy++yKFDh/jss89YsWIFzz77rGWegahzFEUxn6IZ0aNJrR8SHta9CQa9juhT6RxKlsncQlsbDqbwzpqDALw6sB03tfYD4MGIEB67sRkAL6zcW+snltuzd9ccYuPh87g46pk7KoIAbxetI1W+GImNjaVLly506dIFgMmTJ9OlSxdef/11AJKSksyFCUDTpk1ZvXo169evJywsjBkzZvDll1/Ksl5RZdEn0zmcko2ro4H7wxtpHcfqArxdiGqvTgJftF1GR4R2DiZl8fQ3u1EUGNa9MWN6h5b7+ou3tWFAW3+KSkyMXxTH2Yx8bYKKq1q6M4Gv/joJwMwHO9OpkY+2gUrpFEVRtA5xPVlZWXh7e5OZmYmXl5fWcYTGJizZxep9SQyNbMy0eztqHadGbD9+gaFzd+DqaGDHK/3xdnXUOpKoY85nFzJ41l+czcinV/MGLHwk8ooTqnMLS7hv9jYOJWfTNtCLlY/3xN3ZJtdK1DnbjqUx6qtoSkwKz93Siqf6t7T6MSv6/i1T84VdSckqYN3fyQCM6tlE4zQ1p0ez+rTy9yC/2GieuCtETSkoNvLY4ljOZuTT1Nedz4Z3verKLndnB+Y93A1fD2cOJmUxaVk8JpPN/81b6504n8MTS3ZRYlIY3DmIiTe30DpSOVKMCLuydGcCJSaFyND6tA2sO6NkOp2OkT1DAfh6x2l5cRc1RlEUXv5uL7sSMvB2dWTe6Ah83K7d3CzYx5UvRoXj5KDn94MpvLfuUA2lFVeSmVfMuIWxZOYX06WxD9Pv64ROZ1tz7aQYEXajqMTE0mh1PtLIOjQqUuaeLsF4ODtwIi2XrcfStI4j6ohZfx5jVfw5HPQ6Zg/vSrOGHhW6X9fG9fi/+zsB8PmmE3wbm2jNmOIqio0mnlgSx4m0XLVIHBmBi6PtNaaTYkTYjXV/J3M+u5CGns5EtQ/QOk6N83B24L6uwYBMZBU1Y82+JD747QgAb97dnl6VXEZ/d+dgni49HfDKD/uIPplu8Yzi6hRFYepPf7Pt+AXcnQx8OTqChp622cNLihFhN8qW8w6LbKzZ/glaKztV88ehFBLT87QNI2q1fWcymbwiHoAxvUMZ3r1qo5HPDGjFwI6BFBsVHlscS8IF+X9bUxZsO8XSnQnodPDRQ11s+tR23XxFF3bnYFIW0afScdDrGNZdm70TbEELPw96t2iASYElOxOufwchqiA5s4Bxi2IoKDbRr3VD/nNH2yo/ll6v44MHwujUyJuLecU8sjCGrIJiC6YVV/Ln4VTe/uUAAFNub8OAdv7XuYe2pBgRdqHstERU+wD8vbRv0KOlkT1CAVgek0BBsVHbMKLWyS8yMn5RLClZhbT08+DjoV1wqOaeSK5OBrW5lpcLx1JzmLh0NyVGk4USi387kpLNU0t3Y1LgwYhGjO/TTOtI1yXFiLB5mfnFrNqttpeuS8t5r2ZAWz+CvF24mFfML3uTtI4jahGTSeG5b+PZdzaT+u5OfPVwN7xcLNPTxt/LhS9HR+DqaGDzk
fP8d/VBizyuKO9CTiFjF8aQU1hC96b1+e/gjja3cuZKpBgRNm9l3Bnyi4209vcksml9reNozsGgZ3gPtShbvP2UtmFErfK/34+wZl8yTgY9n48MJ6S+m0Ufv0OwN/8bEgao8xm+3iETsS2psMTI41/HkZieT5MGbswZEW438+vsI6Wos0wmxfyCNbJnE7uo8GvCkG4hOBn07DmTSXxihtZxRC2wavdZPvnjGADv3tuRbqHWKfxv6xDIC1GtAZj6099sPSrL1C1BURRe+X4/Macu4uniwLzREdRzv3Y/GFsixYiwaVuOpXEyLRdPZwfu6RKsdRyb4evhzMBOgQAsktERUU1xpy/y4nd7AXi8b3Or7/n0ZL/m3NslGKNJ4cklcRw/n2PV49UFczad4LtdZzDodcwa1pUWfp5aR6oUKUaETSs7DXFfeCPZ3+Jfyhq//bI3ifTcIo3TCHt15mIejy2OpajExK3t/HmxdNTCmnQ6HdPu60h4k3pkFZQwdkEMGXnyf7iq1v2dzPulXW6nDmrHja0aapyo8qQYETYrMT2PDYdSgbrZcfV6uoT40DHYm6ISE8tjpLulqLycwhLGLYwlLaeIdoFe/G9IZ/T6mjkV6uxg4POR4QT7uHLqQh5PfL2LYllhU2l/n8vkmWXxKIo6wX9UaS8ieyPFiLBZX+88jaJAn5a+NK9gC+q6RN2vRi3Svt5xGqPsVyMqwWhSmPTNbg4lZ9PQ05kvR0fU+Oijr4cz8x6OwN3JwPYTF3j9x/3YwUbyNiM1q4BxC2PJLzbSp6Uvr9/ZTutIVSbFiLBJBcVGVpT+tT+yh4yKXM1dYUH4uDlyNiOfP0pHkYSoiPfWHmLDoVScHfTMHRVBkI+rJjnaBHjxybAu6HXwTXQiX/11SpMc9qag2Mj4xXEkZRbQvKE7nw7rWu1+MFqy3+SiVvtlbxIX84oJ9nGlf1vb7hyoJRdHA0MiQgCZyCoqbnlMAl9sPgHABw+E0TnER9M8N7fx55XSLq/vrD7An1JYX5OiKLywci97EjPwcXNk3uhueLtaph+MVqQYETap7I11eI/GGGroHLa9GtGjCTodbDmaxglZlSCuY8eJC/znh/0APDOgJYPCgjROpBp7Q1OGRoZgUuCpb3ZzODlb60g266MNR/l5T9lOyuGE+rprHanapBgRNic+MYO9ZzJxMujNf/WLqwup78ZNrf0AWCxNpMQ1nErL5fGv4ygxKdzZKZBJ/VtqHclMp9Px5l0d6NGsPjmFJTyyIIa0nEKtY9mcn/ec48PfjwLwzj0d6Nm8gcaJLEOKEWFzykZF7gwLpIGHbW53bWvK2uSvjD1DbmGJxmmELcrML2bswhgy8ooJC/HhgwfCbK6JoJODnjkjwglt4MbZjHweWxxHYYnsv1QmPjGD57/dA8D4Pk0Z0q32bBoqxYiwKRdyCvllj7rfir0uUdPCjS0bEtrAjezCElbFn9U6jrAxJUYTE5fu4vj5XAK9XZg7MhwXR4PWsa7Ix82JeQ93w9PFgbjTF5ny3T5ZYQOcy8hn/KJYCktM9G/jx8u3V30nZVskxYiwKctjEykymujUyFvzSXX2RK/XMcK8X81pefEW5bz1ywG2HE3D1VHdPdfPxne+bt7Qg9nDwzHodXy/+yyfbTyudSRN5Zb2gzmfXUibAE8+Gtql1s2lk2JE2AyjSWHJjgRARkWq4oHwEFwc9RxKzib6ZLrWcYSNWLT9FIu2n0angw8f6kyHYG+tI1XIDS19eeOu9gD837rDrN1fN3eoNpkUnl0ez4GkLHw9nPhydAQetbAbtRQjwmZsOJjC2Yx86rk5cmfpviui4rzdHBncWd2/Z5FMZBXAlqPnefPnAwC8GNWGqPYBGieqnJE9mvBwr1AAnl2+h/1nM7UNpIH/++0wvx1IKd1JOYJG9Sy7k7KtkGJE2IyylSAPdgux2fPZtq6sI+u6/cmkZBVonEZo6VhqDk8u2YXRpHBf10Y83reZ1pGq5NWBbbmxVUPyi42MWxhbp/5fr4w7w+zSU1Tv39+J8Cb1NE5kPVKMCJtw/HwOW46modPBiO7ScbWq2gd5E9GkHiUmhaU7E7SOIzRyMbeIsQtjyC4ooVtoPd69t4PNrZypKAeDnk+HdaGFnwfJWQWMXxRLflHtX2ETcyqdKd+rOylPvKkFg2v5ruVSjAibsHi7OirSv40fIfVr5zBkTSkbHfkmOkE2HquDikpMPP51HKcv5BFS35U5I8JxdrDvkUYvF0fmjY6gnpsje89k8vy3ezDV4r2YEtPzeGxxHMVGhTs6BjD5llZaR7I6KUaE5nILS/gu7gwgE1ct4fYOgfh6OJOaXci6v5O1jiNqkKIovLZqPztPpuPh7MC80d1qTa+eJg3cmTMiHEeDjtX7kvhww1GtI1lFdoHaDyY9t4iOwd7MeKDmdlLWkhQjQnM/7D5LdmEJTX3duaGFr9Zx7J6Tg55hkaX71WyTiax1yZdbTrI8NhG9Dj4Z1oVW/p5aR7Ko7s0a8M49HQH4eMNRfqxlPXVKjCae+mY3R1Jy8PdyZu6oCFyd7HtUq6KkGBGaUhTFfIpmRI8mdeIvgJowrHsTDHod0afSOZScpXUcUQN+P5DCu78eBODVge3MWwTUNg9GhPDYjepk3BdW7mV3wkWNE1nOu2sOsfHweVwc9Xw5qhsB3rbdD8aSpBgRmoo+mc7hlGxcHQ3cH95I6zi1RoC3C1Ht1d2OF22X0ZHa7mBSFpOW7UZRYFj3xozpHap1JKt68bY2DGjrT1GJifGL4jibka91pGpbujOBr/46CcDMBzvTsZF99IOxFClGhKbK3igHdwm2+y2wbc3IHqEA/LDrLJn5xdqGEVZzPruQcQtjyS0y0qt5A968q73drpypKINex0cPdaZtoBdpOaXP3473ZNp2LI3Xf1R3Un7+1lbc0bHu9VmSYkRoJiWrwDzBsmyjN2E5PZrVp5W/B/nFRvMEYVG7FBQbeWxxLGcz8mnq685nw7viaKgbL+vuzg58OToCXw/n0pGheIx2uMLmxPkc807KgzsHMeGmFlpH0kTd+F8rbNLSnQmUmBQiQ+vTNtBL6zi1jk6nY2Tp6qTFO07X6qWQdZGiKLz83V52JWTg7aouffVxc9I6Vo0K9nHli1HhODno+f1gCu+vO6R1pErJzCtm7MJYsgpK6NrYh+n3dar1o1pXI8WI0ERRiYml0WpTrpEyKmI193QJxsPZgZNpuWw9lqZ1HGFBs/48xqr4czjodcwe3pVmDT20jqSJro3r8X/3dwLg800n+DY2UeNEFVNsNPHEkjhOpuUS7OPK5yMj6nTnaSlGhCbW/Z3M+exCGno6291+GfbEw9nBPDFYJrLWHmv2JfHBb0cAePPu9vSq40vi7+4czNM3q6c3XvlhHztPXNA40bUpisLUn/5m2/ELuDsZ+HJ0BA09a0c/mKqSYkRoYtH2UwAMi2yMk4P8N7SmET3UkacNh1JITM/TOI2orr1nMpi8Ih6AMb1DGS7bJwDwzIBWDOwYSLFR4fGv40i4YLv/1xdsO8XSnQno
dPDRQ13kNDVSjAgNHEzKIubURRz0OoZ1b6x1nFqvhZ8HvVs0QFFgiexXY9eSM9W9WQqKTfRr3ZBXB7bTOpLN0Ot1fPBAGJ0aeXMxr5hHFsaQVWB7q8j+PJzK27+oOym/cntbBrTz1ziRbZBiRNS4stMFUe0D8PeqO019tFTWZn95TAIFxbV/k7HaKL/IyPhFsaRkFdLSz4NPhnbBIE0Cy3F1MjB3VAQBXi4cS81h4tLdlNjQ/kxHUrJ5auluTAoMiQhhXJ+mWkeyGVKMiBqVmV/Mqt1qC2dZzltz+rfxI8jbhYt5xfyyN0nrOKKSTCaFySvi2Xc2k/ruTnz1cDc8XaQvz5X4e7nw5egIXB0NbD5ynv+uPqh1JAAu5BQydmEMOYUldG9an7cH2+9OytYgxYioUSvjzpBfbKS1vyeRTetrHafOcDDoGV46d2Rx6XwdYT/+9/sRft2fjJNBz+cjw2Vn6+voEOzN/4aEAer8jMU7tJ28XVhi5PGv40hMz6dJAzfmjAiXuXL/It8NUWNMJoWvS18URvVqIn8V1LAh3UJwMujZcyaT+MQMreOIClq1+yyf/HEMgGn3dqRbqBTxFXFbh0BeiGoNwBs//c3Wo9osbVcUhVe+30/MqYt4uqg7Kddzr1v9YCpCihFRY7YcS+NkWi6ezg4M7hysdZw6x9fDmYGd1DbTi2R0xC7Enb7Ii9/tBeCJfs25T/ZvqpQn+zXn3i7BGE0KTy6J4/j5nBrPMGfTCb7bdQaDXsesYV1p4Vc3+8FcjxQjosaUnR64L7wR7s4O2oapo8oazP2yN4kLOYUapxHXcuZiHo8tjqWoxMSt7fx54dbWWkeyOzqdjmn3dSS8ST2yCkoYuyCGjLyiGjv+ur+TzV1hpw5qx42tGtbYse2NFCOiRiSm57HhUCogHVe11CXEh47B3hSVmFhuJ50q66KcwhLGLYwlLaeIdoFe/G9IZ/SycqZKnB0MfD4ynEb1XDl1IY8nvt5FcQ2ssPn7XCbPLItHUdTJ+mUr2sSVSTEiasTXO0+jKNCnpS/N62jbalug7lejFoNLdiTY5cZitZ3RpDDpm90cSs6moaczX46OkJHEavL1cGbe6G64OxnYfuICr/+4H0Wx3v/91KwCxi2MJb/YSJ+Wvrx+p/SDuR4pRoTVFRQbWRGj/hU+soeMimjtrrAgfNwcOZuRzx+lo1XCdkz/9SAbDqXi7KBn7qgIgnxctY5UK7QO8OSTYV3Q6+Cb6ETmbT1pleMUFBsZvziOpMwCmjd059NhXXGoIzspV4d8h4TV/bI3iYt5xQT7uNK/rXQb1JqLo4EhESGATGS1NctjEpi7RX2T/OCBMDqH+GgbqJa5uY0/r9zRFoB31xzkj0MpFn18RVF4/ts97EnMwMfNkXmju+HtKv1gKkKKEWF1ZW94w3s0lo6RNmJEjybodLDlaBonNFhhIC6348QF/vPDfgCeGdCSQWFBGieqncbe0JShkSGYFHj6m3gOJ2db7LE/2nCUX/Ym4aDXMWdEOKG+7hZ77NpOihFhVfGJGew9k4mTQW/+a1xoL6S+Gze39gPQvCGUgFNpuTz+dRwlJoVBYUFM6t9S60i1lk6n4627O9CjWX1yCkt4ZEEMaRZYWfbznnN8+PtRAN65pwM9mjWo9mPWJVKMCKtatO0UAHeGBdLAo25vkW1ryiayrow9Q25hicZp6q7M/GLGLowhI6+YsBAf/u/+TtIQ0MocDXp15KKBG2cz8nlscRyFJVXfsyk+MYPnv90DwPg+TRnSTTYArSwpRoTVXMgpNO+DIsvabM+NLRsS2sCN7MISVsWf1TpOnVRiNDFx6S6On88l0NuFuSPDcXE0aB2rTvBxc2Lew93wcnEg7vRFpny3r0orbM5l5DN+USyFJSb6t/Hj5dvbWiFt7SfFiLCa5bGJFBlNdGrkLRPxbJBer2OEeb+a01Zd6iiu7K1fDrDlaBqujga+HB2Bn+xiXaOaN/Tgs+HhGPQ6vt99ls82Hq/U/XNL+8Gczy6kTYAnH8lOylUmxYiwCqNJYcmOBEBGRWzZA+EhuDjqOZScTfTJdK3j1CmLtp9i0fbT6HTw4UOdaR/krXWkOumGlr68cVd7AP5v3WHW7q/YrtYmk8Kzy+M5kJSFr4cTX46OwEP6wVSZFCPCKjYcTOFsRj713By5s3Q/FGF7vN0czfsELZKJrDVm85HzvPnzAQBejGpDVPsAjRPVbSN7NOHhXqEAPLt8D/vPZl73Pv/322F+O5CCk4Oez0dG0Kie7KRcHVKMCKsoW6ExpFtjOQdu48omsq7bn0xKVoHGaWq/Y6nZTFi6C6NJ4b6ujXi8bzOtIwng1YFtubFVQ/KLjYxbGHvN34WVcWeYXXpK5/37OhHepF5Nxay1pBgRFnf8fA5bjqah08Hw7jKr3Na1D/Imokk9SkwKS3cmaB2nVruYW8TYhbFkF5TQLbQe797bQVbO2AgHg55Ph3WhhZ8HyVkFjF8US37R5StsYk6lM+V7dSflp25uweAusgO5JUgxIixu8XZ1VKR/Gz9C6svQpT0YVTpEvTQ6gaIS628iVhcVlZh47Os4Tl/II6S+K3NGhOPsIKOGtsTLxZF5oyOo5+bI3jOZPP/tHkyX7N+UmJ7HY4vjKDYq3NExgGcHtNIwbe1SpWJk1qxZhIaG4uLiQvfu3YmOjr7m7T/88ENat26Nq6srISEhPPvssxQUyHBwbZRbWMJ3cWcAmbhqT25rH4CvhzPnswtZ93ey1nFqHUVReHXVPqJPpuPp7MC80d2k746NatLAnc9HRuBo0LF6XxIfblAbmWUXqP1g0nOL6BjszYwHZCdlS6p0MbJ8+XImT57M1KlT2bVrF2FhYURFRZGaeuUNt5YuXcrLL7/M1KlTOXjwIPPmzWP58uW88sor1Q4vbM8Pu8+SXVhCU193bmjhq3UcUUFODnqGRaodcstGtoTlfLnlJCtiz6DXwcfDutDK31PrSOIaIpvW5917OgLw8Yaj/LD7DE99s5sjKTn4ezkzd1QErk4yqmVJlS5GZs6cyfjx4xkzZgzt2rVjzpw5uLm58dVXX13x9tu2baN3794MGzaM0NBQbr31VoYOHXrd0RRhfxRFMb+RjejRRP5qsDPDujfBoNcRfSqdg0lZWsepNX4/kMK7vx4E4NWB7biptA2/sG0PRITw2I3q5OJnl+9h4+HzuDjq+XJUNwK8pR+MpVWqGCkqKiIuLo4BAwb88wB6PQMGDGD79u1XvE+vXr2Ii4szFx8nTpxgzZo13HHHHVc9TmFhIVlZWeU+hO2LPpnO4ZRsXB0N3B/eSOs4opICvF2Iaq/uqrxIRkcs4mBSFpOW7UZRYFj3xozpHap1JFEJL97WhgGX7DQ+88HOdGwk/WCsoVLFSFpaGkajEX//8tvA+/v7k5x85fPMw4YN46233uKGG27A0dGR5s2b069fv2ueppk2bRre3t7mj5AQ2WDNHpS9gQ3uEizbZtupkT1CAVi1+yyZ+cXahrFz57MLGbcwltwiI72aN+DNu9rLyhk7Y9Dr+Oihzjz
+      ...[remainder of the base64-encoded PNG data for the rolling-average score plot (default learning policy vs. random selection policy) elided]...",
+      "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from matplotlib import pyplot as plt\n", + "chain.metrics.to_pandas()['score'].plot(label=\"default learning policy\")\n", + "random_chain.metrics.to_pandas()['score'].plot(label=\"random selection policy\")\n", + "plt.legend()\n", + "\n", + "print(f\"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}\")\n", + "print(f\"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There is a bit of randomness involved in the rl_chain's selection since the chain explores the selection space in order to learn the world as best as it can (see details of default exploration algorithm used [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-Exploration-with-SquareCB)), but overall, default chain policy should be doing better than random as it learns" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Advanced options\n", + "\n", + "The RL chain is highly configurable in order to be able to adjust to various selection scenarios. If you want to learn more about the ML library that powers it please take a look at tutorials [here](https://vowpalwabbit.org/)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "| Section | Description | Example / Usage |\n", + "|---------|-------------|-----------------|\n", + "| [**Change Chain Logging Level**](#change-chain-logging-level) | Change the logging level for the RL chain. | `logger.setLevel(logging.INFO)` |\n", + "| [**Featurization**](#featurization) | Adjusts the input to the RL chain. Can set auto-embeddings ON for more complex embeddings. | `chain = rl_chain.PickBest.from_llm(auto_embed=True, [...])` |\n", + "| [**Learned Policy to Learn Asynchronously**](#learned-policy-to-learn-asynchronously) | Score asynchronously if user input is needed for scoring. | `chain.update_with_delayed_score(score=, chain_response=response)` |\n", + "| [**Store Progress of Learned Policy**](#store-progress-of-learned-policy) | Option to store the progress of the variable injection learned policy. | `chain.save_progress()` |\n", + "| [**Stop Learning of Learned Policy**](#stop-learning-of-learned-policy) | Toggle the RL chain's learned policy updates ON/OFF. | `chain.deactivate_selection_scorer()` |\n", + "| [**Set a Different Policy**](#set-a-different-policy) | Choose between different policies: default, random, or custom. | Custom policy creation at chain creation time. |\n", + "| [**Different Exploration Algorithms and Options for Default Learned Policy**](#different-exploration-algorithms-and-options-for-the-default-learned-policy) | Set different exploration algorithms and hyperparameters for `VwPolicy`. | `vw_cmd = [\"--cb_explore_adf\", \"--quiet\", \"--squarecb\", \"--interactions=::\"]` |\n", + "| [**Learn Policy's Data Logs**](#learned-policys-data-logs) | Store and examine `VwPolicy`'s data logs. | `chain = rl_chain.PickBest.from_llm(vw_logs=, [...])` |\n", + "| [**Other Advanced Featurization Options**](#other-advanced-featurization-options) | Specify advanced featurization options for the RL chain. 
+    "### learned policy to learn asynchronously\n",
+    "\n",
+    "If scoring the result requires input from your users (e.g. your application showed Tom the selected meal and Tom clicked on it, but Anna did not), then the scoring can be done asynchronously. The way to do that is (a sketch follows the list):\n",
+    "\n",
+    "- set `selection_scorer=None` on chain creation OR call `chain.deactivate_selection_scorer()`\n",
+    "- call the chain for a specific input\n",
+    "- keep the chain's response (`response = chain.run([...])`)\n",
+    "- once you have determined the score of the response/chain selection, call the chain with it: `chain.update_with_delayed_score(score=<the score>, chain_response=response)`\n",
+    "\n",
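+    "A minimal sketch of this flow, assuming the `llm`, `PROMPT`, and `meals` objects defined earlier in this notebook (the score value `1.0` is a stand-in for whatever your application actually measures):\n",
+    "\n",
+    "```\n",
+    "chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT, selection_scorer=None)\n",
+    "\n",
+    "response = chain.run(\n",
+    "    meal = rl_chain.ToSelectFrom(meals),\n",
+    "    user = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
+    ")\n",
+    "\n",
+    "# ... later, once user feedback (e.g. a click) has been collected ...\n",
+    "chain.update_with_delayed_score(score=1.0, chain_response=response)\n",
+    "```\n",
+    "\n",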
+    "### store progress of learned policy\n",
+    "\n",
+    "Since the variable injection learned policy evolves over time, there is the option to store its progress and continue learning. This can be done by calling:\n",
+    "\n",
+    "`chain.save_progress()`\n",
+    "\n",
+    "which will store the rl chain's learned policy in a file called `latest.vw`. It will also store it in a file with a timestamp. That way, if `save_progress()` is called more than once, multiple checkpoints will be created, but the latest one will always be in `latest.vw`.\n",
+    "\n",
+    "Next time the chain is loaded, it will look for a file called `latest.vw`; if the file exists it will be loaded into the chain, and the learning will continue from there.\n",
+    "\n",
+    "By default the rl chain model checkpoints will be stored in the current directory, but you can specify the save/load location at chain creation time:\n",
+    "\n",
+    "`chain = rl_chain.PickBest.from_llm(model_save_dir=<path to dir>, [...])`\n",
+    "\n",
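+    "For example, a sketch of a save/restore cycle (the `./models` directory is illustrative, not a library default):\n",
+    "\n",
+    "```\n",
+    "chain = rl_chain.PickBest.from_llm(model_save_dir=\"./models\", [...])\n",
+    "\n",
+    "# ... run the chain and let the policy learn ...\n",
+    "\n",
+    "chain.save_progress()  # writes ./models/latest.vw plus a timestamped checkpoint\n",
+    "```\n",
+    "\n",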
\"meal 1 description\"},\n", + " {\"name\":[\"meal 2\", \"complex name\"], \"description\" : \"meal 2 description\"}\n", + " ])\n", + " ```\n", + "\n", + "`BasedOn` can also take a list of strings:\n", + "```\n", + "user = rl_chain.BasedOn([\"Tom Joe\", \"age:32\", \"state of california\"])\n", + "```\n", + "\n", + "there is no dictionary provided since multiple variables can be supplied wrapped in `BasedOn`\n", + "\n", + "Storing the data logs into a file allows the examination of what different inputs do to the data format.\n", + "\n", + "### More info on Auto or Custom SelectionScorer\n", + "\n", + "It is very important to get the selection scorer right since the policy uses it to learn. It determines what is called the reward in reinforcement learning, and more specifically in our Contextual Bandits setting.\n", + "\n", + "The general advice is to keep the score between [0, 1], 0 being the worst selection, 1 being the best selection from the available `ToSelectFrom` variables, based on the `BasedOn` variables, but should be adjusted if the need arises.\n", + "\n", + "In the examples provided above, the AutoSelectionScorer is set mostly to get users started but in real world scenarios it will most likely not be an adequate scorer function.\n", + "\n", + "The example also provided the option to change part of the scoring prompt template that the AutoSelectionScorer used to determine whether a selection was good or not:\n", + "\n", + "```\n", + "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n", + "chain = rl_chain.PickBest.from_llm(\n", + " llm=llm,\n", + " prompt=PROMPT,\n", + " selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n", + ")\n", + "\n", + "```\n", + "\n", + "Internally the AutoSelectionScorer adjusted the scoring prompt to make sure that the llm scoring retured a single float.\n", + "\n", + "However, if needed, a FULL scoring prompt can also be provided:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest] Entering Chain run with input:\n", + "\u001b[0m[inputs]\n", + "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] Entering Chain run with input:\n", + "\u001b[0m[inputs]\n", + "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] Entering LLM run with input:\n", + "\u001b[0m{\n", + " \"prompts\": [\n", + " \"Here is the description of a meal: \\\"Chicken Flatbreads with red sauce. Italian-Mexican fusion\\\".\\n\\nEmbed the meal into the given text: \\\"This is the weeks specialty dish, our master chefs believe you will love it!\\\".\\n\\nPrepend a personalized message including the user's name \\\"Tom\\\" \\n and their preference \\\"['Vegetarian', 'regular dairy is ok']\\\".\\n\\nMake it sound good.\"\n", + " ]\n", + "}\n", + "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] [1.12s] Exiting LLM run with output:\n", + "\u001b[0m{\n", + " \"generations\": [\n", + " [\n", + " {\n", + " \"text\": \"\\nHey Tom, we have something special for you this week! Our master chefs have created a delicious Italian-Mexican fusion Chicken Flatbreads with red sauce just for you. 
+    "The examples above also showed how to change the part of the scoring prompt template that the AutoSelectionScorer uses to determine whether a selection was good or not:\n",
+    "\n",
+    "```\n",
+    "scoring_criteria_template = \"Given {preference} rank how good or bad this selection is {meal}\"\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, scoring_criteria_template_str=scoring_criteria_template),\n",
+    ")\n",
+    "```\n",
+    "\n",
+    "Internally, the AutoSelectionScorer adjusts the scoring prompt to make sure that the LLM scoring call returns a single float.\n",
+    "\n",
+    "However, if needed, a FULL scoring prompt can also be provided:\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest] Entering Chain run with input:\n",
+      "\u001b[0m[inputs]\n",
+      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] Entering Chain run with input:\n",
+      "\u001b[0m[inputs]\n",
+      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] Entering LLM run with input:\n",
+      "\u001b[0m{\n",
+      "  \"prompts\": [\n",
+      "    \"Here is the description of a meal: \\\"Chicken Flatbreads with red sauce. Italian-Mexican fusion\\\".\\n\\nEmbed the meal into the given text: \\\"This is the weeks specialty dish, our master chefs believe you will love it!\\\".\\n\\nPrepend a personalized message including the user's name \\\"Tom\\\" \\n and their preference \\\"['Vegetarian', 'regular dairy is ok']\\\".\\n\\nMake it sound good.\"\n",
+      "  ]\n",
+      "}\n",
+      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain > 3:llm:OpenAI] [1.12s] Exiting LLM run with output:\n",
+      "\u001b[0m{\n",
+      "  \"generations\": [\n",
+      "    [\n",
+      "      {\n",
+      "        \"text\": \"\\nHey Tom, we have something special for you this week! Our master chefs have created a delicious Italian-Mexican fusion Chicken Flatbreads with red sauce just for you. Our chefs have also taken into account your preference of vegetarian options with regular dairy - this one is sure to be a hit!\",\n",
+      "        \"generation_info\": {\n",
+      "          \"finish_reason\": \"stop\",\n",
+      "          \"logprobs\": null\n",
+      "        }\n",
+      "      }\n",
+      "    ]\n",
+      "  ],\n",
+      "  \"llm_output\": {\n",
+      "    \"token_usage\": {\n",
+      "      \"total_tokens\": 154,\n",
+      "      \"completion_tokens\": 61,\n",
+      "      \"prompt_tokens\": 93\n",
+      "    },\n",
+      "    \"model_name\": \"text-davinci-003\"\n",
+      "  },\n",
+      "  \"run\": null\n",
+      "}\n",
+      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:PickBest > 2:chain:LLMChain] [1.12s] Exiting Chain run with output:\n",
+      "\u001b[0m{\n",
+      "  \"text\": \"\\nHey Tom, we have something special for you this week! Our master chefs have created a delicious Italian-Mexican fusion Chicken Flatbreads with red sauce just for you. Our chefs have also taken into account your preference of vegetarian options with regular dairy - this one is sure to be a hit!\"\n",
+      "}\n",
+      "\u001b[32;1m\u001b[1;3m[chain/start]\u001b[0m \u001b[1m[1:chain:LLMChain] Entering Chain run with input:\n",
+      "\u001b[0m[inputs]\n",
+      "\u001b[32;1m\u001b[1;3m[llm/start]\u001b[0m \u001b[1m[1:chain:LLMChain > 2:llm:OpenAI] Entering LLM run with input:\n",
+      "\u001b[0m{\n",
+      "  \"prompts\": [\n",
+      "    \"Given ['Vegetarian', 'regular dairy is ok'] rank how good or bad this selection is ['Beef Enchiladas with Feta cheese. Mexican-Greek fusion', 'Chicken Flatbreads with red sauce. Italian-Mexican fusion', 'Veggie sweet potato quesadillas with vegan cheese', 'One-Pan Tortelonni bake with peppers and onions']\\n\\nIMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\"\n",
+      "  ]\n",
+      "}\n",
+      "\u001b[36;1m\u001b[1;3m[llm/end]\u001b[0m \u001b[1m[1:chain:LLMChain > 2:llm:OpenAI] [274ms] Exiting LLM run with output:\n",
+      "\u001b[0m{\n",
+      "  \"generations\": [\n",
+      "    [\n",
+      "      {\n",
+      "        \"text\": \"\\n0.625\",\n",
+      "        \"generation_info\": {\n",
+      "          \"finish_reason\": \"stop\",\n",
+      "          \"logprobs\": null\n",
+      "        }\n",
+      "      }\n",
+      "    ]\n",
+      "  ],\n",
+      "  \"llm_output\": {\n",
+      "    \"token_usage\": {\n",
+      "      \"total_tokens\": 112,\n",
+      "      \"completion_tokens\": 4,\n",
+      "      \"prompt_tokens\": 108\n",
+      "    },\n",
+      "    \"model_name\": \"text-davinci-003\"\n",
+      "  },\n",
+      "  \"run\": null\n",
+      "}\n",
+      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:LLMChain] [275ms] Exiting Chain run with output:\n",
+      "\u001b[0m{\n",
+      "  \"text\": \"\\n0.625\"\n",
+      "}\n",
+      "\u001b[36;1m\u001b[1;3m[chain/end]\u001b[0m \u001b[1m[1:chain:PickBest] [1.40s] Exiting Chain run with output:\n",
+      "\u001b[0m[outputs]\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       "{'response': 'Hey Tom, we have something special for you this week! Our master chefs have created a delicious Italian-Mexican fusion Chicken Flatbreads with red sauce just for you. Our chefs have also taken into account your preference of vegetarian options with regular dairy - this one is sure to be a hit!',\n",
+       " 'selection_metadata': }"
+      ]
+     },
+     "execution_count": 18,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from langchain.prompts.prompt import PromptTemplate\n",
+    "import langchain\n",
+    "langchain.debug = True\n",
+    "\n",
+    "REWARD_PROMPT_TEMPLATE = \"\"\"\n",
+    "\n",
+    "Given {preference} rank how good or bad this selection is {meal}\n",
+    "\n",
+    "IMPORANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good\n",
+    "\n",
+    "\"\"\"\n",
+    "\n",
+    "\n",
+    "REWARD_PROMPT = PromptTemplate(\n",
+    "    input_variables=[\"preference\", \"meal\"],\n",
+    "    template=REWARD_PROMPT_TEMPLATE,\n",
+    ")\n",
+    "\n",
+    "chain = rl_chain.PickBest.from_llm(\n",
+    "    llm=llm,\n",
+    "    prompt=PROMPT,\n",
+    "    selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),\n",
+    ")\n",
+    "\n",
+    "chain.run(\n",
+    "    meal = rl_chain.ToSelectFrom(meals),\n",
+    "    user = rl_chain.BasedOn(\"Tom\"),\n",
+    "    preference = rl_chain.BasedOn([\"Vegetarian\", \"regular dairy is ok\"]),\n",
+    "    text_to_personalize = \"This is the weeks specialty dish, our master chefs believe you will love it!\",\n",
+    ")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "poetry-venv",
+   "language": "python",
+   "name": "poetry-venv"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/libs/experimental/langchain_experimental/rl_chain/model_repository.py b/libs/experimental/langchain_experimental/rl_chain/model_repository.py
index 87f162df0a..efe96cc0bf 100644
--- a/libs/experimental/langchain_experimental/rl_chain/model_repository.py
+++ b/libs/experimental/langchain_experimental/rl_chain/model_repository.py
@@ -45,7 +45,13 @@ class ModelRepository:
         shutil.copyfile(self.model_path, self.folder / f"model-{self.get_tag()}.vw")
 
     def load(self, commandline: List[str]) -> "vw.Workspace":
-        import vowpal_wabbit_next as vw
+        try:
+            import vowpal_wabbit_next as vw
+        except ImportError as e:
+            raise ImportError(
+                "Unable to import vowpal_wabbit_next, please install with "
+                "`pip install vowpal_wabbit_next`."
+            ) from e
 
         model_data = None
         if self.model_path.exists():