diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 00000000..c712d259
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,2 @@
+[run]
+omit = tests/*
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 9d62e252..12ec7320 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -6,30 +6,31 @@ on:
   pull_request:
 
 env:
-  POETRY_VERSION: "1.2.0"
+  POETRY_VERSION: "1.3.1"
 
 jobs:
   build:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
+        python-version:
+          - "3.8"
+          - "3.9"
+          - "3.10"
+          - "3.11"
     steps:
-      - uses: actions/checkout@v3
-      - name: Install poetry
-        run: |
-          pipx install poetry==$POETRY_VERSION
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-          cache: poetry
-      - name: Install dependencies
-        run: |
-          poetry install
-      - name: Analysing the code with our lint
-        run: |
-          make lint
+      - uses: actions/checkout@v3
+      - name: Install poetry
+        run: |
+          pipx install poetry==$POETRY_VERSION
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: poetry
+      - name: Install dependencies
+        run: |
+          poetry install
+      - name: Analysing the code with our lint
+        run: |
+          make lint
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index e3cb4943..f33e67ec 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -6,7 +6,7 @@ on:
   pull_request:
 
 env:
-  POETRY_VERSION: "1.2.0"
+  POETRY_VERSION: "1.3.1"
 
 jobs:
   build:
@@ -14,20 +14,21 @@ jobs:
     strategy:
       matrix:
         python-version:
-          - "3.8"
-          - "3.9"
-          - "3.10"
+          - "3.8"
+          - "3.9"
+          - "3.10"
+          - "3.11"
     steps:
-      - uses: actions/checkout@v3
-      - name: Install poetry
-        run: pipx install poetry==$POETRY_VERSION
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-version }}
-          cache: 'poetry'
-      - name: Install dependencies
-        run: poetry install
-      - name: Run unit tests
-        run: |
-          make tests
+      - uses: actions/checkout@v3
+      - name: Install poetry
+        run: pipx install poetry==$POETRY_VERSION
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: "poetry"
+      - name: Install dependencies
+        run: poetry install
+      - name: Run unit tests
+        run: |
+          make tests
diff --git a/.gitignore b/.gitignore
index 17d753d6..3252ee1f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 .vscode/
+.idea/
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..ee66483f
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,154 @@
+# Contributing to LangChain
+
+Hi there! Thank you for even being interested in contributing to LangChain.
+As an open source project in a rapidly developing field, we are extremely open
+to contributions, whether it be in the form of a new feature, improved infra, or better documentation.
+
+To contribute to this project, please follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow.
+Please do not try to push directly to this repo unless you are a maintainer.
+
+## 🗺️Contributing Guidelines
+
+### 🚩GitHub Issues
+
+Our [issues](https://github.com/hwchase17/langchain/issues) page is kept up to date
+with bugs, improvements, and feature requests.
+There is a taxonomy of labels to help
+with sorting and discovery of issues of interest. These include:
+
+- prompts: related to prompt tooling/infra.
+- llms: related to LLM wrappers/tooling/infra.
+- chains
+- utilities: related to different types of utilities to integrate with (Python, SQL, etc.).
+- agents
+- memory
+- applications: related to example applications to build
+
+If you start working on an issue, please assign it to yourself.
+
+If you are adding an issue, please try to keep it focused on a single modular bug/improvement/feature.
+If two issues are related or blocking, please link them rather than combining them into a single one.
+
+We will try to keep these issues as up to date as possible, though
+with the rapid rate of development in this field some may get out of date.
+If you notice this happening, please just let us know.
+
+### 🙋Getting Help
+
+Although we try to have a developer setup to make it as easy as possible for others to contribute (see below),
+it is possible that some pain points may arise around environment setup, linting, documentation, or other areas.
+Should that occur, please contact a maintainer! Not only do we want to help get you unblocked,
+but we also want to make sure that the process is smooth for future contributors.
+
+In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase.
+If you are finding these difficult (or even just annoying) to work with,
+feel free to contact a maintainer for help - we do not want these to get in the way of getting
+good code into the codebase.
+
+### 🏭Release process
+
+As of now, LangChain has an ad hoc release process: releases are cut with high frequency by
+a developer and published to [PyPI](https://pypi.org/project/langchain/).
+
+LangChain follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
+even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).
+
+If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
+If you have a Twitter account you would like us to mention, please let us know in the PR or in another manner.
+
+## 🤖Developer Setup
+
+### 🚀Quick Start
+
+This project uses [Poetry](https://python-poetry.org/) as a dependency manager. Check out Poetry's [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding.
+
+To install requirements:
+
+```bash
+poetry install -E all
+```
+
+This will install all requirements for running the package, examples, linting, formatting, tests, and coverage. Note that the `-E all` flag will install all optional dependencies necessary for integration testing.
+
+Now, you should be able to run the common tasks in the following section.
+
+### ✅Common Tasks
+
+#### Code Formatting
+
+Formatting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/).
+
+To run formatting for this project:
+
+```bash
+make format
+```
+
+#### Linting
+
+Linting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), [flake8](https://flake8.pycqa.org/en/latest/), and [mypy](http://mypy-lang.org/).
+
+To run linting for this project:
+
+```bash
+make lint
+```
+
+We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
+
+#### Coverage
+
+Code coverage (i.e. the amount of code that is covered by unit tests) helps identify areas of the code that are potentially more or less brittle.
+
+To get a report of current coverage, run the following:
+
+```bash
+make coverage
+```
+
+#### Testing
+
+Unit tests cover modular logic that does not require calls to outside APIs.
+
+To run unit tests:
+
+```bash
+make tests
+```
+
+If you add new logic, please add a unit test.
+
+Integration tests cover logic that requires making calls to outside APIs (often integration with other services).
+
+To run integration tests:
+
+```bash
+make integration_tests
+```
+
+If you add support for a new external API, please add a new integration test.
+
+#### Adding a Jupyter Notebook
+
+If you are adding a Jupyter notebook example, you'll want to install the optional `dev` dependencies.
+
+To install dev dependencies:
+
+```bash
+poetry install --with dev
+```
+
+Launch a notebook:
+
+```bash
+poetry run jupyter notebook
+```
+
+When you run `poetry install`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook.
+
+#### Contribute Documentation
+
+Docs are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code.
+
+For that reason, we ask that you add good documentation to all classes and methods.
+
+Similar to linting, we recognize documentation can be annoying. If you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
diff --git a/Makefile b/Makefile
index bd3f5cb2..979f6380 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,11 @@
 .PHONY: format lint tests integration_tests
 
+coverage:
+	poetry run pytest --cov \
+		--cov-config=.coveragerc \
+		--cov-report xml \
+		--cov-report term-missing:skip-covered
+
 format:
 	poetry run black .
 	poetry run isort .
diff --git a/README.md b/README.md
index 4fc020f9..e802a629 100644
--- a/README.md
+++ b/README.md
@@ -13,176 +13,45 @@
 Large language models (LLMs) are emerging as a transformative
 technology, enabling developers to build applications that they previously
 could not. But using these LLMs in isolation is often not enough to
-create a truly powerful app - the real power comes when you are able to
-combine them with other sources of computation or knowledge.
+create a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge.
 
 This library is aimed at assisting in the development of those types of applications.
 
 ## 📖 Documentation
 
 Please see [here](https://langchain.readthedocs.io/en/latest/?) for full documentation on:
-- Getting started (installation, setting up environment, simple examples)
+
+- Getting started (installation, setting up the environment, simple examples)
 - How-To examples (demos, integrations, helper functions)
 - Reference (full API docs)
-- Resources (high level explanation of core concepts)
+- Resources (high-level explanation of core concepts)
 
 ## 🚀 What can this help with?
 
 There are four main areas that LangChain is designed to help with.
 These are, in increasing order of complexity:
-1. LLM and Prompts
-2. Chains
-3. Agents
-4.
Memory - -Let's go through these categories and for each one identify key concepts (to clarify terminology) as well as the problems in this area LangChain helps solve. - -### LLMs and Prompts -Calling out to an LLM once is pretty easy, with most of them being behind well documented APIs. -However, there are still some challenges going from that to an application running in production that LangChain attempts to address. - -**Key Concepts** -- LLM: A large language model, in particular a text-to-text model. -- Prompt: The input to a language model. Typically this is not simply a hardcoded string but rather a combination of a template, some examples, and user input. -- Prompt Template: An object responsible for constructing the final prompt to pass to a LLM. -- Examples: Datapoints that can be included in the prompt in order to give the model more context what to do. -- Few Shot Prompt Template: A subclass of the PromptTemplate class that uses examples. -- Example Selector: A class responsible to selecting examples to use dynamically (depending on user input) in a few shot prompt. - -**Problems Solved** -- Switching costs: by exposing a standard interface for all the top LLM providers, LangChain makes it easy to switch from one provider to another, whether it be for production use cases or just for testing stuff out. -- Prompt management: managing your prompts is easy when you only have one simple one, but can get tricky when you have a bunch or when they start to get more complex. LangChain provides a standard way for storing, constructing, and referencing prompts. -- Prompt optimization: despite the underlying models getting better and better, there is still currently a need for carefully constructing prompts. - -### Chains -Using an LLM in isolation is fine for some simple applications, but many more complex ones require chaining LLMs - either with eachother or with other experts. -LangChain provides several parts to help with that. - -**Key Concepts** -- Tools: APIs designed for assisting with a particular use case (search, databases, Python REPL, etc). Prompt templates, LLMs, and chains can also be considered tools. -- Chains: A combination of multiple tools in a deterministic manner. - -**Problems Solved** -- Standard interface for working with Chains -- Easy way to construct chains of LLMs -- Lots of integrations with other tools that you may want to use in conjunction with LLMs -- End-to-end chains for common workflows (database question/answer, recursive summarization, etc) - -### Agents -Some applications will require not just a predetermined chain of calls to LLMs/other tools, but potentially an unknown chain that depends on the user input. -In these types of chains, there is a “agent” which has access to a suite of tools. -Depending on the user input, the agent can then decide which, if any, of these tools to call. - -**Key Concepts** -- Tools: same as above. -- Agent: An LLM-powered class responsible for determining which tools to use and in what order. - - -**Problems Solved** -- Standard agent interfaces -- A selection of powerful agents to choose from -- Common chains that can be used as tools - -### Memory -By default, Chains and Agents are stateless, meaning that they treat each incoming query independently. -In some applications (chatbots being a GREAT example) it is highly important to remember previous interactions, -both at a short term but also at a long term level. The concept of "Memory" exists to do exactly that. 
- -**Key Concepts** -- Memory: A class that can be added to an Agent or Chain to (1) pull in memory variables before calling that chain/agent, and (2) create new memories after the chain/agent finishes. -- Memory Variables: Variables returned from a Memory class, to be passed into the chain/agent along with the user input. - -**Problems Solved** -- Standard memory interfaces -- A collection of common memory implementations to choose from -- Common chains/agents that use memory (e.g. chatbots) - -## 🤖 Developer Guide - -To begin developing on this project, first clone the repo locally. - -### Quick Start - -This project uses [Poetry](https://python-poetry.org/) as a dependency manager. Check out Poetry's own [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding. - -To install requirements: - -```bash -poetry install -E all -``` - -This will install all requirements for running the package, examples, linting, formatting, and tests. Note the `-E all` flag will install all optional dependencies necessary for integration testing. - -Now, you should be able to run the common tasks in the following section. - -### Common Tasks - -#### Code Formatting - -Formatting for this project is a combination of [Black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/). - -To run formatting for this project: - -```bash -make format -``` - -#### Linting - -Linting for this project is a combination of [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), [flake8](https://flake8.pycqa.org/en/latest/), and [mypy](http://mypy-lang.org/). - -To run linting for this project: - -```bash -make lint -``` - -We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer and they can help you with it. We do not want this to be a blocker for good code getting contributed. - -#### Testing - -Unit tests cover modular logic that does not require calls to outside apis. - -To run unit tests: - -```bash -make tests -``` - -If you add new logic, please add a unit test. - -Integration tests cover logic that requires making calls to outside APIs (often integration with other services). - -To run integration tests: - -```bash -make integration_tests -``` -If you add support for a new external API, please add a new integration test. +**📃 LLMs and Prompts:** -#### Adding a Jupyter Notebook +This includes prompt management, prompt optimization, generic interface for all LLMs, and common utilities for working with LLMs. -If you are adding a Jupyter notebook example, you'll want to install the optional `dev` dependencies. +**🔗 Chains:** -To install dev dependencies: +Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications. -```bash -poetry install --with dev -``` +**🤖 Agents:** -Launch a notebook: +Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents. -```bash -poetry run jupyter notebook -``` +**🧠 Memory:** -When you run `poetry install`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook. 
+Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory. -#### Contribute Documentation +For more information on these concepts, please see our [full documentation](https://langchain.readthedocs.io/en/latest/?). -Docs are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code. +## 💁 Contributing -For that reason, we ask that you add good documentation to all classes and methods. +As an open source project in a rapidly developing field, we are extremely open +to contributions, whether it be in the form of a new feature, improved infra, or better documentation. -Similar to linting, we recognize documentation can be annoying - if you do not want to do it, please contact a project maintainer and they can help you with it. We do not want this to be a blocker for good code getting contributed. +For detailed information on how to contribute, see [here](CONTRIBUTING.md). diff --git a/docs/examples/chains/qa_with_sources.ipynb b/docs/examples/chains/qa_with_sources.ipynb index 1135e86c..29454eae 100644 --- a/docs/examples/chains/qa_with_sources.ipynb +++ b/docs/examples/chains/qa_with_sources.ipynb @@ -159,6 +159,14 @@ "id": "e417926a", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. Models won't be available and only tokenizers, configuration and file/data utilities can be used.\n", + "Token indices sequence length is longer than the specified maximum sequence length for this model (1546 > 1024). Running this sequence through the model will result in indexing errors\n" + ] + }, { "data": { "text/plain": [ @@ -204,7 +212,7 @@ { "data": { "text/plain": [ - "{'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament. He discussed the situation in Ukraine, the NATO Alliance, and the United States' response to Putin's attack on Ukraine. He spoke about the extensive preparation and coalition building that was done in advance of the attack, and the unified response from the European Union, Canada, Japan, Korea, Australia, New Zealand, and many other countries. He also discussed the economic sanctions that have been imposed on Russia, and the effects they have had on Putin's war fund. Source: 1, 2\"}" + "{'output_text': \"\\n\\nThe president did not mention Justice Breyer in his speech to the European Parliament, which focused on building a coalition of freedom-loving nations to confront Putin, unifying European allies, countering Russia's lies with truth, and enforcing powerful economic sanctions. Source: 2\"}" ] }, "execution_count": 12, diff --git a/docs/examples/chains/summarize.ipynb b/docs/examples/chains/summarize.ipynb index cd2fd716..ee6b6572 100644 --- a/docs/examples/chains/summarize.ipynb +++ b/docs/examples/chains/summarize.ipynb @@ -131,7 +131,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 6, "id": "ef28e1d4", "metadata": {}, "outputs": [], @@ -148,7 +148,7 @@ { "data": { "text/plain": [ - "' In response to Russian aggression in Ukraine, the US and its allies have imposed economic sanctions, cut off access to technology, seized assets of Russian oligarchs, and closed American airspace to Russian flights. 
The US is also providing military, economic, and humanitarian assistance to Ukraine, mobilizing ground forces, air squadrons, and ship deployments, and releasing 30 million barrels of oil from its Strategic Petroleum Reserve. President Biden has also passed the American Rescue Plan, Bipartisan Infrastructure Law, and Bipartisan Innovation Act to provide economic relief and create jobs.'" + "\" In response to Vladimir Putin's aggression in Ukraine, the US and its allies have taken action to hold him accountable, including economic sanctions, cutting off access to technology, and seizing the assets of Russian oligarchs. They are also providing military, economic, and humanitarian assistance to the Ukrainians, and releasing 60 million barrels of oil from reserves around the world. President Biden has passed several laws to provide economic relief to Americans and create jobs, and is making sure taxpayer dollars support American jobs and businesses.\"" ] }, "execution_count": 9, diff --git a/docs/examples/integrations/textsplitter.ipynb b/docs/examples/integrations/textsplitter.ipynb index 5f57d110..3103b4c6 100644 --- a/docs/examples/integrations/textsplitter.ipynb +++ b/docs/examples/integrations/textsplitter.ipynb @@ -19,7 +19,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 1, "id": "e82c4685", "metadata": {}, "outputs": [], @@ -42,7 +42,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 2, "id": "79ff6737", "metadata": {}, "outputs": [], @@ -57,7 +57,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 3, "id": "38547666", "metadata": {}, "outputs": [ @@ -67,7 +67,7 @@ "'Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \\n\\nLast year COVID-19 kept us apart. This year we are finally together again. \\n\\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \\n\\nWith a duty to one another to the American people to the Constitution. \\n\\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \\n\\nSix days ago, Russia’s Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \\n\\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \\n\\nHe met the Ukrainian people. \\n\\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \\n\\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland. 
'" ] }, - "execution_count": 8, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -88,7 +88,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 4, "id": "a8ce51d5", "metadata": {}, "outputs": [ @@ -108,7 +108,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 5, "id": "ca5e72c0", "metadata": {}, "outputs": [], @@ -119,7 +119,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 6, "id": "37cdfbeb", "metadata": {}, "outputs": [ @@ -143,6 +143,52 @@ "print(texts[0])" ] }, + { + "cell_type": "markdown", + "id": "7683b36a", + "metadata": {}, + "source": [ + "## tiktoken (OpenAI) Length Function\n", + "You can also use tiktoken, a open source tokenizer package from OpenAI to estimate tokens used. Will probably be ore accurate for their models." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "825f7c0a", + "metadata": {}, + "outputs": [], + "source": [ + "text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=100, chunk_overlap=0)\n", + "texts = text_splitter.split_text(state_of_the_union)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ae35d165", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n", + "\n", + "Last year COVID-19 kept us apart. This year we are finally together again. \n", + "\n", + "Tonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n", + "\n", + "With a duty to one another to the American people to the Constitution. \n", + "\n", + "And with an unwavering resolve that freedom will always triumph over tyranny. \n" + ] + } + ], + "source": [ + "print(texts[0])" + ] + }, { "cell_type": "markdown", "id": "ea2973ac", diff --git a/docs/examples/prompts.rst b/docs/examples/prompts.rst index 6fe34382..8148b1d2 100644 --- a/docs/examples/prompts.rst +++ b/docs/examples/prompts.rst @@ -1,10 +1,35 @@ -Prompts -======= +LLMs & Prompts +============== + +The examples here all highlight how to work with LLMs and prompts. + +**LLMs** + +`LLM Functionality `_: A walkthrough of all the functionality the standard LLM interface exposes. + +`LLM Serialization `_: A walkthrough of how to serialize LLMs to and from disk. + +`Custom LLM `_: How to create and use a custom LLM class, in case you have an LLM not from one of the standard providers (including one that you host yourself). + + +**Prompts** + +`Prompt Management `_: A walkthrough of all the functionality LangChain supports for working with prompts. + +`Prompt Serialization `_: A walkthrough of how to serialize prompts to and from disk. + +`Few Shot Examples `_: How to include examples in the prompt. + +`Generate Examples `_: How to use existing examples to generate more examples. + +`Custom Example Selector `_: How to create and use a custom ExampleSelector (the class responsible for choosing which examples to use in a prompt). + +`Custom Prompt Template `_: How to create and use a custom PromptTemplate, the logic that decides how input variables get formatted into a prompt. -The examples here all highlight how to work with prompts. .. 
toctree:: :maxdepth: 1 :glob: + :hidden: prompts/* diff --git a/docs/examples/prompts/custom_llm.ipynb b/docs/examples/prompts/custom_llm.ipynb index fd9e6a7a..bb3938aa 100644 --- a/docs/examples/prompts/custom_llm.ipynb +++ b/docs/examples/prompts/custom_llm.ipynb @@ -11,7 +11,7 @@ "\n", "There is only one required thing that a custom LLM needs to implement:\n", "\n", - "1. A `__call__` method that takes in a string, some optional stop words, and returns a string\n", + "1. A `_call` method that takes in a string, some optional stop words, and returns a string\n", "\n", "There is a second optional thing it can implement:\n", "\n", @@ -33,17 +33,20 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 7, "id": "d5ceff02", "metadata": {}, "outputs": [], "source": [ "class CustomLLM(LLM):\n", " \n", - " def __init__(self, n: int):\n", - " self.n = n\n", + " n: int\n", + " \n", + " @property\n", + " def _llm_type(self) -> str:\n", + " return \"custom\"\n", " \n", - " def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:\n", + " def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:\n", " if stop is not None:\n", " raise ValueError(\"stop kwargs are not permitted.\")\n", " return prompt[:self.n]\n", @@ -64,7 +67,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 8, "id": "10e5ece6", "metadata": {}, "outputs": [], @@ -74,7 +77,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 9, "id": "8cd49199", "metadata": {}, "outputs": [ @@ -84,7 +87,7 @@ "'This is a '" ] }, - "execution_count": 4, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -103,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "id": "9c33fa19", "metadata": {}, "outputs": [ @@ -145,7 +148,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.10.8" } }, "nbformat": 4, diff --git a/docs/examples/prompts/few shot examples.ipynb b/docs/examples/prompts/few_shot_examples.ipynb similarity index 100% rename from docs/examples/prompts/few shot examples.ipynb rename to docs/examples/prompts/few_shot_examples.ipynb diff --git a/docs/examples/prompts/llm.json b/docs/examples/prompts/llm.json new file mode 100644 index 00000000..df2b7e1e --- /dev/null +++ b/docs/examples/prompts/llm.json @@ -0,0 +1,11 @@ +{ + "model_name": "text-davinci-003", + "temperature": 0.7, + "max_tokens": 256, + "top_p": 1.0, + "frequency_penalty": 0.0, + "presence_penalty": 0.0, + "n": 1, + "best_of": 1, + "_type": "openai" +} \ No newline at end of file diff --git a/docs/examples/prompts/llm.yaml b/docs/examples/prompts/llm.yaml new file mode 100644 index 00000000..ee384ffa --- /dev/null +++ b/docs/examples/prompts/llm.yaml @@ -0,0 +1,9 @@ +_type: openai +best_of: 1 +frequency_penalty: 0.0 +max_tokens: 256 +model_name: text-davinci-003 +n: 1 +presence_penalty: 0.0 +temperature: 0.7 +top_p: 1.0 diff --git a/docs/examples/prompts/llm_functionality.ipynb b/docs/examples/prompts/llm_functionality.ipynb new file mode 100644 index 00000000..7edfc841 --- /dev/null +++ b/docs/examples/prompts/llm_functionality.ipynb @@ -0,0 +1,412 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "20ac6b98", + "metadata": {}, + "source": [ + "# LLM Functionality\n", + "\n", + "This notebook goes over all the different features of the LLM class in LangChain.\n", + "\n", + "We will work with an OpenAI LLM wrapper, although these functionalities should 
exist for all LLM types." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "df924055", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.llms import OpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "182b484c", + "metadata": {}, + "outputs": [], + "source": [ + "llm = OpenAI(model_name=\"text-ada-001\", n=2, best_of=2)" + ] + }, + { + "cell_type": "markdown", + "id": "9695ccfc", + "metadata": {}, + "source": [ + "**Generate Text:** The most basic functionality an LLM has is just the ability to call it, passing in a string and getting back a string." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9d12ac26", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "llm(\"Tell me a joke\")" + ] + }, + { + "cell_type": "markdown", + "id": "e7d4d42d", + "metadata": {}, + "source": [ + "**Generate:** More broadly, you can call it with a list of inputs, getting back a more complete response than just the text. This complete response includes things like multiple top responses, as well as LLM provider specific information" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "f4dc241a", + "metadata": {}, + "outputs": [], + "source": [ + "llm_result = llm.generate([\"Tell me a joke\", \"Tell me a poem\"]*15)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "740392f6", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "30" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(llm_result.generations)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "ab6cdcf1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Generation(text='\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'),\n", + " Generation(text='\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!')]" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "llm_result.generations[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "4946a778", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[Generation(text=\"\\n\\nA rose by the side of the road\\n\\nIs all I need to find my way\\n\\nTo the place I've been searching for\\n\\nAnd my heart is singing with joy\\n\\nWhen I look at this rose\\n\\nIt reminds me of the love I've found\\n\\nAnd I know that wherever I go\\n\\nI'll always find my rose by the side of the road.\"),\n", + " Generation(text=\"\\n\\nWhen I was younger\\nI thought that love\\nI was something like a fairytale\\nI would find my prince and they would be my people\\nI was naïve\\nI thought that\\n\\nLove was a something that happened\\nWhen I was younger\\nI was it for my fairytale prince\\nNow I realize\\nThat love is something that waits\\nFor when my prince comes\\nAnd when I am ready to be his wife\\nI'll tell you a poem\\n\\nWhen I was younger\\nI thought that love\\nI was something like a fairytale\\nI would find my prince and they would be my people\\nI was naïve\\nI thought that\\n\\nLove was a something that happened\\nAnd I would be happy\\nWhen my prince came\\nAnd I was ready to be his wife\")]" + ] + }, + "execution_count": 7, + "metadata": {}, + 
"output_type": "execute_result" + } + ], + "source": [ + "llm_result.generations[-1]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "242e4527", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'token_usage': {'completion_tokens': 3722,\n", + " 'prompt_tokens': 120,\n", + " 'total_tokens': 3842}}" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Provider specific info\n", + "llm_result.llm_output" + ] + }, + { + "cell_type": "markdown", + "id": "bde8e04f", + "metadata": {}, + "source": [ + "**Number of Tokens:** You can also estimate how many tokens a piece of text will be in that model. This is useful because models have a context length (and cost more for more tokens), which means you need to be aware of how long the text you are passing in is.\n", + "\n", + "Notice that by default the tokens are estimated using a HuggingFace tokenizer." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "b623c774", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "3" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "llm.get_num_tokens(\"what a joke\")" + ] + }, + { + "cell_type": "markdown", + "id": "ee6fcf8d", + "metadata": {}, + "source": [ + "### Caching\n", + "With LangChain, you can also enable caching of LLM calls. Note that currently this only applies for individual LLM calls." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2626ca48", + "metadata": {}, + "outputs": [], + "source": [ + "import langchain\n", + "from langchain.cache import InMemoryCache\n", + "langchain.llm_cache = InMemoryCache()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "97762272", + "metadata": {}, + "outputs": [], + "source": [ + "# To make the caching really obvious, lets use a slower model.\n", + "llm = OpenAI(model_name=\"text-davinci-002\", n=2, best_of=2)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e80c65e4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU times: user 31.2 ms, sys: 11.8 ms, total: 43.1 ms\n", + "Wall time: 1.75 s\n" + ] + }, + { + "data": { + "text/plain": [ + "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%%time\n", + "# The first time, it is not yet in cache, so it should take longer\n", + "llm(\"Tell me a joke\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "678408ec", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU times: user 51 µs, sys: 1 µs, total: 52 µs\n", + "Wall time: 67.2 µs\n" + ] + }, + { + "data": { + "text/plain": [ + "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side!'" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%%time\n", + "# The second time it is, so it goes faster\n", + "llm(\"Tell me a joke\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "3f0ac8d2", + "metadata": {}, + "outputs": [], + "source": [ + "# We can do the same thing with a SQLite cache\n", + "from langchain.cache import SQLiteCache\n", + "langchain.llm_cache = SQLiteCache(database_path=\".langchain.db\")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + 
"id": "0e1dcce3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU times: user 26.6 ms, sys: 11.2 ms, total: 37.7 ms\n", + "Wall time: 1.89 s\n" + ] + }, + { + "data": { + "text/plain": [ + "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%%time\n", + "# The first time, it is not yet in cache, so it should take longer\n", + "llm(\"Tell me a joke\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "efadd750", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "CPU times: user 2.69 ms, sys: 1.57 ms, total: 4.27 ms\n", + "Wall time: 2.73 ms\n" + ] + }, + { + "data": { + "text/plain": [ + "'\\n\\nWhy did the chicken cross the road?\\n\\nTo get to the other side.'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%%time\n", + "# The second time it is, so it goes faster\n", + "llm(\"Tell me a joke\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6053408b", + "metadata": {}, + "outputs": [], + "source": [ + "# You can use SQLAlchemyCache to cache with any SQL database supported by SQLAlchemy.\n", + "from langchain.cache import SQLAlchemyCache\n", + "from sqlalchemy import create_engine\n", + "\n", + "engine = create_engine(\"postgresql://postgres:postgres@localhost:5432/postgres\")\n", + "langchain.llm_cache = SQLAlchemyCache(engine)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.12 (main, Jun 1 2022, 06:34:44) \n[Clang 12.0.0 ]" + }, + "vscode": { + "interpreter": { + "hash": "1235b9b19e8e9828b5c1fdb2cd89fe8d3de0fcde5ef5f3db36e4b671adb8660f" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/examples/prompts/llm_serialization.ipynb b/docs/examples/prompts/llm_serialization.ipynb new file mode 100644 index 00000000..99556bfa --- /dev/null +++ b/docs/examples/prompts/llm_serialization.ipynb @@ -0,0 +1,166 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "73f9bf40", + "metadata": {}, + "source": [ + "# LLM Serialization\n", + "\n", + "This notebook walks how to write and read an LLM Configuration to and from disk. This is useful if you want to save the configuration for a given LLM (eg the provider, the temperature, etc)." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9c9fb6ff", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.llms import OpenAI\n", + "from langchain.llms.loading import load_llm" + ] + }, + { + "cell_type": "markdown", + "id": "88ce018b", + "metadata": {}, + "source": [ + "### Loading\n", + "First, lets go over loading a LLM from disk. LLMs can be saved on disk in two formats: json or yaml. No matter the extension, they are loaded in the same way." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "f12b28f3",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\r\n",
+      "    \"model_name\": \"text-davinci-003\",\r\n",
+      "    \"temperature\": 0.7,\r\n",
+      "    \"max_tokens\": 256,\r\n",
+      "    \"top_p\": 1,\r\n",
+      "    \"frequency_penalty\": 0,\r\n",
+      "    \"presence_penalty\": 0,\r\n",
+      "    \"n\": 1,\r\n",
+      "    \"best_of\": 1,\r\n",
+      "    \"_type\": \"openai\"\r\n",
+      "}"
+     ]
+    }
+   ],
+   "source": [
+    "!cat llm.json"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "9ab709fc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm = load_llm(\"llm.json\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "095b1d56",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "_type: openai\r\n",
+      "best_of: 1\r\n",
+      "frequency_penalty: 0\r\n",
+      "max_tokens: 256\r\n",
+      "model_name: text-davinci-003\r\n",
+      "n: 1\r\n",
+      "presence_penalty: 0\r\n",
+      "temperature: 0.7\r\n",
+      "top_p: 1\r\n"
+     ]
+    }
+   ],
+   "source": [
+    "!cat llm.yaml"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "8cafaafe",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm = load_llm(\"llm.yaml\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ab3e4223",
+   "metadata": {},
+   "source": [
+    "### Saving\n",
+    "If you want to go from an LLM in memory to a serialized version of it, you can do so easily by calling the `.save` method. Again, this supports both json and yaml."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "b38f685d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm.save(\"llm.json\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "b7365503",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "llm.save(\"llm.yaml\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "0e494851",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.8"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/explanation/combine_docs.md b/docs/explanation/combine_docs.md
index 81a0d13d..2f86ad95 100644
--- a/docs/explanation/combine_docs.md
+++ b/docs/explanation/combine_docs.md
@@ -113,7 +113,7 @@ asking the LLM to refine the output based on the new document.
 
 **Pros:** Can pull in more relevant context, and may be less lossy than `RefineDocumentsChain`.
 
-**Cons:** Requires many more calls to the LLM than `StuffDocumentsChain`. The calls are also NOT independent, meaning they cannot be paralleled like `RefineDocumentsChain`. There is also some potential dependencies on the ordering of the documents.
+**Cons:** Requires many more calls to the LLM than `StuffDocumentsChain`. The calls are also NOT independent, meaning they cannot be parallelized like `MapReduceDocumentsChain`. There are also potential dependencies on the ordering of the documents.
 
 ## Use Cases
 LangChain supports the above three methods of augmenting LLMs with external data.
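To make the trade-offs above concrete, here is a minimal sketch of loading the "stuff" and "map_reduce" variants through the question-answering loader that this diff extends below. It is only illustrative: it assumes an `OPENAI_API_KEY` is configured, and that the `load_qa_chain` entry point with the `input_documents`/`question` keys and `output_text` output key behaves as in this codebase.

```python
from langchain.chains.question_answering import load_qa_chain
from langchain.docstore.document import Document
from langchain.llms import OpenAI

llm = OpenAI(temperature=0)

docs = [
    Document(page_content="LangChain is a library for building LLM applications."),
    Document(page_content="It provides chains, agents, and memory abstractions."),
]

# "stuff" formats every document into one prompt; "map_reduce" runs the
# question over each document independently, then reduces the partial answers.
for chain_type in ("stuff", "map_reduce"):
    chain = load_qa_chain(llm, chain_type=chain_type)
    result = chain({"input_documents": docs, "question": "What is LangChain?"})
    print(chain_type, result["output_text"])
```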
diff --git a/docs/explanation/cool_demos.md b/docs/explanation/cool_demos.md index 27381633..21af928d 100644 --- a/docs/explanation/cool_demos.md +++ b/docs/explanation/cool_demos.md @@ -6,6 +6,9 @@ If you see any other demos that you think we should highlight, be sure to let us ## Open Source +### [YouTube Transcription Question Answering with Sources](https://colab.research.google.com/drive/1sKSTjt9cPstl_WMZ86JsgEqFG-aSAwkn?usp=sharing) +An end-to-end example of doing question answering on YouTube transcripts, returning the timestamps as sources to legitimize the answer. + ### [ThoughtSource](https://github.com/OpenBioLink/ThoughtSource) A central, open resource and community around data and tools related to chain-of-thought reasoning in large language models. diff --git a/docs/installation.md b/docs/installation.md index e1ed80f8..2e7fb797 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -21,4 +21,10 @@ To install all modules needed for all integrations, run: ``` pip install langchain[all] +``` + +Note that if you are using `zsh`, you'll need to quote square brackets when passing them as an argument to a command, for example: + +``` +pip install 'langchain[all]' ``` \ No newline at end of file diff --git a/langchain/__init__.py b/langchain/__init__.py index 31ec09eb..c100d0e3 100644 --- a/langchain/__init__.py +++ b/langchain/__init__.py @@ -1,6 +1,9 @@ """Main entrypoint into package.""" +from typing import Optional + from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain +from langchain.cache import BaseCache from langchain.chains import ( ConversationChain, LLMBashChain, @@ -15,6 +18,7 @@ from langchain.chains import ( ) from langchain.docstore import InMemoryDocstore, Wikipedia from langchain.llms import Cohere, HuggingFaceHub, OpenAI +from langchain.llms.huggingface_pipeline import HuggingFacePipeline from langchain.logger import BaseLogger, StdOutLogger from langchain.prompts import ( BasePromptTemplate, @@ -28,6 +32,7 @@ from langchain.vectorstores import FAISS, ElasticVectorSearch logger: BaseLogger = StdOutLogger() verbose: bool = False +llm_cache: Optional[BaseCache] = None __all__ = [ "LLMChain", @@ -46,6 +51,7 @@ __all__ = [ "ReActChain", "Wikipedia", "HuggingFaceHub", + "HuggingFacePipeline", "SQLDatabase", "SQLDatabaseChain", "FAISS", diff --git a/langchain/cache.py b/langchain/cache.py new file mode 100644 index 00000000..258c0383 --- /dev/null +++ b/langchain/cache.py @@ -0,0 +1,118 @@ +"""Beta Feature: base interface for cache.""" +from abc import ABC, abstractmethod +from typing import Dict, List, Optional, Tuple, Union + +from sqlalchemy import Column, Integer, String, create_engine, select +from sqlalchemy.engine.base import Engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import Session + +from langchain.schema import Generation + +RETURN_VAL_TYPE = Union[List[Generation], str] + + +class BaseCache(ABC): + """Base interface for cache.""" + + @abstractmethod + def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]: + """Look up based on prompt and llm_string.""" + + @abstractmethod + def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None: + """Update cache based on prompt and llm_string.""" + + +class InMemoryCache(BaseCache): + """Cache that stores things in memory.""" + + def __init__(self) -> None: + """Initialize with empty cache.""" + self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} + + def lookup(self, prompt: str, llm_string: str) -> 
Optional[RETURN_VAL_TYPE]:
+        """Look up based on prompt and llm_string."""
+        return self._cache.get((prompt, llm_string), None)
+
+    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
+        """Update cache based on prompt and llm_string."""
+        self._cache[(prompt, llm_string)] = return_val
+
+
+Base = declarative_base()
+
+
+class LLMCache(Base):  # type: ignore
+    """SQLite table for simple LLM cache (string only)."""
+
+    __tablename__ = "llm_cache"
+    prompt = Column(String, primary_key=True)
+    llm = Column(String, primary_key=True)
+    response = Column(String)
+
+
+class FullLLMCache(Base):  # type: ignore
+    """SQLite table for full LLM cache (all generations)."""
+
+    __tablename__ = "full_llm_cache"
+    prompt = Column(String, primary_key=True)
+    llm = Column(String, primary_key=True)
+    idx = Column(Integer, primary_key=True)
+    response = Column(String)
+
+
+class SQLAlchemyCache(BaseCache):
+    """Cache that uses SQLAlchemy as a backend."""
+
+    def __init__(self, engine: Engine):
+        """Initialize by creating all tables."""
+        self.engine = engine
+        Base.metadata.create_all(self.engine)
+
+    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
+        """Look up based on prompt and llm_string."""
+        stmt = (
+            select(FullLLMCache.response)
+            .where(FullLLMCache.prompt == prompt)
+            .where(FullLLMCache.llm == llm_string)
+            .order_by(FullLLMCache.idx)
+        )
+        with Session(self.engine) as session:
+            generations = []
+            for row in session.execute(stmt):
+                generations.append(Generation(text=row[0]))
+            if len(generations) > 0:
+                return generations
+        stmt = (
+            select(LLMCache.response)
+            .where(LLMCache.prompt == prompt)
+            .where(LLMCache.llm == llm_string)
+        )
+        with Session(self.engine) as session:
+            for row in session.execute(stmt):
+                return row[0]
+        return None
+
+    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
+        """Update cache based on prompt and llm_string."""
+        if isinstance(return_val, str):
+            item = LLMCache(prompt=prompt, llm=llm_string, response=return_val)
+            with Session(self.engine) as session, session.begin():
+                session.add(item)
+        else:
+            for i, generation in enumerate(return_val):
+                item = FullLLMCache(
+                    prompt=prompt, llm=llm_string, response=generation.text, idx=i
+                )
+                with Session(self.engine) as session, session.begin():
+                    session.add(item)
+
+
+class SQLiteCache(SQLAlchemyCache):
+    """Cache that uses SQLite as a backend."""
+
+    def __init__(self, database_path: str = ".langchain.db"):
+        """Initialize by creating the engine and all tables."""
+        engine = create_engine(f"sqlite:///{database_path}")
+        super().__init__(engine)
diff --git a/langchain/chains/base.py b/langchain/chains/base.py
index 44f0bdbf..848664e5 100644
--- a/langchain/chains/base.py
+++ b/langchain/chains/base.py
@@ -1,6 +1,6 @@
 """Base interface that all chains should implement."""
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
 from pydantic import BaseModel, Extra, Field
 
@@ -29,6 +29,10 @@ class Memory(BaseModel, ABC):
     def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
         """Save the context of this model run to memory."""
 
+    @abstractmethod
+    def clear(self) -> None:
+        """Clear memory contents."""
+
 
 def _get_verbosity() -> bool:
     return langchain.verbose
 
@@ -70,18 +74,28 @@ class Chain(BaseModel, ABC):
         """Run the logic of this chain and return the output."""
 
     def __call__(
-        self, inputs: Dict[str, Any], return_only_outputs: bool
= False + self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False ) -> Dict[str, str]: """Run the logic of this chain and add to output if desired. Args: - inputs: Dictionary of inputs. + inputs: Dictionary of inputs, or single input if chain expects + only one param. return_only_outputs: boolean for whether to return only outputs in the response. If True, only new keys generated by this chain will be returned. If False, both input keys and new keys generated by this chain will be returned. Defaults to False. """ + if not isinstance(inputs, dict): + if len(self.input_keys) != 1: + raise ValueError( + f"A single string input was passed in, but this chain expects " + f"multiple inputs ({self.input_keys}). When a chain expects " + f"multiple inputs, please call it by passing in a dictionary, " + "eg `chain({'foo': 1, 'bar': 2})`" + ) + inputs = {self.input_keys[0]: inputs} if self.memory is not None: external_context = self.memory.load_memory_variables(inputs) inputs = dict(inputs, **external_context) diff --git a/langchain/chains/combine_documents/base.py b/langchain/chains/combine_documents/base.py index 26cbcb89..7d5574ca 100644 --- a/langchain/chains/combine_documents/base.py +++ b/langchain/chains/combine_documents/base.py @@ -1,7 +1,7 @@ """Base interface for chains combining documents.""" from abc import ABC, abstractmethod -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from pydantic import BaseModel @@ -31,6 +31,13 @@ class BaseCombineDocumentsChain(Chain, BaseModel, ABC): """ return [self.output_key] + def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: + """Return the prompt length given the documents passed in. + + Returns None if the method does not depend on the prompt length. + """ + return None + @abstractmethod def combine_docs(self, docs: List[Document], **kwargs: Any) -> str: """Combine documents into a single string.""" diff --git a/langchain/chains/combine_documents/map_reduce.py b/langchain/chains/combine_documents/map_reduce.py index b182e9e5..8653ef1a 100644 --- a/langchain/chains/combine_documents/map_reduce.py +++ b/langchain/chains/combine_documents/map_reduce.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Any, Dict, List +from typing import Any, Callable, Dict, List, Optional from pydantic import BaseModel, Extra, root_validator @@ -11,13 +11,57 @@ from langchain.chains.llm import LLMChain from langchain.docstore.document import Document +def _split_list_of_docs( + docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any +) -> List[List[Document]]: + new_result_doc_list = [] + _sub_result_docs = [] + for doc in docs: + _sub_result_docs.append(doc) + _num_tokens = length_func(_sub_result_docs, **kwargs) + if _num_tokens > token_max: + if len(_sub_result_docs) == 1: + raise ValueError( + "A single document was longer than the context length," + " we cannot handle this." + ) + if len(_sub_result_docs) == 2: + raise ValueError( + "A single document was so long it could not be combined " + "with another document, we cannot handle this." 
+ ) + new_result_doc_list.append(_sub_result_docs[:-1]) + _sub_result_docs = _sub_result_docs[-1:] + new_result_doc_list.append(_sub_result_docs) + return new_result_doc_list + + +def _collapse_docs( + docs: List[Document], + combine_document_func: Callable, + **kwargs: Any, +) -> Document: + result = combine_document_func(docs, **kwargs) + combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()} + for doc in docs[1:]: + for k, v in doc.metadata.items(): + if k in combined_metadata: + combined_metadata[k] += f", {v}" + else: + combined_metadata[k] = str(v) + return Document(page_content=result, metadata=combined_metadata) + + class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel): """Combining documents by mapping a chain over them, then combining results.""" llm_chain: LLMChain - """Chain to apply to each document individually..""" + """Chain to apply to each document individually.""" combine_document_chain: BaseCombineDocumentsChain """Chain to use to combine results of applying llm_chain to documents.""" + collapse_document_chain: Optional[BaseCombineDocumentsChain] = None + """Chain to use to collapse intermediary results if needed. + If None, will use the combine_document_chain.""" document_variable_name: str """The variable name in the llm_chain to put the documents in. If only one variable in the llm_chain, this need not be provided.""" @@ -49,14 +93,45 @@ class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel): ) return values - def combine_docs(self, docs: List[Document], **kwargs: Any) -> str: - """Combine by mapping first chain over all, then stuffing into final chain.""" + @property + def _collapse_chain(self) -> BaseCombineDocumentsChain: + if self.collapse_document_chain is not None: + return self.collapse_document_chain + else: + return self.combine_document_chain + + def combine_docs( + self, docs: List[Document], token_max: int = 3000, **kwargs: Any + ) -> str: + """Combine documents in a map reduce manner. + + Combine by mapping first chain over all documents, then reducing the results. + This reducing can be done recursively if needed (if there are many documents). + """ results = self.llm_chain.apply( + # FYI - this is parallelized and so it is fast. 
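+            # Build one input dict per document: its page_content is keyed
+            # under document_variable_name and merged with the shared kwargs.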
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs] ) question_result_key = self.llm_chain.output_key result_docs = [ Document(page_content=r[question_result_key], metadata=docs[i].metadata) + # This uses metadata from the docs, and the textual results from `results` for i, r in enumerate(results) ] - return self.combine_document_chain.combine_docs(result_docs, **kwargs) + length_func = self.combine_document_chain.prompt_length + num_tokens = length_func(result_docs, **kwargs) + while num_tokens is not None and num_tokens > token_max: + new_result_doc_list = _split_list_of_docs( + result_docs, length_func, token_max, **kwargs + ) + result_docs = [] + for docs in new_result_doc_list: + new_doc = _collapse_docs( + docs, self._collapse_chain.combine_docs, **kwargs + ) + result_docs.append(new_doc) + num_tokens = self.combine_document_chain.prompt_length( + result_docs, **kwargs + ) + output = self.combine_document_chain.combine_docs(result_docs, **kwargs) + return output diff --git a/langchain/chains/combine_documents/stuff.py b/langchain/chains/combine_documents/stuff.py index 7b56c211..796de39e 100644 --- a/langchain/chains/combine_documents/stuff.py +++ b/langchain/chains/combine_documents/stuff.py @@ -1,6 +1,6 @@ """Chain that combines documents by stuffing into context.""" -from typing import Any, Dict, List +from typing import Any, Dict, List, Optional from pydantic import BaseModel, Extra, Field, root_validator @@ -55,8 +55,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel): ) return values - def combine_docs(self, docs: List[Document], **kwargs: Any) -> str: - """Stuff all documents into one prompt and pass to LLM.""" + def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict: # Get relevant information from each document. doc_dicts = [] for doc in docs: @@ -71,5 +70,16 @@ class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel): # Join the documents together to put them in the prompt. inputs = kwargs.copy() inputs[self.document_variable_name] = "\n\n".join(doc_strings) + return inputs + + def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]: + """Get the prompt length by formatting the prompt.""" + inputs = self._get_inputs(docs, **kwargs) + prompt = self.llm_chain.prompt.format(**inputs) + return self.llm_chain.llm.get_num_tokens(prompt) + + def combine_docs(self, docs: List[Document], **kwargs: Any) -> str: + """Stuff all documents into one prompt and pass to LLM.""" + inputs = self._get_inputs(docs, **kwargs) # Call predict on the LLM. 
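+        # prompt_length above formats this same prompt via _get_inputs, so the
+        # token count used when collapsing documents matches what is sent here.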
diff --git a/langchain/chains/combine_documents/stuff.py b/langchain/chains/combine_documents/stuff.py
index 7b56c211..796de39e 100644
--- a/langchain/chains/combine_documents/stuff.py
+++ b/langchain/chains/combine_documents/stuff.py
@@ -1,6 +1,6 @@
 """Chain that combines documents by stuffing into context."""
 
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 
 from pydantic import BaseModel, Extra, Field, root_validator
 
@@ -55,8 +55,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel):
             )
         return values
 
-    def combine_docs(self, docs: List[Document], **kwargs: Any) -> str:
-        """Stuff all documents into one prompt and pass to LLM."""
+    def _get_inputs(self, docs: List[Document], **kwargs: Any) -> dict:
         # Get relevant information from each document.
         doc_dicts = []
         for doc in docs:
@@ -71,5 +70,16 @@ class StuffDocumentsChain(BaseCombineDocumentsChain, BaseModel):
         # Join the documents together to put them in the prompt.
         inputs = kwargs.copy()
         inputs[self.document_variable_name] = "\n\n".join(doc_strings)
+        return inputs
+
+    def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
+        """Get the prompt length by formatting the prompt."""
+        inputs = self._get_inputs(docs, **kwargs)
+        prompt = self.llm_chain.prompt.format(**inputs)
+        return self.llm_chain.llm.get_num_tokens(prompt)
+
+    def combine_docs(self, docs: List[Document], **kwargs: Any) -> str:
+        """Stuff all documents into one prompt and pass to LLM."""
+        inputs = self._get_inputs(docs, **kwargs)
         # Call predict on the LLM.
         return self.llm_chain.predict(**inputs)
diff --git a/langchain/chains/conversation/memory.py b/langchain/chains/conversation/memory.py
index 4ab6cbbd..ebdbb7ef 100644
--- a/langchain/chains/conversation/memory.py
+++ b/langchain/chains/conversation/memory.py
@@ -46,6 +46,10 @@ class ConversationBufferMemory(Memory, BaseModel):
         ai = "AI: " + outputs[list(outputs.keys())[0]]
         self.buffer += "\n" + "\n".join([human, ai])
 
+    def clear(self) -> None:
+        """Clear memory contents."""
+        self.buffer = ""
+
 
 class ConversationalBufferWindowMemory(Memory, BaseModel):
     """Buffer for storing conversation memory."""
@@ -75,6 +79,10 @@ class ConversationalBufferWindowMemory(Memory, BaseModel):
         ai = "AI: " + outputs[list(outputs.keys())[0]]
         self.buffer.append("\n".join([human, ai]))
 
+    def clear(self) -> None:
+        """Clear memory contents."""
+        self.buffer = []
+
 
 class ConversationSummaryMemory(Memory, BaseModel):
     """Conversation summarizer to memory."""
@@ -118,3 +126,7 @@ class ConversationSummaryMemory(Memory, BaseModel):
         new_lines = "\n".join([human, ai])
         chain = LLMChain(llm=self.llm, prompt=self.prompt)
         self.buffer = chain.predict(summary=self.buffer, new_lines=new_lines)
+
+    def clear(self) -> None:
+        """Clear memory contents."""
+        self.buffer = ""
diff --git a/langchain/chains/llm.py b/langchain/chains/llm.py
index a1575ed4..31800774 100644
--- a/langchain/chains/llm.py
+++ b/langchain/chains/llm.py
@@ -51,18 +51,34 @@ class LLMChain(Chain, BaseModel):
         """
         return [self.output_key]
 
+    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
+        """Utilize the LLM generate method for speed gains."""
+        stop = None
+        if "stop" in input_list[0]:
+            stop = input_list[0]["stop"]
+        prompts = []
+        for inputs in input_list:
+            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
+            prompt = self.prompt.format(**selected_inputs)
+            if self.verbose:
+                langchain.logger.log_llm_inputs(selected_inputs, prompt)
+            if "stop" in inputs and inputs["stop"] != stop:
+                raise ValueError(
+                    "If `stop` is present in any inputs, it must be present in all."
+                )
+            prompts.append(prompt)
+        response = self.llm.generate(prompts, stop=stop)
+        outputs = []
+        for generation in response.generations:
+            # Get the text of the top generated string.
+            response_str = generation[0].text
+            if self.verbose:
+                langchain.logger.log_llm_response(response_str)
+            outputs.append({self.output_key: response_str})
+        return outputs
+
     def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
-        selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
-        prompt = self.prompt.format(**selected_inputs)
-        if self.verbose:
-            langchain.logger.log_llm_inputs(selected_inputs, prompt)
-        kwargs = {}
-        if "stop" in inputs:
-            kwargs["stop"] = inputs["stop"]
-        response = self.llm(prompt, **kwargs)
-        if self.verbose:
-            langchain.logger.log_llm_response(response)
-        return {self.output_key: response}
+        return self.apply([inputs])[0]
 
     def predict(self, **kwargs: Any) -> str:
         """Format prompt with kwargs and pass to LLM.
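The new `LLMChain.apply` formats every input dict up front and pushes all prompts through one `generate` call instead of one LLM round trip per input, and each memory class now has a `clear()`. A hedged usage sketch; the prompt text, the `PromptTemplate` import path, and a configured `OPENAI_API_KEY` are assumptions, not part of this changeset:

```python
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate

# Assumes OPENAI_API_KEY is set in the environment.
llm = OpenAI(temperature=0)
prompt = PromptTemplate(input_variables=["topic"], template="Tell me about {topic}.")
chain = LLMChain(llm=llm, prompt=prompt)

# One batched generate() call for both inputs rather than two separate calls;
# under the hood, generate() returns an LLMResult whose generations field is
# one list of candidates per input prompt.
outputs = chain.apply([{"topic": "rivers"}, {"topic": "mountains"}])
print([o[chain.output_key] for o in outputs])

# clear() resets accumulated conversation state between sessions.
memory = ConversationBufferMemory()
memory.clear()
```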
diff --git a/langchain/chains/qa_with_sources/__init__.py b/langchain/chains/qa_with_sources/__init__.py index 159abd76..82d93f50 100644 --- a/langchain/chains/qa_with_sources/__init__.py +++ b/langchain/chains/qa_with_sources/__init__.py @@ -1,5 +1,5 @@ """Load question answering with sources chains.""" -from typing import Any, Mapping, Protocol +from typing import Any, Mapping, Optional, Protocol from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain @@ -44,6 +44,7 @@ def _load_map_reduce_chain( document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", + collapse_prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> MapReduceDocumentsChain: map_chain = LLMChain(llm=llm, prompt=question_prompt) @@ -53,10 +54,19 @@ def _load_map_reduce_chain( document_variable_name=combine_document_variable_name, document_prompt=document_prompt, ) + if collapse_prompt is None: + collapse_chain = None + else: + collapse_chain = StuffDocumentsChain( + llm_chain=LLMChain(llm=llm, prompt=collapse_prompt), + document_variable_name=combine_document_variable_name, + document_prompt=document_prompt, + ) return MapReduceDocumentsChain( llm_chain=map_chain, combine_document_chain=combine_document_chain, document_variable_name=map_reduce_document_variable_name, + collapse_document_chain=collapse_chain, **kwargs, ) diff --git a/langchain/chains/question_answering/__init__.py b/langchain/chains/question_answering/__init__.py index 1591054c..9883e068 100644 --- a/langchain/chains/question_answering/__init__.py +++ b/langchain/chains/question_answering/__init__.py @@ -1,5 +1,5 @@ """Load question answering chains.""" -from typing import Any, Mapping, Protocol +from typing import Any, Mapping, Optional, Protocol from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain @@ -41,6 +41,7 @@ def _load_map_reduce_chain( combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT, combine_document_variable_name: str = "summaries", map_reduce_document_variable_name: str = "context", + collapse_prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> MapReduceDocumentsChain: map_chain = LLMChain(llm=llm, prompt=question_prompt) @@ -49,10 +50,18 @@ def _load_map_reduce_chain( combine_document_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=combine_document_variable_name ) + if collapse_prompt is None: + collapse_chain = None + else: + collapse_chain = StuffDocumentsChain( + llm_chain=LLMChain(llm=llm, prompt=collapse_prompt), + document_variable_name=combine_document_variable_name, + ) return MapReduceDocumentsChain( llm_chain=map_chain, combine_document_chain=combine_document_chain, document_variable_name=map_reduce_document_variable_name, + collapse_document_chain=collapse_chain, **kwargs, ) diff --git a/langchain/chains/summarize/__init__.py b/langchain/chains/summarize/__init__.py index ab90c70b..e613ff8d 100644 --- a/langchain/chains/summarize/__init__.py +++ b/langchain/chains/summarize/__init__.py @@ -1,5 +1,5 @@ """Load summarizing chains.""" -from typing import Any, Mapping, Protocol +from typing import Any, Mapping, Optional, Protocol from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from 
langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain @@ -37,6 +37,7 @@ def _load_map_reduce_chain( combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT, combine_document_variable_name: str = "text", map_reduce_document_variable_name: str = "text", + collapse_prompt: Optional[BasePromptTemplate] = None, **kwargs: Any, ) -> MapReduceDocumentsChain: map_chain = LLMChain(llm=llm, prompt=map_prompt) @@ -45,10 +46,18 @@ def _load_map_reduce_chain( combine_document_chain = StuffDocumentsChain( llm_chain=reduce_chain, document_variable_name=combine_document_variable_name ) + if collapse_prompt is None: + collapse_chain = None + else: + collapse_chain = StuffDocumentsChain( + llm_chain=LLMChain(llm=llm, prompt=collapse_prompt), + document_variable_name=combine_document_variable_name, + ) return MapReduceDocumentsChain( llm_chain=map_chain, combine_document_chain=combine_document_chain, document_variable_name=map_reduce_document_variable_name, + collapse_document_chain=collapse_chain, **kwargs, ) diff --git a/langchain/embeddings/openai.py b/langchain/embeddings/openai.py index 864e7758..205d028c 100644 --- a/langchain/embeddings/openai.py +++ b/langchain/embeddings/openai.py @@ -22,9 +22,8 @@ class OpenAIEmbeddings(BaseModel, Embeddings): """ client: Any #: :meta private: - model_name: str = "babbage" - """Model name to use.""" - + document_model_name: str = "text-embedding-ada-002" + query_model_name: str = "text-embedding-ada-002" openai_api_key: Optional[str] = None class Config: @@ -32,6 +31,26 @@ class OpenAIEmbeddings(BaseModel, Embeddings): extra = Extra.forbid + # TODO: deprecate this + @root_validator(pre=True) + def get_model_names(cls, values: Dict) -> Dict: + """Get model names from just old model name.""" + if "model_name" in values: + if "document_model_name" in values: + raise ValueError( + "Both `model_name` and `document_model_name` were provided, " + "but only one should be." + ) + if "query_model_name" in values: + raise ValueError( + "Both `model_name` and `query_model_name` were provided, " + "but only one should be." + ) + model_name = values.pop("model_name") + values["document_model_name"] = f"text-search-{model_name}-doc-001" + values["query_model_name"] = f"text-search-{model_name}-query-001" + return values + @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" @@ -66,7 +85,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): List of embeddings, one for each text. """ responses = [ - self._embedding_func(text, engine=f"text-search-{self.model_name}-doc-001") + self._embedding_func(text, engine=self.document_model_name) for text in texts ] return responses @@ -80,7 +99,5 @@ class OpenAIEmbeddings(BaseModel, Embeddings): Returns: Embeddings for the text. 
""" - embedding = self._embedding_func( - text, engine=f"text-search-{self.model_name}-query-001" - ) + embedding = self._embedding_func(text, engine=self.query_model_name) return embedding diff --git a/langchain/llms/__init__.py b/langchain/llms/__init__.py index fab84d41..f32ef72d 100644 --- a/langchain/llms/__init__.py +++ b/langchain/llms/__init__.py @@ -1,7 +1,28 @@ """Wrappers on top of large language models APIs.""" +from typing import Dict, Type + +from langchain.llms.ai21 import AI21 +from langchain.llms.base import LLM from langchain.llms.cohere import Cohere from langchain.llms.huggingface_hub import HuggingFaceHub +from langchain.llms.huggingface_pipeline import HuggingFacePipeline from langchain.llms.nlpcloud import NLPCloud from langchain.llms.openai import OpenAI -__all__ = ["Cohere", "NLPCloud", "OpenAI", "HuggingFaceHub"] +__all__ = [ + "Cohere", + "NLPCloud", + "OpenAI", + "HuggingFaceHub", + "HuggingFacePipeline", + "AI21", +] + +type_to_cls_dict: Dict[str, Type[LLM]] = { + "ai21": AI21, + "cohere": Cohere, + "huggingface_hub": HuggingFaceHub, + "nlpcloud": NLPCloud, + "openai": OpenAI, + "huggingface_pipeline": HuggingFacePipeline, +} diff --git a/langchain/llms/ai21.py b/langchain/llms/ai21.py index a870d9e4..77a9300d 100644 --- a/langchain/llms/ai21.py +++ b/langchain/llms/ai21.py @@ -19,7 +19,7 @@ class AI21PenaltyData(BaseModel): applyToEmojis: bool = True -class AI21(BaseModel, LLM): +class AI21(LLM, BaseModel): """Wrapper around AI21 large language models. To use, you should have the environment variable ``AI21_API_KEY`` @@ -96,7 +96,12 @@ class AI21(BaseModel, LLM): """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} - def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "ai21" + + def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to AI21's complete endpoint. Args: diff --git a/langchain/llms/base.py b/langchain/llms/base.py index df81242f..4488475d 100644 --- a/langchain/llms/base.py +++ b/langchain/llms/base.py @@ -1,15 +1,116 @@ """Base interface for large language models to expose.""" +import json from abc import ABC, abstractmethod -from typing import Any, List, Mapping, Optional +from pathlib import Path +from typing import Any, Dict, List, Mapping, NamedTuple, Optional, Union +import yaml +from pydantic import BaseModel, Extra -class LLM(ABC): +import langchain +from langchain.schema import Generation + + +class LLMResult(NamedTuple): + """Class that contains all relevant information for an LLM Result.""" + + generations: List[List[Generation]] + """List of the things generated. This is List[List[]] because + each input could have multiple generations.""" + llm_output: Optional[dict] = None + """For arbitrary LLM provider specific output.""" + + +class LLM(BaseModel, ABC): """LLM wrapper should take in a prompt and return a string.""" + class Config: + """Configuration for this pydantic object.""" + + extra = Extra.forbid + + def _generate( + self, prompts: List[str], stop: Optional[List[str]] = None + ) -> LLMResult: + """Run the LLM on the given prompt and input.""" + # TODO: add caching here. 
diff --git a/langchain/llms/base.py b/langchain/llms/base.py
index df81242f..4488475d 100644
--- a/langchain/llms/base.py
+++ b/langchain/llms/base.py
@@ -1,15 +1,116 @@
 """Base interface for large language models to expose."""
+import json
 from abc import ABC, abstractmethod
-from typing import Any, List, Mapping, Optional
+from pathlib import Path
+from typing import Any, Dict, List, Mapping, NamedTuple, Optional, Union
 
+import yaml
+from pydantic import BaseModel, Extra
 
-class LLM(ABC):
+import langchain
+from langchain.schema import Generation
+
+
+class LLMResult(NamedTuple):
+    """Class that contains all relevant information for an LLM Result."""
+
+    generations: List[List[Generation]]
+    """List of the things generated. This is List[List[]] because
+    each input could have multiple generations."""
+    llm_output: Optional[dict] = None
+    """For arbitrary LLM provider specific output."""
+
+
+class LLM(BaseModel, ABC):
     """LLM wrapper should take in a prompt and return a string."""
 
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+
+    def _generate(
+        self, prompts: List[str], stop: Optional[List[str]] = None
+    ) -> LLMResult:
+        """Run the LLM on the given prompt and input."""
+        # TODO: add caching here.
+        generations = []
+        for prompt in prompts:
+            text = self(prompt, stop=stop)
+            generations.append([Generation(text=text)])
+        return LLMResult(generations=generations)
+
+    def generate(
+        self, prompts: List[str], stop: Optional[List[str]] = None
+    ) -> LLMResult:
+        """Run the LLM on the given prompt and input."""
+        if langchain.llm_cache is None:
+            return self._generate(prompts, stop=stop)
+        params = self._llm_dict()
+        params["stop"] = stop
+        llm_string = str(sorted([(k, v) for k, v in params.items()]))
+        missing_prompts = []
+        missing_prompt_idxs = []
+        existing_prompts = {}
+        for i, prompt in enumerate(prompts):
+            cache_val = langchain.llm_cache.lookup(prompt, llm_string)
+            if isinstance(cache_val, list):
+                existing_prompts[i] = cache_val
+            else:
+                missing_prompts.append(prompt)
+                missing_prompt_idxs.append(i)
+        new_results = self._generate(missing_prompts, stop=stop)
+        for i, result in enumerate(new_results.generations):
+            # Map each new result back to the index of the prompt that was
+            # actually missing from the cache, not to the enumeration index.
+            existing_prompts[missing_prompt_idxs[i]] = result
+            prompt = prompts[missing_prompt_idxs[i]]
+            langchain.llm_cache.update(prompt, llm_string, result)
+        generations = [existing_prompts[i] for i in range(len(prompts))]
+        return LLMResult(generations=generations, llm_output=new_results.llm_output)
+
+    def get_num_tokens(self, text: str) -> int:
+        """Get the number of tokens present in the text."""
+        # TODO: this method may not be exact.
+        # TODO: this method may differ based on model (eg codex).
+        try:
+            from transformers import GPT2TokenizerFast
+        except ImportError:
+            raise ValueError(
+                "Could not import transformers python package. "
+                "This is needed in order to calculate get_num_tokens. "
+                "Please install it with `pip install transformers`."
+            )
+        # create a GPT-2 tokenizer instance (GPT-3 uses the same BPE vocabulary)
+        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
+
+        # tokenize the text using the GPT-2 tokenizer
+        tokenized_text = tokenizer.tokenize(text)
+
+        # calculate the number of tokens in the tokenized text
+        return len(tokenized_text)
+
     @abstractmethod
-    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         """Run the LLM on the given prompt and input."""
 
+    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+        """Check cache and run the LLM on the given prompt and input."""
+        if langchain.llm_cache is None:
+            return self._call(prompt, stop=stop)
+        params = self._llm_dict()
+        params["stop"] = stop
+        llm_string = str(sorted([(k, v) for k, v in params.items()]))
+        if langchain.llm_cache is not None:
+            cache_val = langchain.llm_cache.lookup(prompt, llm_string)
+            if cache_val is not None:
+                if isinstance(cache_val, str):
+                    return cache_val
+                else:
+                    return cache_val[0].text
+        return_val = self._call(prompt, stop=stop)
+        if langchain.llm_cache is not None:
+            langchain.llm_cache.update(prompt, llm_string, return_val)
+        return return_val
+
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
@@ -19,3 +120,46 @@ class LLM(ABC):
         """Get a string representation of the object for printing."""
         cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
         return f"{cls_name}\nParams: {self._identifying_params}"
+
+    @property
+    @abstractmethod
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+
+    def _llm_dict(self) -> Dict:
+        """Return a dictionary of the LLM."""
+        starter_dict = dict(self._identifying_params)
+        starter_dict["_type"] = self._llm_type
+        return starter_dict
+
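Both cache paths above derive the same deterministic key: the LLM's identifying parameters plus the stop sequence, sorted into a string. A standalone sketch of that key (the parameter values are illustrative):

```python
# Two LLM instances with identical settings produce identical cache keys and
# therefore share cached completions; any differing parameter changes the key.
params = {"model_name": "text-davinci-003", "temperature": 0.7, "stop": None}
llm_string = str(sorted([(k, v) for k, v in params.items()]))
print(llm_string)
# [('model_name', 'text-davinci-003'), ('stop', None), ('temperature', 0.7)]
```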
+    def save(self, file_path: Union[Path, str]) -> None:
+        """Save the LLM.
+
+        Args:
+            file_path: Path to file to save the LLM to.
+
+        Example:
+        .. code-block:: python
+
+            llm.save(file_path="path/llm.yaml")
+        """
+        # Convert file to Path object.
+        if isinstance(file_path, str):
+            save_path = Path(file_path)
+        else:
+            save_path = file_path
+
+        directory_path = save_path.parent
+        directory_path.mkdir(parents=True, exist_ok=True)
+
+        # Fetch dictionary to save
+        prompt_dict = self._llm_dict()
+
+        if save_path.suffix == ".json":
+            with open(save_path, "w") as f:
+                json.dump(prompt_dict, f, indent=4)
+        elif save_path.suffix == ".yaml":
+            with open(save_path, "w") as f:
+                yaml.dump(prompt_dict, f, default_flow_style=False)
+        else:
+            raise ValueError(f"{save_path} must be json or yaml")
diff --git a/langchain/llms/cohere.py b/langchain/llms/cohere.py
index e051ba47..d9a3f51a 100644
--- a/langchain/llms/cohere.py
+++ b/langchain/llms/cohere.py
@@ -85,7 +85,12 @@ class Cohere(LLM, BaseModel):
         """Get the identifying parameters."""
         return {**{"model": self.model}, **self._default_params}
 
-    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    @property
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+        return "cohere"
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         """Call out to Cohere's generate endpoint.
 
         Args:
diff --git a/langchain/llms/huggingface_hub.py b/langchain/llms/huggingface_hub.py
index 5cded677..ef53275d 100644
--- a/langchain/llms/huggingface_hub.py
+++ b/langchain/llms/huggingface_hub.py
@@ -74,9 +74,17 @@ class HuggingFaceHub(LLM, BaseModel):
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
         _model_kwargs = self.model_kwargs or {}
-        return {**{"repo_id": self.repo_id}, **_model_kwargs}
+        return {
+            **{"repo_id": self.repo_id, "task": self.task},
+            **{"model_kwargs": _model_kwargs},
+        }
 
-    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    @property
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+        return "huggingface_hub"
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         """Call out to HuggingFace Hub's inference endpoint.
 
         Args:
diff --git a/langchain/llms/huggingface_pipeline.py b/langchain/llms/huggingface_pipeline.py
new file mode 100644
index 00000000..6db9bcd0
--- /dev/null
+++ b/langchain/llms/huggingface_pipeline.py
@@ -0,0 +1,118 @@
+"""Wrapper around HuggingFace Pipeline APIs."""
+from typing import Any, List, Mapping, Optional
+
+from pydantic import BaseModel, Extra
+
+from langchain.llms.base import LLM
+from langchain.llms.utils import enforce_stop_tokens
+
+DEFAULT_MODEL_ID = "gpt2"
+DEFAULT_TASK = "text-generation"
+VALID_TASKS = ("text2text-generation", "text-generation")
+
+
+class HuggingFacePipeline(LLM, BaseModel):
+    """Wrapper around HuggingFace Pipeline API.
+
+    To use, you should have the ``transformers`` python package installed.
+
+    Only supports `text-generation` and `text2text-generation` for now.
+
+    Example using from_model_id:
+        .. code-block:: python
+
+            from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+            hf = HuggingFacePipeline.from_model_id(
+                model_id="gpt2", task="text-generation"
+            )
+    Example passing pipeline in directly:
+        .. code-block:: python
+
+            from langchain.llms.huggingface_pipeline import HuggingFacePipeline
+            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+            model_id = "gpt2"
+            tokenizer = AutoTokenizer.from_pretrained(model_id)
+            model = AutoModelForCausalLM.from_pretrained(model_id)
+            pipe = pipeline(
+                "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
+            )
+            hf = HuggingFacePipeline(pipeline=pipe)
+    """
+
+    pipeline: Any  #: :meta private:
+    model_id: str = DEFAULT_MODEL_ID
+    """Model name to use."""
+    model_kwargs: Optional[dict] = None
+    """Keyword arguments to pass to the model."""
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+
+    @classmethod
+    def from_model_id(
+        cls,
+        model_id: str,
+        task: str,
+        model_kwargs: Optional[dict] = None,
+        **kwargs: Any,
+    ) -> LLM:
+        """Construct the pipeline object from model_id and task."""
+        # Guard against model_kwargs being None before unpacking it below.
+        _model_kwargs = model_kwargs or {}
+        try:
+            from transformers import AutoModelForCausalLM, AutoTokenizer
+            from transformers import pipeline as hf_pipeline
+
+            tokenizer = AutoTokenizer.from_pretrained(model_id)
+            model = AutoModelForCausalLM.from_pretrained(model_id)
+            pipeline = hf_pipeline(
+                task=task, model=model, tokenizer=tokenizer, **_model_kwargs
+            )
+            if pipeline.task not in VALID_TASKS:
+                raise ValueError(
+                    f"Got invalid task {pipeline.task}, "
+                    f"currently only {VALID_TASKS} are supported"
+                )
+
+            return cls(
+                pipeline=pipeline,
+                model_id=model_id,
+                model_kwargs=model_kwargs,
+                **kwargs,
+            )
+        except ImportError:
+            raise ValueError(
+                "Could not import transformers python package. "
+                "Please install it with `pip install transformers`."
+            )
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        """Get the identifying parameters."""
+        return {
+            **{"model_id": self.model_id},
+            **{"model_kwargs": self.model_kwargs},
+        }
+
+    @property
+    def _llm_type(self) -> str:
+        return "huggingface_pipeline"
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+        response = self.pipeline(text_inputs=prompt)
+        if self.pipeline.task == "text-generation":
+            # Text generation return includes the starter text.
+            text = response[0]["generated_text"][len(prompt) :]
+        elif self.pipeline.task == "text2text-generation":
+            text = response[0]["generated_text"]
+        else:
+            raise ValueError(
+                f"Got invalid task {self.pipeline.task}, "
+                f"currently only {VALID_TASKS} are supported"
+            )
+        if stop is not None:
+            # This is a bit hacky, but I can't figure out a better way to enforce
+            # stop tokens when running local pipelines.
+            text = enforce_stop_tokens(text, stop)
+        return text
diff --git a/langchain/llms/loading.py b/langchain/llms/loading.py
new file mode 100644
index 00000000..d5881ba4
--- /dev/null
+++ b/langchain/llms/loading.py
@@ -0,0 +1,42 @@
+"""Base interface for loading large language model APIs."""
+import json
+from pathlib import Path
+from typing import Union
+
+import yaml
+
+from langchain.llms import type_to_cls_dict
+from langchain.llms.base import LLM
+
+
+def load_llm_from_config(config: dict) -> LLM:
+    """Load LLM from Config Dict."""
+    if "_type" not in config:
+        raise ValueError("Must specify an LLM Type in config")
+    config_type = config.pop("_type")
+
+    if config_type not in type_to_cls_dict:
+        raise ValueError(f"Loading {config_type} LLM not supported")
+
+    llm_cls = type_to_cls_dict[config_type]
+    return llm_cls(**config)
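Together with `save()` on the base class, `load_llm` below completes a serialization round trip: `save()` writes `_identifying_params` plus a `_type` tag, and loading dispatches on that tag through `type_to_cls_dict`. A hedged sketch; it assumes `OPENAI_API_KEY` is set, since the OpenAI wrapper validates the key at construction time:

```python
from langchain.llms import OpenAI
from langchain.llms.loading import load_llm

llm = OpenAI(temperature=0.2)

# save() serializes the identifying parameters plus a "_type" tag ...
llm.save("llm.yaml")

# ... and load_llm() reads the file back and dispatches on "_type".
reloaded = load_llm("llm.yaml")
assert reloaded.temperature == 0.2
```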
+
+
+def load_llm(file: Union[str, Path]) -> LLM:
+    """Load LLM from file."""
+    # Convert file to Path object.
+    if isinstance(file, str):
+        file_path = Path(file)
+    else:
+        file_path = file
+    # Load from either json or yaml.
+    if file_path.suffix == ".json":
+        with open(file_path) as f:
+            config = json.load(f)
+    elif file_path.suffix == ".yaml":
+        with open(file_path, "r") as f:
+            config = yaml.safe_load(f)
+    else:
+        raise ValueError("File type must be json or yaml")
+    # Load the LLM from the config now.
+    return load_llm_from_config(config)
diff --git a/langchain/llms/manifest.py b/langchain/llms/manifest.py
index 0e1fb720..b9a4ce14 100644
--- a/langchain/llms/manifest.py
+++ b/langchain/llms/manifest.py
@@ -37,7 +37,12 @@ class ManifestWrapper(LLM, BaseModel):
         kwargs = self.llm_kwargs or {}
         return {**self.client.client.get_model_params(), **kwargs}
 
-    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    @property
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+        return "manifest"
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         """Call out to LLM through Manifest."""
         if stop is not None and len(stop) != 1:
             raise NotImplementedError(
diff --git a/langchain/llms/nlpcloud.py b/langchain/llms/nlpcloud.py
index d9e4c54e..94f0df7d 100644
--- a/langchain/llms/nlpcloud.py
+++ b/langchain/llms/nlpcloud.py
@@ -106,7 +106,12 @@ class NLPCloud(LLM, BaseModel):
         """Get the identifying parameters."""
         return {**{"model_name": self.model_name}, **self._default_params}
 
-    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    @property
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+        return "nlpcloud"
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         """Call out to NLPCloud's create endpoint.
 
         Args:
diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py
index 22070c1a..2cca3e2e 100644
--- a/langchain/llms/openai.py
+++ b/langchain/llms/openai.py
@@ -1,9 +1,11 @@
 """Wrapper around OpenAI APIs."""
-from typing import Any, Dict, List, Mapping, Optional
+import sys
+from typing import Any, Dict, Generator, List, Mapping, Optional
 
 from pydantic import BaseModel, Extra, Field, root_validator
 
-from langchain.llms.base import LLM
+from langchain.llms.base import LLM, LLMResult
+from langchain.schema import Generation
 from langchain.utils import get_from_dict_or_env
 
@@ -29,7 +31,9 @@ class OpenAI(LLM, BaseModel):
     temperature: float = 0.7
     """What sampling temperature to use."""
     max_tokens: int = 256
-    """The maximum number of tokens to generate in the completion."""
+    """The maximum number of tokens to generate in the completion.
+    -1 returns as many tokens as possible given the prompt and
+    the model's maximal context size."""
     top_p: float = 1
     """Total probability mass of tokens to consider at each step."""
     frequency_penalty: float = 0
@@ -43,6 +47,8 @@ class OpenAI(LLM, BaseModel):
     model_kwargs: Dict[str, Any] = Field(default_factory=dict)
     """Holds any model parameters valid for `create` call not explicitly specified."""
     openai_api_key: Optional[str] = None
+    batch_size: int = 20
+    """Batch size to use when passing multiple documents to generate."""
 
     class Config:
         """Configuration for this pydantic object."""
@@ -95,12 +101,100 @@ class OpenAI(LLM, BaseModel):
         }
         return {**normal_params, **self.model_kwargs}
 
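The `max_tokens = -1` convention documented above gives the completion whatever context remains once the prompt is counted. With illustrative numbers (the 4,000-token window matches text-davinci-003; the prompt size is made up):

```python
# Sketch of the budget arithmetic behind max_tokens=-1: the completion gets
# everything the context window can still hold after the prompt is counted.
context_size = 4000      # e.g. text-davinci-003's window
prompt_tokens = 750      # pretend get_num_tokens(prompt) returned this
max_completion_tokens = context_size - prompt_tokens
print(max_completion_tokens)  # 3250
```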
+    def _generate(
+        self, prompts: List[str], stop: Optional[List[str]] = None
+    ) -> LLMResult:
+        """Call out to OpenAI's endpoint with k unique prompts.
+
+        Args:
+            prompts: The prompts to pass into the model.
+            stop: Optional list of stop words to use when generating.
+
+        Returns:
+            The full LLM output.
+
+        Example:
+            .. code-block:: python
+
+                response = openai.generate(["Tell me a joke."])
+        """
+        # TODO: write a unit test for this
+        params = self._default_params
+        if stop is not None:
+            if "stop" in params:
+                raise ValueError("`stop` found in both the input and default params.")
+            params["stop"] = stop
+
+        if params["max_tokens"] == -1:
+            if len(prompts) != 1:
+                raise ValueError(
+                    "max_tokens set to -1 not supported for multiple inputs."
+                )
+            params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
+        sub_prompts = [
+            prompts[i : i + self.batch_size]
+            for i in range(0, len(prompts), self.batch_size)
+        ]
+        choices = []
+        token_usage = {}
+        # Get the token usage from the response.
+        # Includes prompt, completion, and total tokens used.
+        _keys = ["completion_tokens", "prompt_tokens", "total_tokens"]
+        for _prompts in sub_prompts:
+            response = self.client.create(
+                model=self.model_name, prompt=_prompts, **params
+            )
+            choices.extend(response["choices"])
+            for _key in _keys:
+                if _key not in token_usage:
+                    token_usage[_key] = response["usage"][_key]
+                else:
+                    token_usage[_key] += response["usage"][_key]
+        generations = []
+        for i, prompt in enumerate(prompts):
+            sub_choices = choices[i * self.n : (i + 1) * self.n]
+            generations.append(
+                [Generation(text=choice["text"]) for choice in sub_choices]
+            )
+        return LLMResult(
+            generations=generations, llm_output={"token_usage": token_usage}
+        )
+
+    def stream(self, prompt: str) -> Generator:
+        """Call OpenAI with streaming flag and return the resulting generator.
+
+        Args:
+            prompt: The prompts to pass into the model.
+
+        Returns:
+            A generator representing the stream of tokens from OpenAI.
+
+        Example:
+            .. code-block:: python
+
+                generator = openai.stream("Tell me a joke.")
+                for token in generator:
+                    yield token
+        """
+        params = self._default_params
+        if params["best_of"] != 1:
+            raise ValueError("OpenAI only supports best_of == 1 for streaming")
+        params["stream"] = True
+        generator = self.client.create(model=self.model_name, prompt=prompt, **params)
+
+        return generator
+
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
         """Get the identifying parameters."""
-        return {**{"model": self.model_name}, **self._default_params}
+        return {**{"model_name": self.model_name}, **self._default_params}
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+        return "openai"
 
-    def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
         """Call out to OpenAI's create endpoint.
 
         Args:
@@ -115,10 +209,82 @@ class OpenAI(LLM, BaseModel):
             response = openai("Tell me a joke.")
         """
-        params = self._default_params
-        if stop is not None:
-            if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
-            params["stop"] = stop
-        response = self.client.create(model=self.model_name, prompt=prompt, **params)
-        return response["choices"][0]["text"]
+        return self.generate([prompt], stop=stop).generations[0][0].text
+
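`_generate` above slices the prompt list into `batch_size` chunks for the API, then re-chunks the flat `choices` list so that prompt `i` owns `choices[i * n : (i + 1) * n]`. A standalone sketch of that arithmetic with made-up prompts:

```python
# Prompts go out in slices of batch_size; the flat list of returned choices
# is then regrouped so each prompt gets its n candidate completions back.
prompts = ["p0", "p1", "p2", "p3", "p4"]
batch_size, n = 2, 1

sub_prompts = [
    prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)
]
print(sub_prompts)  # [['p0', 'p1'], ['p2', 'p3'], ['p4']]

choices = [f"completion-for-{p}" for p in prompts]  # stand-in for API output
for i, prompt in enumerate(prompts):
    print(prompt, "->", choices[i * n : (i + 1) * n])
```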
+    def get_num_tokens(self, text: str) -> int:
+        """Calculate num tokens with tiktoken package."""
+        # tiktoken is NOT supported for Python 3.8 or below
+        if sys.version_info < (3, 9):
+            return super().get_num_tokens(text)
+        try:
+            import tiktoken
+        except ImportError:
+            raise ValueError(
+                "Could not import tiktoken python package. "
+                "This is needed in order to calculate get_num_tokens. "
+                "Please install it with `pip install tiktoken`."
+            )
+        # create a GPT-3 encoder instance
+        enc = tiktoken.get_encoding("gpt2")
+
+        # encode the text using the GPT-3 encoder
+        tokenized_text = enc.encode(text)
+
+        # calculate the number of tokens in the encoded text
+        return len(tokenized_text)
+
+    def modelname_to_contextsize(self, modelname: str) -> int:
+        """Calculate the maximum number of tokens possible to generate for a model.
+
+        text-davinci-003: 4,000 tokens
+        text-curie-001: 2,048 tokens
+        text-babbage-001: 2,048 tokens
+        text-ada-001: 2,048 tokens
+        code-davinci-002: 8,000 tokens
+        code-cushman-001: 2,048 tokens
+
+        Args:
+            modelname: The modelname we want to know the context size for.
+
+        Returns:
+            The maximum context size
+
+        Example:
+            .. code-block:: python
+
+                max_tokens = openai.modelname_to_contextsize("text-davinci-003")
+        """
+        if modelname == "text-davinci-003":
+            return 4000
+        elif modelname == "text-curie-001":
+            return 2048
+        elif modelname == "text-babbage-001":
+            return 2048
+        elif modelname == "text-ada-001":
+            return 2048
+        elif modelname == "code-davinci-002":
+            return 8000
+        elif modelname == "code-cushman-001":
+            return 2048
+        else:
+            return 4000
+
+    def max_tokens_for_prompt(self, prompt: str) -> int:
+        """Calculate the maximum number of tokens possible to generate for a prompt.
+
+        Args:
+            prompt: The prompt to pass into the model.
+
+        Returns:
+            The maximum number of tokens to generate for a prompt.
+
+        Example:
+            .. code-block:: python
+
+                max_tokens = openai.max_tokens_for_prompt("Tell me a joke.")
+        """
+        num_tokens = self.get_num_tokens(prompt)
+
+        # get max context size for model by name
+        max_size = self.modelname_to_contextsize(self.model_name)
+        return max_size - num_tokens
diff --git a/langchain/schema.py b/langchain/schema.py
index 67a64c01..4e255e3e 100644
--- a/langchain/schema.py
+++ b/langchain/schema.py
@@ -9,3 +9,11 @@ class AgentAction(NamedTuple):
     tool: str
     tool_input: str
     log: str
+
+
+class Generation(NamedTuple):
+    """Output of a single generation."""
+
+    text: str
+    """Generated text output."""
+    # TODO: add log probs
diff --git a/langchain/text_splitter.py b/langchain/text_splitter.py
index c2da1745..daf720e1 100644
--- a/langchain/text_splitter.py
+++ b/langchain/text_splitter.py
@@ -49,7 +49,7 @@ class TextSplitter(ABC):
     @classmethod
     def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any) -> TextSplitter:
-        """Text splitter than uses HuggingFace tokenizer to count length."""
+        """Text splitter that uses HuggingFace tokenizer to count length."""
         try:
             from transformers import PreTrainedTokenizerBase
 
@@ -68,6 +68,27 @@ class TextSplitter(ABC):
             )
         return cls(length_function=_huggingface_tokenizer_length, **kwargs)
 
+    @classmethod
+    def from_tiktoken_encoder(
+        cls, encoding_name: str = "gpt2", **kwargs: Any
+    ) -> TextSplitter:
+        """Text splitter that uses tiktoken encoder to count length."""
+        try:
+            import tiktoken
+        except ImportError:
+            raise ValueError(
+                "Could not import tiktoken python package. "
+                "This is needed in order to calculate the text length in tokens. "
+                "Please install it with `pip install tiktoken`."
+ ) + # create a GPT-3 encoder instance + enc = tiktoken.get_encoding(encoding_name) + + def _tiktoken_encoder(text: str) -> int: + return len(enc.encode(text)) + + return cls(length_function=_tiktoken_encoder, **kwargs) + class CharacterTextSplitter(TextSplitter): """Implementation of splitting text that looks at characters.""" diff --git a/poetry.lock b/poetry.lock index a6a4d326..9dcbd09b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -54,6 +54,17 @@ cffi = ">=1.0.1" dev = ["cogapp", "pre-commit", "pytest", "wheel"] tests = ["pytest"] +[[package]] +name = "arrow" +version = "1.2.3" +description = "Better dates & times for Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +python-dateutil = ">=2.7.0" + [[package]] name = "asttokens" version = "2.2.1" @@ -115,7 +126,7 @@ lxml = ["lxml"] [[package]] name = "black" -version = "22.10.0" +version = "22.12.0" description = "The uncompromising code formatter." category = "dev" optional = false @@ -162,6 +173,20 @@ python-versions = "*" [package.dependencies] numpy = ">=1.15.0" +[[package]] +name = "blobfile" +version = "2.0.0" +description = "Read GCS, ABS and local paths with the same interface, clone of tensorflow.io.gfile" +category = "main" +optional = true +python-versions = ">=3.7.0" + +[package.dependencies] +filelock = ">=3.0,<4.0" +lxml = ">=4.9,<5.0" +pycryptodomex = ">=3.8,<4.0" +urllib3 = ">=1.25,<2.0" + [[package]] name = "catalogue" version = "2.0.8" @@ -172,7 +197,7 @@ python-versions = ">=3.6" [[package]] name = "certifi" -version = "2022.9.24" +version = "2022.12.7" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -219,6 +244,20 @@ category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +[[package]] +name = "comm" +version = "0.1.2" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
+category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +traitlets = ">=5.3" + +[package.extras] +test = ["pytest"] + [[package]] name = "confection" version = "0.0.3" @@ -231,6 +270,20 @@ python-versions = ">=3.6" pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<1.11.0" srsly = ">=2.4.0,<3.0.0" +[[package]] +name = "coverage" +version = "6.5.0" +description = "Code coverage measurement for Python" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + [[package]] name = "cymem" version = "2.0.7" @@ -291,7 +344,7 @@ develop = ["aiohttp", "mock", "pytest", "pytest-asyncio", "pytest-cov", "pytest- [[package]] name = "elasticsearch" -version = "8.5.2" +version = "8.5.3" description = "Python client for Elasticsearch" category = "main" optional = true @@ -390,6 +443,14 @@ python-versions = "*" flake8 = ">=3" pydocstyle = ">=2.1" +[[package]] +name = "fqdn" +version = "1.5.1" +description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" + [[package]] name = "greenlet" version = "2.0.1" @@ -455,7 +516,7 @@ testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packag [[package]] name = "importlib-resources" -version = "5.10.0" +version = "5.10.1" description = "Read resources from Python packages" category = "dev" optional = false @@ -478,7 +539,7 @@ python-versions = "*" [[package]] name = "ipykernel" -version = "6.17.1" +version = "6.19.2" description = "IPython Kernel for Jupyter" category = "dev" optional = false @@ -486,6 +547,7 @@ python-versions = ">=3.8" [package.dependencies] appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" debugpy = ">=1.0" ipython = ">=7.23.1" jupyter-client = ">=6.1.12" @@ -495,11 +557,14 @@ packaging = "*" psutil = "*" pyzmq = ">=17" tornado = ">=6.1" -traitlets = ">=5.1.0" +traitlets = ">=5.4.0" [package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt"] -test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-cov", "pytest-timeout"] +lint = ["black (>=22.6.0)", "mdformat (>0.7)", "ruff (>=0.0.156)"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov", "pytest-timeout"] +typing = ["mypy (>=0.990)"] [[package]] name = "ipython" @@ -546,7 +611,7 @@ python-versions = "*" [[package]] name = "ipywidgets" -version = "8.0.2" +version = "8.0.3" description = "Jupyter interactive widgets" category = "dev" optional = false @@ -562,13 +627,24 @@ widgetsnbextension = ">=4.0,<5.0" [package.extras] test = ["jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] +[[package]] +name = "isoduration" +version = "20.11.0" +description = "Operations with ISO 8601 durations" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +arrow = ">=0.15.0" + [[package]] name = "isort" -version = "5.10.1" +version = "5.11.2" description = "A Python utility / library to sort Python imports." 
category = "dev" optional = false -python-versions = ">=3.6.1,<4.0" +python-versions = ">=3.7.0" [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] @@ -614,6 +690,14 @@ category = "main" optional = true python-versions = ">=3.7" +[[package]] +name = "jsonpointer" +version = "2.3" +description = "Identify specific nodes in a JSON document (RFC 6901)" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + [[package]] name = "jsonschema" version = "4.17.3" @@ -624,9 +708,17 @@ python-versions = ">=3.7" [package.dependencies] attrs = ">=17.4.0" +fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""} pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} +uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +webcolors = {version = ">=1.11", optional = true, markers = "extra == \"format-nongpl\""} [package.extras] format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] @@ -704,34 +796,73 @@ traitlets = ">=5.3" docs = ["myst-parser", "sphinxcontrib-github-alt", "traitlets"] test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] +[[package]] +name = "jupyter-events" +version = "0.5.0" +description = "Jupyter Event System library" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +jsonschema = {version = ">=4.3.0", extras = ["format-nongpl"]} +python-json-logger = "*" +pyyaml = "*" +traitlets = "*" + +[package.extras] +cli = ["click", "rich"] +test = ["click", "coverage", "pre-commit", "pytest (>=6.1.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "pytest-cov", "rich"] + [[package]] name = "jupyter-server" -version = "1.23.3" +version = "2.0.1" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." 
category = "dev" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" [package.dependencies] anyio = ">=3.1.0,<4" argon2-cffi = "*" jinja2 = "*" -jupyter-client = ">=6.1.12" -jupyter-core = ">=4.7.0" +jupyter-client = ">=7.4.4" +jupyter-core = ">=4.12,<5.0.0 || >=5.1.0" +jupyter-events = ">=0.4.0" +jupyter-server-terminals = "*" nbconvert = ">=6.4.4" -nbformat = ">=5.2.0" +nbformat = ">=5.3.0" packaging = "*" prometheus-client = "*" pywinpty = {version = "*", markers = "os_name == \"nt\""} -pyzmq = ">=17" -Send2Trash = "*" +pyzmq = ">=24" +send2trash = "*" terminado = ">=0.8.3" -tornado = ">=6.1.0" -traitlets = ">=5.1" +tornado = ">=6.2.0" +traitlets = ">=5.6.0" websocket-client = "*" [package.extras] -test = ["coverage", "ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", "pytest-cov", "pytest-mock", "pytest-timeout", "pytest-tornasync", "requests"] +docs = ["docutils (<0.20)", "ipykernel", "jinja2", "jupyter-client", "jupyter-server", "mistune (<1.0.0)", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxemoji", "tornado"] +lint = ["black (>=22.6.0)", "mdformat (>0.7)", "ruff (>=0.0.156)"] +test = ["ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.4)", "pytest-timeout", "requests"] +typing = ["mypy (>=0.990)"] + +[[package]] +name = "jupyter-server-terminals" +version = "0.4.2" +description = "A Jupyter Server Extension Providing Terminals." +category = "dev" +optional = false +python-versions = ">=3.8" + +[package.dependencies] +pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} +terminado = ">=0.8.3" + +[package.extras] +docs = ["jinja2", "jupyter-server", "mistune (<2.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxemoji", "tornado"] +test = ["coverage", "jupyter-server (>=2.0.0rc8)", "pytest (>=7.0)", "pytest-cov", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] [[package]] name = "jupyterlab-pygments" @@ -743,7 +874,7 @@ python-versions = ">=3.7" [[package]] name = "jupyterlab-widgets" -version = "3.0.3" +version = "3.0.4" description = "Jupyter interactive widgets for JupyterLab" category = "dev" optional = false @@ -760,6 +891,20 @@ python-versions = ">=3.6" [package.extras] data = ["language-data (>=1.1,<2.0)"] +[[package]] +name = "lxml" +version = "4.9.2" +description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
+category = "main" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" + +[package.extras] +cssselect = ["cssselect (>=0.7)"] +html5 = ["html5lib"] +htmlsoup = ["BeautifulSoup4"] +source = ["Cython (>=0.29.7)"] + [[package]] name = "manifest-ml" version = "0.0.1" @@ -963,7 +1108,7 @@ python-versions = ">=3.5" [[package]] name = "nltk" -version = "3.7" +version = "3.8" description = "Natural Language Toolkit" category = "main" optional = true @@ -1036,16 +1181,61 @@ category = "main" optional = false python-versions = ">=3.8" +[[package]] +name = "nvidia-cublas-cu11" +version = "11.10.3.66" +description = "CUBLAS native runtime libraries" +category = "main" +optional = true +python-versions = ">=3" + +[package.dependencies] +setuptools = "*" +wheel = "*" + +[[package]] +name = "nvidia-cuda-nvrtc-cu11" +version = "11.7.99" +description = "NVRTC native runtime libraries" +category = "main" +optional = true +python-versions = ">=3" + +[package.dependencies] +setuptools = "*" +wheel = "*" + +[[package]] +name = "nvidia-cuda-runtime-cu11" +version = "11.7.99" +description = "CUDA Runtime native Libraries" +category = "main" +optional = true +python-versions = ">=3" + +[package.dependencies] +setuptools = "*" +wheel = "*" + +[[package]] +name = "nvidia-cudnn-cu11" +version = "8.5.0.96" +description = "cuDNN runtime libraries" +category = "main" +optional = true +python-versions = ">=3" + +[package.dependencies] +setuptools = "*" +wheel = "*" + [[package]] name = "packaging" -version = "21.3" +version = "22.0" description = "Core utilities for Python packages" category = "main" optional = false -python-versions = ">=3.6" - -[package.dependencies] -pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" +python-versions = ">=3.7" [[package]] name = "pandocfilters" @@ -1069,7 +1259,7 @@ testing = ["docopt", "pytest (<6.0.0)"] [[package]] name = "pathspec" -version = "0.10.2" +version = "0.10.3" description = "Utility library for gitignore style pattern matching of file paths." category = "dev" optional = false @@ -1077,14 +1267,14 @@ python-versions = ">=3.7" [[package]] name = "pathy" -version = "0.10.0" +version = "0.10.1" description = "pathlib.Path subclasses for local and cloud bucket storage" category = "main" optional = true python-versions = ">= 3.6" [package.dependencies] -smart-open = ">=5.2.1,<6.0.0" +smart-open = ">=5.2.1,<7.0.0" typer = ">=0.3.0,<1.0.0" [package.extras] @@ -1123,7 +1313,7 @@ python-versions = ">=3.6" [[package]] name = "platformdirs" -version = "2.5.4" +version = "2.6.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
category = "dev" optional = false @@ -1183,7 +1373,7 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.33" +version = "3.0.36" description = "Library for building powerful interactive command lines in Python" category = "dev" optional = false @@ -1246,6 +1436,14 @@ category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +[[package]] +name = "pycryptodomex" +version = "3.16.0" +description = "Cryptographic library for Python" +category = "main" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + [[package]] name = "pydantic" version = "1.10.2" @@ -1305,17 +1503,6 @@ python-versions = ">=3.6" [package.extras] plugins = ["importlib-metadata"] -[[package]] -name = "pyparsing" -version = "3.0.9" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -category = "main" -optional = false -python-versions = ">=3.6.8" - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - [[package]] name = "pyrsistent" version = "0.19.2" @@ -1344,6 +1531,21 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] +[[package]] +name = "pytest-cov" +version = "4.0.0" +description = "Pytest plugin for measuring coverage." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] + [[package]] name = "pytest-dotenv" version = "0.5.2" @@ -1378,6 +1580,14 @@ python-versions = ">=3.7" [package.extras] cli = ["click (>=5.0)"] +[[package]] +name = "python-json-logger" +version = "2.0.4" +description = "A python library adding a json log formatter" +category = "dev" +optional = false +python-versions = ">=3.5" + [[package]] name = "pywin32" version = "305" @@ -1491,6 +1701,25 @@ urllib3 = ">=1.21.1,<1.27" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +description = "A pure python RFC3339 validator" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +six = "*" + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +description = "Pure python rfc3986 validator" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + [[package]] name = "send2trash" version = "1.8.0" @@ -1527,19 +1756,20 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "smart-open" -version = "5.2.1" +version = "6.3.0" description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" category = "main" optional = true python-versions = ">=3.6,<4.0" [package.extras] -all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage", "requests"] +all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "paramiko", "requests"] azure = ["azure-common", "azure-core", "azure-storage-blob"] -gcs = ["google-cloud-storage"] +gcs = ["google-cloud-storage (>=2.6.0)"] http = ["requests"] s3 = ["boto3"] -test = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage", "moto[server] 
(==1.3.14)", "parameterizedtestcase", "paramiko", "pathlib2", "pytest", "pytest-rerunfailures", "requests", "responses"] +ssh = ["paramiko"] +test = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "moto[server]", "paramiko", "pytest", "pytest-rerunfailures", "requests", "responses"] webhdfs = ["requests"] [[package]] @@ -1568,7 +1798,7 @@ python-versions = ">=3.6" [[package]] name = "spacy" -version = "3.4.3" +version = "3.4.4" description = "Industrial-strength Natural Language Processing (NLP) in Python" category = "main" optional = true @@ -1587,6 +1817,7 @@ preshed = ">=3.0.2,<3.1.0" pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<1.11.0" requests = ">=2.13.0,<3.0.0" setuptools = "*" +smart-open = ">=5.2.1,<7.0.0" spacy-legacy = ">=3.0.10,<3.1.0" spacy-loggers = ">=1.0.0,<2.0.0" srsly = ">=2.4.3,<3.0.0" @@ -1632,18 +1863,15 @@ python-versions = ">=3.6" [[package]] name = "spacy-loggers" -version = "1.0.3" +version = "1.0.4" description = "Logging utilities for SpaCy" category = "main" optional = true python-versions = ">=3.6" -[package.dependencies] -wasabi = ">=0.8.1,<1.1.0" - [[package]] name = "sqlalchemy" -version = "1.4.44" +version = "1.4.45" description = "Database Abstraction Library" category = "main" optional = false @@ -1770,6 +1998,18 @@ mxnet = ["mxnet (>=1.5.1,<1.6.0)"] tensorflow = ["tensorflow (>=2.0.0,<2.6.0)"] torch = ["torch (>=1.6.0)"] +[[package]] +name = "tiktoken" +version = "0.1.1" +description = "" +category = "main" +optional = true +python-versions = ">=3.9" + +[package.dependencies] +blobfile = ">=2" +regex = ">=2022.1.18" + [[package]] name = "tinycss2" version = "1.2.1" @@ -1806,6 +2046,24 @@ category = "dev" optional = false python-versions = ">=3.7" +[[package]] +name = "torch" +version = "1.13.1" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +category = "main" +optional = true +python-versions = ">=3.7.0" + +[package.dependencies] +nvidia-cublas-cu11 = {version = "11.10.3.66", markers = "platform_system == \"Linux\""} +nvidia-cuda-nvrtc-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\""} +nvidia-cuda-runtime-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\""} +nvidia-cudnn-cu11 = {version = "8.5.0.96", markers = "platform_system == \"Linux\""} +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + [[package]] name = "tornado" version = "6.2" @@ -1833,7 +2091,7 @@ telegram = ["requests"] [[package]] name = "traitlets" -version = "5.6.0" +version = "5.7.1" description = "Traitlets Python configuration system" category = "dev" optional = false @@ -1841,7 +2099,9 @@ python-versions = ">=3.7" [package.extras] docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +lint = ["black (>=22.6.0)", "mdformat (>0.7)", "ruff (>=0.0.156)"] test = ["pre-commit", "pytest"] +typing = ["mypy (>=0.990)"] [[package]] name = "transformers" @@ -1965,6 +2225,17 @@ category = "main" optional = false python-versions = ">=3.7" +[[package]] +name = "uri-template" +version = "1.2.0" +description = "RFC 6570 URI Template Processor" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.extras] +dev = ["flake8 (<4.0.0)", "flake8-annotations", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-noqa", "flake8-requirements", "flake8-type-annotations", "flake8-use-fstring", "mypy", 
"pep8-naming"] + [[package]] name = "urllib3" version = "1.26.13" @@ -1994,6 +2265,14 @@ category = "dev" optional = false python-versions = "*" +[[package]] +name = "webcolors" +version = "1.12" +description = "A library for working with color names and color values formats defined by HTML and CSS." +category = "dev" +optional = false +python-versions = ">=3.7" + [[package]] name = "webencodings" version = "0.5.1" @@ -2015,9 +2294,20 @@ docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"] optional = ["python-socks", "wsaccel"] test = ["websockets"] +[[package]] +name = "wheel" +version = "0.38.4" +description = "A built-package format for Python" +category = "main" +optional = true +python-versions = ">=3.7" + +[package.extras] +test = ["pytest (>=3.0.0)"] + [[package]] name = "widgetsnbextension" -version = "4.0.3" +version = "4.0.4" description = "Jupyter interactive widgets for Jupyter Notebook" category = "dev" optional = false @@ -2048,13 +2338,13 @@ docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [extras] -all = ["manifest-ml", "elasticsearch", "faiss-cpu", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4"] -llms = ["manifest-ml"] +all = ["manifest-ml", "elasticsearch", "faiss-cpu", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch"] +llms = ["manifest-ml", "torch", "transformers"] [metadata] lock-version = "1.1" python-versions = ">=3.8.1,<4.0" -content-hash = "7f44c3b23d4fa30e192ec0f0a9218bcd646bb48dd64a813ee1bb7d61cbe3a5b2" +content-hash = "d9ed2c5e1b2c51d7f8a9f74c858ab5058db14b7d6ca542777d7ecd07ccef5ee8" [metadata.files] anyio = [ @@ -2092,6 +2382,10 @@ argon2-cffi-bindings = [ {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, ] +arrow = [ + {file = "arrow-1.2.3-py3-none-any.whl", hash = "sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2"}, + {file = "arrow-1.2.3.tar.gz", hash = "sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1"}, +] asttokens = [ {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"}, {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"}, @@ -2113,27 +2407,18 @@ beautifulsoup4 = [ {file = "beautifulsoup4-4.11.1.tar.gz", hash = "sha256:ad9aa55b65ef2808eb405f46cf74df7fcb7044d5cbc26487f96eb2ef2e436693"}, ] black = [ - {file = "black-22.10.0-1fixedarch-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:5cc42ca67989e9c3cf859e84c2bf014f6633db63d1cbdf8fdb666dcd9e77e3fa"}, - {file = "black-22.10.0-1fixedarch-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:5d8f74030e67087b219b032aa33a919fae8806d49c867846bfacde57f43972ef"}, - {file = "black-22.10.0-1fixedarch-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:197df8509263b0b8614e1df1756b1dd41be6738eed2ba9e9769f3880c2b9d7b6"}, - {file = "black-22.10.0-1fixedarch-cp38-cp38-macosx_10_16_x86_64.whl", 
hash = "sha256:2644b5d63633702bc2c5f3754b1b475378fbbfb481f62319388235d0cd104c2d"}, - {file = "black-22.10.0-1fixedarch-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:e41a86c6c650bcecc6633ee3180d80a025db041a8e2398dcc059b3afa8382cd4"}, - {file = "black-22.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2039230db3c6c639bd84efe3292ec7b06e9214a2992cd9beb293d639c6402edb"}, - {file = "black-22.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14ff67aec0a47c424bc99b71005202045dc09270da44a27848d534600ac64fc7"}, - {file = "black-22.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:819dc789f4498ecc91438a7de64427c73b45035e2e3680c92e18795a839ebb66"}, - {file = "black-22.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5b9b29da4f564ba8787c119f37d174f2b69cdfdf9015b7d8c5c16121ddc054ae"}, - {file = "black-22.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8b49776299fece66bffaafe357d929ca9451450f5466e997a7285ab0fe28e3b"}, - {file = "black-22.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:21199526696b8f09c3997e2b4db8d0b108d801a348414264d2eb8eb2532e540d"}, - {file = "black-22.10.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e464456d24e23d11fced2bc8c47ef66d471f845c7b7a42f3bd77bf3d1789650"}, - {file = "black-22.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9311e99228ae10023300ecac05be5a296f60d2fd10fff31cf5c1fa4ca4b1988d"}, - {file = "black-22.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fba8a281e570adafb79f7755ac8721b6cf1bbf691186a287e990c7929c7692ff"}, - {file = "black-22.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:915ace4ff03fdfff953962fa672d44be269deb2eaf88499a0f8805221bc68c87"}, - {file = "black-22.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:444ebfb4e441254e87bad00c661fe32df9969b2bf224373a448d8aca2132b395"}, - {file = "black-22.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:974308c58d057a651d182208a484ce80a26dac0caef2895836a92dd6ebd725e0"}, - {file = "black-22.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72ef3925f30e12a184889aac03d77d031056860ccae8a1e519f6cbb742736383"}, - {file = "black-22.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:432247333090c8c5366e69627ccb363bc58514ae3e63f7fc75c54b1ea80fa7de"}, - {file = "black-22.10.0-py3-none-any.whl", hash = "sha256:c957b2b4ea88587b46cf49d1dc17681c1e672864fd7af32fc1e9664d572b3458"}, - {file = "black-22.10.0.tar.gz", hash = "sha256:f513588da599943e0cde4e32cc9879e825d58720d6557062d1098c5ad80080e1"}, + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = 
"black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, ] bleach = [ {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"}, @@ -2169,13 +2454,16 @@ blis = [ {file = "blis-0.7.9-cp39-cp39-win_amd64.whl", hash = "sha256:d81c3f627d33545fc25c9dcb5fee66c476d89288a27d63ac16ea63453401ffd5"}, {file = "blis-0.7.9.tar.gz", hash = "sha256:29ef4c25007785a90ffc2f0ab3d3bd3b75cd2d7856a9a482b7d0dac8d511a09d"}, ] +blobfile = [ + {file = "blobfile-2.0.0-py3-none-any.whl", hash = "sha256:e8701f253c4510edc24290f48198f6b8edde30774387df05bc932ff927519904"}, +] catalogue = [ {file = "catalogue-2.0.8-py3-none-any.whl", hash = "sha256:2d786e229d8d202b4f8a2a059858e45a2331201d831e39746732daa704b99f69"}, {file = "catalogue-2.0.8.tar.gz", hash = "sha256:b325c77659208bfb6af1b0d93b1a1aa4112e1bb29a4c5ced816758a722f0e388"}, ] certifi = [ - {file = "certifi-2022.9.24-py3-none-any.whl", hash = "sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"}, - {file = "certifi-2022.9.24.tar.gz", hash = "sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14"}, + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, ] cffi = [ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, @@ -2255,10 +2543,66 @@ colorama = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +comm = [ + {file = "comm-0.1.2-py3-none-any.whl", hash = "sha256:9f3abf3515112fa7c55a42a6a5ab358735c9dccc8b5910a9d8e3ef5998130666"}, + {file = "comm-0.1.2.tar.gz", hash = "sha256:3e2f5826578e683999b93716285b3b1f344f157bf75fa9ce0a797564e742f062"}, +] confection = [ {file = "confection-0.0.3-py3-none-any.whl", hash = "sha256:51af839c1240430421da2b248541ebc95f9d0ee385bcafa768b8acdbd2b0111d"}, {file = "confection-0.0.3.tar.gz", hash = "sha256:4fec47190057c43c9acbecb8b1b87a9bf31c469caa0d6888a5b9384432fdba5a"}, ] +coverage = [ + {file = "coverage-6.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ef8674b0ee8cc11e2d574e3e2998aea5df5ab242e012286824ea3c6970580e53"}, + {file = "coverage-6.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:784f53ebc9f3fd0e2a3f6a78b2be1bd1f5575d7863e10c6e12504f240fd06660"}, + {file = "coverage-6.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:b4a5be1748d538a710f87542f22c2cad22f80545a847ad91ce45e77417293eb4"}, + {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83516205e254a0cb77d2d7bb3632ee019d93d9f4005de31dca0a8c3667d5bc04"}, + {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af4fffaffc4067232253715065e30c5a7ec6faac36f8fc8d6f64263b15f74db0"}, + {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:97117225cdd992a9c2a5515db1f66b59db634f59d0679ca1fa3fe8da32749cae"}, + {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1170fa54185845505fbfa672f1c1ab175446c887cce8212c44149581cf2d466"}, + {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:11b990d520ea75e7ee8dcab5bc908072aaada194a794db9f6d7d5cfd19661e5a"}, + {file = "coverage-6.5.0-cp310-cp310-win32.whl", hash = "sha256:5dbec3b9095749390c09ab7c89d314727f18800060d8d24e87f01fb9cfb40b32"}, + {file = "coverage-6.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:59f53f1dc5b656cafb1badd0feb428c1e7bc19b867479ff72f7a9dd9b479f10e"}, + {file = "coverage-6.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4a5375e28c5191ac38cca59b38edd33ef4cc914732c916f2929029b4bfb50795"}, + {file = "coverage-6.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4ed2820d919351f4167e52425e096af41bfabacb1857186c1ea32ff9983ed75"}, + {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33a7da4376d5977fbf0a8ed91c4dffaaa8dbf0ddbf4c8eea500a2486d8bc4d7b"}, + {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fb6cf131ac4070c9c5a3e21de0f7dc5a0fbe8bc77c9456ced896c12fcdad91"}, + {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a6b7d95969b8845250586f269e81e5dfdd8ff828ddeb8567a4a2eaa7313460c4"}, + {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1ef221513e6f68b69ee9e159506d583d31aa3567e0ae84eaad9d6ec1107dddaa"}, + {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cca4435eebea7962a52bdb216dec27215d0df64cf27fc1dd538415f5d2b9da6b"}, + {file = "coverage-6.5.0-cp311-cp311-win32.whl", hash = "sha256:98e8a10b7a314f454d9eff4216a9a94d143a7ee65018dd12442e898ee2310578"}, + {file = "coverage-6.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:bc8ef5e043a2af066fa8cbfc6e708d58017024dc4345a1f9757b329a249f041b"}, + {file = "coverage-6.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4433b90fae13f86fafff0b326453dd42fc9a639a0d9e4eec4d366436d1a41b6d"}, + {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4f05d88d9a80ad3cac6244d36dd89a3c00abc16371769f1340101d3cb899fc3"}, + {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94e2565443291bd778421856bc975d351738963071e9b8839ca1fc08b42d4bef"}, + {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:027018943386e7b942fa832372ebc120155fd970837489896099f5cfa2890f79"}, + {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:255758a1e3b61db372ec2736c8e2a1fdfaf563977eedbdf131de003ca5779b7d"}, + {file = 
"coverage-6.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:851cf4ff24062c6aec510a454b2584f6e998cada52d4cb58c5e233d07172e50c"}, + {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:12adf310e4aafddc58afdb04d686795f33f4d7a6fa67a7a9d4ce7d6ae24d949f"}, + {file = "coverage-6.5.0-cp37-cp37m-win32.whl", hash = "sha256:b5604380f3415ba69de87a289a2b56687faa4fe04dbee0754bfcae433489316b"}, + {file = "coverage-6.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4a8dbc1f0fbb2ae3de73eb0bdbb914180c7abfbf258e90b311dcd4f585d44bd2"}, + {file = "coverage-6.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d900bb429fdfd7f511f868cedd03a6bbb142f3f9118c09b99ef8dc9bf9643c3c"}, + {file = "coverage-6.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2198ea6fc548de52adc826f62cb18554caedfb1d26548c1b7c88d8f7faa8f6ba"}, + {file = "coverage-6.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c4459b3de97b75e3bd6b7d4b7f0db13f17f504f3d13e2a7c623786289dd670e"}, + {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20c8ac5386253717e5ccc827caad43ed66fea0efe255727b1053a8154d952398"}, + {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b07130585d54fe8dff3d97b93b0e20290de974dc8177c320aeaf23459219c0b"}, + {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dbdb91cd8c048c2b09eb17713b0c12a54fbd587d79adcebad543bc0cd9a3410b"}, + {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3001a203182842a4630e7b8d1a2c7c07ec1b45d3084a83d5d227a3806f530f"}, + {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e07f4a4a9b41583d6eabec04f8b68076ab3cd44c20bd29332c6572dda36f372e"}, + {file = "coverage-6.5.0-cp38-cp38-win32.whl", hash = "sha256:6d4817234349a80dbf03640cec6109cd90cba068330703fa65ddf56b60223a6d"}, + {file = "coverage-6.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:7ccf362abd726b0410bf8911c31fbf97f09f8f1061f8c1cf03dfc4b6372848f6"}, + {file = "coverage-6.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:633713d70ad6bfc49b34ead4060531658dc6dfc9b3eb7d8a716d5873377ab745"}, + {file = "coverage-6.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95203854f974e07af96358c0b261f1048d8e1083f2de9b1c565e1be4a3a48cfc"}, + {file = "coverage-6.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9023e237f4c02ff739581ef35969c3739445fb059b060ca51771e69101efffe"}, + {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:265de0fa6778d07de30bcf4d9dc471c3dc4314a23a3c6603d356a3c9abc2dfcf"}, + {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f830ed581b45b82451a40faabb89c84e1a998124ee4212d440e9c6cf70083e5"}, + {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7b6be138d61e458e18d8e6ddcddd36dd96215edfe5f1168de0b1b32635839b62"}, + {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42eafe6778551cf006a7c43153af1211c3aaab658d4d66fa5fcc021613d02518"}, + {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:723e8130d4ecc8f56e9a611e73b31219595baa3bb252d539206f7bbbab6ffc1f"}, + {file = "coverage-6.5.0-cp39-cp39-win32.whl", hash = "sha256:d9ecf0829c6a62b9b573c7bb6d4dcd6ba8b6f80be9ba4fc7ed50bf4ac9aecd72"}, + {file 
= "coverage-6.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc2af30ed0d5ae0b1abdb4ebdce598eafd5b35397d4d75deb341a614d333d987"}, + {file = "coverage-6.5.0-pp36.pp37.pp38-none-any.whl", hash = "sha256:1431986dac3923c5945271f169f59c45b8802a114c8f548d611f2015133df77a"}, + {file = "coverage-6.5.0.tar.gz", hash = "sha256:f642e90754ee3e06b0e7e51bce3379590e76b7f76b708e1a71ff043f87025c84"}, +] cymem = [ {file = "cymem-2.0.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4981fc9182cc1fe54bfedf5f73bfec3ce0c27582d9be71e130c46e35958beef0"}, {file = "cymem-2.0.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:42aedfd2e77aa0518a24a2a60a2147308903abc8b13c84504af58539c39e52a3"}, @@ -2326,8 +2670,8 @@ elastic-transport = [ {file = "elastic_transport-8.4.0-py3-none-any.whl", hash = "sha256:19db271ab79c9f70f8c43f8f5b5111408781a6176b54ab2e54d713b6d9ceb815"}, ] elasticsearch = [ - {file = "elasticsearch-8.5.2-py3-none-any.whl", hash = "sha256:100ead24d2a20d40227bde9c586c9e32b820d15d3dfb4f12204d6b11841f029e"}, - {file = "elasticsearch-8.5.2.tar.gz", hash = "sha256:7fd57b89b1dfc3c976a71af58376d300a4e40dfcdd22ea2573ce6a830c9ad7c2"}, + {file = "elasticsearch-8.5.3-py3-none-any.whl", hash = "sha256:f09adbea8caa633ff79e8fe115fb1d2b635426fe1a23e7e8e3bd7cce5ac3eb70"}, + {file = "elasticsearch-8.5.3.tar.gz", hash = "sha256:4b71ad05b36243c3b13f1c89b3ede4357011eece68917e293c43d4177d565838"}, ] entrypoints = [ {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"}, @@ -2384,6 +2728,10 @@ flake8-docstrings = [ {file = "flake8-docstrings-1.6.0.tar.gz", hash = "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b"}, {file = "flake8_docstrings-1.6.0-py2.py3-none-any.whl", hash = "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde"}, ] +fqdn = [ + {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, + {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, +] greenlet = [ {file = "greenlet-2.0.1-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:9ed358312e63bf683b9ef22c8e442ef6c5c02973f0c2a939ec1d7b50c974015c"}, {file = "greenlet-2.0.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4f09b0010e55bec3239278f642a8a506b91034f03a4fb28289a7d448a67f1515"}, @@ -2459,16 +2807,16 @@ importlib-metadata = [ {file = "importlib_metadata-5.1.0.tar.gz", hash = "sha256:d5059f9f1e8e41f80e9c56c2ee58811450c31984dfa625329ffd7c0dad88a73b"}, ] importlib-resources = [ - {file = "importlib_resources-5.10.0-py3-none-any.whl", hash = "sha256:ee17ec648f85480d523596ce49eae8ead87d5631ae1551f913c0100b5edd3437"}, - {file = "importlib_resources-5.10.0.tar.gz", hash = "sha256:c01b1b94210d9849f286b86bb51bcea7cd56dde0600d8db721d7b81330711668"}, + {file = "importlib_resources-5.10.1-py3-none-any.whl", hash = "sha256:c09b067d82e72c66f4f8eb12332f5efbebc9b007c0b6c40818108c9870adc363"}, + {file = "importlib_resources-5.10.1.tar.gz", hash = "sha256:32bb095bda29741f6ef0e5278c42df98d135391bee5f932841efc0041f748dc3"}, ] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] ipykernel = [ - {file = "ipykernel-6.17.1-py3-none-any.whl", hash = "sha256:3a9a1b2ad6dbbd5879855aabb4557f08e63fa2208bffed897f03070e2bb436f6"}, - {file 
= "ipykernel-6.17.1.tar.gz", hash = "sha256:e178c1788399f93a459c241fe07c3b810771c607b1fb064a99d2c5d40c90c5d4"}, + {file = "ipykernel-6.19.2-py3-none-any.whl", hash = "sha256:1374a55c57ca7a7286c3d8b15799cd76e1a2381b6b1fea99c494b955988926b6"}, + {file = "ipykernel-6.19.2.tar.gz", hash = "sha256:1ab68d3d3654196266baa93990055413e167263ffbe4cfe834f871bcd3d3506d"}, ] ipython = [ {file = "ipython-8.7.0-py3-none-any.whl", hash = "sha256:352042ddcb019f7c04e48171b4dd78e4c4bb67bf97030d170e154aac42b656d9"}, @@ -2479,12 +2827,16 @@ ipython-genutils = [ {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, ] ipywidgets = [ - {file = "ipywidgets-8.0.2-py3-none-any.whl", hash = "sha256:1dc3dd4ee19ded045ea7c86eb273033d238d8e43f9e7872c52d092683f263891"}, - {file = "ipywidgets-8.0.2.tar.gz", hash = "sha256:08cb75c6e0a96836147cbfdc55580ae04d13e05d26ffbc377b4e1c68baa28b1f"}, + {file = "ipywidgets-8.0.3-py3-none-any.whl", hash = "sha256:db7dd35fb1217636cbdbe0ba0bd2216d91a7695cb28b5c1dca17e62cd51378de"}, + {file = "ipywidgets-8.0.3.tar.gz", hash = "sha256:2ec50df8538a1d4ddd5d454830d010922ad1015e81ac23efb27c0908bbc1eece"}, +] +isoduration = [ + {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, + {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, ] isort = [ - {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"}, - {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"}, + {file = "isort-5.11.2-py3-none-any.whl", hash = "sha256:e486966fba83f25b8045f8dd7455b0a0d1e4de481e1d7ce4669902d9fb85e622"}, + {file = "isort-5.11.2.tar.gz", hash = "sha256:dd8bbc5c0990f2a095d754e50360915f73b4c26fc82733eb5bfc6b48396af4d2"}, ] jedi = [ {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"}, @@ -2498,6 +2850,10 @@ joblib = [ {file = "joblib-1.2.0-py3-none-any.whl", hash = "sha256:091138ed78f800342968c523bdde947e7a305b8594b910a0fea2ab83c3c6d385"}, {file = "joblib-1.2.0.tar.gz", hash = "sha256:e1cee4a79e4af22881164f218d4311f60074197fb707e082e803b61f6d137018"}, ] +jsonpointer = [ + {file = "jsonpointer-2.3-py2.py3-none-any.whl", hash = "sha256:51801e558539b4e9cd268638c078c6c5746c9ac96bc38152d443400e4f3793e9"}, + {file = "jsonpointer-2.3.tar.gz", hash = "sha256:97cba51526c829282218feb99dab1b1e6bdf8efd1c43dc9d57be093c0d69c99a"}, +] jsonschema = [ {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, @@ -2519,22 +2875,107 @@ jupyter-core = [ {file = "jupyter_core-5.1.0-py3-none-any.whl", hash = "sha256:f5740d99606958544396914b08e67b668f45e7eff99ab47a7f4bcead419c02f4"}, {file = "jupyter_core-5.1.0.tar.gz", hash = "sha256:a5ae7c09c55c0b26f692ec69323ba2b62e8d7295354d20f6cd57b749de4a05bf"}, ] +jupyter-events = [ + {file = "jupyter_events-0.5.0-py3-none-any.whl", hash = "sha256:6f7b67bf42b8a370c992187194ed02847dfa02307a7aebe9913e2d3979b9b6b8"}, + {file = "jupyter_events-0.5.0.tar.gz", hash = "sha256:e27ffdd6138699d47d42cb65ae6d79334ff7c0d923694381c991ce56a140f2cd"}, +] jupyter-server = [ - {file = "jupyter_server-1.23.3-py3-none-any.whl", 
hash = "sha256:438496cac509709cc85e60172e5538ca45b4c8a0862bb97cd73e49f2ace419cb"}, - {file = "jupyter_server-1.23.3.tar.gz", hash = "sha256:f7f7a2f9d36f4150ad125afef0e20b1c76c8ff83eb5e39fb02d3b9df0f9b79ab"}, + {file = "jupyter_server-2.0.1-py3-none-any.whl", hash = "sha256:3bc09974a5290249de6924a614933e6f4f3d6d11f3061423a9f4e0271064a8b3"}, + {file = "jupyter_server-2.0.1.tar.gz", hash = "sha256:6e71268380ad7e4f2d9dda2f3e51a4fd4d1997b5390d5acdb74c7a195cfe4c00"}, +] +jupyter-server-terminals = [ + {file = "jupyter_server_terminals-0.4.2-py3-none-any.whl", hash = "sha256:c0eaacee6cac21b597c23c38dd523dc4e9b947f97af5101e0396c08f28db3e37"}, + {file = "jupyter_server_terminals-0.4.2.tar.gz", hash = "sha256:0e68cba38eb0f9f2d93f1160e0a7f84b943d0d0c4d2f77eeaabbb4a2919c47c6"}, ] jupyterlab-pygments = [ {file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"}, {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, ] jupyterlab-widgets = [ - {file = "jupyterlab_widgets-3.0.3-py3-none-any.whl", hash = "sha256:6aa1bc0045470d54d76b9c0b7609a8f8f0087573bae25700a370c11f82cb38c8"}, - {file = "jupyterlab_widgets-3.0.3.tar.gz", hash = "sha256:c767181399b4ca8b647befe2d913b1260f51bf9d8ef9b7a14632d4c1a7b536bd"}, + {file = "jupyterlab_widgets-3.0.4-py3-none-any.whl", hash = "sha256:4c9275daa6d20fc96c3aea45756ece7110850d035b0b93a6a40e918016b927da"}, + {file = "jupyterlab_widgets-3.0.4.tar.gz", hash = "sha256:9a568e022b8bb53ab23291f6ddb52f8002b789c2c5763378cbc882be1d619be8"}, ] langcodes = [ {file = "langcodes-3.3.0-py3-none-any.whl", hash = "sha256:4d89fc9acb6e9c8fdef70bcdf376113a3db09b67285d9e1d534de6d8818e7e69"}, {file = "langcodes-3.3.0.tar.gz", hash = "sha256:794d07d5a28781231ac335a1561b8442f8648ca07cd518310aeb45d6f0807ef6"}, ] +lxml = [ + {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"}, + {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"}, + {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"}, + {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"}, + {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"}, + {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"}, + {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"}, + {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"}, + {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"}, + {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"}, + {file = 
"lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"}, + {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"}, + {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"}, + {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"}, + {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"}, + {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"}, + {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"}, + {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"}, + {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"}, + {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"}, + {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"}, + {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"}, + {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"}, + {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"}, + {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"}, + {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"}, + {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"}, + {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"}, + {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"}, + {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"}, + {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"}, + {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"}, + {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"}, + {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"}, + {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"}, + {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"}, + {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"}, + {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"}, + {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"}, + {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"}, + {file = 
"lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"}, + {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"}, + {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"}, + {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"}, + {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"}, + {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"}, + {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"}, + {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"}, +] manifest-ml = [ {file = "manifest-ml-0.0.1.tar.gz", hash = "sha256:f828faf7de41fad5318254beec08acdf5142196e0e22203a4047412c2d3127a0"}, {file = "manifest_ml-0.0.1-py2.py3-none-any.whl", hash = "sha256:fc4e62e706fd767fd8851d91051fdb71bc79b2df9c66f5879736c46d8163a316"}, @@ -2680,8 +3121,8 @@ nest-asyncio = [ {file = 
"nest_asyncio-1.5.6.tar.gz", hash = "sha256:d267cc1ff794403f7df692964d1d2a3fa9418ffea2a3f6859a439ff482fef290"}, ] nltk = [ - {file = "nltk-3.7-py3-none-any.whl", hash = "sha256:ba3de02490308b248f9b94c8bc1ac0683e9aa2ec49ee78536d8667afb5e3eec8"}, - {file = "nltk-3.7.zip", hash = "sha256:d6507d6460cec76d70afea4242a226a7542f85c669177b9c7f562b7cf1b05502"}, + {file = "nltk-3.8-py3-none-any.whl", hash = "sha256:3306502f487aa9fb0566e23443fa287a85a8d8d0821e2ef1655b4e3f0ea4aeee"}, + {file = "nltk-3.8.zip", hash = "sha256:74b30826a37d78d53427105bbd037dd880251be269fca64ee530838a46ed55fc"}, ] notebook = [ {file = "notebook-6.5.2-py3-none-any.whl", hash = "sha256:e04f9018ceb86e4fa841e92ea8fb214f8d23c1cedfde530cc96f92446924f0e4"}, @@ -2721,9 +3162,26 @@ numpy = [ {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"}, {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"}, ] +nvidia-cublas-cu11 = [ + {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl", hash = "sha256:d32e4d75f94ddfb93ea0a5dda08389bcc65d8916a25cb9f37ac89edaeed3bded"}, + {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-win_amd64.whl", hash = "sha256:8ac17ba6ade3ed56ab898a036f9ae0756f1e81052a317bf98f8c6d18dc3ae49e"}, +] +nvidia-cuda-nvrtc-cu11 = [ + {file = "nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:9f1562822ea264b7e34ed5930567e89242d266448e936b85bc97a3370feabb03"}, + {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:f7d9610d9b7c331fa0da2d1b2858a4a8315e6d49765091d28711c8946e7425e7"}, + {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:f2effeb1309bdd1b3854fc9b17eaf997808f8b25968ce0c7070945c4265d64a3"}, +] +nvidia-cuda-runtime-cu11 = [ + {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:cc768314ae58d2641f07eac350f40f99dcb35719c4faff4bc458a7cd2b119e31"}, + {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:bc77fa59a7679310df9d5c70ab13c4e34c64ae2124dd1efd7e5474b71be125c7"}, +] +nvidia-cudnn-cu11 = [ + {file = "nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:402f40adfc6f418f9dae9ab402e773cfed9beae52333f6d86ae3107a1b9527e7"}, + {file = "nvidia_cudnn_cu11-8.5.0.96-py3-none-manylinux1_x86_64.whl", hash = "sha256:71f8111eb830879ff2836db3cccf03bbd735df9b0d17cd93761732ac50a8a108"}, +] packaging = [ - {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, - {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, + {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"}, + {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"}, ] pandocfilters = [ {file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, @@ -2734,12 +3192,12 @@ parso = [ {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, ] pathspec = [ - {file = "pathspec-0.10.2-py3-none-any.whl", hash = "sha256:88c2606f2c1e818b978540f73ecc908e13999c6c3a383daf3705652ae79807a5"}, - {file = "pathspec-0.10.2.tar.gz", hash = 
"sha256:8f6bf73e5758fd365ef5d58ce09ac7c27d2833a8d7da51712eac6e27e35141b0"}, + {file = "pathspec-0.10.3-py3-none-any.whl", hash = "sha256:3c95343af8b756205e2aba76e843ba9520a24dd84f68c22b9f93251507509dd6"}, + {file = "pathspec-0.10.3.tar.gz", hash = "sha256:56200de4077d9d0791465aa9095a01d421861e405b5096955051deefd697d6f6"}, ] pathy = [ - {file = "pathy-0.10.0-py3-none-any.whl", hash = "sha256:205d6da31c47334227d364ad8c13b848eb3254701553eb179f3faaec3abd0204"}, - {file = "pathy-0.10.0.tar.gz", hash = "sha256:939822c326913cd0ab48f5928c8c40afcc59c5b093eac328348dd16700ab49e9"}, + {file = "pathy-0.10.1-py3-none-any.whl", hash = "sha256:a7613ee2d99a0a3300e1d836322e2d947c85449fde59f52906f995dbff67dad4"}, + {file = "pathy-0.10.1.tar.gz", hash = "sha256:4cd6e71b4cd5ff875cfbb949ad9fa5519d8d1dbe69d5fc1d1b23aa3cb049618b"}, ] pexpect = [ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, @@ -2754,8 +3212,8 @@ pkgutil-resolve-name = [ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, ] platformdirs = [ - {file = "platformdirs-2.5.4-py3-none-any.whl", hash = "sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10"}, - {file = "platformdirs-2.5.4.tar.gz", hash = "sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7"}, + {file = "platformdirs-2.6.0-py3-none-any.whl", hash = "sha256:1a89a12377800c81983db6be069ec068eee989748799b946cce2a6e80dcc54ca"}, + {file = "platformdirs-2.6.0.tar.gz", hash = "sha256:b46ffafa316e6b83b47489d240ce17173f123a9b9c83282141c3daf26ad9ac2e"}, ] playwright = [ {file = "playwright-1.28.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:2e101b17e4d5252ef96c9dc8b2ac17f2980dde0420728c1c96a77eeaf6f9b11f"}, @@ -2805,8 +3263,8 @@ prometheus-client = [ {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, ] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.33-py3-none-any.whl", hash = "sha256:ced598b222f6f4029c0800cefaa6a17373fb580cd093223003475ce32805c35b"}, - {file = "prompt_toolkit-3.0.33.tar.gz", hash = "sha256:535c29c31216c77302877d5120aef6c94ff573748a5b5ca5b1b1f76f5e700c73"}, + {file = "prompt_toolkit-3.0.36-py3-none-any.whl", hash = "sha256:aa64ad242a462c5ff0363a7b9cfe696c20d55d9fc60c11fd8e632d064804d305"}, + {file = "prompt_toolkit-3.0.36.tar.gz", hash = "sha256:3e163f254bef5a03b146397d7c1963bd3e2812f0964bb9a24e6ec761fd28db63"}, ] psutil = [ {file = "psutil-5.9.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c1ca331af862803a42677c120aff8a814a804e09832f166f226bfd22b56feee8"}, @@ -2844,6 +3302,34 @@ pycparser = [ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, ] +pycryptodomex = [ + {file = "pycryptodomex-3.16.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:b3d04c00d777c36972b539fb79958790126847d84ec0129fce1efef250bfe3ce"}, + {file = "pycryptodomex-3.16.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e5a670919076b71522c7d567a9043f66f14b202414a63c3a078b5831ae342c03"}, + {file = "pycryptodomex-3.16.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:ce338a9703f54b2305a408fc9890eb966b727ce72b69f225898bb4e9d9ed3f1f"}, + {file = "pycryptodomex-3.16.0-cp27-cp27m-manylinux2014_aarch64.whl", hash = 
"sha256:a1c0ae7123448ecb034c75c713189cb00ebe2d415b11682865b6c54d200d9c93"}, + {file = "pycryptodomex-3.16.0-cp27-cp27m-win32.whl", hash = "sha256:8851585ff19871e5d69e1790f4ca5f6fd1699d6b8b14413b472a4c0dbc7ea780"}, + {file = "pycryptodomex-3.16.0-cp27-cp27m-win_amd64.whl", hash = "sha256:8dd2d9e3c617d0712ed781a77efd84ea579e76c5f9b2a4bc0b684ebeddf868b2"}, + {file = "pycryptodomex-3.16.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:2ad9bb86b355b6104796567dd44c215b3dc953ef2fae5e0bdfb8516731df92cf"}, + {file = "pycryptodomex-3.16.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:e25a2f5667d91795f9417cb856f6df724ccdb0cdd5cbadb212ee9bf43946e9f8"}, + {file = "pycryptodomex-3.16.0-cp27-cp27mu-manylinux2014_aarch64.whl", hash = "sha256:b0789a8490114a2936ed77c87792cfe77582c829cb43a6d86ede0f9624ba8aa3"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:0da835af786fdd1c9930994c78b23e88d816dc3f99aa977284a21bbc26d19735"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-manylinux2014_aarch64.whl", hash = "sha256:22aed0868622d95179217c298e37ed7410025c7b29dac236d3230617d1e4ed56"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1619087fb5b31510b0b0b058a54f001a5ffd91e6ffee220d9913064519c6a69d"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:70288d9bfe16b2fd0d20b6c365db614428f1bcde7b20d56e74cf88ade905d9eb"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7993d26dae4d83b8f4ce605bb0aecb8bee330bb3c95475ef06f3694403621e71"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:1cda60207be8c1cf0b84b9138f9e3ca29335013d2b690774a5e94678ff29659a"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:04610536921c1ec7adba158ef570348550c9f3a40bc24be9f8da2ef7ab387981"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-win32.whl", hash = "sha256:daa67f5ebb6fbf1ee9c90decaa06ca7fc88a548864e5e484d52b0920a57fe8a5"}, + {file = "pycryptodomex-3.16.0-cp35-abi3-win_amd64.whl", hash = "sha256:231dc8008cbdd1ae0e34645d4523da2dbc7a88c325f0d4a59635a86ee25b41dd"}, + {file = "pycryptodomex-3.16.0-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:4dbbe18cc232b5980c7633972ae5417d0df76fe89e7db246eefd17ef4d8e6d7a"}, + {file = "pycryptodomex-3.16.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:893f8a97d533c66cc3a56e60dd3ed40a3494ddb4aafa7e026429a08772f8a849"}, + {file = "pycryptodomex-3.16.0-pp27-pypy_73-win32.whl", hash = "sha256:6a465e4f856d2a4f2a311807030c89166529ccf7ccc65bef398de045d49144b6"}, + {file = "pycryptodomex-3.16.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ba57ac7861fd2c837cdb33daf822f2a052ff57dd769a2107807f52a36d0e8d38"}, + {file = "pycryptodomex-3.16.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f2b971a7b877348a27dcfd0e772a0343fb818df00b74078e91c008632284137d"}, + {file = "pycryptodomex-3.16.0-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e2453162f473c1eae4826eb10cd7bce19b5facac86d17fb5f29a570fde145abd"}, + {file = "pycryptodomex-3.16.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0ba28aa97cdd3ff5ed1a4f2b7f5cd04e721166bd75bd2b929e2734433882b583"}, + {file = "pycryptodomex-3.16.0.tar.gz", hash = 
"sha256:e9ba9d8ed638733c9e95664470b71d624a6def149e2db6cc52c1aca5a6a2df1d"}, +] pydantic = [ {file = "pydantic-1.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd"}, {file = "pydantic-1.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98"}, @@ -2898,10 +3384,6 @@ pygments = [ {file = "Pygments-2.13.0-py3-none-any.whl", hash = "sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"}, {file = "Pygments-2.13.0.tar.gz", hash = "sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1"}, ] -pyparsing = [ - {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, - {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, -] pyrsistent = [ {file = "pyrsistent-0.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d6982b5a0237e1b7d876b60265564648a69b14017f3b5f908c5be2de3f9abb7a"}, {file = "pyrsistent-0.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:187d5730b0507d9285a96fca9716310d572e5464cadd19f22b63a6976254d77a"}, @@ -2930,6 +3412,10 @@ pytest = [ {file = "pytest-7.2.0-py3-none-any.whl", hash = "sha256:892f933d339f068883b6fd5a459f03d85bfcb355e4981e146d2c7616c21fef71"}, {file = "pytest-7.2.0.tar.gz", hash = "sha256:c4014eb40e10f11f355ad4e3c2fb2c6c6d1919c73f3b5a433de4708202cade59"}, ] +pytest-cov = [ + {file = "pytest-cov-4.0.0.tar.gz", hash = "sha256:996b79efde6433cdbd0088872dbc5fb3ed7fe1578b68cdbba634f14bb8dd0470"}, + {file = "pytest_cov-4.0.0-py3-none-any.whl", hash = "sha256:2feb1b751d66a8bd934e5edfa2e961d11309dc37b73b0eabe73b5945fee20f6b"}, +] pytest-dotenv = [ {file = "pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732"}, {file = "pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f"}, @@ -2942,6 +3428,10 @@ python-dotenv = [ {file = "python-dotenv-0.21.0.tar.gz", hash = "sha256:b77d08274639e3d34145dfa6c7008e66df0f04b7be7a75fd0d5292c191d79045"}, {file = "python_dotenv-0.21.0-py3-none-any.whl", hash = "sha256:1684eb44636dd462b66c3ee016599815514527ad99965de77f43e0944634a7e5"}, ] +python-json-logger = [ + {file = "python-json-logger-2.0.4.tar.gz", hash = "sha256:764d762175f99fcc4630bd4853b09632acb60a6224acb27ce08cd70f0b1b81bd"}, + {file = "python_json_logger-2.0.4-py3-none-any.whl", hash = "sha256:3b03487b14eb9e4f77e4fc2a023358b5394b82fd89cecf5586259baed57d8c6f"}, +] pywin32 = [ {file = "pywin32-305-cp310-cp310-win32.whl", hash = "sha256:421f6cd86e84bbb696d54563c48014b12a23ef95a14e0bdba526be756d89f116"}, {file = "pywin32-305-cp310-cp310-win_amd64.whl", hash = "sha256:73e819c6bed89f44ff1d690498c0a811948f73777e5f97c494c152b850fad478"}, @@ -3190,6 +3680,14 @@ requests = [ {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"}, {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, ] +rfc3339-validator = [ + {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, + {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, +] +rfc3986-validator = [ + {file = 
"rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, + {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, +] send2trash = [ {file = "Send2Trash-1.8.0-py3-none-any.whl", hash = "sha256:f20eaadfdb517eaca5ce077640cb261c7d2698385a6a0f072a4a5447fd49fa08"}, {file = "Send2Trash-1.8.0.tar.gz", hash = "sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d"}, @@ -3203,8 +3701,8 @@ six = [ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] smart-open = [ - {file = "smart_open-5.2.1-py3-none-any.whl", hash = "sha256:71d14489da58b60ce12fc3ecb823facc59a8b23cd1b58edb97175640350d3a62"}, - {file = "smart_open-5.2.1.tar.gz", hash = "sha256:75abf758717a92a8f53aa96953f0c245c8cedf8e1e4184903db3659b419d4c17"}, + {file = "smart_open-6.3.0-py3-none-any.whl", hash = "sha256:b4c9ae193ad6d3e7add50944b86afa0d150bd821ab8ec21edb26d9a06b66f6a8"}, + {file = "smart_open-6.3.0.tar.gz", hash = "sha256:d5238825fe9a9340645fac3d75b287c08fbb99fb2b422477de781c9f5f09e019"}, ] sniffio = [ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, @@ -3219,85 +3717,85 @@ soupsieve = [ {file = "soupsieve-2.3.2.post1.tar.gz", hash = "sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d"}, ] spacy = [ - {file = "spacy-3.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e546b314f619502ae03e5eb9a0cfd09ca7a9db265bcdd8a3af83cfb0f1432e55"}, - {file = "spacy-3.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ded11aa8966236aab145b4d2d024b3eb61ac50078362d77d9ed7d8c240ef0f4a"}, - {file = "spacy-3.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:462e141f514d78cff85685b5b12eb8cadac0bad2f7820149cbe18d03ccb2e59c"}, - {file = "spacy-3.4.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c966d25b3f3e49f5de08546b3638928f49678c365cbbebd0eec28f74e0adb539"}, - {file = "spacy-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:2ddba486c4c981abe6f1e3fd72648dc8811966e5f0e05808f9c9fab155c388d7"}, - {file = "spacy-3.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c87117dd335fba44d1c0d77602f0763c3addf4e7ef9bdbe9a495466c3484c69"}, - {file = "spacy-3.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ce3938720f48eaeeb360a7f623f15a0d9efd1a688d5d740e3d4cdcd6f6da8a3"}, - {file = "spacy-3.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ad6bf5e4e7f0bc2ef94b7ff6fe59abd766f74c192bca2f17430a3b3cd5bda5a"}, - {file = "spacy-3.4.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6644c678bd7af567c6ce679f71d64119282e7d6f1a6f787162a91be3ea39333"}, - {file = "spacy-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:e6b871de8857a6820140358db3943180fdbe03d44ed792155cee6cb95f4ac4ea"}, - {file = "spacy-3.4.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d211c2b8894354bf8d961af9a9dcab38f764e1dcddd7b80760e438fcd4c9fe43"}, - {file = "spacy-3.4.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ea41f9de30435456235c4182d8bc2eb54a0a64719856e66e780350bb4c8cfbe"}, - {file = "spacy-3.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:afaf6e716cbac4a0fbfa9e9bf95decff223936597ddd03ea869118a7576aa1b1"}, - {file = "spacy-3.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:7115da36369b3c537caf2fe08e0b45528bd091c7f56ba3580af1e6fdfa9b1081"}, - {file = "spacy-3.4.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b3e629c889cac9656151286ec1232c6a948ce0d44a39f1ef5e60fed4f183a10"}, - {file = "spacy-3.4.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9277cd0fcb96ee5dd885f7e96c639f21afd96198d61ca32100446afbff4dfbef"}, - {file = "spacy-3.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:a36bd06a5a147350e5f5f6903c4777296c37b18199251bb41056c3a73aa4494f"}, - {file = "spacy-3.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bdafcd0823ca804c39d0bed9e677eb7d0235b1259563d0fd4d3a201c71108af8"}, - {file = "spacy-3.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cdc23a48e6543402b4c56ebf2d36246001175c29fd56d3081efcec684651abc"}, - {file = "spacy-3.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:455c2fbd1de24b6fe34fa121d87525134d7498f9f458ebc8274d7940b473999e"}, - {file = "spacy-3.4.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1c85279fbb6b75d7fb8d7c59c2b734502e51271cad90926e8df1d21b67da5aa"}, - {file = "spacy-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5c0d65f39184f522b4e67b965a42d121a3b2d799362682fe8847b64b0ce5bc7c"}, - {file = "spacy-3.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a7b97ec21ed773edb2479ae5d6c7686b8034f418df6bccd9218f5c3c2b7cf888"}, - {file = "spacy-3.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:36a9a506029842795099fd97ad95f0da2845c319020fcc7164cbf33650726f83"}, - {file = "spacy-3.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ab293eb1423fa05c7ee71b2fedda57c2b4a4ca8dc054ce678809457287b01dc"}, - {file = "spacy-3.4.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb6d0f185126decc8392cde7d28eb6e85ba4bca15424713288cccc49c2a3c52b"}, - {file = "spacy-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:676ab9ab2cf94ba48caa306f185a166e85bd35b388ec24512c8ba7dfcbc7517e"}, - {file = "spacy-3.4.3.tar.gz", hash = "sha256:22698cf5175e2b697e82699fcccee3092b42137a57d352df208d71657fd693bb"}, + {file = "spacy-3.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:07a10999a3e37f896758a92c2eed263638bcbf2747dc3a4aeea929aaa20ea28c"}, + {file = "spacy-3.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e6d98511dc8a88d3a96bcae13971a284459362076738c85053d1a3791f6cde92"}, + {file = "spacy-3.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2cad9c5543f03b3375c252e4dd45670ee8ed99c925dca15eadab5084fd1b033"}, + {file = "spacy-3.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ade19c1e676cac2546f268db22bc5eba08d12beafabe80f1b9f06028b3a0b52"}, + {file = "spacy-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:e782c8a7c4805cc1b34ed2b11f72a5cf2b9851e20f7afe3e97caf206f19f761b"}, + {file = "spacy-3.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa027e69ef9fe42c8b02b940872e5bde0ce1bf66b6bf488c6493e3ce660c4b3a"}, + {file = "spacy-3.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddeb5d725b6fa9c9009b1ff645db8f5caab9ed8956ee3a84b8379951caad1d36"}, + {file = "spacy-3.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29d6bb428a6bb19e026d8bbb9d4385c25b21e1ce51fcaabadfb5599b2390a79c"}, + {file = "spacy-3.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a21187ad4c44e166dc3deed23992ea1a74d731c9a6bdd9fca306d455181577fa"}, + {file = 
"spacy-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:10643c6d335a02805f6676738a3e992323cfd9438115cc253435e5053dc93824"}, + {file = "spacy-3.4.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:486228cfa7ced18ec99008388028bd2329262ab8108e7c19252c1a67b2801909"}, + {file = "spacy-3.4.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcb7a213178c298b95532075d6dddfb374bbe56ef8d2687212763b4583048da2"}, + {file = "spacy-3.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:15e5c41d408d1d30d8f3dd8e4eed9ed28e6174e011b8d61c1345981562e2e8f5"}, + {file = "spacy-3.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8979dbd3594c5c268cedad53f456a3ec3a0a2b78a1199788aacedcd68eef3a00"}, + {file = "spacy-3.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f4736fea2630e696422dfe38bfb3d0a7864bc6a9072d6e49a906af46870e36e"}, + {file = "spacy-3.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:498bf01e8c7ab601c3f8d6c51497817b40a3322a3967c032536b18ce9ea26d0a"}, + {file = "spacy-3.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:95f880c6fea57d51c448ad84f96d79d8758e5e18bdbaaee060c15af11641079b"}, + {file = "spacy-3.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9ccbede9be470c5d795168bf3be41fc86e18892a9247a742b394ba866c005391"}, + {file = "spacy-3.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2f1edbecfde9c11b17e87768bb5f2c33948fb1e3bf54b2197031ff9053607277"}, + {file = "spacy-3.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66eaf4764e95699934cbd8f38717b283db185c896cfd3d1fb1ad5c6552e8b3c9"}, + {file = "spacy-3.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0bb7d53f1a780bb8cc1b27a81e02e8b9bc71abb959f4dc13c21af4041fdd2c7a"}, + {file = "spacy-3.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:c1a5ce5c9b19cdfb4469079e710e72bb09c3cab855f21ef6a614b84c765e0311"}, + {file = "spacy-3.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7044dca3542579ea1e3ac6cdd821640c2f65dd0c56230688f36e15aca1b8217"}, + {file = "spacy-3.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8a495b0fc00910fb5c1fbe64fdbfe1d3c11b09f421d1ae4e30cdb4c2388a91e4"}, + {file = "spacy-3.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31e9a637960b60c1bb7a36a187271425717e97c14e9d1df613dc4efeffefcbec"}, + {file = "spacy-3.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71f9449ffadef85b048c9735ee235da5dca9d0a87038dba6d4ed20c5188e0f13"}, + {file = "spacy-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:1b7791a6c0592615b0566001596cc48c72325d1b97e46e574c91bff34f4e3f4c"}, + {file = "spacy-3.4.4.tar.gz", hash = "sha256:e500cf2cb5f1849461a7928fa269703756069bdfb71559065240af6d0208b08c"}, ] spacy-legacy = [ {file = "spacy-legacy-3.0.10.tar.gz", hash = "sha256:16104595d8ab1b7267f817a449ad1f986eb1f2a2edf1050748f08739a479679a"}, {file = "spacy_legacy-3.0.10-py2.py3-none-any.whl", hash = "sha256:8526a54d178dee9b7f218d43e5c21362c59056c5da23380b319b56043e9211f3"}, ] spacy-loggers = [ - {file = "spacy-loggers-1.0.3.tar.gz", hash = "sha256:00f6fd554db9fd1fde6501b23e1f0e72f6eef14bb1e7fc15456d11d1d2de92ca"}, - {file = "spacy_loggers-1.0.3-py3-none-any.whl", hash = "sha256:f74386b390a023f9615dcb499b7b4ad63338236a8187f0ec4dfe265a9f665ee8"}, + {file = "spacy-loggers-1.0.4.tar.gz", hash = "sha256:e6f983bf71230091d5bb7b11bf64bd54415eca839108d5f83d9155d0ba93bf28"}, + {file = "spacy_loggers-1.0.4-py3-none-any.whl", hash = 
"sha256:e050bf2e63208b2f096b777e494971c962ad7c1dc997641c8f95c622550044ae"}, ] sqlalchemy = [ - {file = "SQLAlchemy-1.4.44-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:da60b98b0f6f0df9fbf8b72d67d13b73aa8091923a48af79a951d4088530a239"}, - {file = "SQLAlchemy-1.4.44-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:95f4f8d62589755b507218f2e3189475a4c1f5cc9db2aec772071a7dc6cd5726"}, - {file = "SQLAlchemy-1.4.44-cp27-cp27m-win32.whl", hash = "sha256:afd1ac99179d1864a68c06b31263a08ea25a49df94e272712eb2824ef151e294"}, - {file = "SQLAlchemy-1.4.44-cp27-cp27m-win_amd64.whl", hash = "sha256:f8e5443295b218b08bef8eb85d31b214d184b3690d99a33b7bd8e5591e2b0aa1"}, - {file = "SQLAlchemy-1.4.44-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:53f90a2374f60e703c94118d21533765412da8225ba98659de7dd7998641ab17"}, - {file = "SQLAlchemy-1.4.44-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:65a0ad931944fcb0be12a8e0ac322dbd3ecf17c53f088bc10b6da8f0caac287b"}, - {file = "SQLAlchemy-1.4.44-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b185041a4dc5c685283ea98c2f67bbfa47bb28e4a4f5b27ebf40684e7a9f8"}, - {file = "SQLAlchemy-1.4.44-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:80ead36fb1d676cc019586ffdc21c7e906ce4bf243fe4021e4973dae332b6038"}, - {file = "SQLAlchemy-1.4.44-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68e0cd5d32a32c4395168d42f2fefbb03b817ead3a8f3704b8bd5697c0b26c24"}, - {file = "SQLAlchemy-1.4.44-cp310-cp310-win32.whl", hash = "sha256:ae1ed1ebc407d2f66c6f0ec44ef7d56e3f455859df5494680e2cf89dad8e3ae0"}, - {file = "SQLAlchemy-1.4.44-cp310-cp310-win_amd64.whl", hash = "sha256:6f0ea4d7348feb5e5d0bf317aace92e28398fa9a6e38b7be9ec1f31aad4a8039"}, - {file = "SQLAlchemy-1.4.44-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f5e8ed9cde48b76318ab989deeddc48f833d2a6a7b7c393c49b704f67dedf01d"}, - {file = "SQLAlchemy-1.4.44-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c857676d810ca196be73c98eb839125d6fa849bfa3589be06201a6517f9961c"}, - {file = "SQLAlchemy-1.4.44-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c56e6899fa6e767e4be5d106941804a4201c5cb9620a409c0b80448ec70b656"}, - {file = "SQLAlchemy-1.4.44-cp311-cp311-win32.whl", hash = "sha256:c46322354c58d4dc039a2c982d28284330f8919f31206894281f4b595b9d8dbe"}, - {file = "SQLAlchemy-1.4.44-cp311-cp311-win_amd64.whl", hash = "sha256:7313e4acebb9ae88dbde14a8a177467a7625b7449306c03a3f9f309b30e163d0"}, - {file = "SQLAlchemy-1.4.44-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:17aee7bfcef7bf0dea92f10e5dfdd67418dcf6fe0759f520e168b605855c003e"}, - {file = "SQLAlchemy-1.4.44-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9470633395e5f24d6741b4c8a6e905bce405a28cf417bba4ccbaadf3dab0111d"}, - {file = "SQLAlchemy-1.4.44-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:393f51a09778e8984d735b59a810731394308b4038acdb1635397c2865dae2b6"}, - {file = "SQLAlchemy-1.4.44-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7e3b9e01fdbe1ce3a165cc7e1ff52b24813ee79c6df6dee0d1e13888a97817e"}, - {file = "SQLAlchemy-1.4.44-cp36-cp36m-win32.whl", hash = 
"sha256:6a06c2506c41926d2769f7968759995f2505e31c5b5a0821e43ca5a3ddb0e8ae"}, - {file = "SQLAlchemy-1.4.44-cp36-cp36m-win_amd64.whl", hash = "sha256:3ca21b35b714ce36f4b8d1ee8d15f149db8eb43a472cf71600bf18dae32286e7"}, - {file = "SQLAlchemy-1.4.44-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:3cbdbed8cdcae0f83640a9c44fa02b45a6c61e149c58d45a63c9581aba62850f"}, - {file = "SQLAlchemy-1.4.44-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a22208c1982f1fe2ae82e5e4c3d4a6f2445a7a0d65fb7983a3d7cbbe3983f5a4"}, - {file = "SQLAlchemy-1.4.44-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d3b9ac11f36ab9a726097fba7c7f6384f0129aedb017f1d4d1d4fce9052a1320"}, - {file = "SQLAlchemy-1.4.44-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d654870a66027af3a26df1372cf7f002e161c6768ebe4c9c6fdc0da331cb5173"}, - {file = "SQLAlchemy-1.4.44-cp37-cp37m-win32.whl", hash = "sha256:0be9b479c5806cece01f1581726573a8d6515f8404e082c375b922c45cfc2a7b"}, - {file = "SQLAlchemy-1.4.44-cp37-cp37m-win_amd64.whl", hash = "sha256:3eba07f740488c3a125f17c092a81eeae24a6c7ec32ac9dbc52bf7afaf0c4f16"}, - {file = "SQLAlchemy-1.4.44-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:ad5f966623905ee33694680dda1b735544c99c7638f216045d21546d3d8c6f5b"}, - {file = "SQLAlchemy-1.4.44-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f68eab46649504eb95be36ca529aea16cd199f080726c28cbdbcbf23d20b2a2"}, - {file = "SQLAlchemy-1.4.44-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:21f3df74a0ab39e1255e94613556e33c1dc3b454059fe0b365ec3bbb9ed82e4a"}, - {file = "SQLAlchemy-1.4.44-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8080bc51a775627865e0f1dbfc0040ff4ace685f187f6036837e1727ba2ed10"}, - {file = "SQLAlchemy-1.4.44-cp38-cp38-win32.whl", hash = "sha256:b6a337a2643a41476fb6262059b8740f4b9a2ec29bf00ffb18c18c080f6e0aed"}, - {file = "SQLAlchemy-1.4.44-cp38-cp38-win_amd64.whl", hash = "sha256:b737fbeb2f78926d1f59964feb287bbbd050e7904766f87c8ce5cfb86e6d840c"}, - {file = "SQLAlchemy-1.4.44-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:c9aa372b295a36771cffc226b6517df3011a7d146ac22d19fa6a75f1cdf9d7e6"}, - {file = "SQLAlchemy-1.4.44-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237067ba0ef45a518b64606e1807f7229969ad568288b110ed5f0ca714a3ed3a"}, - {file = "SQLAlchemy-1.4.44-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6d7e1b28342b45f19e3dea7873a9479e4a57e15095a575afca902e517fb89652"}, - {file = "SQLAlchemy-1.4.44-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c0093678001f5d79f2dcbf3104c54d6c89e41ab50d619494c503a4d3f1aef2"}, - {file = "SQLAlchemy-1.4.44-cp39-cp39-win32.whl", hash = "sha256:7cf7c7adbf4417e3f46fc5a2dbf8395a5a69698217337086888f79700a12e93a"}, - {file = "SQLAlchemy-1.4.44-cp39-cp39-win_amd64.whl", hash = "sha256:d3b6d4588994da73567bb00af9d7224a16c8027865a8aab53ae9be83f9b7cbd1"}, - {file = "SQLAlchemy-1.4.44.tar.gz", hash = "sha256:2dda5f96719ae89b3ec0f1b79698d86eb9aecb1d54e990abb3fdd92c04b46a90"}, + {file = "SQLAlchemy-1.4.45-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:f1d3fb02a4d0b07d1351a4a52f159e5e7b3045c903468b7e9349ebf0020ffdb9"}, + {file = 
"SQLAlchemy-1.4.45-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9b7025d46aba946272f6b6b357a22f3787473ef27451f342df1a2a6de23743e3"}, + {file = "SQLAlchemy-1.4.45-cp27-cp27m-win32.whl", hash = "sha256:26b8424b32eeefa4faad21decd7bdd4aade58640b39407bf43e7d0a7c1bc0453"}, + {file = "SQLAlchemy-1.4.45-cp27-cp27m-win_amd64.whl", hash = "sha256:13578d1cda69bc5e76c59fec9180d6db7ceb71c1360a4d7861c37d87ea6ca0b1"}, + {file = "SQLAlchemy-1.4.45-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6cd53b4c756a6f9c6518a3dc9c05a38840f9ae442c91fe1abde50d73651b6922"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:ca152ffc7f0aa069c95fba46165030267ec5e4bb0107aba45e5e9e86fe4d9363"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06055476d38ed7915eeed22b78580556d446d175c3574a01b9eb04d91f3a8b2e"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:081e2a2d75466353c738ca2ee71c0cfb08229b4f9909b5fa085f75c48d021471"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96821d806c0c90c68ce3f2ce6dd529c10e5d7587961f31dd5c30e3bfddc4545d"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-win32.whl", hash = "sha256:c8051bff4ce48cbc98f11e95ac46bfd1e36272401070c010248a3230d099663f"}, + {file = "SQLAlchemy-1.4.45-cp310-cp310-win_amd64.whl", hash = "sha256:16ad798fc121cad5ea019eb2297127b08c54e1aa95fe17b3fea9fdbc5c34fe62"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:099efef0de9fbda4c2d7cb129e4e7f812007901942259d4e6c6e19bd69de1088"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a29d02c9e6f6b105580c5ed7afb722b97bc2e2fdb85e1d45d7ddd8440cfbca"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc10423b59d6d032d6dff0bb42aa06dc6a8824eb6029d70c7d1b6981a2e7f4d8"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-win32.whl", hash = "sha256:1a92685db3b0682776a5abcb5f9e9addb3d7d9a6d841a452a17ec2d8d457bea7"}, + {file = "SQLAlchemy-1.4.45-cp311-cp311-win_amd64.whl", hash = "sha256:db3ccbce4a861bf4338b254f95916fc68dd8b7aa50eea838ecdaf3a52810e9c0"}, + {file = "SQLAlchemy-1.4.45-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:a62ae2ea3b940ce9c9cbd675489c2047921ce0a79f971d3082978be91bd58117"}, + {file = "SQLAlchemy-1.4.45-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a87f8595390764db333a1705591d0934973d132af607f4fa8b792b366eacbb3c"}, + {file = "SQLAlchemy-1.4.45-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9a21c1fb71c69c8ec65430160cd3eee44bbcea15b5a4e556f29d03f246f425ec"}, + {file = "SQLAlchemy-1.4.45-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7944b04e6fcf8d733964dd9ee36b6a587251a1a4049af3a9b846f6e64eb349a"}, + {file = "SQLAlchemy-1.4.45-cp36-cp36m-win32.whl", hash = "sha256:a3bcd5e2049ceb97e8c273e6a84ff4abcfa1dc47b6d8bbd36e07cce7176610d3"}, + {file = "SQLAlchemy-1.4.45-cp36-cp36m-win_amd64.whl", hash = "sha256:5953e225be47d80410ae519f865b5c341f541d8e383fb6d11f67fb71a45bf890"}, + {file = "SQLAlchemy-1.4.45-cp37-cp37m-macosx_10_15_x86_64.whl", hash = 
"sha256:6a91b7883cb7855a27bc0637166eed622fdf1bb94a4d1630165e5dd88c7e64d3"}, + {file = "SQLAlchemy-1.4.45-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d458fd0566bc9e10b8be857f089e96b5ca1b1ef033226f24512f9ffdf485a8c0"}, + {file = "SQLAlchemy-1.4.45-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f4ad3b081c0dbb738886f8d425a5d983328670ee83b38192687d78fc82bd1e"}, + {file = "SQLAlchemy-1.4.45-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd95a3e6ab46da2c5b0703e797a772f3fab44d085b3919a4f27339aa3b1f51d3"}, + {file = "SQLAlchemy-1.4.45-cp37-cp37m-win32.whl", hash = "sha256:715f5859daa3bee6ecbad64501637fa4640ca6734e8cda6135e3898d5f8ccadd"}, + {file = "SQLAlchemy-1.4.45-cp37-cp37m-win_amd64.whl", hash = "sha256:2d1539fbc82d2206380a86d6d7d0453764fdca5d042d78161bbfb8dd047c80ec"}, + {file = "SQLAlchemy-1.4.45-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:01aa76f324c9bbc0dcb2bc3d9e2a9d7ede4808afa1c38d40d5e2007e3163b206"}, + {file = "SQLAlchemy-1.4.45-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:416fe7d228937bd37990b5a429fd00ad0e49eabcea3455af7beed7955f192edd"}, + {file = "SQLAlchemy-1.4.45-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7e32ce2584564d9e068bb7e0ccd1810cbb0a824c0687f8016fe67e97c345a637"}, + {file = "SQLAlchemy-1.4.45-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:561605cfc26273825ed2fb8484428faf36e853c13e4c90c61c58988aeccb34ed"}, + {file = "SQLAlchemy-1.4.45-cp38-cp38-win32.whl", hash = "sha256:55ddb5585129c5d964a537c9e32a8a68a8c6293b747f3fa164e1c034e1657a98"}, + {file = "SQLAlchemy-1.4.45-cp38-cp38-win_amd64.whl", hash = "sha256:445914dcadc0b623bd9851260ee54915ecf4e3041a62d57709b18a0eed19f33b"}, + {file = "SQLAlchemy-1.4.45-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:2db887dbf05bcc3151de1c4b506b14764c6240a42e844b4269132a7584de1e5f"}, + {file = "SQLAlchemy-1.4.45-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52b90c9487e4449ad954624d01dea34c90cd8c104bce46b322c83654f37a23c5"}, + {file = "SQLAlchemy-1.4.45-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f61e54b8c2b389de1a8ad52394729c478c67712dbdcdadb52c2575e41dae94a5"}, + {file = "SQLAlchemy-1.4.45-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e91a5e45a2ea083fe344b3503405978dff14d60ef3aa836432c9ca8cd47806b6"}, + {file = "SQLAlchemy-1.4.45-cp39-cp39-win32.whl", hash = "sha256:0e068b8414d60dd35d43c693555fc3d2e1d822cef07960bb8ca3f1ee6c4ff762"}, + {file = "SQLAlchemy-1.4.45-cp39-cp39-win_amd64.whl", hash = "sha256:2d6f178ff2923730da271c8aa317f70cf0df11a4d1812f1d7a704b1cf29c5fe3"}, + {file = "SQLAlchemy-1.4.45.tar.gz", hash = "sha256:fd69850860093a3f69fefe0ab56d041edfdfe18510b53d9a2eaecba2f15fa795"}, ] sqlitedict = [ {file = "sqlitedict-2.1.0.tar.gz", hash = "sha256:03d9cfb96d602996f1d4c2db2856f1224b96a9c431bdd16e78032a72940f9e8c"}, @@ -3370,6 +3868,24 @@ thinc = [ {file = "thinc-8.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:16be051c6f71d967fe87c3bda3a760699539cf75fee6b32527ea38feb3002e56"}, {file = "thinc-8.1.5.tar.gz", hash = "sha256:4d3e4de33d2d0eae7c1455c60c680e453b0204c29e3d2d548d7a9e7fe08ccfbd"}, ] +tiktoken = [ + {file = 
"tiktoken-0.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4362363d129c0dcfd5acc8c9675a0f2372c23ac1239b11349eac991a814c046"}, + {file = "tiktoken-0.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b16afffbc1a546521a2fc04c6ab185b263df71ff77eca8a6ff10effc7562b1af"}, + {file = "tiktoken-0.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50d0f446750bb2390bff3d63d517e3457b9b182b162f6d90586b9c5ac95899ce"}, + {file = "tiktoken-0.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c88e6eab36d4cf950da4686149e03b5ef3274fb0b3711cb50aef0e03eb9051d"}, + {file = "tiktoken-0.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:ca0096dca00b0ed60c6eee22a8bbaaee8c8de09fc13417d5cf786c6a910dcd0c"}, + {file = "tiktoken-0.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f6151dc8cd871bf24b60788403c0425e3a8c69259aa985fc5c451a4a5c344e0d"}, + {file = "tiktoken-0.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6af53a1f76cffe9429aa2a2b9091556d157462212ef53e63505177fa9196809"}, + {file = "tiktoken-0.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:580bd89f80fbbe9a5add29d585c94fc72a1c8648d935eddeb677721985efd7b1"}, + {file = "tiktoken-0.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ca6cb6e806261b0f33440c9b064068c1cb2b5c506aff7d9133e6b643b4fc1652"}, + {file = "tiktoken-0.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:c5402c7871aa2507a749374a39a5b88f7e96be3f0cc7bf52e0845e865fc2cf52"}, + {file = "tiktoken-0.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:301e9d61a45846f1751f552eb6501c9ad4086763b55f3fee66852ae3532a9a3c"}, + {file = "tiktoken-0.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2450f71b3219bae8be1e55cae7385de48ee96f0dd7490d8de6b7c53e97ba06b7"}, + {file = "tiktoken-0.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6df2d41e3176cba3015c5c3183b7c83de927db81d6ad44a08c90547774065a"}, + {file = "tiktoken-0.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c48cc16483ab4313012b5656c8c02c0fd631329b255e0723019f2097d7d4aa1"}, + {file = "tiktoken-0.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c2f5697a5fe2bd053997728c7522f0d0285b6d4fd6be65602dccf43341e4dd82"}, + {file = "tiktoken-0.1.1.tar.gz", hash = "sha256:328ee64d63c68e04f11d68b45198053134fbe42a169363e6b5405fabf59696e1"}, +] tinycss2 = [ {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, {file = "tinycss2-1.2.1.tar.gz", hash = "sha256:8cff3a8f066c2ec677c06dbc7b45619804a6938478d9d73c284b29d14ecb0627"}, @@ -3416,6 +3932,29 @@ tomli = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] +torch = [ + {file = "torch-1.13.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:fd12043868a34a8da7d490bf6db66991108b00ffbeecb034228bfcbbd4197143"}, + {file = "torch-1.13.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d9fe785d375f2e26a5d5eba5de91f89e6a3be5d11efb497e76705fdf93fa3c2e"}, + {file = "torch-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:98124598cdff4c287dbf50f53fb455f0c1e3a88022b39648102957f3445e9b76"}, + {file = "torch-1.13.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:393a6273c832e047581063fb74335ff50b4c566217019cc6ace318cd79eb0566"}, + {file = "torch-1.13.1-cp310-none-macosx_11_0_arm64.whl", hash = 
"sha256:0122806b111b949d21fa1a5f9764d1fd2fcc4a47cb7f8ff914204fd4fc752ed5"}, + {file = "torch-1.13.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:22128502fd8f5b25ac1cd849ecb64a418382ae81dd4ce2b5cebaa09ab15b0d9b"}, + {file = "torch-1.13.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:76024be052b659ac1304ab8475ab03ea0a12124c3e7626282c9c86798ac7bc11"}, + {file = "torch-1.13.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:ea8dda84d796094eb8709df0fcd6b56dc20b58fdd6bc4e8d7109930dafc8e419"}, + {file = "torch-1.13.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2ee7b81e9c457252bddd7d3da66fb1f619a5d12c24d7074de91c4ddafb832c93"}, + {file = "torch-1.13.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:0d9b8061048cfb78e675b9d2ea8503bfe30db43d583599ae8626b1263a0c1380"}, + {file = "torch-1.13.1-cp37-none-macosx_11_0_arm64.whl", hash = "sha256:f402ca80b66e9fbd661ed4287d7553f7f3899d9ab54bf5c67faada1555abde28"}, + {file = "torch-1.13.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:727dbf00e2cf858052364c0e2a496684b9cb5aa01dc8a8bc8bbb7c54502bdcdd"}, + {file = "torch-1.13.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:df8434b0695e9ceb8cc70650afc1310d8ba949e6db2a0525ddd9c3b2b181e5fe"}, + {file = "torch-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:5e1e722a41f52a3f26f0c4fcec227e02c6c42f7c094f32e49d4beef7d1e213ea"}, + {file = "torch-1.13.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:33e67eea526e0bbb9151263e65417a9ef2d8fa53cbe628e87310060c9dcfa312"}, + {file = "torch-1.13.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:eeeb204d30fd40af6a2d80879b46a7efbe3cf43cdbeb8838dd4f3d126cc90b2b"}, + {file = "torch-1.13.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:50ff5e76d70074f6653d191fe4f6a42fdbe0cf942fbe2a3af0b75eaa414ac038"}, + {file = "torch-1.13.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:2c3581a3fd81eb1f0f22997cddffea569fea53bafa372b2c0471db373b26aafc"}, + {file = "torch-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:0aa46f0ac95050c604bcf9ef71da9f1172e5037fdf2ebe051962d47b123848e7"}, + {file = "torch-1.13.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6930791efa8757cb6974af73d4996b6b50c592882a324b8fb0589c6a9ba2ddaf"}, + {file = "torch-1.13.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:e0df902a7c7dd6c795698532ee5970ce898672625635d885eade9976e5a04949"}, +] tornado = [ {file = "tornado-6.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:20f638fd8cc85f3cbae3c732326e96addff0a15e22d80f049e00121651e82e72"}, {file = "tornado-6.2-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:87dcafae3e884462f90c90ecc200defe5e580a7fbbb4365eda7c7c1eb809ebc9"}, @@ -3434,8 +3973,8 @@ tqdm = [ {file = "tqdm-4.64.1.tar.gz", hash = "sha256:5f4f682a004951c1b450bc753c710e9280c5746ce6ffedee253ddbcbf54cf1e4"}, ] traitlets = [ - {file = "traitlets-5.6.0-py3-none-any.whl", hash = "sha256:1410755385d778aed847d68deb99b3ba30fbbf489e17a1e8cbb753060d5cce73"}, - {file = "traitlets-5.6.0.tar.gz", hash = "sha256:10b6ed1c9cedee83e795db70a8b9c2db157bb3778ec4587a349ecb7ef3b1033b"}, + {file = "traitlets-5.7.1-py3-none-any.whl", hash = "sha256:57ba2ba951632eeab9388fa45f342a5402060a5cc9f0bb942f760fafb6641581"}, + {file = "traitlets-5.7.1.tar.gz", hash = "sha256:fde8f62c05204ead43c2c1b9389cfc85befa7f54acb5da28529d671175bb4108"}, ] transformers = [ {file = "transformers-4.25.1-py3-none-any.whl", hash = "sha256:60f1be15e17e4a54373c787c713ec149dabcc63464131ac45611618fe7c2016e"}, @@ -3465,6 +4004,10 @@ typing-extensions = [ {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = 
"sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, ] +uri-template = [ + {file = "uri_template-1.2.0-py3-none-any.whl", hash = "sha256:f1699c77b73b925cf4937eae31ab282a86dc885c333f2e942513f08f691fc7db"}, + {file = "uri_template-1.2.0.tar.gz", hash = "sha256:934e4d09d108b70eb8a24410af8615294d09d279ce0e7cbcdaef1bd21f932b06"}, +] urllib3 = [ {file = "urllib3-1.26.13-py2.py3-none-any.whl", hash = "sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc"}, {file = "urllib3-1.26.13.tar.gz", hash = "sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8"}, @@ -3477,6 +4020,10 @@ wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, ] +webcolors = [ + {file = "webcolors-1.12-py3-none-any.whl", hash = "sha256:d98743d81d498a2d3eaf165196e65481f0d2ea85281463d856b1e51b09f62dce"}, + {file = "webcolors-1.12.tar.gz", hash = "sha256:16d043d3a08fd6a1b1b7e3e9e62640d09790dce80d2bdd4792a175b35fe794a9"}, +] webencodings = [ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, @@ -3485,9 +4032,13 @@ websocket-client = [ {file = "websocket-client-1.4.2.tar.gz", hash = "sha256:d6e8f90ca8e2dd4e8027c4561adeb9456b54044312dba655e7cae652ceb9ae59"}, {file = "websocket_client-1.4.2-py3-none-any.whl", hash = "sha256:d6b06432f184438d99ac1f456eaf22fe1ade524c3dd16e661142dc54e9cba574"}, ] +wheel = [ + {file = "wheel-0.38.4-py3-none-any.whl", hash = "sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8"}, + {file = "wheel-0.38.4.tar.gz", hash = "sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac"}, +] widgetsnbextension = [ - {file = "widgetsnbextension-4.0.3-py3-none-any.whl", hash = "sha256:7f3b0de8fda692d31ef03743b598620e31c2668b835edbd3962d080ccecf31eb"}, - {file = "widgetsnbextension-4.0.3.tar.gz", hash = "sha256:34824864c062b0b3030ad78210db5ae6a3960dfb61d5b27562d6631774de0286"}, + {file = "widgetsnbextension-4.0.4-py3-none-any.whl", hash = "sha256:fa0e840719ec95dd2ec85c3a48913f1a0c29d323eacbcdb0b29bfed0cc6da678"}, + {file = "widgetsnbextension-4.0.4.tar.gz", hash = "sha256:44c69f18237af0f610557d6c1c7ef76853f5856a0e604c0a517f2320566bb775"}, ] wikipedia = [ {file = "wikipedia-1.4.0.tar.gz", hash = "sha256:db0fad1829fdd441b1852306e9856398204dc0786d2996dd2e0c8bb8e26133b2"}, diff --git a/pyproject.toml b/pyproject.toml index 455ae90c..26866076 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain" -version = "0.0.34" +version = "0.0.39" description = "Building applications with LLMs through composability" authors = [] license = "MIT" @@ -22,9 +22,12 @@ spacy = {version = "^3", optional = true} nltk = {version = "^3", optional = true} transformers = {version = "^4", optional = true} beautifulsoup4 = {version = "^4", optional = true} +torch = {version = "^1.13.1", optional = true} +tiktoken = {version = "^0", optional = true, python="^3.9"} [tool.poetry.group.test.dependencies] pytest = "^7.2.0" +pytest-cov = "^4.0.0" pytest-dotenv = "^0.5.2" 
[tool.poetry.group.lint.dependencies] @@ -47,8 +50,8 @@ jupyter = "^1.0.0" playwright = "^1.28.0" [tool.poetry.extras] -llms = ["cohere", "openai", "nlpcloud", "huggingface_hub", "manifest-ml"] -all = ["cohere", "openai", "nlpcloud", "huggingface_hub", "manifest-ml", "elasticsearch", "google-search-results", "faiss-cpu", "sentence_transformers", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4"] +llms = ["cohere", "openai", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers"] +all = ["cohere", "openai", "nlpcloud", "huggingface_hub", "manifest-ml", "elasticsearch", "google-search-results", "faiss-cpu", "sentence_transformers", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch"] [tool.isort] profile = "black" diff --git a/tests/integration_tests/llms/test_ai21.py b/tests/integration_tests/llms/test_ai21.py index 3737ee5c..16a8716d 100644 --- a/tests/integration_tests/llms/test_ai21.py +++ b/tests/integration_tests/llms/test_ai21.py @@ -1,6 +1,9 @@ """Test AI21 API wrapper.""" +from pathlib import Path + from langchain.llms.ai21 import AI21 +from langchain.llms.loading import load_llm def test_ai21_call() -> None: @@ -8,3 +11,11 @@ def test_ai21_call() -> None: llm = AI21(maxTokens=10) output = llm("Say foo:") assert isinstance(output, str) + + +def test_saving_loading_llm(tmp_path: Path) -> None: + """Test saving/loading an AI21 LLM.""" + llm = AI21(maxTokens=10) + llm.save(file_path=tmp_path / "ai21.yaml") + loaded_llm = load_llm(tmp_path / "ai21.yaml") + assert llm == loaded_llm diff --git a/tests/integration_tests/llms/test_cohere.py b/tests/integration_tests/llms/test_cohere.py index f1a8a6c3..4c260982 100644 --- a/tests/integration_tests/llms/test_cohere.py +++ b/tests/integration_tests/llms/test_cohere.py @@ -1,6 +1,10 @@ """Test Cohere API wrapper.""" +from pathlib import Path + from langchain.llms.cohere import Cohere +from langchain.llms.loading import load_llm +from tests.integration_tests.llms.utils import assert_llm_equality def test_cohere_call() -> None: @@ -8,3 +12,11 @@ def test_cohere_call() -> None: llm = Cohere(max_tokens=10) output = llm("Say foo:") assert isinstance(output, str) + + +def test_saving_loading_llm(tmp_path: Path) -> None: + """Test saving/loading a Cohere LLM.""" + llm = Cohere(max_tokens=10) + llm.save(file_path=tmp_path / "cohere.yaml") + loaded_llm = load_llm(tmp_path / "cohere.yaml") + assert_llm_equality(llm, loaded_llm) diff --git a/tests/integration_tests/llms/test_huggingface_hub.py b/tests/integration_tests/llms/test_huggingface_hub.py index aa181a87..df0b4416 100644 --- a/tests/integration_tests/llms/test_huggingface_hub.py +++ b/tests/integration_tests/llms/test_huggingface_hub.py @@ -1,8 +1,12 @@ """Test HuggingFace API wrapper.""" +from pathlib import Path + import pytest from langchain.llms.huggingface_hub import HuggingFaceHub +from langchain.llms.loading import load_llm +from tests.integration_tests.llms.utils import assert_llm_equality def test_huggingface_text_generation() -> None: @@ -24,3 +28,11 @@ def test_huggingface_call_error() -> None: llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1}) with pytest.raises(ValueError): llm("Say foo:") + + +def test_saving_loading_llm(tmp_path: Path) -> None: + """Test saving/loading a HuggingFaceHub LLM.""" + llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) + llm.save(file_path=tmp_path / "hf.yaml") + loaded_llm = load_llm(tmp_path / "hf.yaml") + assert_llm_equality(llm, loaded_llm) diff --git 
a/tests/integration_tests/llms/test_huggingface_pipeline.py b/tests/integration_tests/llms/test_huggingface_pipeline.py new file mode 100644 index 00000000..7cf3b6d1 --- /dev/null +++ b/tests/integration_tests/llms/test_huggingface_pipeline.py @@ -0,0 +1,41 @@ +"""Test HuggingFace Pipeline wrapper.""" + +from pathlib import Path + +from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline + +from langchain.llms.huggingface_pipeline import HuggingFacePipeline +from langchain.llms.loading import load_llm +from tests.integration_tests.llms.utils import assert_llm_equality + + +def test_huggingface_pipeline_text_generation() -> None: + """Test valid call to HuggingFace text generation model.""" + llm = HuggingFacePipeline.from_model_id( + model_id="gpt2", task="text-generation", model_kwargs={"max_new_tokens": 10} + ) + output = llm("Say foo:") + assert isinstance(output, str) + + +def test_saving_loading_llm(tmp_path: Path) -> None: + """Test saving/loading a HuggingFacePipeline LLM.""" + llm = HuggingFacePipeline.from_model_id( + model_id="gpt2", task="text-generation", model_kwargs={"max_new_tokens": 10} + ) + llm.save(file_path=tmp_path / "hf.yaml") + loaded_llm = load_llm(tmp_path / "hf.yaml") + assert_llm_equality(llm, loaded_llm) + + +def test_init_with_pipeline() -> None: + """Test initialization with a HF pipeline.""" + model_id = "gpt2" + tokenizer = AutoTokenizer.from_pretrained(model_id) + model = AutoModelForCausalLM.from_pretrained(model_id) + pipe = pipeline( + "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 + ) + llm = HuggingFacePipeline(pipeline=pipe) + output = llm("Say foo:") + assert isinstance(output, str) diff --git a/tests/integration_tests/llms/test_nlpcloud.py b/tests/integration_tests/llms/test_nlpcloud.py index 9e4664e4..4c5ccca0 100644 --- a/tests/integration_tests/llms/test_nlpcloud.py +++ b/tests/integration_tests/llms/test_nlpcloud.py @@ -1,6 +1,10 @@ """Test NLPCloud API wrapper.""" +from pathlib import Path + +from langchain.llms.loading import load_llm from langchain.llms.nlpcloud import NLPCloud +from tests.integration_tests.llms.utils import assert_llm_equality def test_nlpcloud_call() -> None: @@ -8,3 +12,11 @@ def test_nlpcloud_call() -> None: llm = NLPCloud(max_length=10) output = llm("Say foo:") assert isinstance(output, str) + + +def test_saving_loading_llm(tmp_path: Path) -> None: + """Test saving/loading an NLPCloud LLM.""" + llm = NLPCloud(max_length=10) + llm.save(file_path=tmp_path / "nlpcloud.yaml") + loaded_llm = load_llm(tmp_path / "nlpcloud.yaml") + assert_llm_equality(llm, loaded_llm) diff --git a/tests/integration_tests/llms/test_openai.py b/tests/integration_tests/llms/test_openai.py index 4050e42e..9c03b92f 100644 --- a/tests/integration_tests/llms/test_openai.py +++ b/tests/integration_tests/llms/test_openai.py @@ -1,7 +1,11 @@ """Test OpenAI API wrapper.""" +from pathlib import Path +from typing import Generator + import pytest +from langchain.llms.loading import load_llm from langchain.llms.openai import OpenAI @@ -44,3 +48,29 @@ def test_openai_stop_error() -> None: llm = OpenAI(stop="3", temperature=0) with pytest.raises(ValueError): llm("write an ordered list of five items", stop=["\n"]) + + +def test_saving_loading_llm(tmp_path: Path) -> None: + """Test saving/loading an OpenAI LLM.""" + llm = OpenAI(max_tokens=10) + llm.save(file_path=tmp_path / "openai.yaml") + loaded_llm = load_llm(tmp_path / "openai.yaml") + assert loaded_llm == llm + + +def test_openai_streaming() -> None: + """Test streaming 
tokens from OpenAI.""" + llm = OpenAI(max_tokens=10) + generator = llm.stream("I'm Pickle Rick") + + assert isinstance(generator, Generator) + + for token in generator: + assert isinstance(token["choices"][0]["text"], str) + + +def test_openai_streaming_error() -> None: + """Test error handling in stream.""" + llm = OpenAI(best_of=2) + with pytest.raises(ValueError): + llm.stream("I'm Pickle Rick") diff --git a/tests/integration_tests/llms/utils.py b/tests/integration_tests/llms/utils.py new file mode 100644 index 00000000..c05445d4 --- /dev/null +++ b/tests/integration_tests/llms/utils.py @@ -0,0 +1,16 @@ +"""Utils for LLM Tests.""" + +from langchain.llms.base import LLM + + +def assert_llm_equality(llm: LLM, loaded_llm: LLM) -> None: + """Assert LLM Equality for tests.""" + # Check that they are the same type. + assert type(llm) == type(loaded_llm) + # Client field can be session based, so hash is different despite + # all other values being the same, so just assess all other fields + for field in llm.__fields__.keys(): + if field != "client" and field != "pipeline": + val = getattr(llm, field) + new_val = getattr(loaded_llm, field) + assert new_val == val diff --git a/tests/unit_tests/agents/test_agent.py b/tests/unit_tests/agents/test_agent.py index 85853e70..48d0e61f 100644 --- a/tests/unit_tests/agents/test_agent.py +++ b/tests/unit_tests/agents/test_agent.py @@ -2,19 +2,19 @@ from typing import Any, List, Mapping, Optional +from pydantic import BaseModel + from langchain.agents import Tool, initialize_agent from langchain.llms.base import LLM -class FakeListLLM(LLM): +class FakeListLLM(LLM, BaseModel): """Fake LLM for testing that outputs elements of a list.""" - def __init__(self, responses: List[str]): - """Initialize with list of responses.""" - self.responses = responses - self.i = -1 + responses: List[str] + i: int = -1 - def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: + def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Increment counter, and then return response in that index.""" self.i += 1 print(self.i) @@ -25,6 +25,11 @@ class FakeListLLM(LLM): def _identifying_params(self) -> Mapping[str, Any]: return {} + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "fake_list" + def test_agent_bad_action() -> None: """Test react chain when bad action given.""" @@ -33,7 +38,7 @@ def test_agent_bad_action() -> None: f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment", "Oh well\nAction: Final Answer\nAction Input: curses foiled again", ] - fake_llm = FakeListLLM(responses) + fake_llm = FakeListLLM(responses=responses) tools = [ Tool("Search", lambda x: x, "Useful for searching"), Tool("Lookup", lambda x: x, "Useful for looking up things in a table"), diff --git a/tests/unit_tests/agents/test_react.py b/tests/unit_tests/agents/test_react.py index 16cc26ab..cd937db8 100644 --- a/tests/unit_tests/agents/test_react.py +++ b/tests/unit_tests/agents/test_react.py @@ -2,6 +2,8 @@ from typing import Any, List, Mapping, Optional, Union +from pydantic import BaseModel + from langchain.agents.react.base import ReActChain, ReActDocstoreAgent from langchain.agents.tools import Tool from langchain.docstore.base import Docstore @@ -20,15 +22,18 @@ Made in 2022.""" _FAKE_PROMPT = PromptTemplate(input_variables=["input"], template="{input}") -class FakeListLLM(LLM): +class FakeListLLM(LLM, BaseModel): """Fake LLM for testing that outputs elements of a list.""" - def __init__(self, responses: List[str]): 
- """Initialize with list of responses.""" - self.responses = responses - self.i = -1 + responses: List[str] + i: int = -1 + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "fake_list" - def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: + def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Increment counter, and then return response in that index.""" self.i += 1 return self.responses[self.i] @@ -50,7 +55,7 @@ class FakeDocstore(Docstore): def test_predict_until_observation_normal() -> None: """Test predict_until_observation when observation is made normally.""" outputs = ["foo\nAction 1: Search[foo]"] - fake_llm = FakeListLLM(outputs) + fake_llm = FakeListLLM(responses=outputs) tools = [ Tool("Search", lambda x: x), Tool("Lookup", lambda x: x), @@ -65,7 +70,7 @@ def test_predict_until_observation_normal() -> None: def test_predict_until_observation_repeat() -> None: """Test when no action is generated initially.""" outputs = ["foo", " Search[foo]"] - fake_llm = FakeListLLM(outputs) + fake_llm = FakeListLLM(responses=outputs) tools = [ Tool("Search", lambda x: x), Tool("Lookup", lambda x: x), @@ -84,7 +89,7 @@ def test_react_chain() -> None: "I should probably lookup\nAction 2: Lookup[made]", "Ah okay now I know the answer\nAction 3: Finish[2022]", ] - fake_llm = FakeListLLM(responses) + fake_llm = FakeListLLM(responses=responses) react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore()) output = react_chain.run("when was langchain made") assert output == "2022" @@ -97,7 +102,7 @@ def test_react_chain_bad_action() -> None: f"I'm turning evil\nAction 1: {bad_action_name}[langchain]", "Oh well\nAction 2: Finish[curses foiled again]", ] - fake_llm = FakeListLLM(responses) + fake_llm = FakeListLLM(responses=responses) react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore()) output = react_chain.run("when was langchain made") assert output == "curses foiled again" diff --git a/tests/unit_tests/chains/test_base.py b/tests/unit_tests/chains/test_base.py index 2ab19c31..8fcfa918 100644 --- a/tests/unit_tests/chains/test_base.py +++ b/tests/unit_tests/chains/test_base.py @@ -11,11 +11,12 @@ class FakeChain(Chain, BaseModel): """Fake chain class for testing purposes.""" be_correct: bool = True + the_input_keys: List[str] = ["foo"] @property def input_keys(self) -> List[str]: - """Input key of foo.""" - return ["foo"] + """Input keys.""" + return self.the_input_keys @property def output_keys(self) -> List[str]: @@ -48,3 +49,17 @@ def test_correct_call() -> None: chain = FakeChain() output = chain({"foo": "bar"}) assert output == {"foo": "bar", "bar": "baz"} + + +def test_single_input_correct() -> None: + """Test passing single input works.""" + chain = FakeChain() + output = chain("bar") + assert output == {"foo": "bar", "bar": "baz"} + + +def test_single_input_error() -> None: + """Test passing single input errors as expected.""" + chain = FakeChain(the_input_keys=["foo", "bar"]) + with pytest.raises(ValueError): + chain("bar") diff --git a/tests/unit_tests/chains/test_combine_documents.py b/tests/unit_tests/chains/test_combine_documents.py new file mode 100644 index 00000000..81468f97 --- /dev/null +++ b/tests/unit_tests/chains/test_combine_documents.py @@ -0,0 +1,118 @@ +"""Test functionality related to combining documents.""" + +from typing import List + +import pytest + +from langchain.chains.combine_documents.map_reduce import ( + _collapse_docs, + _split_list_of_docs, +) +from langchain.docstore.document 
import Document + + +def _fake_docs_len_func(docs: List[Document]) -> int: + return len(_fake_combine_docs_func(docs)) + + +def _fake_combine_docs_func(docs: List[Document]) -> str: + return "".join([d.page_content for d in docs]) + + +def test__split_list_long_single_doc() -> None: + """Test splitting of a long single doc.""" + docs = [Document(page_content="foo" * 100)] + with pytest.raises(ValueError): + _split_list_of_docs(docs, _fake_docs_len_func, 100) + + +def test__split_list_long_pair_doc() -> None: + """Test splitting of a list with two medium docs.""" + docs = [Document(page_content="foo" * 30)] * 2 + with pytest.raises(ValueError): + _split_list_of_docs(docs, _fake_docs_len_func, 100) + + +def test__split_list_single_doc() -> None: + """Test splitting works with just a single doc.""" + docs = [Document(page_content="foo")] + doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 100) + assert doc_list == [docs] + + +def test__split_list_double_doc() -> None: + """Test splitting works with just two docs.""" + docs = [Document(page_content="foo"), Document(page_content="bar")] + doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 100) + assert doc_list == [docs] + + +def test__split_list_works_correctly() -> None: + """Test splitting works correctly.""" + docs = [ + Document(page_content="foo"), + Document(page_content="bar"), + Document(page_content="baz"), + Document(page_content="foo" * 2), + Document(page_content="bar"), + Document(page_content="baz"), + ] + doc_list = _split_list_of_docs(docs, _fake_docs_len_func, 10) + expected_result = [ + # Test a group of three. + [ + Document(page_content="foo"), + Document(page_content="bar"), + Document(page_content="baz"), + ], + # Test a group of two, where one is bigger. + [Document(page_content="foo" * 2), Document(page_content="bar")], + # Test no errors on the last group. + [Document(page_content="baz")], + ] + assert doc_list == expected_result + + +def test__collapse_docs_no_metadata() -> None: + """Test collapse documents functionality when no metadata.""" + docs = [ + Document(page_content="foo"), + Document(page_content="bar"), + Document(page_content="baz"), + ] + output = _collapse_docs(docs, _fake_combine_docs_func) + expected_output = Document(page_content="foobarbaz") + assert output == expected_output + + +def test__collapse_docs_one_doc() -> None: + """Test collapse documents functionality when only one document present.""" + # Test with no metadata. + docs = [Document(page_content="foo")] + output = _collapse_docs(docs, _fake_combine_docs_func) + assert output == docs[0] + + # Test with metadata. 
+ docs = [Document(page_content="foo", metadata={"source": "a"})] + output = _collapse_docs(docs, _fake_combine_docs_func) + assert output == docs[0] + + +def test__collapse_docs_metadata() -> None: + """Test collapse documents functionality when metadata exists.""" + metadata1 = {"source": "a", "foo": 2, "bar": "1", "extra1": "foo"} + metadata2 = {"source": "b", "foo": "3", "bar": 2, "extra2": "bar"} + docs = [ + Document(page_content="foo", metadata=metadata1), + Document(page_content="bar", metadata=metadata2), + ] + output = _collapse_docs(docs, _fake_combine_docs_func) + expected_metadata = { + "source": "a, b", + "foo": "2, 3", + "bar": "1, 2", + "extra1": "foo", + "extra2": "bar", + } + expected_output = Document(page_content="foobar", metadata=expected_metadata) + assert output == expected_output diff --git a/tests/unit_tests/chains/test_conversation.py b/tests/unit_tests/chains/test_conversation.py index 82653986..fd7eb55f 100644 --- a/tests/unit_tests/chains/test_conversation.py +++ b/tests/unit_tests/chains/test_conversation.py @@ -4,6 +4,7 @@ import pytest from langchain.chains.base import Memory from langchain.chains.conversation.base import ConversationChain from langchain.chains.conversation.memory import ( + ConversationalBufferWindowMemory, ConversationBufferMemory, ConversationSummaryMemory, ) @@ -66,3 +67,23 @@ def test_conversation_memory(memory: Memory) -> None: bad_outputs = {"foo": "bar", "foo1": "bar"} with pytest.raises(ValueError): memory.save_context(good_inputs, bad_outputs) + + +@pytest.mark.parametrize( + "memory", + [ + ConversationBufferMemory(memory_key="baz"), + ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"), + ConversationalBufferWindowMemory(memory_key="baz"), + ], +) +def test_clearing_conversation_memory(memory: Memory) -> None: + """Test clearing the conversation memory.""" + # This is a good input because the input is not the same as baz. + good_inputs = {"foo": "bar", "baz": "foo"} + # This is a good output because there is one variable. 
+ good_outputs = {"bar": "foo"} + memory.save_context(good_inputs, good_outputs) + + memory.clear() + assert memory.load_memory_variables({}) == {"baz": ""} diff --git a/tests/unit_tests/chains/test_natbot.py b/tests/unit_tests/chains/test_natbot.py index bf4223b2..0beaa409 100644 --- a/tests/unit_tests/chains/test_natbot.py +++ b/tests/unit_tests/chains/test_natbot.py @@ -2,20 +2,27 @@ from typing import Any, List, Mapping, Optional +from pydantic import BaseModel + from langchain.chains.natbot.base import NatBotChain from langchain.llms.base import LLM -class FakeLLM(LLM): +class FakeLLM(LLM, BaseModel): """Fake LLM wrapper for testing purposes.""" - def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: + def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Return `foo` if longer than 10000 words, else `bar`.""" if len(prompt) > 10000: return "foo" else: return "bar" + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "fake" + @property def _identifying_params(self) -> Mapping[str, Any]: return {} diff --git a/tests/unit_tests/llms/fake_llm.py b/tests/unit_tests/llms/fake_llm.py index 65fd8c79..dd8b3462 100644 --- a/tests/unit_tests/llms/fake_llm.py +++ b/tests/unit_tests/llms/fake_llm.py @@ -1,20 +1,25 @@ """Fake LLM wrapper for testing purposes.""" from typing import Any, List, Mapping, Optional +from pydantic import BaseModel + from langchain.llms.base import LLM -class FakeLLM(LLM): +class FakeLLM(LLM, BaseModel): """Fake LLM wrapper for testing purposes.""" - def __init__(self, queries: Optional[Mapping] = None): - """Initialize with optional lookup of queries.""" - self._queries = queries + queries: Optional[Mapping] = None + + @property + def _llm_type(self) -> str: + """Return type of llm.""" + return "fake" - def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str: + def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """First try to lookup in queries, else return 'foo' or 'bar'.""" - if self._queries is not None: - return self._queries[prompt] + if self.queries is not None: + return self.queries[prompt] if stop is None: return "foo" else: diff --git a/tests/unit_tests/llms/test_loading.py b/tests/unit_tests/llms/test_loading.py new file mode 100644 index 00000000..e478a0b0 --- /dev/null +++ b/tests/unit_tests/llms/test_loading.py @@ -0,0 +1,15 @@ +"""Test LLM saving and loading functions.""" +from pathlib import Path +from unittest.mock import patch + +from langchain.llms.loading import load_llm +from tests.unit_tests.llms.fake_llm import FakeLLM + + +@patch("langchain.llms.loading.type_to_cls_dict", {"fake": FakeLLM}) +def test_saving_loading_round_trip(tmp_path: Path) -> None: + """Test saving/loading a Fake LLM.""" + fake_llm = FakeLLM() + fake_llm.save(file_path=tmp_path / "fake_llm.yaml") + loaded_llm = load_llm(tmp_path / "fake_llm.yaml") + assert loaded_llm == fake_llm diff --git a/tests/unit_tests/llms/test_utils.py b/tests/unit_tests/llms/test_utils.py index 5685f650..77cff607 100644 --- a/tests/unit_tests/llms/test_utils.py +++ b/tests/unit_tests/llms/test_utils.py @@ -10,6 +10,9 @@ def test_enforce_stop_tokens() -> None: text = "foo bar baz" output = enforce_stop_tokens(text, ["moo", "baz", "bar"]) assert output == "foo " + text = "foo bar baz" + output = enforce_stop_tokens(text, ["moo", "bar"]) + assert output == "foo " def test_enforce_stop_tokens_none() -> None: