Feature: linkcheck-action (#534) (#542)

- Add support for local build and linkchecking of docs
- Add GitHub Action to automatically check links prior to publication
- Minor reformat of Contributing readme
- Fix existing broken links

Co-authored-by: Hunter Gerlach <hunter@huntergerlach.com>

Co-authored-by: Hunter Gerlach <HunterGerlach@users.noreply.github.com>
Harrison Chase committed via GitHub
commit 9753bccc71 (parent 5aefc2b7ce)

@ -0,0 +1,36 @@
name: linkcheck

on:
  push:
    branches: [master]
  pull_request:

env:
  POETRY_VERSION: "1.3.1"

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version:
          - "3.11"
    steps:
      - uses: actions/checkout@v3
      - name: Install poetry
        run: |
          pipx install poetry==$POETRY_VERSION
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: poetry
      - name: Install dependencies
        run: |
          poetry install --with docs
      - name: Build the docs
        run: |
          make docs_build
      - name: Analyzing the docs with linkcheck
        run: |
          make docs_linkcheck
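
For reference, the job above can be reproduced locally with the Makefile targets this change adds; a minimal sketch, assuming `pipx` and `make` are already available on the PATH:

```bash
# Mirror the CI job locally: install the pinned Poetry version,
# install the docs dependency group, build the docs, then check links.
pipx install poetry==1.3.1
poetry install --with docs
make docs_build
make docs_linkcheck
```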

.gitignore

@ -107,6 +107,7 @@ celerybeat.pid
# Environments
.env
.venv
.venvs
env/
venv/
ENV/

@ -55,9 +55,7 @@ even patch releases may contain [non-backwards-compatible changes](https://semve
If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)!
If you have a Twitter account you would like us to mention, please let us know in the PR or in another manner.
## 🤖Developer Setup
### 🚀Quick Start
## 🚀Quick Start
This project uses [Poetry](https://python-poetry.org/) as a dependency manager. Check out Poetry's [documentation on how to install it](https://python-poetry.org/docs/#installation) on your system before proceeding.
@ -77,9 +75,9 @@ This will install all requirements for running the package, examples, linting, f
Now, you should be able to run the common tasks in the following section.
### ✅Common Tasks
## ✅Common Tasks
#### Code Formatting
### Code Formatting
Formatting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/) and [isort](https://pycqa.github.io/isort/).
@ -89,7 +87,7 @@ To run formatting for this project:
make format
```
#### Linting
### Linting
Linting for this project is done via a combination of [Black](https://black.readthedocs.io/en/stable/), [isort](https://pycqa.github.io/isort/), [flake8](https://flake8.pycqa.org/en/latest/), and [mypy](http://mypy-lang.org/).
@ -101,7 +99,7 @@ make lint
We recognize linting can be annoying - if you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
#### Coverage
### Coverage
Code coverage (i.e. the amount of code that is covered by unit tests) helps identify areas of the code that are potentially more or less brittle.
@ -111,7 +109,7 @@ To get a report of current coverage, run the following:
make coverage
```
#### Testing
### Testing
Unit tests cover modular logic that does not require calls to outside APIs.
@ -133,7 +131,7 @@ make integration_tests
If you add support for a new external API, please add a new integration test.
#### Adding a Jupyter Notebook
### Adding a Jupyter Notebook
If you are adding a Jupyter notebook example, you'll want to install the optional `dev` dependencies.
@ -151,10 +149,32 @@ poetry run jupyter notebook
When you run `poetry install`, the `langchain` package is installed as editable in the virtualenv, so your new logic can be imported into the notebook.
#### Contribute Documentation
## Documentation
### Contribute Documentation
Docs are largely autogenerated by [sphinx](https://www.sphinx-doc.org/en/master/) from the code.
For that reason, we ask that you add good documentation to all classes and methods.
Similar to linting, we recognize documentation can be annoying. If you do not want to do it, please contact a project maintainer, and they can help you with it. We do not want this to be a blocker for good code getting contributed.
### Build Documentation Locally
Before building the documentation, it is always a good idea to clean the build directory:
```bash
make docs_clean
```
Next, build the documentation as outlined below:
```bash
make docs_build
```
Finally, run the linkchecker to make sure all links in the built HTML are valid:
```bash
make docs_linkcheck
```
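If the link checker reports failures, it can help to browse the built HTML directly. One option (not part of this change, just a local convenience) is Python's built-in HTTP server:

```bash
# Serve the Sphinx output so flagged pages can be inspected in a browser.
# Assumes the docs were already built with `make docs_build`.
python3 -m http.server 8000 --directory docs/_build/html
```

Then open http://localhost:8000 and follow the links that linkchecker reported.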

@ -6,6 +6,15 @@ coverage:
	--cov-report xml \
	--cov-report term-missing:skip-covered

docs_build:
	cd docs && poetry run make html

docs_clean:
	cd docs && poetry run make clean

docs_linkcheck:
	poetry run linkchecker docs/_build/html/index.html

format:
	poetry run black .
	poetry run isort .

@ -3,7 +3,7 @@
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SPHINXAUTOBUILD ?= sphinx-autobuild
SOURCEDIR = .

@ -48,8 +48,7 @@ extensions = [
"sphinx_panels",
"IPython.sphinxext.ipython_console_highlighting",
]
source_suffix = [".rst", ".md"]
source_suffix = [".ipynb", ".html", ".md", ".rst"]
autodoc_pydantic_model_show_json = False
autodoc_pydantic_field_list_validators = False

@ -1,50 +1,55 @@
# Glossary
This is a collection of terminology commonly used when developing LLM applications.
It contains references to external papers or sources where the concept was first introduced,
as well as to places in LangChain where the concept is used.
## Chain of Thought Prompting
A prompting technique used to encourage the model to generate a series of intermediate reasoning steps.
A less formal way to induce this behavior is to include “Let's think step-by-step” in the prompt.
Resources:
- [Chain-of-Thought Paper](https://arxiv.org/pdf/2201.11903.pdf)
- [Step-by-Step Paper](https://arxiv.org/abs/2112.00114)
## Action Plan Generation
A prompt usage that uses a language model to generate actions to take.
The results of these actions can then be fed back into the language model to generate a subsequent action.
Resources:
- [WebGPT Paper](https://arxiv.org/pdf/2112.09332.pdf)
- [SayCan Paper](https://say-can.github.io/assets/palm_saycan.pdf)
## ReAct Prompting
A prompting technique that combines Chain-of-Thought prompting with action plan generation.
This induces the model to think about what action to take, then take it.
Resources:
- [Paper](https://arxiv.org/pdf/2210.03629.pdf)
- [LangChain Example](https://github.com/hwchase17/langchain/blob/master/docs/examples/agents/react.ipynb)
- [LangChain Example](./modules/agents/implementations/react.ipynb)
## Self-ask
A prompting method that builds on top of chain-of-thought prompting.
In this method, the model explicitly asks itself follow-up questions, which are then answered by an external search engine.
Resources:
- [Paper](https://ofir.io/self-ask.pdf)
- [LangChain Example](https://github.com/hwchase17/langchain/blob/master/docs/examples/agents/self_ask_with_search.ipynb)
- [LangChain Example](./modules/agents/implementations/self_ask_with_search.ipynb)
## Prompt Chaining
Combining multiple LLM calls, with the output of one step being the input to the next.
Resources:
- [PromptChainer Paper](https://arxiv.org/pdf/2203.06566.pdf)
- [Language Model Cascades](https://arxiv.org/abs/2207.10342)
- [ICE Primer Book](https://primer.ought.org/)
@ -52,25 +57,28 @@ Resources:
## Memetic Proxy
Encouraging the LLM to respond in a certain way by framing the discussion in a context that the model knows of and that will result in that type of response. For example, as a conversation between a student and a teacher.
Resources:
- [Paper](https://arxiv.org/pdf/2102.07350.pdf)
## Self Consistency
A decoding strategy that samples a diverse set of reasoning paths and then selects the most consistent answer.
It is most effective when combined with Chain-of-Thought prompting.
Resources:
- [Paper](https://arxiv.org/pdf/2203.11171.pdf)
## Inception
Also called “First Person Instruction”.
Encouraging the model to think a certain way by including the start of the model's response in the prompt.
Resources:
- [Example](https://twitter.com/goodside/status/1583262455207460865?s=20&t=8Hz7XBnK1OF8siQrxxCIGQ)
## MemPrompt
@ -78,4 +86,5 @@ Resources:
MemPrompt maintains a memory of errors and user feedback, and uses them to prevent repetition of mistakes.
Resources:
- [Paper](https://memprompt.com/)

@ -14,7 +14,7 @@ Getting Started
Check out the guide below for a walkthrough of how to get started using LangChain to create a Language Model application.
- `Getting Started Documentation <getting_started/getting_started.html>`_
- `Getting Started Documentation <./getting_started/getting_started.html>`_
.. toctree::
:maxdepth: 1
@ -32,17 +32,17 @@ For each module we provide some examples to get started, how-to guides, referenc
These modules are, in increasing order of complexity:
- `Prompts <modules/prompts.html>`_: This includes prompt management, prompt optimization, and prompt serialization.
- `Prompts <./modules/prompts.html>`_: This includes prompt management, prompt optimization, and prompt serialization.
- `LLMs <modules/llms.html>`_: This includes a generic interface for all LLMs, and common utilities for working with LLMs.
- `LLMs <./modules/llms.html>`_: This includes a generic interface for all LLMs, and common utilities for working with LLMs.
- `Utils <modules/utils.html>`_: Language models are often more powerful when interacting with other sources of knowledge or computation. This can include Python REPLs, embeddings, search engines, and more. LangChain provides a large collection of common utils to use in your application.
- `Utils <./modules/utils.html>`_: Language models are often more powerful when interacting with other sources of knowledge or computation. This can include Python REPLs, embeddings, search engines, and more. LangChain provides a large collection of common utils to use in your application.
- `Chains <modules/chains.html>`_: Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.
- `Chains <./modules/chains.html>`_: Chains go beyond just a single LLM call, and are sequences of calls (whether to an LLM or a different utility). LangChain provides a standard interface for chains, lots of integrations with other tools, and end-to-end chains for common applications.
- `Agents <modules/agents.html>`_: Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents.
- `Agents <./modules/agents.html>`_: Agents involve an LLM making decisions about which Actions to take, taking that Action, seeing an Observation, and repeating that until done. LangChain provides a standard interface for agents, a selection of agents to choose from, and examples of end to end agents.
- `Memory <modules/memory.html>`_: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.
- `Memory <./modules/memory.html>`_: Memory is the concept of persisting state between calls of a chain/agent. LangChain provides a standard interface for memory, a collection of memory implementations, and examples of chains/agents that use memory.
.. toctree::
@ -51,33 +51,33 @@ These modules are, in increasing order of complexity:
:name: modules
:hidden:
modules/prompts.md
modules/llms.md
modules/utils.md
modules/chains.md
modules/agents.md
modules/memory.md
./modules/prompts.md
./modules/llms.md
./modules/utils.md
./modules/chains.md
./modules/agents.md
./modules/memory.md
Use Cases
----------
The above modules can be used in a variety of ways. LangChain also provides guidance and assistance in this. Below are some of the common use cases LangChain supports.
- `Agents <use_cases/agents.html>`_: Agents are systems that use a language model to interact with other tools. These can be used to do more grounded question/answering, interact with APIs, or even take actions.
- `Agents <./use_cases/agents.html>`_: Agents are systems that use a language model to interact with other tools. These can be used to do more grounded question/answering, interact with APIs, or even take actions.
- `Chatbots <use_cases/chatbots.html>`_: Since language models are good at producing text, that makes them ideal for creating chatbots.
- `Chatbots <./use_cases/chatbots.html>`_: Since language models are good at producing text, that makes them ideal for creating chatbots.
- `Data Augmented Generation <use_cases/combine_docs.html>`_: Data Augmented Generation involves specific types of chains that first interact with an external datasource to fetch data to use in the generation step. Examples of this include summarization of long pieces of text and question/answering over specific data sources.
- `Data Augmented Generation <./use_cases/combine_docs.html>`_: Data Augmented Generation involves specific types of chains that first interact with an external datasource to fetch data to use in the generation step. Examples of this include summarization of long pieces of text and question/answering over specific data sources.
- `Question Answering <use_cases/question_answering.html>`_: Answering questions over specific documents, only utilizing the information in those documents to construct an answer. A type of Data Augmented Generation.
- `Question Answering <./use_cases/question_answering.html>`_: Answering questions over specific documents, only utilizing the information in those documents to construct an answer. A type of Data Augmented Generation.
- `Summarization <use_cases/summarization.html>`_: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation.
- `Summarization <./use_cases/summarization.html>`_: Summarizing longer documents into shorter, more condensed chunks of information. A type of Data Augmented Generation.
- `Evaluation <use_cases/evaluation.html>`_: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
- `Evaluation <./use_cases/evaluation.html>`_: Generative models are notoriously hard to evaluate with traditional metrics. One new way of evaluating them is using language models themselves to do the evaluation. LangChain provides some prompts/chains for assisting in this.
- `Generate similar examples <use_cases/generate_examples.html>`_: Generating similar examples to a given input. This is a common use case for many applications, and LangChain provides some prompts/chains for assisting in this.
- `Generate similar examples <./use_cases/generate_examples.html>`_: Generating similar examples to a given input. This is a common use case for many applications, and LangChain provides some prompts/chains for assisting in this.
- `Compare models <model_laboratory.html>`_: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.
- `Compare models <./use_cases/model_laboratory.html>`_: Experimenting with different prompts, models, and chains is a big part of developing the best possible application. The ModelLaboratory makes it easy to do so.
@ -87,14 +87,14 @@ The above modules can be used in a variety of ways. LangChain also provides guid
:name: use_cases
:hidden:
use_cases/agents.md
use_cases/chatbots.md
use_cases/generate_examples.ipynb
use_cases/combine_docs.md
use_cases/question_answering.md
use_cases/summarization.md
use_cases/evaluation.rst
use_cases/model_laboratory.ipynb
./use_cases/agents.md
./use_cases/chatbots.md
./use_cases/generate_examples.ipynb
./use_cases/combine_docs.md
./use_cases/question_answering.md
./use_cases/summarization.md
./use_cases/evaluation.rst
./use_cases/model_laboratory.ipynb
Reference Docs
@ -103,16 +103,16 @@ Reference Docs
All of LangChain's reference documentation, in one place. Full documentation on all methods, classes, installation methods, and integration setups for LangChain.
- `Reference Documentation <reference.html>`_
- `Reference Documentation <./reference.html>`_
.. toctree::
:maxdepth: 1
:caption: Reference
:name: reference
:hidden:
reference/installation.md
reference/integrations.md
reference.rst
./reference/installation.md
./reference/integrations.md
./reference.rst
LangChain Ecosystem
@ -120,7 +120,7 @@ LangChain Ecosystem
Guides for how other companies/products can be used with LangChain
- `LangChain Ecosystem <ecosystem.html>`_
- `LangChain Ecosystem <./ecosystem.html>`_
.. toctree::
:maxdepth: 1
@ -129,7 +129,7 @@ Guides for how other companies/products can be used with LangChain
:name: ecosystem
:hidden:
ecosystem.rst
./ecosystem.rst
Additional Resources
@ -137,9 +137,9 @@ Additional Resources
Additional collection of resources we think may be useful as you develop your application!
- `Glossary <glossary.html>`_: A glossary of all related terms, papers, methods, etc. Whether implemented in LangChain or not!
- `Glossary <./glossary.html>`_: A glossary of all related terms, papers, methods, etc. Whether implemented in LangChain or not!
- `Gallery <gallery.html>`_: A collection of our favorite projects that use LangChain. Useful for finding inspiration or seeing how things were done in other applications.
- `Gallery <./gallery.html>`_: A collection of our favorite projects that use LangChain. Useful for finding inspiration or seeing how things were done in other applications.
- `Discord <https://discord.gg/6adMQxSpJS>`_: Join us on our Discord to discuss all things LangChain!
@ -150,5 +150,5 @@ Additional collection of resources we think may be useful as you develop your ap
:name: resources
:hidden:
glossary.md
gallery.rst
./glossary.md
./gallery.rst

@ -8,13 +8,13 @@ Depending on the user input, the agent can then decide which, if any, of these t
The following sections of documentation are provided:
- `Getting Started <agents/getting_started.html>`_: A notebook to help you get started working with agents as quickly as possible.
- `Getting Started <./agents/getting_started.html>`_: A notebook to help you get started working with agents as quickly as possible.
- `Key Concepts <agents/key_concepts.html>`_: A conceptual guide going over the various concepts related to agents.
- `Key Concepts <./agents/key_concepts.html>`_: A conceptual guide going over the various concepts related to agents.
- `How-To Guides <agents/how_to_guides.html>`_: A collection of how-to guides. These highlight how to integrate various types of tools, how to work with different types of agent, and how to customize agents.
- `How-To Guides <./agents/how_to_guides.html>`_: A collection of how-to guides. These highlight how to integrate various types of tools, how to work with different types of agent, and how to customize agents.
- `Reference </reference/modules/agents.html>`_: API reference documentation for all Agent classes.
- `Reference <../reference/modules/agents.html>`_: API reference documentation for all Agent classes.
@ -24,7 +24,7 @@ The following sections of documentation are provided:
:name: Agents
:hidden:
agents/getting_started.ipynb
agents/key_concepts.md
agents/how_to_guides.rst
Reference</reference/modules/agents.rst>
./agents/getting_started.ipynb
./agents/key_concepts.md
./agents/how_to_guides.rst
Reference<../reference/modules/agents.rst>

@ -3,24 +3,24 @@ How-To Guides
The first category of how-to guides here cover specific parts of working with agents.
`Custom Tools <examples/custom_tools.html>`_: How to create custom tools that an agent can use.
`Custom Tools <./examples/custom_tools.html>`_: How to create custom tools that an agent can use.
`Intermediate Steps <examples/intermediate_steps.html>`_: How to access and use intermediate steps to get more visibility into the internals of an agent.
`Intermediate Steps <./examples/intermediate_steps.html>`_: How to access and use intermediate steps to get more visibility into the internals of an agent.
`Custom Agent <examples/custom_agent.html>`_: How to create a custom agent (specifically, a custom LLM + prompt to drive that agent).
`Custom Agent <./examples/custom_agent.html>`_: How to create a custom agent (specifically, a custom LLM + prompt to drive that agent).
`Multi Input Tools <examples/multi_input_tool.html>`_: How to use a tool that requires multiple inputs with an agent.
`Multi Input Tools <./examples/multi_input_tool.html>`_: How to use a tool that requires multiple inputs with an agent.
`Search Tools <examples/search_tools.html>`_: How to use the different type of search tools that LangChain supports.
`Search Tools <./examples/search_tools.html>`_: How to use the different type of search tools that LangChain supports.
`Max Iterations <examples/max_iterations.html>`_: How to restrict an agent to a certain number of iterations.
`Max Iterations <./examples/max_iterations.html>`_: How to restrict an agent to a certain number of iterations.
The next set of examples are all end-to-end agents for specific applications.
In all examples there is an Agent with a particular set of tools.
- Tools: A tool can be anything that takes in a string and returns a string. This means that you can use both the primitives AND the chains found in `this <chains.html>`_ documentation. LangChain also provides a list of easily loadable tools. For detailed information on those, please see `this documentation <../explanation/tools.html>`_
- Agents: An agent uses an LLMChain to determine which tools to use. For a list of all available agent types, see `here <../explanation/agents.html>`_.
- Tools: A tool can be anything that takes in a string and returns a string. This means that you can use both the primitives AND the chains found in `this <../chains.html>`_ documentation. LangChain also provides a list of easily loadable tools. For detailed information on those, please see `this documentation <./tools.html>`_
- Agents: An agent uses an LLMChain to determine which tools to use. For a list of all available agent types, see `here <./agents.html>`_.
**MRKL**
@ -28,21 +28,21 @@ In all examples there is an Agent with a particular set of tools.
- **Agent used**: `zero-shot-react-description`
- `Paper <https://arxiv.org/pdf/2205.00445.pdf>`_
- **Note**: This is the most general purpose example, so if you are looking to use an agent with arbitrary tools, please start here.
- `Example Notebook <implementations/mrkl.html>`_
- `Example Notebook <./implementations/mrkl.html>`_
**Self-Ask-With-Search**
- **Tools used**: Search
- **Agent used**: `self-ask-with-search`
- `Paper <https://ofir.io/self-ask.pdf>`_
- `Example Notebook <implementations/self_ask_with_search.html>`_
- `Example Notebook <./implementations/self_ask_with_search.html>`_
**ReAct**
- **Tools used**: Wikipedia Docstore
- **Agent used**: `react-docstore`
- `Paper <https://arxiv.org/pdf/2210.03629.pdf>`_
- `Example Notebook <implementations/react.html>`_
- `Example Notebook <./implementations/react.html>`_
@ -51,11 +51,11 @@ In all examples there is an Agent with a particular set of tools.
:glob:
:hidden:
examples/*
./examples/*
.. toctree::
:maxdepth: 1
:glob:
:hidden:
implementations/*
./implementations/*

@ -7,13 +7,13 @@ LangChain provides a standard interface for Chains, as well as some common imple
The following sections of documentation are provided:
- `Getting Started <chains/getting_started.html>`_: A getting started guide for chains, to get you up and running quickly.
- `Getting Started <./chains/getting_started.html>`_: A getting started guide for chains, to get you up and running quickly.
- `Key Concepts <chains/key_concepts.html>`_: A conceptual guide going over the various concepts related to chains.
- `Key Concepts <./chains/key_concepts.html>`_: A conceptual guide going over the various concepts related to chains.
- `How-To Guides <chains/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use various types of chains.
- `How-To Guides <./chains/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use various types of chains.
- `Reference </reference/chains.html>`_: API reference documentation for all Chain classes.
- `Reference <../reference/modules/chains.html>`_: API reference documentation for all Chain classes.
@ -23,7 +23,7 @@ The following sections of documentation are provided:
:name: Chains
:hidden:
chains/getting_started.ipynb
chains/how_to_guides.rst
chains/key_concepts.rst
Reference</reference/modules/chains.rst>
./chains/getting_started.ipynb
./chains/how_to_guides.rst
./chains/key_concepts.rst
Reference<../reference/modules/chains.rst>

@ -5,15 +5,15 @@ A chain is made up of links, which can be either primitives or other chains.
Primitives can be either `prompts <../prompts.html>`_, `llms <../llms.html>`_, `utils <../utils.html>`_, or other chains.
The examples here are all end-to-end chains for working with documents.
`Question Answering <combine_docs_examples/question_answering.html>`_: A walkthrough of how to use LangChain for question answering over specific documents.
`Question Answering <./combine_docs_examples/question_answering.html>`_: A walkthrough of how to use LangChain for question answering over specific documents.
`Question Answering with Sources <combine_docs_examples/qa_with_sources.html>`_: A walkthrough of how to use LangChain for question answering (with sources) over specific documents.
`Question Answering with Sources <./combine_docs_examples/qa_with_sources.html>`_: A walkthrough of how to use LangChain for question answering (with sources) over specific documents.
`Summarization <combine_docs_examples/summarize.html>`_: A walkthrough of how to use LangChain for summarization over specific documents.
`Summarization <./combine_docs_examples/summarize.html>`_: A walkthrough of how to use LangChain for summarization over specific documents.
`Vector DB Question Answering <combine_docs_examples/vector_db_qa.html>`_: A walkthrough of how to use LangChain for question answering over a vector database.
`Vector DB Question Answering <./combine_docs_examples/vector_db_qa.html>`_: A walkthrough of how to use LangChain for question answering over a vector database.
`Vector DB Question Answering with Sources <combine_docs_examples/vector_db_qa_with_sources.html>`_: A walkthrough of how to use LangChain for question answering (with sources) over a vector database.
`Vector DB Question Answering with Sources <./combine_docs_examples/vector_db_qa_with_sources.html>`_: A walkthrough of how to use LangChain for question answering (with sources) over a vector database.
.. toctree::
@ -23,4 +23,4 @@ The examples here are all end-to-end chains for working with documents.
:name: combine_docs
:hidden:
combine_docs_examples/*
./combine_docs_examples/*

@ -9,19 +9,19 @@ The examples here are all generic end-to-end chains that are meant to be used to
- **Links Used**: PromptTemplate, LLM
- **Notes**: This chain is the simplest chain, and is widely used by almost every other chain. This chain takes arbitrary user input, creates a prompt with it from the PromptTemplate, passes that to the LLM, and then returns the output of the LLM as the final output.
- `Example Notebook <generic/llm_chain.html>`_
- `Example Notebook <./generic/llm_chain.html>`_
**Transformation Chain**
- **Links Used**: TransformationChain
- **Notes**: This notebook shows how to use the Transformation Chain, which takes an arbitrary python function and applies it to inputs/outputs of other chains.
- `Example Notebook <generic/transformation.html>`_
- `Example Notebook <./generic/transformation.html>`_
**Sequential Chain**
- **Links Used**: Sequential
- **Notes**: This notebook shows how to combine calling multiple other chains in sequence.
- `Example Notebook <generic/sequential_chains.html>`_
- `Example Notebook <./generic/sequential_chains.html>`_
.. toctree::
:maxdepth: 1
@ -30,4 +30,4 @@ The examples here are all generic end-to-end chains that are meant to be used to
:name: generic
:hidden:
generic/*
./generic/*

@ -6,15 +6,15 @@ Primitives can be either `prompts <../prompts.html>`_, `llms <../llms.html>`_, `
The examples here are all end-to-end chains for specific applications.
They are broken up into three categories:
1. `Generic Chains <generic_how_to.html>`_: Generic chains, that are meant to help build other chains rather than serve a particular purpose.
2. `CombineDocuments Chains <combine_docs_how_to.html>`_: Chains aimed at making it easy to work with documents (question answering, summarization, etc).
3. `Utility Chains <utility_how_to.html>`_: Chains consisting of an LLMChain interacting with a specific util.
1. `Generic Chains <./generic_how_to.html>`_: Generic chains, that are meant to help build other chains rather than serve a particular purpose.
2. `CombineDocuments Chains <./combine_docs_how_to.html>`_: Chains aimed at making it easy to work with documents (question answering, summarization, etc).
3. `Utility Chains <./utility_how_to.html>`_: Chains consisting of an LLMChain interacting with a specific util.
.. toctree::
:maxdepth: 1
:glob:
:hidden:
generic_how_to.rst
combine_docs_how_to.rst
utility_how_to.rst
./generic_how_to.rst
./combine_docs_how_to.rst
./utility_how_to.rst

@ -9,44 +9,44 @@ The examples here are all end-to-end chains for specific applications, focused o
- **Links Used**: Python REPL, LLMChain
- **Notes**: This chain takes user input (a math question), uses an LLMChain to convert it to python code snippet to run in the Python REPL, and then returns that as the result.
- `Example Notebook <examples/llm_math.html>`_
- `Example Notebook <./examples/llm_math.html>`_
**PAL**
- **Links Used**: Python REPL, LLMChain
- **Notes**: This chain takes user input (a reasoning question), uses an LLMChain to convert it to python code snippet to run in the Python REPL, and then returns that as the result.
- `Paper <https://arxiv.org/abs/2211.10435>`_
- `Example Notebook <examples/pal.html>`_
- `Example Notebook <./examples/pal.html>`_
**SQLDatabase Chain**
- **Links Used**: SQLDatabase, LLMChain
- **Notes**: This chain takes user input (a question), uses a first LLM chain to construct a SQL query to run against the SQL database, and then uses another LLMChain to take the results of that query and use it to answer the original question.
- `Example Notebook <examples/sqlite.html>`_
- `Example Notebook <./examples/sqlite.html>`_
**LLMBash Chain**
- **Links Used**: BashProcess, LLMChain
- **Notes**: This chain takes user input (a question), uses an LLM chain to convert it to a bash command to run in the terminal, and then returns that as the result.
- `Example Notebook <examples/llm_bash.html>`_
- `Example Notebook <./examples/llm_bash.html>`_
**LLMChecker Chain**
- **Links Used**: LLMChain
- **Notes**: This chain takes user input (a question), uses an LLM chain to answer that question, and then uses other LLMChains to self-check that answer.
- `Example Notebook <examples/llm_checker.html>`_
- `Example Notebook <./examples/llm_checker.html>`_
**LLMRequests Chain**
- **Links Used**: Requests, LLMChain
- **Notes**: This chain takes a URL and other inputs, uses Requests to get the data at that URL, and then passes that along with the other inputs into an LLMChain to generate a response. The example included shows how to ask a question to Google - it firsts constructs a Google url, then fetches the data there, then passes that data + the original question into an LLMChain to get an answer.
- `Example Notebook <examples/llm_requests.html>`_
- `Example Notebook <./examples/llm_requests.html>`_
**Moderation Chain**
- **Links Used**: LLMChain, ModerationChain
- **Notes**: This chain shows how to use OpenAI's content moderation endpoint to screen output, and shows how to connect this to an LLMChain.
- `Example Notebook <examples/moderation.html>`_
- `Example Notebook <./examples/moderation.html>`_
.. toctree::
@ -56,4 +56,4 @@ The examples here are all end-to-end chains for specific applications, focused o
:name: generic
:hidden:
examples/*
./examples/*

@ -7,13 +7,13 @@ you can interact with a variety of LLMs.
The following sections of documentation are provided:
- `Getting Started <llms/getting_started.html>`_: An overview of all the functionality the LangChain LLM class provides.
- `Getting Started <./llms/getting_started.html>`_: An overview of all the functionality the LangChain LLM class provides.
- `Key Concepts <llms/key_concepts.html>`_: A conceptual guide going over the various concepts related to LLMs.
- `Key Concepts <./llms/key_concepts.html>`_: A conceptual guide going over the various concepts related to LLMs.
- `How-To Guides <llms/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our LLM class, as well as how to integrate with various LLM providers.
- `How-To Guides <./llms/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our LLM class, as well as how to integrate with various LLM providers.
- `Reference </reference/modules/llms.html>`_: API reference documentation for all LLM classes.
- `Reference <../reference/modules/llms.html>`_: API reference documentation for all LLM classes.
.. toctree::
@ -21,7 +21,7 @@ The following sections of documentation are provided:
:name: LLMs
:hidden:
llms/key_concepts.md
llms/getting_started.ipynb
llms/how_to_guides.rst
Reference</reference/modules/llms.rst>
./llms/key_concepts.md
./llms/getting_started.ipynb
./llms/how_to_guides.rst
Reference<../reference/modules/llms.rst>

@ -3,11 +3,11 @@ Generic Functionality
The examples here all address certain "how-to" guides for working with LLMs.
`LLM Serialization <examples/llm_serialization.html>`_: A walkthrough of how to serialize LLMs to and from disk.
`LLM Serialization <./examples/llm_serialization.html>`_: A walkthrough of how to serialize LLMs to and from disk.
`LLM Caching <examples/llm_caching.html>`_: Covers different types of caches, and how to use a cache to save results of LLM calls.
`LLM Caching <./examples/llm_caching.html>`_: Covers different types of caches, and how to use a cache to save results of LLM calls.
`Custom LLM <examples/custom_llm.html>`_: How to create and use a custom LLM class, in case you have an LLM not from one of the standard providers (including one that you host yourself).
`Custom LLM <./examples/custom_llm.html>`_: How to create and use a custom LLM class, in case you have an LLM not from one of the standard providers (including one that you host yourself).
.. toctree::
@ -17,4 +17,4 @@ The examples here all address certain "how-to" guides for working with LLMs.
:name: Generic Functionality
:hidden:
examples/*
./examples/*

@ -5,13 +5,13 @@ The examples here all address certain "how-to" guides for working with LLMs.
They are split into two categories:
1. `Generic Functionality <generic_how_to.html>`_: Covering generic functionality all LLMs should have.
2. `Integrations <integrations.html>`_: Covering integrations with various LLM providers.
1. `Generic Functionality <./generic_how_to.html>`_: Covering generic functionality all LLMs should have.
2. `Integrations <./integrations.html>`_: Covering integrations with various LLM providers.
.. toctree::
:maxdepth: 1
:glob:
:hidden:
generic_how_to.rst
integrations.rst
./generic_how_to.rst
./integrations.rst

@ -3,11 +3,11 @@ Integrations
The examples here are all "how-to" guides for how to integrate with various LLM providers.
`Huggingface Hub <examples/huggingface_hub.html>`_: Covers how to connect to LLMs hosted on HuggingFace Hub.
`Huggingface Hub <./integrations/huggingface_hub.html>`_: Covers how to connect to LLMs hosted on HuggingFace Hub.
`Azure OpenAI <examples/azure_openai_example.html>`_: Covers how to connect to Azure-hosted OpenAI Models.
`Azure OpenAI <./integrations/azure_openai_example.html>`_: Covers how to connect to Azure-hosted OpenAI Models.
`Manifest <examples/manifest.html>`_: Covers how to utilize the Manifest wrapper.
`Manifest <./integrations/manifest.html>`_: Covers how to utilize the Manifest wrapper.
.. toctree::
@ -17,4 +17,4 @@ The examples here are all "how-to" guides for how to integrate with various LLM
:name: Specific LLM Integrations
:hidden:
integrations/*
./integrations/*

@ -9,11 +9,11 @@ The concept of “Memory” exists to do exactly that.
The following sections of documentation are provided:
- `Getting Started <memory/getting_started.html>`_: An overview of how to get started with different types of memory.
- `Getting Started <./memory/getting_started.html>`_: An overview of how to get started with different types of memory.
- `Key Concepts <memory/key_concepts.html>`_: A conceptual guide going over the various concepts related to memory.
- `Key Concepts <./memory/key_concepts.html>`_: A conceptual guide going over the various concepts related to memory.
- `How-To Guides <memory/how_to_guides.html>`_: A collection of how-to guides. These highlight how to work with different types of memory, as well as how to customize memory.
- `How-To Guides <./memory/how_to_guides.html>`_: A collection of how-to guides. These highlight how to work with different types of memory, as well as how to customize memory.
@ -22,6 +22,6 @@ The following sections of documentation are provided:
:caption: Memory
:name: Memory
memory/getting_started.ipynb
memory/key_concepts.rst
memory/how_to_guides.rst
./memory/getting_started.ipynb
./memory/key_concepts.rst
./memory/how_to_guides.rst

@ -3,17 +3,17 @@ How-To Guides
The examples here all highlight how to use memory in different ways.
`Adding Memory <examples/adding_memory.html>`_: How to add a memory component to any single input chain.
`Adding Memory <./examples/adding_memory.html>`_: How to add a memory component to any single input chain.
`ChatGPT Clone <examples/chatgpt_clone.html>`_: How to recreate ChatGPT with LangChain prompting + memory components.
`ChatGPT Clone <./examples/chatgpt_clone.html>`_: How to recreate ChatGPT with LangChain prompting + memory components.
`Adding Memory to Multi-Input Chain <examples/adding_memory_chain_multiple_inputs.html>`_: How to add a memory component to any multiple input chain.
`Adding Memory to Multi-Input Chain <./examples/adding_memory_chain_multiple_inputs.html>`_: How to add a memory component to any multiple input chain.
`Conversational Memory Customization <examples/conversational_customization.html>`_: How to customize existing conversation memory components.
`Conversational Memory Customization <./examples/conversational_customization.html>`_: How to customize existing conversation memory components.
`Custom Memory <examples/custom_memory.html>`_: How to write your own custom memory component.
`Custom Memory <./examples/custom_memory.html>`_: How to write your own custom memory component.
`Adding Memory to Agents <examples/agent_with_memory.html>`_: How to add a memory component to any agent.
`Adding Memory to Agents <./examples/agent_with_memory.html>`_: How to add a memory component to any agent.
.. toctree::
@ -21,4 +21,4 @@ The examples here all highlight how to use memory in different ways.
:glob:
:hidden:
examples/*
./examples/*

@ -7,13 +7,13 @@ LangChain provides several classes and functions to make constructing and workin
The following sections of documentation are provided:
- `Getting Started <prompts/getting_started.html>`_: An overview of all the functionality LangChain provides for working with and constructing prompts.
- `Getting Started <./prompts/getting_started.html>`_: An overview of all the functionality LangChain provides for working with and constructing prompts.
- `Key Concepts <prompts/key_concepts.html>`_: A conceptual guide going over the various concepts related to prompts.
- `Key Concepts <./prompts/key_concepts.html>`_: A conceptual guide going over the various concepts related to prompts.
- `How-To Guides <prompts/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our prompt class.
- `How-To Guides <./prompts/how_to_guides.html>`_: A collection of how-to guides. These highlight how to accomplish various objectives with our prompt class.
- `Reference </reference/prompts.html>`_: API reference documentation for all prompt classes.
- `Reference <../reference/prompts.html>`_: API reference documentation for all prompt classes.
@ -24,7 +24,7 @@ The following sections of documentation are provided:
:name: Prompts
:hidden:
prompts/getting_started.md
prompts/key_concepts.md
prompts/how_to_guides.rst
Reference</reference/prompts.rst>
./prompts/getting_started.md
./prompts/key_concepts.md
./prompts/how_to_guides.rst
Reference<../reference/prompts.rst>

@ -1,19 +1,19 @@
How-To Guides
=============
If you're new to the library, you may want to start with the `Quickstart <getting_started.html>`_.
If you're new to the library, you may want to start with the `Quickstart <./getting_started.html>`_.
The user guide here shows more advanced workflows and how to use the library in different ways.
`Custom Prompt Template <examples/custom_prompt_template.html>`_: How to create and use a custom PromptTemplate, the logic that decides how input variables get formatted into a prompt.
`Custom Prompt Template <./examples/custom_prompt_template.html>`_: How to create and use a custom PromptTemplate, the logic that decides how input variables get formatted into a prompt.
`Custom Example Selector <examples/custom_example_selector.html>`_: How to create and use a custom ExampleSelector (the class responsible for choosing which examples to use in a prompt).
`Custom Example Selector <./examples/custom_example_selector.html>`_: How to create and use a custom ExampleSelector (the class responsible for choosing which examples to use in a prompt).
`Few Shot Prompt Templates <examples/few_shot_examples.html>`_: How to include examples in the prompt.
`Few Shot Prompt Templates <./examples/few_shot_examples.html>`_: How to include examples in the prompt.
`Prompt Serialization <examples/prompt_serialization.html>`_: A walkthrough of how to serialize prompts to and from disk.
`Prompt Serialization <./examples/prompt_serialization.html>`_: A walkthrough of how to serialize prompts to and from disk.
`Few Shot Prompt Examples <examples/few_shot_examples.html>`_: Examples of Few Shot Prompt Templates.
`Few Shot Prompt Examples <./examples/few_shot_examples.html>`_: Examples of Few Shot Prompt Templates.
@ -27,8 +27,8 @@ The user guide here shows more advanced workflows and how to use the library in
:glob:
:hidden:
examples/custom_prompt_template.md
examples/custom_example_selector.md
examples/few_shot_examples.ipynb
examples/prompt_serialization.ipynb
examples/few_shot_examples_data.ipynb
./examples/custom_prompt_template.md
./examples/custom_example_selector.md
./examples/few_shot_examples.ipynb
./examples/prompt_serialization.ipynb
./examples/few_shot_examples_data.ipynb

@ -4,7 +4,6 @@
A prompt is the input to a language model. It is a string of text that is used to generate a response from the language model.
## Prompt Templates
`PromptTemplates` are a way to create prompts in a reproducible way. They contain a template string, and a set of input variables. The template string can be formatted with the input variables to generate a prompt. The template string often contains instructions to the language model, a few shot examples, and a question to the language model.
@ -26,7 +25,6 @@ Capital:
"""
```
### Input Variables
Input variables are the variables that are used to fill in the template string. In the example above, the input variable is `country`.
@ -57,20 +55,21 @@ Capital: Ottawa
```
To learn more about how to provide few shot examples, see [Few Shot Examples](examples/few_shot_examples.ipynb).
<!-- TODO(shreya): Add correct link here. -->
## Example selection
If there are multiple examples that are relevant to a prompt, it is important to select the most relevant examples. Generally, the quality of the response from the LLM can be significantly improved by selecting the most relevant examples. This is because the language model will be able to better understand the context of the prompt, and also potentially learn failure modes to avoid.
To help the user with selecting the most relevant examples, we provide example selectors that select the most relevant based on different criteria, such as length, semantic similarity, etc. The example selector takes in a list of examples and returns a list of selected examples, formatted as a string. The user can also provide their own example selector. To learn more about example selectors, see [Example Selection](example_selection.md).
<!-- TODO(shreya): Add correct link here. -->
## Serialization
To make it easy to share `PromptTemplates`, we provide a `serialize` method that returns a JSON string. The JSON string can be saved to a file, and then loaded back into a `PromptTemplate` using the `deserialize` method. This allows users to share `PromptTemplates` with others, and also to save them for later use.
To learn more about serialization, see [Serialization](examples/prompt_serialization.ipynb).
<!-- TODO(shreya): Provide correct link. -->

@ -7,11 +7,11 @@ and goes over how to easily use them from within LangChain.
The following sections of documentation are provided:
- `Key Concepts <utils/key_concepts.html>`_: A conceptual guide going over the various types of utils.
- `Key Concepts <./utils/key_concepts.html>`_: A conceptual guide going over the various types of utils.
- `How-To Guides <utils/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use various types of utils.
- `How-To Guides <./utils/how_to_guides.html>`_: A collection of how-to guides. These highlight how to use various types of utils.
- `Reference </reference/utils.html>`_: API reference documentation for all Util classes.
- `Reference <../reference/utils.html>`_: API reference documentation for all Util classes.
@ -21,6 +21,6 @@ The following sections of documentation are provided:
:name: Utils
:hidden:
utils/key_concepts.md
utils/how_to_guides.rst
Reference </reference/utils.rst>
./utils/key_concepts.md
./utils/how_to_guides.rst
Reference <../reference/utils.rst>

@ -5,13 +5,13 @@ There are a lot of different utilities that LangChain provides integrations for
These guides go over how to use them.
The utilities here are all utilities that make it easier to work with documents.
`Text Splitters <combine_docs_examples/textsplitter.html>`_: A walkthrough of how to split large documents up into smaller, more manageable pieces of text.
`Text Splitters <./combine_docs_examples/textsplitter.html>`_: A walkthrough of how to split large documents up into smaller, more manageable pieces of text.
`VectorStores <combine_docs_examples/vectorstores.html>`_: A walkthrough of vectorstore functionalities, and different types of vectorstores, that LangChain supports.
`VectorStores <./combine_docs_examples/vectorstores.html>`_: A walkthrough of vectorstore functionalities, and different types of vectorstores, that LangChain supports.
`Embeddings <combine_docs_examples/embeddings.html>`_: A walkthrough of embedding functionalities, and different types of embeddings, that LangChain supports.
`Embeddings <./combine_docs_examples/embeddings.html>`_: A walkthrough of embedding functionalities, and different types of embeddings, that LangChain supports.
`HyDE <combine_docs_examples/hyde.html>`_: How to use Hypothetical Document Embeddings, a novel way of constructing embeddings for document retrieval systems.
`HyDE <./combine_docs_examples/hyde.html>`_: How to use Hypothetical Document Embeddings, a novel way of constructing embeddings for document retrieval systems.
.. toctree::
:maxdepth: 1

@ -5,15 +5,15 @@ There are a lot of different utilities that LangChain provides integrations for
These guides go over how to use them.
The utilities listed here are all generic utilities.
`Bash <examples/bash.html>`_: How to use a bash wrapper to execute bash commands.
`Bash <./examples/bash.html>`_: How to use a bash wrapper to execute bash commands.
`Python REPL <examples/python.html>`_: How to use a Python wrapper to execute python commands.
`Python REPL <./examples/python.html>`_: How to use a Python wrapper to execute python commands.
`Requests <examples/requests.html>`_: How to use a requests wrapper to interact with the web.
`Requests <./examples/requests.html>`_: How to use a requests wrapper to interact with the web.
`Google Search <examples/google_search.html>`_: How to use the google search wrapper to search the web.
`Google Search <./examples/google_search.html>`_: How to use the google search wrapper to search the web.
`SerpAPI <examples/serpapi.html>`_: How to use the SerpAPI wrapper to search the web.
`SerpAPI <./examples/serpapi.html>`_: How to use the SerpAPI wrapper to search the web.
.. toctree::
@ -21,4 +21,4 @@ The utilities listed here are all generic utilities.
:glob:
:hidden:
examples/*
./examples/*

@ -5,13 +5,13 @@ There are a lot of different utilities that LangChain provides integrations for
These guides go over how to use them.
These can largely be grouped into two categories:
1. `Generic Utilities <generic_how_to.html>`_: Generic utilities, including search, python REPLs, etc.
2. `Utilities for working with Documents <combine_docs_how_to.html>`_: Utilities aimed at making it easy to work with documents (text splitting, embeddings, vectorstores, etc).
1. `Generic Utilities <./generic_how_to.html>`_: Generic utilities, including search, python REPLs, etc.
2. `Utilities for working with Documents <./combine_docs_how_to.html>`_: Utilities aimed at making it easy to work with documents (text splitting, embeddings, vectorstores, etc).
.. toctree::
:maxdepth: 1
:glob:
:hidden:
generic_how_to.rst
combine_docs_how_to.rst
./generic_how_to.rst
./combine_docs_how_to.rst

@ -7,8 +7,8 @@ Full documentation on all methods, classes, and APIs in LangChain.
.. toctree::
:maxdepth: 1
reference/prompts.rst
LLMs<reference/modules/llms>
reference/utils.rst
Chains<reference/modules/chains>
Agents<reference/modules/agents>
./reference/prompts.rst
LLMs<./reference/modules/llms>
./reference/utils.rst
Chains<./reference/modules/chains>
Agents<./reference/modules/agents>

@ -5,11 +5,11 @@ Generative models are notoriously hard to evaluate with traditional metrics. One
The examples here all highlight how to use language models to assist in evaluation of themselves.
`Question Answering <evaluation/question_answering.html>`_: An overview of LLMs aimed at evaluating question answering systems in general.
`Question Answering <./evaluation/question_answering.html>`_: An overview of LLMs aimed at evaluating question answering systems in general.
`Data Augmented Question Answering <evaluation/data_augmented_question_answering.html>`_: An end-to-end example of evaluating a question answering system focused on a specific document (a VectorDBQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
`Data Augmented Question Answering <./evaluation/data_augmented_question_answering.html>`_: An end-to-end example of evaluating a question answering system focused on a specific document (a VectorDBQAChain to be precise). This example highlights how to use LLMs to come up with question/answer examples to evaluate over, and then highlights how to use LLMs to evaluate performance on those generated examples.
`Hugging Face Datasets <evaluation/huggingface_datasets.html>`_: Covers an example of loading and using a dataset from Hugging Face for evaluation.
`Hugging Face Datasets <./evaluation/huggingface_datasets.html>`_: Covers an example of loading and using a dataset from Hugging Face for evaluation.
.. toctree::

poetry.lock

@ -1,5 +1,17 @@
# This file is automatically @generated by Poetry and should not be changed by hand.
[[package]]
name = "alabaster"
version = "0.7.12"
description = "A configurable sidebar-enabled Sphinx theme"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "alabaster-0.7.12-py2.py3-none-any.whl", hash = "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359"},
{file = "alabaster-0.7.12.tar.gz", hash = "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"},
]
[[package]]
name = "anyio"
version = "3.6.2"
@ -170,6 +182,42 @@ files = [
[package.dependencies]
cryptography = ">=3.2"
[[package]]
name = "autodoc-pydantic"
version = "1.8.0"
description = "Seamlessly integrate pydantic models in your Sphinx documentation."
category = "dev"
optional = false
python-versions = ">=3.6,<4.0.0"
files = [
{file = "autodoc_pydantic-1.8.0-py3-none-any.whl", hash = "sha256:f1bf9318f37369fec906ab523ebe65c1894395a6fc859dbc6fd02ffd90d3242f"},
{file = "autodoc_pydantic-1.8.0.tar.gz", hash = "sha256:77da1cbbe4434fa9963f85a1555c63afff9a4acec06b318dc4f54c4f28a04f2c"},
]
[package.dependencies]
pydantic = ">=1.5"
Sphinx = ">=3.4"
[package.extras]
dev = ["coverage (>=5,<6)", "flake8 (>=3,<4)", "pytest (>=6,<7)", "sphinx-copybutton (>=0.4,<0.5)", "sphinx-rtd-theme (>=1.0,<2.0)", "sphinx-tabs (>=3,<4)", "sphinxcontrib-mermaid (>=0.7,<0.8)", "tox (>=3,<4)"]
docs = ["sphinx-copybutton (>=0.4,<0.5)", "sphinx-rtd-theme (>=1.0,<2.0)", "sphinx-tabs (>=3,<4)", "sphinxcontrib-mermaid (>=0.7,<0.8)"]
test = ["coverage (>=5,<6)", "pytest (>=6,<7)"]
[[package]]
name = "babel"
version = "2.11.0"
description = "Internationalization utilities"
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
{file = "Babel-2.11.0-py3-none-any.whl", hash = "sha256:1ad3eca1c885218f6dce2ab67291178944f810a10a9b5f3cb8382a5a232b64fe"},
{file = "Babel-2.11.0.tar.gz", hash = "sha256:5ef4b3226b0180dedded4229651c8b0e1a3a6a2837d45a073272f313e4cf97f6"},
]
[package.dependencies]
pytz = ">=2015.7"
[[package]]
name = "backcall"
version = "0.2.0"
@ -728,7 +776,7 @@ name = "dnspython"
version = "2.2.1"
description = "DNS toolkit"
category = "main"
optional = true
optional = false
python-versions = ">=3.6,<4.0"
files = [
{file = "dnspython-2.2.1-py3-none-any.whl", hash = "sha256:a851e51367fb93e9e1361732c1d60dab63eff98712e503ea7d92e6eccb109b4f"},
@ -743,6 +791,18 @@ idna = ["idna (>=2.1,<4.0)"]
trio = ["trio (>=0.14,<0.20)"]
wmi = ["wmi (>=1.5.1,<2.0.0)"]
[[package]]
name = "docutils"
version = "0.17.1"
description = "Docutils -- Python Documentation Utilities"
category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "docutils-0.17.1-py2.py3-none-any.whl", hash = "sha256:cf316c8370a737a022b72b56874f6602acf974a37a9fba42ec2876387549fc61"},
{file = "docutils-0.17.1.tar.gz", hash = "sha256:686577d2e4c32380bb50cbb22f575ed742d58168cee37e99117a854bcd88f125"},
]
[[package]]
name = "duckdb"
version = "0.6.1"
@ -1244,6 +1304,18 @@ files = [
{file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]
[[package]]
name = "imagesize"
version = "1.4.1"
description = "Getting image size from png/jpeg/jpeg2000/gif file"
category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
{file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
]
[[package]]
name = "importlib-metadata"
version = "5.2.0"
@ -1545,6 +1617,34 @@ nbconvert = "*"
notebook = "*"
qtconsole = "*"
[[package]]
name = "jupyter-cache"
version = "0.5.0"
description = "A defined interface for working with a cache of jupyter notebooks."
category = "dev"
optional = false
python-versions = "~=3.7"
files = [
{file = "jupyter-cache-0.5.0.tar.gz", hash = "sha256:87408030a4c8c14fe3f8fe62e6ceeb24c84e544c7ced20bfee45968053d07801"},
{file = "jupyter_cache-0.5.0-py3-none-any.whl", hash = "sha256:642e434b9b75c4b94dc8346eaf5a639c8926a0673b87e5e8ef6460d5cf2c9516"},
]
[package.dependencies]
attrs = "*"
click = "*"
importlib-metadata = "*"
nbclient = ">=0.2,<0.6"
nbformat = "*"
pyyaml = "*"
sqlalchemy = ">=1.3.12,<1.5"
tabulate = "*"
[package.extras]
cli = ["click-log"]
code-style = ["pre-commit (>=2.12,<3.0)"]
rtd = ["jupytext", "myst-nb (>=0.12.3,<0.13.0)", "nbdime", "sphinx-book-theme (>=0.1.1,<0.2.0)", "sphinx-copybutton"]
testing = ["coverage", "ipykernel", "jupytext", "matplotlib", "nbdime", "nbformat (>=5.1)", "numpy", "pandas", "pytest (>=6,<7)", "pytest-cov", "pytest-regressions", "sympy"]
[[package]]
name = "jupyter-client"
version = "7.4.8"
@ -1730,6 +1830,39 @@ files = [
[package.extras]
data = ["language-data (>=1.1,<2.0)"]
[[package]]
name = "linkchecker"
version = "10.2.1"
description = "check links in web documents or full websites"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "LinkChecker-10.2.1-py3-none-any.whl", hash = "sha256:5438496290826f5e2f4a2041f11482608378150b6c2d05ca8f94f460b7cb7c9e"},
{file = "LinkChecker-10.2.1.tar.gz", hash = "sha256:97eae069ccfe892a18e380c7f4762dfe3f352e87c442ef6124e8c60b887cddcd"},
]
[package.dependencies]
beautifulsoup4 = ">=4.8.1"
dnspython = ">=2.0"
requests = ">=2.20"
[[package]]
name = "livereload"
version = "2.6.3"
description = "Python LiveReload is an awesome tool for web developers"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "livereload-2.6.3-py2.py3-none-any.whl", hash = "sha256:ad4ac6f53b2d62bb6ce1a5e6e96f1f00976a32348afedcb4b6d68df2a1d346e4"},
{file = "livereload-2.6.3.tar.gz", hash = "sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869"},
]
[package.dependencies]
six = "*"
tornado = {version = "*", markers = "python_version > \"2.7\""}
[[package]]
name = "loguru"
version = "0.6.0"
@ -1782,6 +1915,7 @@ files = [
{file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"},
{file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"},
{file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"},
{file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"},
{file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"},
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"},
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"},
@ -1791,6 +1925,7 @@ files = [
{file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"},
{file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"},
{file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"},
{file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"},
{file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"},
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"},
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"},
@ -1864,6 +1999,31 @@ all = ["Flask (>=2.1.2)", "accelerate (>=0.10.0)", "autopep8 (>=1.6.0)", "black
api = ["Flask (>=2.1.2)", "accelerate (>=0.10.0)", "torch (>=1.8.0)", "transformers (>=4.20.0)"]
dev = ["autopep8 (>=1.6.0)", "black (>=22.3.0)", "docformatter (>=1.4)", "flake8 (>=4.0.0)", "flake8-docstrings (>=1.6.0)", "isort (>=5.9.3)", "mypy (>=0.950)", "nbsphinx (>=0.8.0)", "pep8-naming (>=0.12.1)", "pre-commit (>=2.14.0)", "pytest (>=7.0.0)", "pytest-cov (>=3.0.0)", "python-dotenv (>=0.20.0)", "recommonmark (>=0.7.1)", "sphinx-autobuild", "sphinx-rtd-theme (>=0.5.1)", "twine", "types-PyYAML (>=6.0.7)", "types-protobuf (>=3.19.21)", "types-python-dateutil (>=2.8.16)", "types-redis (>=4.2.6)", "types-requests (>=2.27.29)", "types-setuptools (>=57.4.17)"]
[[package]]
name = "markdown-it-py"
version = "2.1.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "markdown-it-py-2.1.0.tar.gz", hash = "sha256:cf7e59fed14b5ae17c0006eff14a2d9a00ed5f3a846148153899a0224e2c07da"},
{file = "markdown_it_py-2.1.0-py3-none-any.whl", hash = "sha256:93de681e5c021a432c63147656fe21790bc01231e0cd2da73626f1aa3ac0fe27"},
]
[package.dependencies]
mdurl = ">=0.1,<1.0"
[package.extras]
benchmarking = ["psutil", "pytest", "pytest-benchmark (>=3.2,<4.0)"]
code-style = ["pre-commit (==2.6)"]
compare = ["commonmark (>=0.9.1,<0.10.0)", "markdown (>=3.3.6,<3.4.0)", "mistletoe (>=0.8.1,<0.9.0)", "mistune (>=2.0.2,<2.1.0)", "panflute (>=2.1.3,<2.2.0)"]
linkify = ["linkify-it-py (>=1.0,<2.0)"]
plugins = ["mdit-py-plugins"]
profiling = ["gprof2dot"]
rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
name = "markupsafe"
version = "2.1.1"
@ -1941,6 +2101,38 @@ files = [
{file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
]
[[package]]
name = "mdit-py-plugins"
version = "0.3.3"
description = "Collection of plugins for markdown-it-py"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "mdit-py-plugins-0.3.3.tar.gz", hash = "sha256:5cfd7e7ac582a594e23ba6546a2f406e94e42eb33ae596d0734781261c251260"},
{file = "mdit_py_plugins-0.3.3-py3-none-any.whl", hash = "sha256:36d08a29def19ec43acdcd8ba471d3ebab132e7879d442760d963f19913e04b9"},
]
[package.dependencies]
markdown-it-py = ">=1.0.0,<3.0.0"
[package.extras]
code-style = ["pre-commit"]
rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"]
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
name = "mdurl"
version = "0.1.2"
description = "Markdown URL utilities"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
{file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
]
[[package]]
name = "mistune"
version = "2.0.4"
@ -2054,6 +2246,62 @@ files = [
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
]
[[package]]
name = "myst-nb"
version = "0.17.1"
description = "A Jupyter Notebook Sphinx reader built on top of the MyST markdown parser."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "myst-nb-0.17.1.tar.gz", hash = "sha256:14df725f3e00cb5efef4f863bf0c273490c8c662dfee39ed8a7b374bf2561933"},
{file = "myst_nb-0.17.1-py3-none-any.whl", hash = "sha256:c268d11aa4936b4bdd18b3b2cd5baa14fdb80c80d2983c02329ade52010f6260"},
]
[package.dependencies]
importlib_metadata = "*"
ipykernel = "*"
ipython = "*"
jupyter-cache = ">=0.5.0,<0.6.0"
myst-parser = ">=0.18.0,<0.19.0"
nbclient = "*"
nbformat = ">=5.0,<6.0"
pyyaml = "*"
sphinx = ">=4,<6"
typing-extensions = "*"
[package.extras]
code-style = ["pre-commit"]
rtd = ["alabaster", "altair", "bokeh", "coconut (>=1.4.3,<1.5.0)", "ipykernel (>=5.5,<6.0)", "ipywidgets", "jupytext (>=1.11.2,<1.12.0)", "matplotlib", "numpy", "pandas", "plotly", "sphinx-book-theme (>=0.3.0,<0.4.0)", "sphinx-copybutton", "sphinx-design (>=0.1.0,<0.2.0)", "sphinxcontrib-bibtex", "sympy"]
testing = ["beautifulsoup4", "coverage (>=6.4,<7.0)", "ipykernel (>=5.5,<6.0)", "ipython (!=8.1.0,<8.5)", "ipywidgets (>=8)", "jupytext (>=1.11.2,<1.12.0)", "matplotlib (>=3.5.3,<3.6)", "nbdime", "numpy", "pandas", "pytest (>=7.1,<8.0)", "pytest-cov (>=3.0,<4.0)", "pytest-param-files (>=0.3.3,<0.4.0)", "pytest-regressions", "sympy (>=1.10.1)"]
[[package]]
name = "myst-parser"
version = "0.18.1"
description = "An extended commonmark compliant parser, with bridges to docutils & sphinx."
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "myst-parser-0.18.1.tar.gz", hash = "sha256:79317f4bb2c13053dd6e64f9da1ba1da6cd9c40c8a430c447a7b146a594c246d"},
{file = "myst_parser-0.18.1-py3-none-any.whl", hash = "sha256:61b275b85d9f58aa327f370913ae1bec26ebad372cc99f3ab85c8ec3ee8d9fb8"},
]
[package.dependencies]
docutils = ">=0.15,<0.20"
jinja2 = "*"
markdown-it-py = ">=1.0.0,<3.0.0"
mdit-py-plugins = ">=0.3.1,<0.4.0"
pyyaml = "*"
sphinx = ">=4,<6"
typing-extensions = "*"
[package.extras]
code-style = ["pre-commit (>=2.12,<3.0)"]
linkify = ["linkify-it-py (>=1.0,<2.0)"]
rtd = ["ipython", "sphinx-book-theme", "sphinx-design", "sphinxcontrib.mermaid (>=0.7.1,<0.8.0)", "sphinxext-opengraph (>=0.6.3,<0.7.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"]
testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=6,<7)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx (<5.2)", "sphinx-pytest"]
[[package]]
name = "nbclassic"
version = "0.4.8"
@ -2092,26 +2340,25 @@ test = ["coverage", "nbval", "pytest", "pytest-cov", "pytest-playwright", "pytes
[[package]]
name = "nbclient"
version = "0.7.2"
version = "0.5.13"
description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor."
category = "dev"
optional = false
python-versions = ">=3.7.0"
files = [
{file = "nbclient-0.7.2-py3-none-any.whl", hash = "sha256:d97ac6257de2794f5397609df754fcbca1a603e94e924eb9b99787c031ae2e7c"},
{file = "nbclient-0.7.2.tar.gz", hash = "sha256:884a3f4a8c4fc24bb9302f263e0af47d97f0d01fe11ba714171b320c8ac09547"},
{file = "nbclient-0.5.13-py3-none-any.whl", hash = "sha256:47ac905af59379913c1f8f541098d2550153cf8dc58553cbe18c702b181518b0"},
{file = "nbclient-0.5.13.tar.gz", hash = "sha256:40c52c9b5e3c31faecaee69f202b3f53e38d7c1c563de0fadde9d7eda0fdafe8"},
]
[package.dependencies]
jupyter-client = ">=6.1.12"
jupyter-core = ">=4.12,<5.0.0 || >=5.1.0"
nbformat = ">=5.1"
traitlets = ">=5.3"
jupyter-client = ">=6.1.5"
nbformat = ">=5.0"
nest-asyncio = "*"
traitlets = ">=5.0.0"
[package.extras]
dev = ["pre-commit"]
docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme"]
test = ["ipykernel", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"]
sphinx = ["Sphinx (>=1.7)", "mock", "moto", "myst-parser", "sphinx-book-theme"]
test = ["black", "check-manifest", "flake8", "ipykernel", "ipython (<8.0.0)", "ipywidgets (<8.0.0)", "mypy", "pip (>=18.1)", "pytest (>=4.1)", "pytest-asyncio", "pytest-cov (>=2.6.1)", "setuptools (>=38.6.0)", "twine (>=1.11.0)", "wheel (>=0.31.0)", "xmltodict"]
[[package]]
name = "nbconvert"
@ -2174,6 +2421,26 @@ traitlets = ">=5.1"
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt"]
test = ["pep440", "pre-commit", "pytest", "testpath"]
[[package]]
name = "nbsphinx"
version = "0.8.11"
description = "Jupyter Notebook Tools for Sphinx"
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
{file = "nbsphinx-0.8.11-py3-none-any.whl", hash = "sha256:e5c6c76b12fbc425d5538d4b8c0d6fb0c134c28459d4a1b54f41d704b34b4bd1"},
{file = "nbsphinx-0.8.11.tar.gz", hash = "sha256:abe18c04b33d9bcdfb3d66f1195f6b0d51eeca463ecb07f6061de497e43316e4"},
]
[package.dependencies]
docutils = "*"
jinja2 = "*"
nbconvert = "!=5.4"
nbformat = "*"
sphinx = ">=1.8"
traitlets = ">=5"
[[package]]
name = "nest-asyncio"
version = "1.5.6"
@ -2862,6 +3129,30 @@ typing-extensions = ">=4.1.0"
dotenv = ["python-dotenv (>=0.10.4)"]
email = ["email-validator (>=1.0.3)"]
[[package]]
name = "pydata-sphinx-theme"
version = "0.8.1"
description = "Bootstrap-based Sphinx theme from the PyData community"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "pydata_sphinx_theme-0.8.1-py3-none-any.whl", hash = "sha256:af2c99cb0b43d95247b1563860942ba75d7f1596360594fce510caaf8c4fcc16"},
{file = "pydata_sphinx_theme-0.8.1.tar.gz", hash = "sha256:96165702253917ece13dd895e23b96ee6dce422dcc144d560806067852fe1fed"},
]
[package.dependencies]
beautifulsoup4 = "*"
docutils = "!=0.17.0"
packaging = "*"
sphinx = ">=3.5.4,<5"
[package.extras]
coverage = ["codecov", "pydata-sphinx-theme[test]", "pytest-cov"]
dev = ["nox", "pre-commit", "pydata-sphinx-theme[coverage]", "pyyaml"]
doc = ["jupyter_sphinx", "myst-parser", "numpy", "numpydoc", "pandas", "plotly", "pytest", "pytest-regressions", "sphinx-sitemap", "sphinxext-rediraffe", "xarray"]
test = ["pydata-sphinx-theme[doc]", "pytest"]
[[package]]
name = "pydocstyle"
version = "6.1.1"
@ -3085,6 +3376,18 @@ files = [
{file = "python_json_logger-2.0.4-py3-none-any.whl", hash = "sha256:3b03487b14eb9e4f77e4fc2a023358b5394b82fd89cecf5586259baed57d8c6f"},
]
[[package]]
name = "pytz"
version = "2022.7"
description = "World timezone definitions, modern and historical"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "pytz-2022.7-py2.py3-none-any.whl", hash = "sha256:93007def75ae22f7cd991c84e02d434876818661f8df9ad5df9e950ff4e52cfd"},
{file = "pytz-2022.7.tar.gz", hash = "sha256:7ccfae7b4b2c067464a6733c6261673fdb8fd1be905460396b97a073e9fa683a"},
]
[[package]]
name = "pywin32"
version = "305"
@ -3703,6 +4006,235 @@ files = [
{file = "spacy_loggers-1.0.4-py3-none-any.whl", hash = "sha256:e050bf2e63208b2f096b777e494971c962ad7c1dc997641c8f95c622550044ae"},
]
[[package]]
name = "sphinx"
version = "4.5.0"
description = "Python documentation generator"
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
{file = "Sphinx-4.5.0-py3-none-any.whl", hash = "sha256:ebf612653238bcc8f4359627a9b7ce44ede6fdd75d9d30f68255c7383d3a6226"},
{file = "Sphinx-4.5.0.tar.gz", hash = "sha256:7bf8ca9637a4ee15af412d1a1d9689fec70523a68ca9bb9127c2f3eeb344e2e6"},
]
[package.dependencies]
alabaster = ">=0.7,<0.8"
babel = ">=1.3"
colorama = {version = ">=0.3.5", markers = "sys_platform == \"win32\""}
docutils = ">=0.14,<0.18"
imagesize = "*"
importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
Jinja2 = ">=2.3"
packaging = "*"
Pygments = ">=2.0"
requests = ">=2.5.0"
snowballstemmer = ">=1.1"
sphinxcontrib-applehelp = "*"
sphinxcontrib-devhelp = "*"
sphinxcontrib-htmlhelp = ">=2.0.0"
sphinxcontrib-jsmath = "*"
sphinxcontrib-qthelp = "*"
sphinxcontrib-serializinghtml = ">=1.1.5"
[package.extras]
docs = ["sphinxcontrib-websupport"]
lint = ["docutils-stubs", "flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "types-requests", "types-typed-ast"]
test = ["cython", "html5lib", "pytest", "pytest-cov", "typed-ast"]
[[package]]
name = "sphinx-autobuild"
version = "2021.3.14"
description = "Rebuild Sphinx documentation on changes, with live-reload in the browser."
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
{file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"},
{file = "sphinx_autobuild-2021.3.14-py3-none-any.whl", hash = "sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac"},
]
[package.dependencies]
colorama = "*"
livereload = "*"
sphinx = "*"
[package.extras]
test = ["pytest", "pytest-cov"]
[[package]]
name = "sphinx-book-theme"
version = "0.3.3"
description = "A clean book theme for scientific explanations and documentation with Sphinx"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "sphinx_book_theme-0.3.3-py3-none-any.whl", hash = "sha256:9685959dbbb492af005165ef1b9229fdd5d5431580ac181578beae3b4d012d91"},
{file = "sphinx_book_theme-0.3.3.tar.gz", hash = "sha256:0ec36208ff14c6d6bf8aee1f1f8268e0c6e2bfa3cef6e41143312b25275a6217"},
]
[package.dependencies]
pydata-sphinx-theme = ">=0.8.0,<0.9.0"
pyyaml = "*"
sphinx = ">=3,<5"
[package.extras]
code-style = ["pre-commit (>=2.7.0,<2.8.0)"]
doc = ["ablog (>=0.10.13,<0.11.0)", "folium", "ipywidgets", "matplotlib", "myst-nb (>=0.13.2,<0.14.0)", "nbclient", "numpy", "numpydoc", "pandas", "plotly", "sphinx (>=4.0,<5.0)", "sphinx-copybutton", "sphinx-design", "sphinx-examples", "sphinx-tabs", "sphinx-thebe (>=0.1.1)", "sphinx-togglebutton (>=0.2.1)", "sphinxcontrib-bibtex (>=2.2,<3.0)", "sphinxcontrib-youtube", "sphinxext-opengraph"]
test = ["beautifulsoup4 (>=4.6.1,<5)", "coverage", "myst-nb (>=0.13.2,<0.14.0)", "pytest (>=6.0.1,<6.1.0)", "pytest-cov", "pytest-regressions (>=2.0.1,<2.1.0)", "sphinx_thebe"]
[[package]]
name = "sphinx-panels"
version = "0.6.0"
description = "A sphinx extension for creating panels in a grid layout."
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "sphinx-panels-0.6.0.tar.gz", hash = "sha256:d36dcd26358117e11888f7143db4ac2301ebe90873ac00627bf1fe526bf0f058"},
{file = "sphinx_panels-0.6.0-py3-none-any.whl", hash = "sha256:bd64afaf85c07f8096d21c8247fc6fd757e339d1be97832c8832d6ae5ed2e61d"},
]
[package.dependencies]
docutils = "*"
sphinx = ">=2,<5"
[package.extras]
code-style = ["pre-commit (>=2.7.0,<2.8.0)"]
live-dev = ["sphinx-autobuild", "web-compile (>=0.2.0,<0.3.0)"]
testing = ["pytest (>=6.0.1,<6.1.0)", "pytest-regressions (>=2.0.1,<2.1.0)"]
themes = ["myst-parser (>=0.12.9,<0.13.0)", "pydata-sphinx-theme (>=0.4.0,<0.5.0)", "sphinx-book-theme (>=0.0.36,<0.1.0)", "sphinx-rtd-theme"]
[[package]]
name = "sphinx-rtd-theme"
version = "1.1.1"
description = "Read the Docs theme for Sphinx"
category = "dev"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
files = [
{file = "sphinx_rtd_theme-1.1.1-py2.py3-none-any.whl", hash = "sha256:31faa07d3e97c8955637fc3f1423a5ab2c44b74b8cc558a51498c202ce5cbda7"},
{file = "sphinx_rtd_theme-1.1.1.tar.gz", hash = "sha256:6146c845f1e1947b3c3dd4432c28998a1693ccc742b4f9ad7c63129f0757c103"},
]
[package.dependencies]
docutils = "<0.18"
sphinx = ">=1.6,<6"
[package.extras]
dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client", "wheel"]
[[package]]
name = "sphinx-typlog-theme"
version = "0.8.0"
description = "A typlog Sphinx theme"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "sphinx_typlog_theme-0.8.0-py2.py3-none-any.whl", hash = "sha256:b0ab728ab31d071523af0229bcb6427a13493958b3fc2bb7db381520fab77de4"},
{file = "sphinx_typlog_theme-0.8.0.tar.gz", hash = "sha256:61dbf97b1fde441bd03a5409874571e229898b67fb3080400837b8f4cee46659"},
]
[package.extras]
dev = ["livereload", "sphinx"]
[[package]]
name = "sphinxcontrib-applehelp"
version = "1.0.2"
description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books"
category = "dev"
optional = false
python-versions = ">=3.5"
files = [
{file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"},
{file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"},
]
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-devhelp"
version = "1.0.2"
description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
category = "dev"
optional = false
python-versions = ">=3.5"
files = [
{file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
{file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
]
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-htmlhelp"
version = "2.0.0"
description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
category = "dev"
optional = false
python-versions = ">=3.6"
files = [
{file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"},
{file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"},
]
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["html5lib", "pytest"]
[[package]]
name = "sphinxcontrib-jsmath"
version = "1.0.1"
description = "A sphinx extension which renders display math in HTML via JavaScript"
category = "dev"
optional = false
python-versions = ">=3.5"
files = [
{file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
{file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
]
[package.extras]
test = ["flake8", "mypy", "pytest"]
[[package]]
name = "sphinxcontrib-qthelp"
version = "1.0.3"
description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
category = "dev"
optional = false
python-versions = ">=3.5"
files = [
{file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
{file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
]
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]
[[package]]
name = "sphinxcontrib-serializinghtml"
version = "1.1.5"
description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
category = "dev"
optional = false
python-versions = ">=3.5"
files = [
{file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
{file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
]
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]
[[package]]
name = "sqlalchemy"
version = "1.4.45"
@ -3759,7 +4291,7 @@ greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platfo
[package.extras]
aiomysql = ["aiomysql", "greenlet (!=0.4.17)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"]
asyncio = ["greenlet (!=0.4.17)"]
asyncmy = ["asyncmy (>=0.2.3,!=0.2.4)", "greenlet (!=0.4.17)"]
mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2)"]
@ -3769,14 +4301,14 @@ mssql-pyodbc = ["pyodbc"]
mypy = ["mypy (>=0.910)", "sqlalchemy2-stubs"]
mysql = ["mysqlclient (>=1.4.0)", "mysqlclient (>=1.4.0,<2)"]
mysql-connector = ["mysql-connector-python"]
oracle = ["cx_oracle (>=7)", "cx_oracle (>=7,<8)"]
oracle = ["cx-oracle (>=7)", "cx-oracle (>=7,<8)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0)"]
postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
pymysql = ["pymysql", "pymysql (<1)"]
sqlcipher = ["sqlcipher3_binary"]
sqlcipher = ["sqlcipher3-binary"]
[[package]]
name = "sqlitedict"
@ -3850,6 +4382,21 @@ pure-eval = "*"
[package.extras]
tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
[[package]]
name = "tabulate"
version = "0.9.0"
description = "Pretty-print tabular data"
category = "dev"
optional = false
python-versions = ">=3.7"
files = [
{file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
{file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
]
[package.extras]
widechars = ["wcwidth"]
[[package]]
name = "terminado"
version = "0.17.1"
@ -4047,6 +4594,18 @@ dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
[[package]]
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
category = "dev"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
files = [
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
]
[[package]]
name = "tomli"
version = "2.0.1"
@ -4585,4 +5144,4 @@ llms = ["manifest-ml", "torch", "transformers"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "e74acf12a1ee1db03d9ab506880937a0843f057f0478e06c54562379ce651088"
content-hash = "e6d702a8244b8646db76252fedde20b938ebc33214ac8d912681b190e769be0f"

@ -30,6 +30,19 @@ pinecone-client = {version = "^2", optional = true}
weaviate-client = {version = "^3", optional = true}
google-api-python-client = {version = "2.70.0", optional = true}
[tool.poetry.group.docs.dependencies]
autodoc_pydantic = "^1.8.0"
myst_parser = "^0.18.1"
nbsphinx = "^0.8.9"
sphinx = "^4.5.0"
sphinx-autobuild = "^2021.3.14"
sphinx_book_theme = "^0.3.3"
sphinx_rtd_theme = "^1.0.0"
sphinx-typlog-theme = "^0.8.0"
sphinx-panels = "^0.6.0"
toml = "^0.10.2"
myst-nb = "^0.17.1"
linkchecker = "^10.2.1"
[tool.poetry.group.test.dependencies]
pytest = "^7.2.0"
