From 114d64d4a7936de08ee2f1958c43203fb68f43a7 Mon Sep 17 00:00:00 2001
From: Leonid Ganeline
Date: Tue, 5 Mar 2024 17:32:59 -0800
Subject: [PATCH] docs: `providers` update (#18527)

Added missed pages. Added links and descriptions. Formatted to the consistent form.
---
 .../docs/integrations/callbacks/fiddler.ipynb   |  8 ++---
 docs/docs/integrations/providers/argilla.mdx    | 26 +++++++----------
 .../providers/comet_tracking.ipynb              | 29 +++++++++++++++++--
 docs/docs/integrations/providers/confident.mdx  | 20 ++++++++-----
 docs/docs/integrations/providers/fiddler.md     | 27 +++++++++++++++++
 5 files changed, 80 insertions(+), 30 deletions(-)
 create mode 100644 docs/docs/integrations/providers/fiddler.md

diff --git a/docs/docs/integrations/callbacks/fiddler.ipynb b/docs/docs/integrations/callbacks/fiddler.ipynb
index 560e62c17c..55d246aa91 100644
--- a/docs/docs/integrations/callbacks/fiddler.ipynb
+++ b/docs/docs/integrations/callbacks/fiddler.ipynb
@@ -5,9 +5,9 @@
    "id": "0cebf93b",
    "metadata": {},
    "source": [
-    "## Fiddler Langchain integration Quick Start Guide\n",
+    "# Fiddler\n",
     "\n",
-    "Fiddler is the pioneer in enterprise Generative and Predictive system ops, offering a unified platform that enables Data Science, MLOps, Risk, Compliance, Analytics, and other LOB teams to monitor, explain, analyze, and improve ML deployments at enterprise scale. "
+    ">[Fiddler](https://www.fiddler.ai/) is the pioneer in enterprise Generative and Predictive system ops, offering a unified platform that enables Data Science, MLOps, Risk, Compliance, Analytics, and other LOB teams to monitor, explain, analyze, and improve ML deployments at enterprise scale. "
    ]
   },
   {
@@ -25,7 +25,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# langchain langchain-community langchain-openai fiddler-client"
+    "#!pip install langchain langchain-community langchain-openai fiddler-client"
    ]
   },
   {
@@ -207,7 +207,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.0"
+   "version": "3.10.12"
   }
  },
  "nbformat": 4,
diff --git a/docs/docs/integrations/providers/argilla.mdx b/docs/docs/integrations/providers/argilla.mdx
index 209a69647b..fc4232e0ec 100644
--- a/docs/docs/integrations/providers/argilla.mdx
+++ b/docs/docs/integrations/providers/argilla.mdx
@@ -1,29 +1,25 @@
 # Argilla
 
-![Argilla - Open-source data platform for LLMs](https://argilla.io/og.png)
-
->[Argilla](https://argilla.io/) is an open-source data curation platform for LLMs.
-> Using Argilla, everyone can build robust language models through faster data curation
-> using both human and machine feedback. We provide support for each step in the MLOps cycle,
-> from data labelling to model monitoring.
+>[Argilla](https://argilla.io/) is an open-source data curation platform for LLMs.
+> Using `Argilla`, everyone can build robust language models through faster data curation
+> using both human and machine feedback. `Argilla` provides support for each step in the MLOps cycle,
+> from data labeling to model monitoring.
 
 ## Installation and Setup
 
-First, you'll need to install the `argilla` Python package as follows:
+Get your Argilla API key from your [Argilla server](https://docs.argilla.io/en/latest/getting_started/quickstart.html#Running-Argilla-Quickstart).
+
+Install the Python package:
 
 ```bash
-pip install argilla --upgrade
+pip install argilla
 ```
 
-If you already have an Argilla Server running, then you're good to go; but if
-you don't, follow the next steps to install it.
-
-If you don't you can refer to [Argilla - 🚀 Quickstart](https://docs.argilla.io/en/latest/getting_started/quickstart.html#Running-Argilla-Quickstart) to deploy Argilla either on HuggingFace Spaces, locally, or on a server.
+## Callbacks
 
-## Tracking
-
-See a [usage example of `ArgillaCallbackHandler`](/docs/integrations/callbacks/argilla).
 
 ```python
 from langchain.callbacks import ArgillaCallbackHandler
 ```
+
+See an [example](/docs/integrations/callbacks/argilla).
diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb b/docs/docs/integrations/providers/comet_tracking.ipynb
index df66bda746..c102b833d8 100644
--- a/docs/docs/integrations/providers/comet_tracking.ipynb
+++ b/docs/docs/integrations/providers/comet_tracking.ipynb
@@ -4,7 +4,10 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Comet"
+    "# Comet\n",
+    "\n",
+    ">[Comet](https://www.comet.com/) machine learning platform integrates with your existing infrastructure\n",
+    ">and tools so you can manage, visualize, and optimize models—from training runs to production monitoring."
    ]
   },
   {
@@ -318,6 +321,26 @@
     "print(synopsis_chain.apply(test_prompts, callbacks=callbacks))\n",
     "comet_callback.flush_tracker(synopsis_chain, finish=True)"
    ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Callback Tracer\n",
+    "\n",
+    "There is another integration with Comet:\n",
+    "\n",
+    "See an [example](/docs/integrations/callbacks/comet_tracing).\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.callbacks.tracers.comet import CometTracer"
+   ]
   }
  ],
 "metadata": {
@@ -336,9 +359,9 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.3"
+   "version": "3.10.12"
   }
  },
 "nbformat": 4,
- "nbformat_minor": 2
+ "nbformat_minor": 4
 }
diff --git a/docs/docs/integrations/providers/confident.mdx b/docs/docs/integrations/providers/confident.mdx
index 9823e0c624..51de573421 100644
--- a/docs/docs/integrations/providers/confident.mdx
+++ b/docs/docs/integrations/providers/confident.mdx
@@ -1,22 +1,26 @@
 # Confident AI
 
-![Confident - Unit Testing for LLMs](https://github.com/confident-ai/deepeval)
-
->[DeepEval](https://confident-ai.com) package for unit testing LLMs.
-> Using Confident, everyone can build robust language models through faster iterations
-> using both unit testing and integration testing. We provide support for each step in the iteration
+>[Confident AI](https://confident-ai.com) is the creator of `DeepEval`.
+>
+>[DeepEval](https://github.com/confident-ai/deepeval) is a package for unit testing LLMs.
+> Using `DeepEval`, everyone can build robust language models through faster iterations
+> using both unit testing and integration testing. `DeepEval` provides support for each step in the iteration
 > from synthetic data creation to testing.
 
 ## Installation and Setup
 
-First, you'll need to install the `DeepEval` Python package as follows:
+You need to get the [DeepEval API credentials](https://app.confident-ai.com).
+
+You need to install the `DeepEval` Python package:
 
 ```bash
 pip install deepeval
 ```
 
-Afterwards, you can get started in as little as a few lines of code.
+## Callbacks
+
+See an [example](/docs/integrations/callbacks/confident).
 
 ```python
-from langchain.callbacks import DeepEvalCallback
+from langchain.callbacks.confident_callback import DeepEvalCallbackHandler
 ```
diff --git a/docs/docs/integrations/providers/fiddler.md b/docs/docs/integrations/providers/fiddler.md
new file mode 100644
index 0000000000..87bffdff9c
--- /dev/null
+++ b/docs/docs/integrations/providers/fiddler.md
@@ -0,0 +1,27 @@
+# Fiddler
+
+>[Fiddler](https://www.fiddler.ai/) provides a unified platform to monitor, explain, analyze,
+> and improve ML deployments at an enterprise scale.
+
+## Installation and Setup
+
+Set up your model [with Fiddler](https://demo.fiddler.ai):
+
+* The URL you're using to connect to Fiddler
+* Your organization ID
+* Your authorization token
+
+Install the Python package:
+
+```bash
+pip install fiddler-client
+```
+
+## Callbacks
+
+
+```python
+from langchain_community.callbacks.fiddler_callback import FiddlerCallbackHandler
+```
+
+See an [example](/docs/integrations/callbacks/fiddler).
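
Note: every provider page touched by this patch wires its integration the same way: construct the provider's callback handler and pass it through `callbacks=` on an LLM or chain. The sketch below illustrates that pattern with the `FiddlerCallbackHandler` import added in `fiddler.md`. The constructor argument names (`url`, `org`, `project`, `model`, `api_key`) and all of the values are assumptions for illustration only and are not part of this patch; check /docs/integrations/callbacks/fiddler for the exact signature.

```python
# Illustrative sketch only: the handler argument names and values below are
# assumptions, not taken from this patch; see /docs/integrations/callbacks/fiddler.
# Assumes OPENAI_API_KEY is set in the environment.
from langchain_community.callbacks.fiddler_callback import FiddlerCallbackHandler
from langchain_openai import OpenAI

fiddler_handler = FiddlerCallbackHandler(
    url="https://demo.fiddler.ai",  # the URL you're using to connect to Fiddler
    org="my_org",                   # your organization ID (placeholder)
    project="langchain_demo",       # hypothetical project name
    model="llm_monitoring",         # hypothetical model name
    api_key="YOUR_AUTH_TOKEN",      # your authorization token
)

# A handler registered on the LLM fires for every subsequent call,
# so each invocation is reported to Fiddler.
llm = OpenAI(temperature=0, callbacks=[fiddler_handler])
llm.invoke("How far is the Moon from the Earth?")
```

The same pattern applies to `ArgillaCallbackHandler`, `DeepEvalCallbackHandler`, and `CometTracer`: instantiate the handler with the provider's credentials and include it in the `callbacks` list.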