diff --git a/docs/docs/integrations/llms/huggingface_pipelines.ipynb b/docs/docs/integrations/llms/huggingface_pipelines.ipynb
index a377d9d008..2d74d44542 100644
--- a/docs/docs/integrations/llms/huggingface_pipelines.ipynb
+++ b/docs/docs/integrations/llms/huggingface_pipelines.ipynb
@@ -330,7 +330,7 @@
    "id": "da9a9239",
    "metadata": {},
    "source": [
-    "For more information refer to [OpenVINO LLM guide](https://docs.openvino.ai/2024/openvino-workflow/generative-ai-models-guide.html)."
+    "For more information, refer to the [OpenVINO LLM guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html) and the [OpenVINO Local Pipelines notebook](./openvino.ipynb)."
    ]
   }
  ],
diff --git a/docs/docs/integrations/llms/openvino.ipynb b/docs/docs/integrations/llms/openvino.ipynb
index 1309d65b90..7c6f63e870 100644
--- a/docs/docs/integrations/llms/openvino.ipynb
+++ b/docs/docs/integrations/llms/openvino.ipynb
@@ -7,7 +7,7 @@
    "source": [
     "# OpenVINO Local Pipelines\n",
     "\n",
-    "[OpenVINO™](https://github.com/openvinotoolkit/openvino) is an open-source toolkit for optimizing and deploying AI inference. The OpenVINO™ Runtime can infer models on different hardware [devices](https://github.com/openvinotoolkit/openvino?tab=readme-ov-file#supported-hardware-matrix). It can help to boost deep learning performance in computer vision, automatic speech recognition, natural language processing and other common tasks.\n",
+    "[OpenVINO™](https://github.com/openvinotoolkit/openvino) is an open-source toolkit for optimizing and deploying AI inference. OpenVINO™ Runtime runs the same optimized model across a variety of hardware [devices](https://github.com/openvinotoolkit/openvino?tab=readme-ov-file#supported-hardware-matrix), accelerating deep learning performance in use cases such as LLMs, computer vision, automatic speech recognition, and more.\n",
     "\n",
     "OpenVINO models can be run locally through the `HuggingFacePipeline` [class](https://python.langchain.com/docs/integrations/llms/huggingface_pipeline). To deploy a model with OpenVINO, you can specify the `backend=\"openvino\"` parameter to trigger OpenVINO as backend inference framework."
    ]
@@ -73,7 +73,7 @@
    "id": "00104b27-0c15-4a97-b198-4512337ee211",
    "metadata": {},
    "source": [
-    "They can also be loaded by passing in an existing `optimum-intel` pipeline directly"
+    "They can also be loaded by passing in an existing [`optimum-intel`](https://huggingface.co/docs/optimum/main/en/intel/inference) pipeline directly."
    ]
   },
   {
@@ -221,7 +221,15 @@
    "id": "da9a9239",
    "metadata": {},
    "source": [
-    "For more information refer to [OpenVINO LLM guide](https://docs.openvino.ai/2024/openvino-workflow/generative-ai-models-guide.html)."
+    "For more information, refer to:\n",
+    "\n",
+    "* [OpenVINO LLM guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html)\n",
+    "\n",
+    "* [OpenVINO Documentation](https://docs.openvino.ai/2024/home.html)\n",
+    "\n",
+    "* [OpenVINO Get Started Guide](https://www.intel.com/content/www/us/en/content-details/819067/openvino-get-started-guide.html)\n",
+    "\n",
+    "* [RAG Notebook with LangChain](https://github.com/openvinotoolkit/openvino_notebooks/tree/master/notebooks/llm-chatbot)"
    ]
   }
  ],
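For context, the usage these docs describe (selecting OpenVINO as the inference backend of LangChain's `HuggingFacePipeline`) looks roughly like the sketch below. It assumes `langchain-community` and the `optimum-intel` OpenVINO extras are installed; the model id, device, and generation settings are illustrative placeholders, not values taken from the notebooks.

```python
# Minimal sketch: load a local Hugging Face model with OpenVINO as the
# backend inference framework, per the backend="openvino" parameter
# documented in the notebooks touched by this diff.
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline

ov_llm = HuggingFacePipeline.from_model_id(
    model_id="gpt2",  # placeholder: any text-generation model id
    task="text-generation",
    backend="openvino",  # route inference through optimum-intel / OpenVINO
    model_kwargs={"device": "CPU", "ov_config": {"PERFORMANCE_HINT": "LATENCY"}},
    pipeline_kwargs={"max_new_tokens": 64},
)

print(ov_llm.invoke("OpenVINO is"))
```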