From 5d2d80a9a8e1c5d948fd94f67a28cd165d625ab3 Mon Sep 17 00:00:00 2001 From: BeatrixCohere <128378696+BeatrixCohere@users.noreply.github.com> Date: Mon, 26 Feb 2024 02:10:09 +0000 Subject: [PATCH] docs: Add Cohere examples in documentation (#17794) - Description: Add cohere examples to documentation - Issue:N/A - Dependencies: N/A --------- Co-authored-by: Bagatur --- docs/docs/get_started/quickstart.mdx | 54 ++++++++++++++++--- .../data_connection/text_embedding/index.mdx | 38 +++++++++++++ docs/docs/modules/model_io/quick_start.mdx | 33 +++++++++++- 3 files changed, 118 insertions(+), 7 deletions(-) diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx index 86a6d05191..37da0128ae 100644 --- a/docs/docs/get_started/quickstart.mdx +++ b/docs/docs/get_started/quickstart.mdx @@ -65,10 +65,10 @@ We will link to relevant docs. ## LLM Chain -For this getting started guide, we will provide two options: using OpenAI (a popular model available via API) or using a local open source model. +We'll show how to use models available via API, like OpenAI and Cohere, and local open source models, using integrations like Ollama. - + First we'll need to import the LangChain x OpenAI integration package. @@ -99,7 +99,7 @@ llm = ChatOpenAI(openai_api_key="...") ``` - + [Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally. @@ -112,6 +112,37 @@ Then, make sure the Ollama server is running. After that, you can do: ```python from langchain_community.llms import Ollama llm = Ollama(model="llama2") +``` + + + + +First we'll need to import the Cohere SDK package. + +```shell +pip install cohere +``` + +Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running: + +```shell +export COHERE_API_KEY="..." 
+```
+
+We can then initialize the model:
+
+```python
+from langchain_community.chat_models import ChatCohere
+
+llm = ChatCohere()
+```
+
+If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class:
+
+```python
+from langchain_community.chat_models import ChatCohere
+
+llm = ChatCohere(cohere_api_key="...")
 ```
 
 
@@ -200,10 +231,10 @@ docs = loader.load()
 
 Next, we need to index it into a vectorstore. This requires a few components, namely an [embedding model](/docs/modules/data_connection/text_embedding) and a [vectorstore](/docs/modules/data_connection/vectorstores).
 
-For embedding models, we once again provide examples for accessing via OpenAI or via local models.
+For embedding models, we once again provide examples for accessing via API or by running local models.
 
 
-
+
 
 Make sure you have the `langchain_openai` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
 
@@ -214,7 +245,7 @@ embeddings = OpenAIEmbeddings()
 ```
 
 
-
+
 
 Make sure you have Ollama running (same set up as with the LLM).
 
@@ -224,6 +255,17 @@ from langchain_community.embeddings import OllamaEmbeddings
 
 embeddings = OllamaEmbeddings()
 ```
 
+
+
+Make sure you have the `cohere` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
+
+```python
+from langchain_community.embeddings import CohereEmbeddings
+
+embeddings = CohereEmbeddings()
+```
+
+
 
 Now, we can use this embedding model to ingest documents into a vectorstore. 
diff --git a/docs/docs/modules/data_connection/text_embedding/index.mdx b/docs/docs/modules/data_connection/text_embedding/index.mdx
index b014e01743..c7da825666 100644
--- a/docs/docs/modules/data_connection/text_embedding/index.mdx
+++ b/docs/docs/modules/data_connection/text_embedding/index.mdx
@@ -17,6 +17,11 @@ The base Embeddings class in LangChain provides two methods: one for embedding d
 
 ### Setup
 
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+
+
 To start we'll need to install the OpenAI partner package:
 
 ```bash
@@ -44,6 +49,39 @@ from langchain_openai import OpenAIEmbeddings
 
 embeddings_model = OpenAIEmbeddings()
 ```
 
+
+
+To start we'll need to install the Cohere SDK package:
+
+```bash
+pip install cohere
+```
+
+Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running:
+
+```shell
+export COHERE_API_KEY="..."
+```
+
+If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere Embeddings class:
+
+```python
+from langchain_community.embeddings import CohereEmbeddings
+
+embeddings_model = CohereEmbeddings(cohere_api_key="...")
+```
+
+Otherwise you can initialize without any params:
+```python
+from langchain_community.embeddings import CohereEmbeddings
+
+embeddings_model = CohereEmbeddings()
+```
+
+
+
 ### `embed_documents`
 #### Embed list of texts
diff --git a/docs/docs/modules/model_io/quick_start.mdx b/docs/docs/modules/model_io/quick_start.mdx
index 62a78413d2..191117418a 100644
--- a/docs/docs/modules/model_io/quick_start.mdx
+++ b/docs/docs/modules/model_io/quick_start.mdx
@@ -46,7 +46,7 @@ llm = ChatOpenAI(openai_api_key="...")
 ```
 
 
-
+
 
 [Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.
 
@@ -62,6 +62,37 @@ from langchain_community.chat_models import ChatOllama
 
 llm = Ollama(model="llama2")
 chat_model = ChatOllama()
+```
+
+
+
+First we'll need to install the Cohere SDK package:
+
+```shell
+pip install cohere
+```
+
+Accessing the API requires an API key, which you can get by creating an account and heading [here](https://dashboard.cohere.com/api-keys). Once we have a key we'll want to set it as an environment variable by running:
+
+```shell
+export COHERE_API_KEY="..."
+```
+
+We can then initialize the model:
+
+```python
+from langchain_community.chat_models import ChatCohere
+
+llm = ChatCohere()
+```
+
+If you'd prefer not to set an environment variable you can pass the key in directly via the `cohere_api_key` named parameter when initiating the Cohere LLM class:
+
+```python
+from langchain_community.chat_models import ChatCohere
+
+llm = ChatCohere(cohere_api_key="...")
 ```