docs: Google Cloud Documentation Cleanup (#12224)

- Move Document AI provider to the Google provider page
- Change Vertex AI Matching Engine to Vector Search
- Change references from GCP to Google Cloud
- Add Gmail chat loader to Google provider page
- Change Serper page title to "Serper - Google Search API", since Serper is not a Google product
Holt Skinner 9 months ago committed by GitHub
parent 286a29a49e
commit e7e670805c

@@ -5,7 +5,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# GCP Vertex AI \n",
+"# Google Cloud Vertex AI \n",
 "\n",
 "Note: This is separate from the Google PaLM integration. Google has chosen to offer an enterprise version of PaLM through GCP, and this supports the models made available through there. \n",
 "\n",
@@ -31,7 +31,7 @@
 },
 "outputs": [],
 "source": [
-"#!pip install langchain google-cloud-aiplatform"
+"#!pip install langchain google-cloud-aiplatform\n"
 ]
 },
 {
@@ -41,7 +41,7 @@
 "outputs": [],
 "source": [
 "from langchain.chat_models import ChatVertexAI\n",
-"from langchain.prompts import ChatPromptTemplate"
+"from langchain.prompts import ChatPromptTemplate\n"
 ]
 },
 {
@@ -50,7 +50,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"chat = ChatVertexAI()"
+"chat = ChatVertexAI()\n"
 ]
 },
 {
@@ -64,7 +64,7 @@
 "prompt = ChatPromptTemplate.from_messages(\n",
 "    [(\"system\", system), (\"human\", human)]\n",
 ")\n",
-"messages = prompt.format_messages()"
+"messages = prompt.format_messages()\n"
 ]
 },
 {
@@ -84,7 +84,7 @@
 }
 ],
 "source": [
-"chat(messages)"
+"chat(messages)\n"
 ]
 },
 {
@@ -104,7 +104,7 @@
 "human = \"{text}\"\n",
 "prompt = ChatPromptTemplate.from_messages(\n",
 "    [(\"system\", system), (\"human\", human)]\n",
-")"
+")\n"
 ]
 },
 {
@@ -127,7 +127,7 @@
 "chain = prompt | chat\n",
 "chain.invoke(\n",
 "    {\"input_language\": \"English\", \"output_language\": \"Japanese\", \"text\": \"I love programming\"}\n",
-")"
+")\n"
 ]
 },
 {
@@ -161,7 +161,7 @@
 "    model_name=\"codechat-bison\",\n",
 "    max_output_tokens=1000,\n",
 "    temperature=0.5\n",
-")"
+")\n"
 ]
 },
 {
@@ -189,7 +189,7 @@
 ],
 "source": [
 "# For simple string in string out usage, we can use the `predict` method:\n",
-"print(chat.predict(\"Write a Python function to identify all prime numbers\"))"
+"print(chat.predict(\"Write a Python function to identify all prime numbers\"))\n"
 ]
 },
 {
@@ -209,7 +209,7 @@
 "source": [
 "import asyncio\n",
 "# import nest_asyncio\n",
-"# nest_asyncio.apply()"
+"# nest_asyncio.apply()\n"
 ]
 },
 {
@@ -237,7 +237,7 @@
 "    top_k=40,\n",
 ")\n",
 "\n",
-"asyncio.run(chat.agenerate([messages]))"
+"asyncio.run(chat.agenerate([messages]))\n"
 ]
 },
 {
@@ -257,7 +257,7 @@
 }
 ],
 "source": [
-"asyncio.run(chain.ainvoke({\"input_language\": \"English\", \"output_language\": \"Sanskrit\", \"text\": \"I love programming\"}))"
+"asyncio.run(chain.ainvoke({\"input_language\": \"English\", \"output_language\": \"Sanskrit\", \"text\": \"I love programming\"}))\n"
 ]
 },
 {
@@ -275,7 +275,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import sys"
+"import sys\n"
 ]
 },
 {
@@ -310,7 +310,7 @@
 "messages = prompt.format_messages()\n",
 "for chunk in chat.stream(messages):\n",
 "    sys.stdout.write(chunk.content)\n",
-"    sys.stdout.flush()"
+"    sys.stdout.flush()\n"
 ]
 }
],

@@ -4,7 +4,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# GCP Vertex AI\n",
+"# Google Cloud Vertex AI\n",
 "\n",
 "**Note:** This is separate from the `Google PaLM` integration, it exposes [Vertex AI PaLM API](https://cloud.google.com/vertex-ai/docs/generative-ai/learn/overview) on `Google Cloud`. \n"
 ]
@@ -41,7 +41,7 @@
 },
 "outputs": [],
 "source": [
-"#!pip install langchain google-cloud-aiplatform"
+"#!pip install langchain google-cloud-aiplatform\n"
 ]
 },
 {
@@ -50,7 +50,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.llms import VertexAI"
+"from langchain.llms import VertexAI\n"
 ]
 },
 {
@@ -74,7 +74,7 @@
 ],
 "source": [
 "llm = VertexAI()\n",
-"print(llm(\"What are some of the pros and cons of Python as a programming language?\"))"
+"print(llm(\"What are some of the pros and cons of Python as a programming language?\"))\n"
 ]
 },
 {
@@ -90,7 +90,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.prompts import PromptTemplate"
+"from langchain.prompts import PromptTemplate\n"
 ]
 },
 {
@@ -102,7 +102,7 @@
 "template = \"\"\"Question: {question}\n",
 "\n",
 "Answer: Let's think step by step.\"\"\"\n",
-"prompt = PromptTemplate.from_template(template)"
+"prompt = PromptTemplate.from_template(template)\n"
 ]
 },
 {
@@ -111,7 +111,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"chain = prompt | llm"
+"chain = prompt | llm\n"
 ]
 },
 {
@@ -130,7 +130,7 @@
 ],
 "source": [
 "question = \"Who was the president in the year Justin Beiber was born?\"\n",
-"print(chain.invoke({\"question\": question}))"
+"print(chain.invoke({\"question\": question}))\n"
 ]
 },
 {
@@ -159,7 +159,7 @@
 },
 "outputs": [],
 "source": [
-"llm = VertexAI(model_name=\"code-bison\", max_output_tokens=1000, temperature=0.3)"
+"llm = VertexAI(model_name=\"code-bison\", max_output_tokens=1000, temperature=0.3)\n"
 ]
 },
 {
@@ -168,7 +168,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"question = \"Write a python function that checks if a string is a valid email address\""
+"question = \"Write a python function that checks if a string is a valid email address\"\n"
 ]
 },
 {
@@ -193,7 +193,7 @@
 }
 ],
 "source": [
-"print(llm(question))"
+"print(llm(question))\n"
 ]
 },
 {
@@ -223,7 +223,7 @@
 ],
 "source": [
 "result = llm.generate([question])\n",
-"result.generations"
+"result.generations\n"
 ]
 },
 {
@@ -243,7 +243,7 @@
 "source": [
 "# If running in a Jupyter notebook you'll need to install nest_asyncio\n",
 "\n",
-"# !pip install nest_asyncio"
+"# !pip install nest_asyncio\n"
 ]
 },
 {
@@ -254,7 +254,7 @@
 "source": [
 "import asyncio\n",
 "# import nest_asyncio\n",
-"# nest_asyncio.apply()"
+"# nest_asyncio.apply()\n"
 ]
 },
 {
@@ -274,7 +274,7 @@
 }
 ],
 "source": [
-"asyncio.run(llm.agenerate([question]))"
+"asyncio.run(llm.agenerate([question]))\n"
 ]
 },
 {
@@ -292,7 +292,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import sys"
+"import sys\n"
 ]
 },
 {
@@ -337,7 +337,7 @@
 "source": [
 "for chunk in llm.stream(question):\n",
 "    sys.stdout.write(chunk)\n",
-"    sys.stdout.flush()"
+"    sys.stdout.flush()\n"
 ]
 },
 {
@@ -360,7 +360,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.llms import VertexAIModelGarden"
+"from langchain.llms import VertexAIModelGarden\n"
 ]
 },
 {
@@ -372,7 +372,7 @@
 "llm = VertexAIModelGarden(\n",
 "    project=\"YOUR PROJECT\",\n",
 "    endpoint_id=\"YOUR ENDPOINT_ID\"\n",
-")"
+")\n"
 ]
 },
 {
@@ -381,7 +381,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"print(llm(\"What is the meaning of life?\"))"
+"print(llm(\"What is the meaning of life?\"))\n"
 ]
 },
 {
@@ -397,7 +397,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"prompt = PromptTemplate.from_template(\"What is the meaning of {thing}?\")"
+"prompt = PromptTemplate.from_template(\"What is the meaning of {thing}?\")\n"
 ]
 },
 {
@@ -407,7 +407,7 @@
 "outputs": [],
 "source": [
 "chian = prompt | llm\n",
-"print(chain.invoke({\"thing\": \"life\"}))"
+"print(chain.invoke({\"thing\": \"life\"}))\n"
 ]
 }
],

@@ -30,7 +30,6 @@ Access PaLM chat models like `chat-bison` and `codechat-bison` via Google Cloud.
 from langchain.chat_models import ChatVertexAI
 ```
 ## Document Loader
 ### Google BigQuery
@@ -51,7 +50,7 @@ from langchain.document_loaders import BigQueryLoader
 ### Google Cloud Storage
->[Google Cloud Storage](https://en.wikipedia.org/wiki/Google_Cloud_Storage) is a managed service for storing unstructured data.
+> [Google Cloud Storage](https://en.wikipedia.org/wiki/Google_Cloud_Storage) is a managed service for storing unstructured data.
 First, we need to install `google-cloud-storage` python package.
@@ -74,11 +73,11 @@ from langchain.document_loaders import GCSFileLoader
 ### Google Drive
->[Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.
+> [Google Drive](https://en.wikipedia.org/wiki/Google_Drive) is a file storage and synchronization service developed by Google.
 Currently, only `Google Docs` are supported.
-First, we need to install several python package.
+First, we need to install several python packages.
 ```bash
 pip install google-api-python-client google-auth-httplib2 google-auth-oauthlib
@@ -91,10 +90,11 @@ from langchain.document_loaders import GoogleDriveLoader
 ```
 ## Vector Store
-### Google Vertex AI MatchingEngine
-> [Google Vertex AI Matching Engine](https://cloud.google.com/vertex-ai/docs/matching-engine/overview) provides
-> the industry's leading high-scale low latency vector database. These vector databases are commonly
+### Google Vertex AI Vector Search
+> [Google Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/matching-engine/overview),
+> formerly known as Vertex AI Matching Engine, provides the industry's leading high-scale
+> low latency vector database. These vector databases are commonly
 > referred to as vector similarity-matching or an approximate nearest neighbor (ANN) service.
 We need to install several python packages.
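For context on the renamed section, here is a minimal sketch of how the vector store described above is typically wired up once an index and endpoint already exist. The IDs below are placeholders, and the keyword names are assumed to match the current `MatchingEngine.from_components` signature:

```python
from langchain.embeddings import VertexAIEmbeddings
from langchain.vectorstores import MatchingEngine

# Placeholder project/index/endpoint IDs; the index and endpoint must already
# be created and deployed (see the Vector Search notebook in this PR).
vector_store = MatchingEngine.from_components(
    project_id="YOUR_PROJECT_ID",
    region="us-central1",
    gcs_bucket_name="YOUR_BUCKET",
    embedding=VertexAIEmbeddings(),
    index_id="YOUR_INDEX_ID",
    endpoint_id="YOUR_ENDPOINT_ID",
)

vector_store.add_texts(texts=["Lunch is served at noon in the main cafeteria."])
print(vector_store.similarity_search("lunch", k=1))
```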
@@ -181,14 +181,28 @@ There exists a `GoogleSearchAPIWrapper` utility which wraps this API. To import
 ```python
 from langchain.utilities import GoogleSearchAPIWrapper
 ```
 For a more detailed walkthrough of this wrapper, see [this notebook](/docs/integrations/tools/google_search.html).
 We can easily load this wrapper as a Tool (to use with an Agent). We can do this with:
 ```python
 from langchain.agents import load_tools
 tools = load_tools(["google-search"])
 ```
+### Google Places
+
+See a [usage example](/docs/integrations/tools/google_places).
+
+```
+pip install googlemaps
+```
+
+```python
+from langchain.tools import GooglePlacesTool
+```
 ## Document Transformer
 ### Google Document AI
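A short usage sketch for the Google Places tool added above; it assumes the `googlemaps` package is installed and that the wrapper reads the key from the `GPLACES_API_KEY` environment variable:

```python
import os

from langchain.tools import GooglePlacesTool

# Assumed environment variable name for the Places API key.
os.environ["GPLACES_API_KEY"] = "YOUR_API_KEY"

places = GooglePlacesTool()
print(places.run("coffee shops near Union Square, San Francisco"))
```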
@@ -216,3 +230,40 @@ See a [usage example](/docs/integrations/document_transformers/docai).
 from langchain.document_loaders.blob_loaders import Blob
 from langchain.document_loaders.parsers import DocAIParser
 ```
+
+## Chat loaders
+
+### Gmail
+
+> [Gmail](https://en.wikipedia.org/wiki/Gmail) is a free email service provided by Google.
+
+First, we need to install several python packages.
+
+```bash
+pip install --upgrade google-auth google-auth-oauthlib google-auth-httplib2 google-api-python-client
+```
+
+See a [usage example and authorizing instructions](/docs/integrations/chat_loaders/gmail).
+
+```python
+from langchain.chat_loaders.gmail import GMailLoader
+```
+
+## Agents and Toolkits
+
+### Gmail
+
+See a [usage example and authorizing instructions](/docs/integrations/toolkits/gmail).
+
+```python
+from langchain.agents.agent_toolkits import GmailToolkit
+toolkit = GmailToolkit()
+```
+
+### Google Drive
+
+See a [usage example and authorizing instructions](/docs/integrations/toolkits/google_drive).
+
+```python
+from langchain_googledrive.utilities.google_drive import GoogleDriveAPIWrapper
+from langchain_googledrive.tools.google_drive.tool import GoogleDriveSearchTool
+```
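To illustrate the Gmail chat loader added above, a hedged sketch of loading chat sessions from a Gmail account; the `creds` and `n` parameter names are assumed from the loader's current signature, and `token.json` is a placeholder for OAuth credentials produced by the linked authorization flow:

```python
from google.oauth2.credentials import Credentials

from langchain.chat_loaders.gmail import GMailLoader

# Placeholder: OAuth credentials with Gmail read-only scope, created by the
# authorization flow referenced in the usage example above.
creds = Credentials.from_authorized_user_file("token.json")

loader = GMailLoader(creds=creds, n=50)  # n limits how many threads are fetched
chat_sessions = loader.load()
print(f"Loaded {len(chat_sessions)} chat sessions")
```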

@@ -1,28 +0,0 @@
-# Google Document AI
-
->[Document AI](https://cloud.google.com/document-ai/docs/overview) is a `Google Cloud Platform`
-> service to transform unstructured data from documents into structured data, making it easier
-> to understand, analyze, and consume.
-
-## Installation and Setup
-
-You need to set up a [`GCS` bucket and create your own OCR processor](https://cloud.google.com/document-ai/docs/create-processor)
-The `GCS_OUTPUT_PATH` should be a path to a folder on GCS (starting with `gs://`)
-and a processor name should look like `projects/PROJECT_NUMBER/locations/LOCATION/processors/PROCESSOR_ID`.
-You can get it either programmatically or copy from the `Prediction endpoint` section of the `Processor details`
-tab in the Google Cloud Console.
-
-```bash
-pip install google-cloud-documentai
-pip install google-cloud-documentai-toolbox
-```
-
-## Document Transformer
-
-See a [usage example](/docs/integrations/document_transformers/docai).
-
-```python
-from langchain.document_loaders.blob_loaders import Blob
-from langchain.document_loaders.parsers import DocAIParser
-```
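Although this standalone page is removed (its content moves to the Google provider page), a minimal sketch of how the parser it describes is typically used may help reviewers; the processor name and bucket paths are placeholders, and the keyword names are assumed to match the current `DocAIParser` signature:

```python
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers import DocAIParser

# Placeholders: an OCR processor must already exist, and the output path must
# point at a GCS folder (gs://...).
parser = DocAIParser(
    location="us",
    processor_name="projects/PROJECT_NUMBER/locations/us/processors/PROCESSOR_ID",
    gcs_output_path="gs://BUCKET_NAME/OUTPUT_FOLDER",
)

blob = Blob(path="gs://BUCKET_NAME/invoice.pdf")
docs = list(parser.lazy_parse(blob))
print(docs[0].page_content[:200])
```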

@@ -1,4 +1,4 @@
-# Google Serper
+# Serper - Google Search API
 
 This page covers how to use the [Serper](https://serper.dev) Google Search API within LangChain. Serper is a low-cost Google Search API that can be used to add answer box, knowledge graph, and organic results data from Google Search.
 It is broken into two parts: setup, and then references to the specific Google Serper wrapper.
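For quick reference alongside the retitled page, a minimal sketch of the wrapper it documents; it assumes the API key is supplied via the `SERPER_API_KEY` environment variable:

```python
import os

from langchain.utilities import GoogleSerperAPIWrapper

os.environ["SERPER_API_KEY"] = "YOUR_SERPER_API_KEY"  # placeholder key

search = GoogleSerperAPIWrapper()
print(search.run("What is the capital of Japan?"))
```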

@@ -5,11 +5,11 @@
 "id": "655b8f55-2089-4733-8b09-35dea9580695",
 "metadata": {},
 "source": [
-"# Google Vertex AI MatchingEngine\n",
+"# Google Vertex AI Vector Search\n",
 "\n",
-"This notebook shows how to use functionality related to the `GCP Vertex AI MatchingEngine` vector database.\n",
+"This notebook shows how to use functionality related to the `Google Cloud Vertex AI Vector Search` vector database.\n",
 "\n",
-"> Vertex AI [Matching Engine](https://cloud.google.com/vertex-ai/docs/matching-engine/overview) provides the industry's leading high-scale low latency vector database. These vector databases are commonly referred to as vector similarity-matching or an approximate nearest neighbor (ANN) service.\n",
+"> [Google Vertex AI Vector Search](https://cloud.google.com/vertex-ai/docs/matching-engine/overview), formerly known as Vertex AI Matching Engine, provides the industry's leading high-scale low latency vector database. These vector databases are commonly referred to as vector similarity-matching or an approximate nearest neighbor (ANN) service.\n",
 "\n",
 "**Note**: This module expects an endpoint and deployed index already created as the creation time takes close to one hour. To see how to create an index refer to the section [Create Index and deploy it to an Endpoint](#create-index-and-deploy-it-to-an-endpoint)"
 ]
@@ -29,7 +29,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"from langchain.vectorstores import MatchingEngine"
+"from langchain.vectorstores import MatchingEngine\n"
 ]
 },
 {
@@ -61,7 +61,7 @@
 "\n",
 "vector_store.add_texts(texts=texts)\n",
 "\n",
-"vector_store.similarity_search(\"lunch\", k=2)"
+"vector_store.similarity_search(\"lunch\", k=2)\n"
 ]
 },
 {
@@ -93,7 +93,7 @@
 "!pip install tensorflow \\\n",
 "    google-cloud-aiplatform \\\n",
 "    tensorflow-hub \\\n",
-"    tensorflow-text "
+"    tensorflow-text \n"
 ]
 },
 {
@@ -108,7 +108,7 @@
 "\n",
 "from google.cloud import aiplatform\n",
 "import tensorflow_hub as hub\n",
-"import tensorflow_text"
+"import tensorflow_text\n"
 ]
 },
 {
@@ -137,7 +137,7 @@
 "VPC_NETWORK_FULL = f\"projects/{PROJECT_NUMBER}/global/networks/{VPC_NETWORK}\"\n",
 "\n",
 "# Change this if you need the VPC to be created.\n",
-"CREATE_VPC = False"
+"CREATE_VPC = False\n"
 ]
 },
 {
@@ -148,7 +148,7 @@
 "outputs": [],
 "source": [
 "# Set the project id\n",
-"! gcloud config set project {PROJECT_ID}"
+"! gcloud config set project {PROJECT_ID}\n"
 ]
 },
 {
@@ -177,7 +177,7 @@
 "\n",
 "    # Set up peering with service networking\n",
 "    # Your account must have the \"Compute Network Admin\" role to run the following.\n",
-"    ! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={VPC_NETWORK} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}"
+"    ! gcloud services vpc-peerings connect --service=servicenetworking.googleapis.com --network={VPC_NETWORK} --ranges={PEERING_RANGE_NAME} --project={PROJECT_ID}\n"
 ]
 },
 {
@@ -188,7 +188,7 @@
 "outputs": [],
 "source": [
 "# Creating bucket.\n",
-"! gsutil mb -l $REGION -p $PROJECT_ID $BUCKET_URI"
+"! gsutil mb -l $REGION -p $PROJECT_ID $BUCKET_URI\n"
 ]
 },
 {
@@ -208,7 +208,7 @@
 "source": [
 "# Load the Universal Sentence Encoder module\n",
 "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-multilingual/3\"\n",
-"model = hub.load(module_url)"
+"model = hub.load(module_url)\n"
 ]
 },
 {
@@ -219,7 +219,7 @@
 "outputs": [],
 "source": [
 "# Generate embeddings for each word\n",
-"embeddings = model([\"banana\"])"
+"embeddings = model([\"banana\"])\n"
 ]
 },
 {
@@ -245,7 +245,7 @@
 "with open(\"data.json\", \"w\") as f:\n",
 "    json.dump(initial_config, f)\n",
 "\n",
-"!gsutil cp data.json {EMBEDDING_DIR}/file.json"
+"!gsutil cp data.json {EMBEDDING_DIR}/file.json\n"
 ]
 },
 {
@@ -255,7 +255,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)"
+"aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)\n"
 ]
 },
 {
@@ -279,7 +279,7 @@
 "    dimensions=DIMENSIONS,\n",
 "    approximate_neighbors_count=150,\n",
 "    distance_measure_type=\"DOT_PRODUCT_DISTANCE\",\n",
-")"
+")\n"
 ]
 },
 {
@@ -300,7 +300,7 @@
 "my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(\n",
 "    display_name=f\"{DISPLAY_NAME}-endpoint\",\n",
 "    network=VPC_NETWORK_FULL,\n",
-")"
+")\n"
 ]
 },
 {
@@ -322,7 +322,7 @@
 "    index=my_index, deployed_index_id=DEPLOYED_INDEX_ID\n",
 ")\n",
 "\n",
-"my_index_endpoint.deployed_indexes"
+"my_index_endpoint.deployed_indexes\n"
 ]
 }
],

@@ -3850,6 +3850,10 @@
 {
 "source": "/docs/integrations/retrievers/google_cloud_enterprise_search",
 "destination": "/docs/integrations/retrievers/google_vertex_ai_search"
+},
+{
+"source": "/docs/integrations/providers/google_document_ai",
+"destination": "/docs/integrations/platforms/google#google-document-ai"
 }
 ]
}
