docs: migrate integrations using langchain-cli (#21929)

Migrate integration docs
pull/21933/head
Eugene Yurtsev 4 months ago committed by GitHub
parent c98bd8505f
commit 8ed2ba9301
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -29,6 +29,7 @@
"import os\n",
"\n",
"import comet_llm\n",
"from langchain_openai import OpenAI\n",
"\n",
"os.environ[\"LANGCHAIN_COMET_TRACING\"] = \"true\"\n",
"\n",
@ -40,8 +41,7 @@
"# here we are configuring the comet project\n",
"os.environ[\"COMET_PROJECT_NAME\"] = \"comet-example-langchain-tracing\"\n",
"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.llms import OpenAI"
"from langchain.agents import AgentType, initialize_agent, load_tools"
]
},
{

@ -114,10 +114,7 @@
"source": [
"import os\n",
"\n",
"from langchain.schema import (\n",
" HumanMessage,\n",
" SystemMessage,\n",
")\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"token = os.environ[\"CONTEXT_API_TOKEN\"]\n",

@ -94,9 +94,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import (\n",
" HumanMessage,\n",
")\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"chat_llm = ChatOpenAI(\n",

@ -127,7 +127,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import get_openai_callback"
"from langchain_community.callbacks import get_openai_callback"
]
},
{

@ -161,8 +161,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -86,7 +86,13 @@
{
"data": {
"text/plain": [
"AIMessage(content='为你找到关于coze的信息如下\n\nCoze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n\n用户无论是否有编程经验都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件并将其部署在社交平台和即时聊天应用程序中。\n\n国际版使用的模型比国内版更强大。')"
"AIMessage(content='为你找到关于coze的信息如下\n",
"\n",
"Coze是一个由字节跳动推出的AI聊天机器人和应用程序编辑开发平台。\n",
"\n",
"用户无论是否有编程经验都可以通过该平台快速创建各种类型的聊天机器人、智能体、AI应用和插件并将其部署在社交平台和即时聊天应用程序中。\n",
"\n",
"国际版使用的模型比国内版更强大。')"
]
},
"execution_count": 3,
@ -173,8 +179,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -67,7 +67,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
"from langchain_core.callbacks import StreamingStdOutCallbackHandler"
]
},
{

@ -133,8 +133,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -126,8 +126,8 @@
}
],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.chat_models import ChatEverlyAI\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",
@ -184,8 +184,8 @@
}
],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.chat_models import ChatEverlyAI\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"messages = [\n",

@ -143,8 +143,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
]
},
{

@ -94,8 +94,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
]
},
{

@ -122,8 +122,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler"
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler"
]
},
{

@ -173,8 +173,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import OnlinePDFLoader\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain_community.document_loaders import OnlinePDFLoader\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"\n",
"# Loading the COMVEST 2024 notice\n",
"loader = OnlinePDFLoader(\n",
@ -202,7 +202,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.retrievers import BM25Retriever\n",
"from langchain_community.retrievers import BM25Retriever\n",
"\n",
"retriever = BM25Retriever.from_documents(texts)"
]

@ -71,8 +71,7 @@
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -66,10 +66,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import (\n",
" HumanMessage,\n",
")\n",
"from langchain_community.chat_models.mlx import ChatMLX\n",
"from langchain_core.messages import HumanMessage\n",
"\n",
"messages = [\n",
" HumanMessage(\n",

@ -78,8 +78,7 @@
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -34,7 +34,9 @@
"outputs": [
{
"data": {
"text/plain": "AIMessage(content='Hello! How can I help you today?')"
"text/plain": [
"AIMessage(content='Hello! How can I help you today?')"
]
},
"execution_count": 10,
"metadata": {},

@ -2,17 +2,17 @@
"cells": [
{
"cell_type": "raw",
"source": [
"---\n",
"sidebar_label: Yuan2.0\n",
"---"
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% raw\n"
}
}
},
"source": [
"---\n",
"sidebar_label: Yuan2.0\n",
"---"
]
},
{
"cell_type": "markdown",
@ -216,7 +216,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"chat = ChatYuan2(\n",
" yuan2_api_base=\"http://127.0.0.1:8001/v1\",\n",
@ -460,4 +460,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
}
}

@ -199,28 +199,32 @@
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"### Using With Functions Call\n",
"\n",
"GLM-4 Model can be used with the function call as welluse the following code to run a simple LangChain json_chat_agent."
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
"outputs": [],
"source": [
"os.environ[\"TAVILY_API_KEY\"] = \"tavily_api_key\""
],
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null
"outputs": [],
"source": [
"os.environ[\"TAVILY_API_KEY\"] = \"tavily_api_key\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"from langchain import hub\n",
@ -235,22 +239,18 @@
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n",
")"
],
"metadata": {
"collapsed": false
},
"execution_count": null
]
},
{
"cell_type": "code",
"outputs": [],
"source": [
"agent_executor.invoke({\"input\": \"what is LangChain?\"})"
],
"execution_count": null,
"metadata": {
"collapsed": false
},
"execution_count": null
"outputs": [],
"source": [
"agent_executor.invoke({\"input\": \"what is LangChain?\"})"
]
}
],
"metadata": {

@ -135,8 +135,8 @@
"source": [
"from pprint import pprint\n",
"\n",
"from langchain.utils.openai_functions import convert_pydantic_to_openai_function\n",
"from langchain_core.pydantic_v1 import BaseModel\n",
"from langchain_core.utils.function_calling import convert_pydantic_to_openai_function\n",
"\n",
"openai_function_def = convert_pydantic_to_openai_function(Calculator)\n",
"pprint(openai_function_def)"
@ -149,7 +149,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_functions import PydanticOutputFunctionsParser\n",
"from langchain_core.output_parsers.openai_functions import PydanticOutputFunctionsParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",

@ -166,7 +166,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -149,7 +149,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -151,7 +151,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -156,7 +156,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -152,7 +152,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -149,7 +149,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -152,7 +152,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -153,7 +153,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -101,8 +101,8 @@
"outputs": [],
"source": [
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain_community.docstore.document import Document\n",
"from langchain_community.document_loaders import ApifyDatasetLoader"
"from langchain_community.document_loaders import ApifyDatasetLoader\n",
"from langchain_core.documents import Document"
]
},
{

@ -208,8 +208,7 @@
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -80,21 +80,21 @@
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"ExecuteTime": {
"end_time": "2024-01-08T12:41:22.643335Z",
"start_time": "2024-01-08T12:40:57.759116Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"from getpass import getpass\n",
"\n",
"ASTRA_DB_API_ENDPOINT = input(\"ASTRA_DB_API_ENDPOINT = \")\n",
"ASTRA_DB_APPLICATION_TOKEN = getpass(\"ASTRA_DB_APPLICATION_TOKEN = \")"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2024-01-08T12:41:22.643335Z",
"start_time": "2024-01-08T12:40:57.759116Z"
}
},
"execution_count": 4
]
},
{
"cell_type": "code",
@ -118,18 +118,18 @@
},
{
"cell_type": "code",
"outputs": [],
"source": [
"docs = loader.load()"
],
"execution_count": 7,
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2024-01-08T12:42:30.236489Z",
"start_time": "2024-01-08T12:42:29.612133Z"
}
},
"collapsed": false
},
"execution_count": 7
"outputs": [],
"source": [
"docs = loader.load()"
]
},
{
"cell_type": "code",
@ -143,7 +143,9 @@
"outputs": [
{
"data": {
"text/plain": "Document(page_content='{\"_id\": \"659bdffa16cbc4586b11a423\", \"title\": \"Dangerous Men\", \"reviewtext\": \"\\\\\"Dangerous Men,\\\\\" the picture\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\"}', metadata={'namespace': 'default_keyspace', 'api_endpoint': 'https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com', 'collection': 'movie_reviews'})"
"text/plain": [
"Document(page_content='{\"_id\": \"659bdffa16cbc4586b11a423\", \"title\": \"Dangerous Men\", \"reviewtext\": \"\\\\\"Dangerous Men,\\\\\" the picture\\'s production notes inform, took 26 years to reach the big screen. After having seen it, I wonder: What was the rush?\"}', metadata={'namespace': 'default_keyspace', 'api_endpoint': 'https://01234567-89ab-cdef-0123-456789abcdef-us-east1.apps.astra.datastax.com', 'collection': 'movie_reviews'})"
]
},
"execution_count": 8,
"metadata": {},

@ -104,37 +104,37 @@
},
{
"cell_type": "markdown",
"id": "91a7ac07",
"metadata": {},
"source": [
"## Configuring the AWS Boto3 client\n",
"You can configure the AWS [Boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) client by passing\n",
"named arguments when creating the S3DirectoryLoader.\n",
"This is useful for instance when AWS credentials can't be set as environment variables.\n",
"See the [list of parameters](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session) that can be configured."
],
"metadata": {},
"id": "91a7ac07"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f485ec8c",
"metadata": {},
"outputs": [],
"source": [
"loader = S3DirectoryLoader(\n",
" \"testing-hwc\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\"\n",
")"
],
"metadata": {},
"id": "f485ec8c"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c0fa76ae",
"metadata": {},
"outputs": [],
"source": [
"loader.load()"
],
"metadata": {},
"id": "c0fa76ae"
]
}
],
"metadata": {
@ -158,4 +158,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -80,24 +80,24 @@
{
"cell_type": "code",
"execution_count": null,
"id": "43106ee8",
"metadata": {},
"outputs": [],
"source": [
"loader = S3FileLoader(\n",
" \"testing-hwc\", \"fake.docx\", aws_access_key_id=\"xxxx\", aws_secret_access_key=\"yyyy\"\n",
")"
],
"metadata": {},
"id": "43106ee8"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1764a727",
"metadata": {},
"outputs": [],
"source": [
"loader.load()"
],
"metadata": {},
"id": "1764a727"
]
}
],
"metadata": {
@ -121,4 +121,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -143,4 +143,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -96,8 +96,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document"
"from langchain_core.documents import Document"
]
},
{

@ -145,8 +145,7 @@
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -76,8 +76,7 @@
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -1,362 +1,362 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "E_RJy7C1bpCT"
},
"source": [
"# Google Cloud SQL for PostgreSQL\n",
"\n",
"> [Cloud SQL for PostgreSQL](https://cloud.google.com/sql/docs/postgres) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud Platform. Extend your database application to build AI-powered experiences leveraging Cloud SQL for PostgreSQL's Langchain integrations.\n",
"\n",
"This notebook goes over how to use `Cloud SQL for PostgreSQL` to load Documents with the `PostgresLoader` class.\n",
"\n",
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/).\n",
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/googleapis/langchain-google-cloud-sql-pg-python/blob/main/docs/document_loader.ipynb)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "xjcxaw6--Xyy"
},
"source": [
"## Before you begin\n",
"\n",
"To run this notebook, you will need to do the following:\n",
"\n",
" * [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
" * [Enable the Cloud SQL Admin API.](https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com)\n",
" * [Create a Cloud SQL for PostgreSQL instance.](https://cloud.google.com/sql/docs/postgres/create-instance)\n",
" * [Create a Cloud SQL for PostgreSQL database.](https://cloud.google.com/sql/docs/postgres/create-manage-databases)\n",
" * [Add a User to the database.](https://cloud.google.com/sql/docs/postgres/create-manage-users)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IR54BmgvdHT_"
},
"source": [
"### 🦜🔗 Library Installation\n",
"Install the integration library, `langchain_google_cloud_sql_pg`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
},
"id": "0ZITIDE160OD",
"outputId": "90e0636e-ff34-4e1e-ad37-d2a6db4a317e"
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain_google_cloud_sql_pg"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "v40bB_GMcr9f"
},
"source": [
"**Colab only:** Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6o0iGVIdDD6K"
},
"outputs": [],
"source": [
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
"# import IPython\n",
"\n",
"# app = IPython.Application.instance()\n",
"# app.kernel.do_shutdown(True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cTXTbj4UltKf"
},
"source": [
"### 🔐 Authentication\n",
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
"\n",
"* If you are using Colab to run this notebook, use the cell below and continue.\n",
"* If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.colab import auth\n",
"\n",
"auth.authenticate_user()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Uj02bMRAc9_c"
},
"source": [
"### ☁ Set Your Google Cloud Project\n",
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
"\n",
"If you don't know your project ID, try the following:\n",
"\n",
"* Run `gcloud config list`.\n",
"* Run `gcloud projects list`.\n",
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wnp1R1PYc9_c",
"outputId": "6502c721-a2fd-451f-b946-9f7b850d5966"
},
"outputs": [],
"source": [
"# @title Project { display-mode: \"form\" }\n",
"PROJECT_ID = \"gcp_project_id\" # @param {type:\"string\"}\n",
"\n",
"# Set the project id\n",
"! gcloud config set project {PROJECT_ID}"
]
},
{
"cell_type": "markdown",
"id": "f8f2830ee9ca1e01",
"metadata": {
"id": "f8f2830ee9ca1e01"
},
"source": [
"## Basic Usage"
]
},
{
"cell_type": "markdown",
"id": "OMvzMWRrR6n7",
"metadata": {
"id": "OMvzMWRrR6n7"
},
"source": [
"### Set Cloud SQL database values\n",
"Find your database variables, in the [Cloud SQL Instances page](https://console.cloud.google.com/sql/instances)."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "irl7eMFnSPZr",
"metadata": {
"id": "irl7eMFnSPZr"
},
"outputs": [],
"source": [
"# @title Set Your Values Here { display-mode: \"form\" }\n",
"REGION = \"us-central1\" # @param {type: \"string\"}\n",
"INSTANCE = \"my-primary\" # @param {type: \"string\"}\n",
"DATABASE = \"my-database\" # @param {type: \"string\"}\n",
"TABLE_NAME = \"vector_store\" # @param {type: \"string\"}"
]
},
{
"cell_type": "markdown",
"id": "QuQigs4UoFQ2",
"metadata": {
"id": "QuQigs4UoFQ2"
},
"source": [
"### Cloud SQL Engine\n",
"\n",
"One of the requirements and arguments to establish PostgreSQL as a document loader is a `PostgresEngine` object. The `PostgresEngine` configures a connection pool to your Cloud SQL for PostgreSQL database, enabling successful connections from your application and following industry best practices.\n",
"\n",
"To create a `PostgresEngine` using `PostgresEngine.from_instance()` you need to provide only 4 things:\n",
"\n",
"1. `project_id` : Project ID of the Google Cloud Project where the Cloud SQL instance is located.\n",
"1. `region` : Region where the Cloud SQL instance is located.\n",
"1. `instance` : The name of the Cloud SQL instance.\n",
"1. `database` : The name of the database to connect to on the Cloud SQL instance.\n",
"\n",
"By default, [IAM database authentication](https://cloud.google.com/sql/docs/postgres/iam-authentication) will be used as the method of database authentication. This library uses the IAM principal belonging to the [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials) sourced from the environment.\n",
"\n",
"Optionally, [built-in database authentication](https://cloud.google.com/sql/docs/postgres/users) using a username and password to access the Cloud SQL database can also be used. Just provide the optional `user` and `password` arguments to `PostgresEngine.from_instance()`:\n",
"\n",
"* `user` : Database user to use for built-in database authentication and login\n",
"* `password` : Database password to use for built-in database authentication and login.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Note**: This tutorial demonstrates the async interface. All async methods have corresponding sync methods."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_cloud_sql_pg import PostgresEngine\n",
"\n",
"engine = await PostgresEngine.afrom_instance(\n",
" project_id=PROJECT_ID,\n",
" region=REGION,\n",
" instance=INSTANCE,\n",
" database=DATABASE,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "e1tl0aNx7SWy"
},
"source": [
"### Create PostgresLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "z-AZyzAQ7bsf"
},
"outputs": [],
"source": [
"from langchain_google_cloud_sql_pg import PostgresLoader\n",
"\n",
"# Creating a basic PostgreSQL object\n",
"loader = await PostgresLoader.create(engine, table_name=TABLE_NAME)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "PeOMpftjc9_e"
},
"source": [
"### Load Documents via default table\n",
"The loader returns a list of Documents from the table using the first column as page_content and all other columns as metadata. The default table will have the first column as\n",
"page_content and the second column as metadata (JSON). Each row becomes a document. Please note that if you want your documents to have ids you will need to add them in."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cwvi_O5Wc9_e"
},
"outputs": [],
"source": [
"from langchain_google_cloud_sql_pg import PostgresLoader\n",
"\n",
"# Creating a basic PostgresLoader object\n",
"loader = await PostgresLoader.create(engine, table_name=TABLE_NAME)\n",
"\n",
"docs = await loader.aload()\n",
"print(docs)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "kSkL9l1Hc9_e"
},
"source": [
"### Load documents via custom table/metadata or custom page content columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"loader = await PostgresLoader.create(\n",
" engine,\n",
" table_name=TABLE_NAME,\n",
" content_columns=[\"product_name\"], # Optional\n",
" metadata_columns=[\"id\"], # Optional\n",
")\n",
"docs = await loader.aload()\n",
"print(docs)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5R6h0_Cvc9_f"
},
"source": [
"### Set page content format\n",
"The loader returns a list of Documents, with one document per row, with page content in specified string format, i.e. text (space separated concatenation), JSON, YAML, CSV, etc. JSON and YAML formats include headers, while text and CSV do not include field headers.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "NGNdS7cqc9_f"
},
"outputs": [],
"source": [
"loader = await PostgresLoader.create(\n",
" engine,\n",
" table_name=\"products\",\n",
" content_columns=[\"product_name\", \"description\"],\n",
" format=\"YAML\",\n",
")\n",
"docs = await loader.aload()\n",
"print(docs)"
]
}
],
"metadata": {
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "E_RJy7C1bpCT"
},
"source": [
"# Google Cloud SQL for PostgreSQL\n",
"\n",
"> [Cloud SQL for PostgreSQL](https://cloud.google.com/sql/docs/postgres) is a fully-managed database service that helps you set up, maintain, manage, and administer your PostgreSQL relational databases on Google Cloud Platform. Extend your database application to build AI-powered experiences leveraging Cloud SQL for PostgreSQL's Langchain integrations.\n",
"\n",
"This notebook goes over how to use `Cloud SQL for PostgreSQL` to load Documents with the `PostgresLoader` class.\n",
"\n",
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-cloud-sql-pg-python/).\n",
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/googleapis/langchain-google-cloud-sql-pg-python/blob/main/docs/document_loader.ipynb)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "xjcxaw6--Xyy"
},
"source": [
"## Before you begin\n",
"\n",
"To run this notebook, you will need to do the following:\n",
"\n",
" * [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
" * [Enable the Cloud SQL Admin API.](https://console.cloud.google.com/marketplace/product/google/sqladmin.googleapis.com)\n",
" * [Create a Cloud SQL for PostgreSQL instance.](https://cloud.google.com/sql/docs/postgres/create-instance)\n",
" * [Create a Cloud SQL for PostgreSQL database.](https://cloud.google.com/sql/docs/postgres/create-manage-databases)\n",
" * [Add a User to the database.](https://cloud.google.com/sql/docs/postgres/create-manage-users)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IR54BmgvdHT_"
},
"source": [
"### 🦜🔗 Library Installation\n",
"Install the integration library, `langchain_google_cloud_sql_pg`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"provenance": [],
"toc_visible": true
"base_uri": "https://localhost:8080/",
"height": 1000
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
"id": "0ZITIDE160OD",
"outputId": "90e0636e-ff34-4e1e-ad37-d2a6db4a317e"
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain_google_cloud_sql_pg"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "v40bB_GMcr9f"
},
"source": [
"**Colab only:** Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "6o0iGVIdDD6K"
},
"outputs": [],
"source": [
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
"# import IPython\n",
"\n",
"# app = IPython.Application.instance()\n",
"# app.kernel.do_shutdown(True)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "cTXTbj4UltKf"
},
"source": [
"### 🔐 Authentication\n",
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
"\n",
"* If you are using Colab to run this notebook, use the cell below and continue.\n",
"* If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.colab import auth\n",
"\n",
"auth.authenticate_user()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Uj02bMRAc9_c"
},
"source": [
"### ☁ Set Your Google Cloud Project\n",
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
"\n",
"If you don't know your project ID, try the following:\n",
"\n",
"* Run `gcloud config list`.\n",
"* Run `gcloud projects list`.\n",
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
"id": "wnp1R1PYc9_c",
"outputId": "6502c721-a2fd-451f-b946-9f7b850d5966"
},
"outputs": [],
"source": [
"# @title Project { display-mode: \"form\" }\n",
"PROJECT_ID = \"gcp_project_id\" # @param {type:\"string\"}\n",
"\n",
"# Set the project id\n",
"! gcloud config set project {PROJECT_ID}"
]
},
{
"cell_type": "markdown",
"id": "f8f2830ee9ca1e01",
"metadata": {
"id": "f8f2830ee9ca1e01"
},
"source": [
"## Basic Usage"
]
},
{
"cell_type": "markdown",
"id": "OMvzMWRrR6n7",
"metadata": {
"id": "OMvzMWRrR6n7"
},
"source": [
"### Set Cloud SQL database values\n",
"Find your database variables, in the [Cloud SQL Instances page](https://console.cloud.google.com/sql/instances)."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "irl7eMFnSPZr",
"metadata": {
"id": "irl7eMFnSPZr"
},
"outputs": [],
"source": [
"# @title Set Your Values Here { display-mode: \"form\" }\n",
"REGION = \"us-central1\" # @param {type: \"string\"}\n",
"INSTANCE = \"my-primary\" # @param {type: \"string\"}\n",
"DATABASE = \"my-database\" # @param {type: \"string\"}\n",
"TABLE_NAME = \"vector_store\" # @param {type: \"string\"}"
]
},
{
"cell_type": "markdown",
"id": "QuQigs4UoFQ2",
"metadata": {
"id": "QuQigs4UoFQ2"
},
"source": [
"### Cloud SQL Engine\n",
"\n",
"One of the requirements and arguments to establish PostgreSQL as a document loader is a `PostgresEngine` object. The `PostgresEngine` configures a connection pool to your Cloud SQL for PostgreSQL database, enabling successful connections from your application and following industry best practices.\n",
"\n",
"To create a `PostgresEngine` using `PostgresEngine.from_instance()` you need to provide only 4 things:\n",
"\n",
"1. `project_id` : Project ID of the Google Cloud Project where the Cloud SQL instance is located.\n",
"1. `region` : Region where the Cloud SQL instance is located.\n",
"1. `instance` : The name of the Cloud SQL instance.\n",
"1. `database` : The name of the database to connect to on the Cloud SQL instance.\n",
"\n",
"By default, [IAM database authentication](https://cloud.google.com/sql/docs/postgres/iam-authentication) will be used as the method of database authentication. This library uses the IAM principal belonging to the [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/application-default-credentials) sourced from the environment.\n",
"\n",
"Optionally, [built-in database authentication](https://cloud.google.com/sql/docs/postgres/users) using a username and password to access the Cloud SQL database can also be used. Just provide the optional `user` and `password` arguments to `PostgresEngine.from_instance()`:\n",
"\n",
"* `user` : Database user to use for built-in database authentication and login\n",
"* `password` : Database password to use for built-in database authentication and login.\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Note**: This tutorial demonstrates the async interface. All async methods have corresponding sync methods."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_cloud_sql_pg import PostgresEngine\n",
"\n",
"engine = await PostgresEngine.afrom_instance(\n",
" project_id=PROJECT_ID,\n",
" region=REGION,\n",
" instance=INSTANCE,\n",
" database=DATABASE,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "e1tl0aNx7SWy"
},
"source": [
"### Create PostgresLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "z-AZyzAQ7bsf"
},
"outputs": [],
"source": [
"from langchain_google_cloud_sql_pg import PostgresLoader\n",
"\n",
"# Creating a basic PostgreSQL object\n",
"loader = await PostgresLoader.create(engine, table_name=TABLE_NAME)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "PeOMpftjc9_e"
},
"source": [
"### Load Documents via default table\n",
"The loader returns a list of Documents from the table using the first column as page_content and all other columns as metadata. The default table will have the first column as\n",
"page_content and the second column as metadata (JSON). Each row becomes a document. Please note that if you want your documents to have ids you will need to add them in."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cwvi_O5Wc9_e"
},
"outputs": [],
"source": [
"from langchain_google_cloud_sql_pg import PostgresLoader\n",
"\n",
"# Creating a basic PostgresLoader object\n",
"loader = await PostgresLoader.create(engine, table_name=TABLE_NAME)\n",
"\n",
"docs = await loader.aload()\n",
"print(docs)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "kSkL9l1Hc9_e"
},
"source": [
"### Load documents via custom table/metadata or custom page content columns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"loader = await PostgresLoader.create(\n",
" engine,\n",
" table_name=TABLE_NAME,\n",
" content_columns=[\"product_name\"], # Optional\n",
" metadata_columns=[\"id\"], # Optional\n",
")\n",
"docs = await loader.aload()\n",
"print(docs)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "5R6h0_Cvc9_f"
},
"source": [
"### Set page content format\n",
"The loader returns a list of Documents, with one document per row, with page content in specified string format, i.e. text (space separated concatenation), JSON, YAML, CSV, etc. JSON and YAML formats include headers, while text and CSV do not include field headers.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "NGNdS7cqc9_f"
},
"outputs": [],
"source": [
"loader = await PostgresLoader.create(\n",
" engine,\n",
" table_name=\"products\",\n",
" content_columns=[\"product_name\", \"description\"],\n",
" format=\"YAML\",\n",
")\n",
"docs = await loader.aload()\n",
"print(docs)"
]
}
],
"metadata": {
"colab": {
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"nbformat": 4,
"nbformat_minor": 4
}
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@ -1,336 +1,336 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Google Firestore in Datastore Mode\n",
"\n",
"> [Firestore in Datastore Mode](https://cloud.google.com/datastore) is a NoSQL document database built for automatic scaling, high performance and ease of application development. Extend your database application to build AI-powered experiences leveraging Datastore's Langchain integrations.\n",
"\n",
"This notebook goes over how to use [Firestore in Datastore Mode](https://cloud.google.com/datastore) to [save, load and delete langchain documents](/docs/how_to#document-loaders) with `DatastoreLoader` and `DatastoreSaver`.\n",
"\n",
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-datastore-python/).\n",
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/googleapis/langchain-google-datastore-python/blob/main/docs/document_loader.ipynb)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Before You Begin\n",
"\n",
"To run this notebook, you will need to do the following:\n",
"\n",
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
"* [Enable the Datastore API](https://console.cloud.google.com/flows/enableapi?apiid=datastore.googleapis.com)\n",
"* [Create a Firestore in Datastore Mode database](https://cloud.google.com/datastore/docs/manage-databases)\n",
"\n",
"After confirming access to the database in the runtime environment of this notebook, fill in the following values and run the cell before running the example scripts."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 🦜🔗 Library Installation\n",
"\n",
"The integration lives in its own `langchain-google-datastore` package, so we need to install it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-google-datastore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Colab only**: Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
"# import IPython\n",
"\n",
"# app = IPython.Application.instance()\n",
"# app.kernel.do_shutdown(True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ☁ Set Your Google Cloud Project\n",
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
"\n",
"If you don't know your project ID, try the following:\n",
"\n",
"* Run `gcloud config list`.\n",
"* Run `gcloud projects list`.\n",
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# @markdown Please fill in the value below with your Google Cloud project ID and then run the cell.\n",
"\n",
"PROJECT_ID = \"my-project-id\" # @param {type:\"string\"}\n",
"\n",
"# Set the project id\n",
"!gcloud config set project {PROJECT_ID}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 🔐 Authentication\n",
"\n",
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
"\n",
"- If you are using Colab to run this notebook, use the cell below and continue.\n",
"- If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.colab import auth\n",
"\n",
"auth.authenticate_user()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic Usage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Save documents\n",
"\n",
"Save langchain documents with `DatastoreSaver.upsert_documents(<documents>)`. By default it will try to extract the entity key from the `key` in the Document metadata."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"from langchain_google_datastore import DatastoreSaver\n",
"\n",
"saver = DatastoreSaver()\n",
"\n",
"data = [Document(page_content=\"Hello, World!\")]\n",
"saver.upsert_documents(data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Save documents without key\n",
"\n",
"If a `kind` is specified the documents will be stored with an auto generated id."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"saver = DatastoreSaver(\"MyKind\")\n",
"\n",
"saver.upsert_documents(data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load documents via Kind\n",
"\n",
"Load langchain documents with `DatastoreLoader.load()` or `DatastoreLoader.lazy_load()`. `lazy_load` returns a generator that only queries database during the iteration. To initialize `DatastoreLoader` class you need to provide:\n",
"1. `source` - The source to load the documents. It can be an instance of Query or the name of the Datastore kind to read from."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_datastore import DatastoreLoader\n",
"\n",
"loader = DatastoreLoader(\"MyKind\")\n",
"data = loader.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load documents via query\n",
"\n",
"Other than loading documents from kind, we can also choose to load documents from query. For example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.cloud import datastore\n",
"\n",
"client = datastore.Client(database=\"non-default-db\", namespace=\"custom_namespace\")\n",
"query_load = client.query(kind=\"MyKind\")\n",
"query_load.add_filter(\"region\", \"=\", \"west_coast\")\n",
"\n",
"loader_document = DatastoreLoader(query_load)\n",
"\n",
"data = loader_document.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Delete documents\n",
"\n",
"Delete a list of langchain documents from Datastore with `DatastoreSaver.delete_documents(<documents>)`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"saver = DatastoreSaver()\n",
"\n",
"saver.delete_documents(data)\n",
"\n",
"keys_to_delete = [\n",
" [\"Kind1\", \"identifier\"],\n",
" [\"Kind2\", 123],\n",
" [\"Kind3\", \"identifier\", \"NestedKind\", 456],\n",
"]\n",
"# The Documents will be ignored and only the document ids will be used.\n",
"saver.delete_documents(data, keys_to_delete)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Advanced Usage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load documents with customized document page content & metadata\n",
"\n",
"The arguments of `page_content_properties` and `metadata_properties` will specify the Entity properties to be written into LangChain Document `page_content` and `metadata`."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"loader = DatastoreLoader(\n",
" source=\"MyKind\",\n",
" page_content_fields=[\"data_field\"],\n",
" metadata_fields=[\"metadata_field\"],\n",
")\n",
"\n",
"data = loader.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Customize Page Content Format\n",
"\n",
"When the `page_content` contains only one field the information will be the field value only. Otherwise the `page_content` will be in JSON format."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Customize Connection & Authentication"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.auth import compute_engine\n",
"from google.cloud.firestore import Client\n",
"\n",
"client = Client(database=\"non-default-db\", creds=compute_engine.Credentials())\n",
"loader = DatastoreLoader(\n",
" source=\"foo\",\n",
" client=client,\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Google Firestore in Datastore Mode\n",
"\n",
"> [Firestore in Datastore Mode](https://cloud.google.com/datastore) is a NoSQL document database built for automatic scaling, high performance and ease of application development. Extend your database application to build AI-powered experiences leveraging Datastore's Langchain integrations.\n",
"\n",
"This notebook goes over how to use [Firestore in Datastore Mode](https://cloud.google.com/datastore) to [save, load and delete langchain documents](/docs/how_to#document-loaders) with `DatastoreLoader` and `DatastoreSaver`.\n",
"\n",
"Learn more about the package on [GitHub](https://github.com/googleapis/langchain-google-datastore-python/).\n",
"\n",
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/googleapis/langchain-google-datastore-python/blob/main/docs/document_loader.ipynb)"
]
},
"nbformat": 4,
"nbformat_minor": 4
}
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Before You Begin\n",
"\n",
"To run this notebook, you will need to do the following:\n",
"\n",
"* [Create a Google Cloud Project](https://developers.google.com/workspace/guides/create-project)\n",
"* [Enable the Datastore API](https://console.cloud.google.com/flows/enableapi?apiid=datastore.googleapis.com)\n",
"* [Create a Firestore in Datastore Mode database](https://cloud.google.com/datastore/docs/manage-databases)\n",
"\n",
"After confirming access to the database in the runtime environment of this notebook, fill in the following values and run the cell before running the example scripts."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 🦜🔗 Library Installation\n",
"\n",
"The integration lives in its own `langchain-google-datastore` package, so we need to install it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-google-datastore"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**Colab only**: Uncomment the following cell to restart the kernel or use the button to restart the kernel. For Vertex AI Workbench you can restart the terminal using the button on top."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# # Automatically restart kernel after installs so that your environment can access the new packages\n",
"# import IPython\n",
"\n",
"# app = IPython.Application.instance()\n",
"# app.kernel.do_shutdown(True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### ☁ Set Your Google Cloud Project\n",
"Set your Google Cloud project so that you can leverage Google Cloud resources within this notebook.\n",
"\n",
"If you don't know your project ID, try the following:\n",
"\n",
"* Run `gcloud config list`.\n",
"* Run `gcloud projects list`.\n",
"* See the support page: [Locate the project ID](https://support.google.com/googleapi/answer/7014113)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# @markdown Please fill in the value below with your Google Cloud project ID and then run the cell.\n",
"\n",
"PROJECT_ID = \"my-project-id\" # @param {type:\"string\"}\n",
"\n",
"# Set the project id\n",
"!gcloud config set project {PROJECT_ID}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 🔐 Authentication\n",
"\n",
"Authenticate to Google Cloud as the IAM user logged into this notebook in order to access your Google Cloud Project.\n",
"\n",
"- If you are using Colab to run this notebook, use the cell below and continue.\n",
"- If you are using Vertex AI Workbench, check out the setup instructions [here](https://github.com/GoogleCloudPlatform/generative-ai/tree/main/setup-env)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.colab import auth\n",
"\n",
"auth.authenticate_user()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Basic Usage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Save documents\n",
"\n",
"Save langchain documents with `DatastoreSaver.upsert_documents(<documents>)`. By default it will try to extract the entity key from the `key` in the Document metadata."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"from langchain_google_datastore import DatastoreSaver\n",
"\n",
"saver = DatastoreSaver()\n",
"\n",
"data = [Document(page_content=\"Hello, World!\")]\n",
"saver.upsert_documents(data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Save documents without key\n",
"\n",
"If a `kind` is specified the documents will be stored with an auto generated id."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"saver = DatastoreSaver(\"MyKind\")\n",
"\n",
"saver.upsert_documents(data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load documents via Kind\n",
"\n",
"Load langchain documents with `DatastoreLoader.load()` or `DatastoreLoader.lazy_load()`. `lazy_load` returns a generator that only queries database during the iteration. To initialize `DatastoreLoader` class you need to provide:\n",
"1. `source` - The source to load the documents. It can be an instance of Query or the name of the Datastore kind to read from."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_datastore import DatastoreLoader\n",
"\n",
"loader = DatastoreLoader(\"MyKind\")\n",
"data = loader.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load documents via query\n",
"\n",
"Other than loading documents from kind, we can also choose to load documents from query. For example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.cloud import datastore\n",
"\n",
"client = datastore.Client(database=\"non-default-db\", namespace=\"custom_namespace\")\n",
"query_load = client.query(kind=\"MyKind\")\n",
"query_load.add_filter(\"region\", \"=\", \"west_coast\")\n",
"\n",
"loader_document = DatastoreLoader(query_load)\n",
"\n",
"data = loader_document.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Delete documents\n",
"\n",
"Delete a list of langchain documents from Datastore with `DatastoreSaver.delete_documents(<documents>)`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"saver = DatastoreSaver()\n",
"\n",
"saver.delete_documents(data)\n",
"\n",
"keys_to_delete = [\n",
" [\"Kind1\", \"identifier\"],\n",
" [\"Kind2\", 123],\n",
" [\"Kind3\", \"identifier\", \"NestedKind\", 456],\n",
"]\n",
"# The Documents will be ignored and only the document ids will be used.\n",
"saver.delete_documents(data, keys_to_delete)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Advanced Usage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Load documents with customized document page content & metadata\n",
"\n",
"The arguments of `page_content_properties` and `metadata_properties` will specify the Entity properties to be written into LangChain Document `page_content` and `metadata`."
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"loader = DatastoreLoader(\n",
" source=\"MyKind\",\n",
" page_content_fields=[\"data_field\"],\n",
" metadata_fields=[\"metadata_field\"],\n",
")\n",
"\n",
"data = loader.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Customize Page Content Format\n",
"\n",
"When the `page_content` contains only one field the information will be the field value only. Otherwise the `page_content` will be in JSON format."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Customize Connection & Authentication"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from google.auth import compute_engine\n",
"from google.cloud.firestore import Client\n",
"\n",
"client = Client(database=\"non-default-db\", creds=compute_engine.Credentials())\n",
"loader = DatastoreLoader(\n",
" source=\"foo\",\n",
" client=client,\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@ -198,8 +198,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.0"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -13,14 +13,14 @@
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"## Initializing the lakeFS loader\n",
"\n",
"Replace `ENDPOINT`, `LAKEFS_ACCESS_KEY`, and `LAKEFS_SECRET_KEY` values with your own."
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
@ -50,15 +50,15 @@
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"## Specifying a path\n",
"You can specify a prefix or a complete object path to control which files to load.\n",
"\n",
"Specify the repository, reference (branch, commit id, or tag), and path in the corresponding `REPO`, `REF`, and `PATH` to load the documents from:"
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",

@ -103,7 +103,9 @@
"outputs": [
{
"data": {
"text/plain": "Document(page_content='Abstract\\nWe study how to apply large language models to write grounded and organized long-form articles from scratch, with comparable breadth and depth to Wikipedia pages.\\nThis underexplored problem poses new challenges at the pre-writing stage, including how to research the topic and prepare an outline prior to writing.\\nWe propose STORM, a writing system for the Synthesis of Topic Outlines through\\nReferences\\nFull-length Article\\nTopic\\nOutline\\n2022 Winter Olympics\\nOpening Ceremony\\nResearch via Question Asking\\nRetrieval and Multi-perspective Question Asking.\\nSTORM models the pre-writing stage by\\nLLM\\n(1) discovering diverse perspectives in researching the given topic, (2) simulating conversations where writers carrying different perspectives pose questions to a topic expert grounded on trusted Internet sources, (3) curating the collected information to create an outline.\\nFor evaluation, we curate FreshWiki, a dataset of recent high-quality Wikipedia articles, and formulate outline assessments to evaluate the pre-writing stage.\\nWe further gather feedback from experienced Wikipedia editors.\\nCompared to articles generated by an outlinedriven retrieval-augmented baseline, more of STORMs articles are deemed to be organized (by a 25% absolute increase) and broad in coverage (by 10%).\\nThe expert feedback also helps identify new challenges for generating grounded long articles, such as source bias transfer and over-association of unrelated facts.\\n1. Can you provide any information about the transportation arrangements for the opening ceremony?\\nLLM\\n2. Can you provide any information about the budget for the 2022 Winter Olympics opening ceremony?…\\nLLM- Role1\\nLLM- Role2\\nLLM- Role1', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'section_number': 1, 'section_title': 'Abstract'})"
"text/plain": [
"Document(page_content='Abstract\\nWe study how to apply large language models to write grounded and organized long-form articles from scratch, with comparable breadth and depth to Wikipedia pages.\\nThis underexplored problem poses new challenges at the pre-writing stage, including how to research the topic and prepare an outline prior to writing.\\nWe propose STORM, a writing system for the Synthesis of Topic Outlines through\\nReferences\\nFull-length Article\\nTopic\\nOutline\\n2022 Winter Olympics\\nOpening Ceremony\\nResearch via Question Asking\\nRetrieval and Multi-perspective Question Asking.\\nSTORM models the pre-writing stage by\\nLLM\\n(1) discovering diverse perspectives in researching the given topic, (2) simulating conversations where writers carrying different perspectives pose questions to a topic expert grounded on trusted Internet sources, (3) curating the collected information to create an outline.\\nFor evaluation, we curate FreshWiki, a dataset of recent high-quality Wikipedia articles, and formulate outline assessments to evaluate the pre-writing stage.\\nWe further gather feedback from experienced Wikipedia editors.\\nCompared to articles generated by an outlinedriven retrieval-augmented baseline, more of STORMs articles are deemed to be organized (by a 25% absolute increase) and broad in coverage (by 10%).\\nThe expert feedback also helps identify new challenges for generating grounded long articles, such as source bias transfer and over-association of unrelated facts.\\n1. Can you provide any information about the transportation arrangements for the opening ceremony?\\nLLM\\n2. Can you provide any information about the budget for the 2022 Winter Olympics opening ceremony?…\\nLLM- Role1\\nLLM- Role2\\nLLM- Role1', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'section_number': 1, 'section_title': 'Abstract'})"
]
},
"execution_count": 6,
"metadata": {},
@ -128,7 +130,9 @@
"outputs": [
{
"data": {
"text/plain": "79"
"text/plain": [
"79"
]
},
"execution_count": 7,
"metadata": {},
@ -188,7 +192,9 @@
"outputs": [
{
"data": {
"text/plain": "Document(page_content='Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'chunk_number': 1, 'chunk_type': 'para'})"
"text/plain": [
"Document(page_content='Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu', metadata={'source': 'https://arxiv.org/pdf/2402.14207.pdf', 'chunk_number': 1, 'chunk_type': 'para'})"
]
},
"execution_count": 9,
"metadata": {},
@ -213,7 +219,9 @@
"outputs": [
{
"data": {
"text/plain": "306"
"text/plain": [
"306"
]
},
"execution_count": 10,
"metadata": {},
@ -273,7 +281,9 @@
"outputs": [
{
"data": {
"text/plain": "'<html><h1>Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models</h1><table><th><td colSpan=1>Yijia Shao</td><td colSpan=1>Yucheng Jiang</td><td colSpan=1>Theodore A. Kanell</td><td colSpan=1>Peter Xu</td></th><tr><td colSpan=1></td><td colSpan=1>Omar Khattab</td><td colSpan=1>Monica S. Lam</td><td colSpan=1></td></tr></table><p>Stanford University {shaoyj, yuchengj, '"
"text/plain": [
"'<html><h1>Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models</h1><table><th><td colSpan=1>Yijia Shao</td><td colSpan=1>Yucheng Jiang</td><td colSpan=1>Theodore A. Kanell</td><td colSpan=1>Peter Xu</td></th><tr><td colSpan=1></td><td colSpan=1>Omar Khattab</td><td colSpan=1>Monica S. Lam</td><td colSpan=1></td></tr></table><p>Stanford University {shaoyj, yuchengj, '"
]
},
"execution_count": 12,
"metadata": {},
@ -298,7 +308,9 @@
"outputs": [
{
"data": {
"text/plain": "1"
"text/plain": [
"1"
]
},
"execution_count": 13,
"metadata": {},
@ -358,7 +370,9 @@
"outputs": [
{
"data": {
"text/plain": "'Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\n | Yijia Shao | Yucheng Jiang | Theodore A. Kanell | Peter Xu\\n | --- | --- | --- | ---\\n | | Omar Khattab | Monica S. Lam | \\n\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu\\nAbstract\\nWe study how to apply large language models to write grounded and organized long'"
"text/plain": [
"'Assisting in Writing Wikipedia-like Articles From Scratch with Large Language Models\\n | Yijia Shao | Yucheng Jiang | Theodore A. Kanell | Peter Xu\\n | --- | --- | --- | ---\\n | | Omar Khattab | Monica S. Lam | \\n\\nStanford University {shaoyj, yuchengj, tkanell, peterxu, okhattab}@stanford.edu lam@cs.stanford.edu\\nAbstract\\nWe study how to apply large language models to write grounded and organized long'"
]
},
"execution_count": 3,
"metadata": {},
@ -383,7 +397,9 @@
"outputs": [
{
"data": {
"text/plain": "1"
"text/plain": [
"1"
]
},
"execution_count": 4,
"metadata": {},

@ -121,8 +121,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -81,13 +81,13 @@
},
{
"cell_type": "markdown",
"source": [
"Use nlp=True to run nlp analysis and generate keywords + summary"
],
"id": "98ac26c488315bff",
"metadata": {
"collapsed": false
},
"id": "98ac26c488315bff"
"source": [
"Use nlp=True to run nlp analysis and generate keywords + summary"
]
},
{
"cell_type": "code",
@ -120,10 +120,34 @@
{
"cell_type": "code",
"execution_count": 5,
"id": "ae37e004e0284b1d",
"metadata": {
"ExecuteTime": {
"end_time": "2023-08-02T21:18:19.585758200Z",
"start_time": "2023-08-02T21:18:19.585758200Z"
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": "['powell',\n 'know',\n 'donald',\n 'trump',\n 'review',\n 'indictment',\n 'telling',\n 'view',\n 'reasonable',\n 'person',\n 'testimony',\n 'coconspirators',\n 'riot',\n 'representatives',\n 'claims']"
"text/plain": [
"['powell',\n",
" 'know',\n",
" 'donald',\n",
" 'trump',\n",
" 'review',\n",
" 'indictment',\n",
" 'telling',\n",
" 'view',\n",
" 'reasonable',\n",
" 'person',\n",
" 'testimony',\n",
" 'coconspirators',\n",
" 'riot',\n",
" 'representatives',\n",
" 'claims']"
]
},
"execution_count": 5,
"metadata": {},
@ -132,23 +156,25 @@
],
"source": [
"data[0].metadata[\"keywords\"]"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-08-02T21:18:19.585758200Z",
"start_time": "2023-08-02T21:18:19.585758200Z"
}
},
"id": "ae37e004e0284b1d"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7676155fb175e53e",
"metadata": {
"ExecuteTime": {
"end_time": "2023-08-02T21:18:19.598966800Z",
"start_time": "2023-08-02T21:18:19.594950200Z"
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": "'In testimony to the congressional committee examining the 6 January riot, Mrs Powell said she did not review all of the many claims of election fraud she made, telling them that \"no reasonable person\" would view her claims as fact.\\nNeither she nor her representatives have commented.'"
"text/plain": [
"'In testimony to the congressional committee examining the 6 January riot, Mrs Powell said she did not review all of the many claims of election fraud she made, telling them that \"no reasonable person\" would view her claims as fact.\\nNeither she nor her representatives have commented.'"
]
},
"execution_count": 6,
"metadata": {},
@ -157,15 +183,7 @@
],
"source": [
"data[0].metadata[\"summary\"]"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-08-02T21:18:19.598966800Z",
"start_time": "2023-08-02T21:18:19.594950200Z"
}
},
"id": "7676155fb175e53e"
]
}
],
"metadata": {

@ -32,7 +32,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import CSVLoader\n",
"\n",
"loader = CSVLoader(\"data/corp_sens_data.csv\")\n",
"documents = loader.load()\n",
@ -52,8 +52,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import PebbloSafeLoader\n",
"from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader\n",
"\n",
"loader = PebbloSafeLoader(\n",
" CSVLoader(\"data/corp_sens_data.csv\"),\n",
@ -80,8 +79,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import PebbloSafeLoader\n",
"from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader\n",
"\n",
"loader = PebbloSafeLoader(\n",
" CSVLoader(\"data/corp_sens_data.csv\"),\n",
@ -109,8 +107,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import PebbloSafeLoader\n",
"from langchain_community.document_loaders import CSVLoader, PebbloSafeLoader\n",
"\n",
"loader = PebbloSafeLoader(\n",
" CSVLoader(\"data/corp_sens_data.csv\"),\n",

@ -239,8 +239,7 @@
"language_info": {
"name": "python",
"version": "3.11.4"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -27,14 +27,14 @@
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field', metadata={'source': 'example_data/factbook.xml'})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
"data": {
"text/plain": [
"Document(page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field', metadata={'source': 'example_data/factbook.xml'})"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [

@ -296,8 +296,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -66,7 +66,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_huggingface import HuggingFaceEmbeddings\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",

@ -646,8 +646,7 @@
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.docstore.document import Document\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.documents import Document\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n",
"from langchain_google_vertexai import VertexAI\n",

@ -12,8 +12,7 @@
">\n",
">[Cypher](https://en.wikipedia.org/wiki/Cypher_(query_language)) is a declarative graph query language that allows for expressive and efficient data querying in a property graph.\n",
">\n",
">[openCypher](https://opencypher.org/) is an open-source implementation of Cypher.",
"# Neptune Open Cypher QA Chain\n",
">[openCypher](https://opencypher.org/) is an open-source implementation of Cypher.\n",
"\n",
"# Neptune Open Cypher QA Chain\n",
"This QA chain queries Amazon Neptune using openCypher and returns human readable response\n",
"\n",
"LangChain supports both [Neptune Database](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html) and [Neptune Analytics](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html) with `NeptuneOpenCypherQAChain` \n",

@ -66,9 +66,9 @@
"source": [
"import nest_asyncio\n",
"from langchain.chains.graph_qa.gremlin import GremlinQAChain\n",
"from langchain.schema import Document\n",
"from langchain_community.graphs import GremlinGraph\n",
"from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship\n",
"from langchain_core.documents import Document\n",
"from langchain_openai import AzureChatOpenAI"
]
},

@ -54,11 +54,11 @@
"execution_count": 5,
"id": "035dea0f",
"metadata": {
"tags": [],
"ExecuteTime": {
"end_time": "2024-03-05T20:58:44.465443Z",
"start_time": "2024-03-05T20:58:42.399724Z"
}
},
"tags": []
},
"outputs": [],
"source": [
@ -83,16 +83,18 @@
"execution_count": 6,
"id": "98f70927a87e4745",
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2024-03-05T20:58:45.859265Z",
"start_time": "2024-03-05T20:58:44.466637Z"
}
},
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": "'\\nLangChain is a (database)\\nLangChain is a database for storing and processing documents'"
"text/plain": [
"'\\nLangChain is a (database)\\nLangChain is a database for storing and processing documents'"
]
},
"execution_count": 6,
"metadata": {},
@ -118,6 +120,10 @@
},
{
"cell_type": "markdown",
"id": "9965c10269159ed1",
"metadata": {
"collapsed": false
},
"source": [
"# AI21 Contextual Answer\n",
"\n",
@ -126,14 +132,19 @@
"\n",
"This means that if the answer to your question is not in the document,\n",
"the model will indicate it (instead of providing a false answer)"
],
"metadata": {
"collapsed": false
},
"id": "9965c10269159ed1"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "411adf42eab80829",
"metadata": {
"ExecuteTime": {
"end_time": "2024-03-05T20:59:00.943426Z",
"start_time": "2024-03-05T20:59:00.263497Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"from langchain_ai21 import AI21ContextualAnswers\n",
@ -141,29 +152,29 @@
"tsm = AI21ContextualAnswers()\n",
"\n",
"response = tsm.invoke(input={\"context\": \"Your context\", \"question\": \"Your question\"})"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2024-03-05T20:59:00.943426Z",
"start_time": "2024-03-05T20:59:00.263497Z"
}
},
"id": "411adf42eab80829",
"execution_count": 9
]
},
{
"cell_type": "markdown",
"source": [
"You can also use it with chains and output parsers and vector DBs"
],
"id": "af59ffdbf4964875",
"metadata": {
"collapsed": false
},
"id": "af59ffdbf4964875"
"source": [
"You can also use it with chains and output parsers and vector DBs"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "bc63830f921b4ac9",
"metadata": {
"ExecuteTime": {
"end_time": "2024-03-05T20:59:07.719225Z",
"start_time": "2024-03-05T20:59:07.102950Z"
},
"collapsed": false
},
"outputs": [],
"source": [
"from langchain_ai21 import AI21ContextualAnswers\n",
@ -175,16 +186,7 @@
"response = chain.invoke(\n",
" {\"context\": \"Your context\", \"question\": \"Your question\"},\n",
")"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2024-03-05T20:59:07.719225Z",
"start_time": "2024-03-05T20:59:07.102950Z"
}
},
"id": "bc63830f921b4ac9",
"execution_count": 10
]
}
],
"metadata": {

@ -149,7 +149,9 @@
"outputs": [
{
"data": {
"text/plain": "\" Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\""
"text/plain": [
"\" Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\""
]
},
"execution_count": 5,
"metadata": {},
@ -179,7 +181,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"\u001B[1mAzureOpenAI\u001B[0m\n",
"\u001b[1mAzureOpenAI\u001b[0m\n",
"Params: {'deployment_name': 'gpt-35-turbo-instruct-0914', 'model_name': 'gpt-3.5-turbo-instruct', 'temperature': 0.7, 'top_p': 1, 'frequency_penalty': 0, 'presence_penalty': 0, 'n': 1, 'logit_bias': {}, 'max_tokens': 256}\n"
]
}

@ -254,7 +254,6 @@
"pygments_lexer": "ipython3",
"version": "3.11.4"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "6fa70026b407ae751a5c9e6bd7f7d482379da8ad616f98512780b705c84ee157"

@ -167,4 +167,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -83,8 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.llms import Bedrock\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"llm = Bedrock(\n",
" credentials_profile_name=\"bedrock-admin\",\n",

@ -118,8 +118,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.tools import Tool\n",
"from langchain_community.utilities import GoogleSearchAPIWrapper\n",
"from langchain_core.tools import Tool\n",
"\n",
"search = GoogleSearchAPIWrapper()\n",
"\n",

@ -41,8 +41,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.schema.messages import AIMessage\n",
"from langchain_community.llms.chatglm3 import ChatGLM3\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.prompts import PromptTemplate"
]
},

@ -119,8 +119,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -79,7 +79,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"llm = CTransformers(\n",
" model=\"marella/gpt-2-ggml\", callbacks=[StreamingStdOutCallbackHandler()]\n",
@ -120,8 +120,7 @@
"metadata": {
"language_info": {
"name": "python"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -229,7 +229,6 @@
"pygments_lexer": "ipython3",
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "d1d3a3c58a58885896c5459933a599607cdbb9917d7e1ad7516c8786c51f2dd2"

@ -298,8 +298,7 @@
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
},
"metadata": {},
"source": [
"## Wrapping a cluster driver proxy app\n",
"\n",

@ -189,8 +189,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.llms import EdenAI\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"llm = EdenAI(\n",
" callbacks=[StreamingStdOutCallbackHandler()],\n",

@ -47,9 +47,9 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.llms import GPT4All\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate"
]
},

@ -192,7 +192,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_huggingface import HuggingFaceEndpoint\n",
"\n",
"llm = HuggingFaceEndpoint(\n",

@ -63,7 +63,7 @@
"import os\n",
"\n",
"import requests\n",
"from langchain.tools import tool\n",
"from langchain_core.tools import tool\n",
"\n",
"HF_TOKEN = os.environ.get(\"HUGGINGFACE_API_KEY\")\n",
"\n",

@ -46,7 +46,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import InMemoryCache\n",
"from langchain_community.cache import InMemoryCache\n",
"\n",
"set_llm_cache(InMemoryCache())"
]
@ -141,7 +141,7 @@
"outputs": [],
"source": [
"# We can do the same thing with a SQLite cache\n",
"from langchain.cache import SQLiteCache\n",
"from langchain_community.cache import SQLiteCache\n",
"\n",
"set_llm_cache(SQLiteCache(database_path=\".langchain.db\"))"
]
@ -235,7 +235,7 @@
"outputs": [],
"source": [
"import langchain\n",
"from langchain.cache import UpstashRedisCache\n",
"from langchain_community.cache import UpstashRedisCache\n",
"from upstash_redis import Redis\n",
"\n",
"URL = \"<UPSTASH_REDIS_REST_URL>\"\n",
@ -335,7 +335,7 @@
"source": [
"# We can do the same thing with a Redis cache\n",
"# (make sure your local Redis instance is running first before running this example)\n",
"from langchain.cache import RedisCache\n",
"from langchain_community.cache import RedisCache\n",
"from redis import Redis\n",
"\n",
"set_llm_cache(RedisCache(redis_=Redis()))"
@ -419,7 +419,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import RedisSemanticCache\n",
"from langchain_community.cache import RedisSemanticCache\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"set_llm_cache(\n",
@ -517,7 +517,7 @@
"from gptcache import Cache\n",
"from gptcache.manager.factory import manager_factory\n",
"from gptcache.processor.pre import get_prompt\n",
"from langchain.cache import GPTCache\n",
"from langchain_community.cache import GPTCache\n",
"\n",
"\n",
"def get_hashed_name(name):\n",
@ -616,7 +616,7 @@
"\n",
"from gptcache import Cache\n",
"from gptcache.adapter.api import init_similar_cache\n",
"from langchain.cache import GPTCache\n",
"from langchain_community.cache import GPTCache\n",
"\n",
"\n",
"def get_hashed_name(name):\n",
@ -765,7 +765,7 @@
"source": [
"from datetime import timedelta\n",
"\n",
"from langchain.cache import MomentoCache\n",
"from langchain_community.cache import MomentoCache\n",
"\n",
"cache_name = \"langchain\"\n",
"ttl = timedelta(days=1)\n",
@ -879,7 +879,7 @@
"source": [
"# You can define your own declarative SQLAlchemyCache child class to customize the schema used for caching. For example, to support high-speed fulltext prompt indexing with Postgres, use:\n",
"\n",
"from langchain.cache import SQLAlchemyCache\n",
"from langchain_community.cache import SQLAlchemyCache\n",
"from sqlalchemy import Column, Computed, Index, Integer, Sequence, String, create_engine\n",
"from sqlalchemy.ext.declarative import declarative_base\n",
"from sqlalchemy_utils import TSVectorType\n",
@ -1296,8 +1296,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import AstraDBCache\n",
"from langchain.globals import set_llm_cache\n",
"from langchain_astradb import AstraDBCache\n",
"\n",
"set_llm_cache(\n",
" AstraDBCache(\n",
@ -1384,7 +1384,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import AstraDBSemanticCache\n",
"from langchain_astradb import AstraDBSemanticCache\n",
"\n",
"set_llm_cache(\n",
" AstraDBSemanticCache(\n",
@ -1868,7 +1868,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document\n",
"from langchain_core.documents import Document\n",
"\n",
"docs = [Document(page_content=t) for t in texts[:3]]\n",
"from langchain.chains.summarize import load_summarize_chain"

@ -169,8 +169,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -77,8 +77,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -117,10 +117,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.output_parser import StrOutputParser\n",
"from langchain.schema.runnable import RunnablePassthrough\n",
"from langchain_community.embeddings import OCIGenAIEmbeddings\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"embeddings = OCIGenAIEmbeddings(\n",
" model_id=\"MY_EMBEDDING_MODEL\",\n",

@ -58,11 +58,11 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.stdout import StdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.globals import set_debug, set_verbose\n",
"from langchain.memory import ConversationBufferWindowMemory\n",
"from langchain_community.llms import OpaquePrompts\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",

@ -291,7 +291,6 @@
"pygments_lexer": "ipython3",
"version": "3.8.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"

@ -250,4 +250,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -311,7 +311,7 @@
}
],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"llm = Replicate(\n",
" streaming=True,\n",

@ -58,7 +58,7 @@
},
"outputs": [],
"source": [
"from langchain_community.docstore.document import Document"
"from langchain_core.documents import Document"
]
},
{

@ -81,7 +81,9 @@
"outputs": [
{
"data": {
"text/plain": "LLMResult(generations=[[Generation(text='Hello! How can I assist you today?')]], llm_output=None, run=[RunInfo(run_id=UUID('d8cdcd41-a698-4cbf-a28d-e74f9cd2037b'))])"
"text/plain": [
"LLMResult(generations=[[Generation(text='Hello! How can I assist you today?')]], llm_output=None, run=[RunInfo(run_id=UUID('d8cdcd41-a698-4cbf-a28d-e74f9cd2037b'))])"
]
},
"execution_count": 9,
"metadata": {},

@ -91,10 +91,10 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain.globals import set_debug\n",
"from langchain_community.llms import TextGen\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"set_debug(True)\n",

@ -29,11 +29,9 @@
"source": [
"import time\n",
"\n",
"from langchain.callbacks.manager import CallbackManager\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"\n",
"# Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood\n",
"from langchain_community.llms import TitanTakeoff\n",
"from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate"
]
},

@ -87,7 +87,9 @@
"outputs": [
{
"data": {
"text/plain": "'好的,下面是一个笑话:\\n\\n大学暑假我配了隐形眼镜回家给爷爷说我现在配了隐形眼镜。\\n爷爷让我给他看看于是我用小镊子夹了一片给爷爷看。\\n爷爷看完便准备出门边走还边说“真高级啊还真是隐形眼镜”\\n等爷爷出去后我才发现我刚没夹起来'"
"text/plain": [
"'好的,下面是一个笑话:\\n\\n大学暑假我配了隐形眼镜回家给爷爷说我现在配了隐形眼镜。\\n爷爷让我给他看看于是我用小镊子夹了一片给爷爷看。\\n爷爷看完便准备出门边走还边说“真高级啊还真是隐形眼镜”\\n等爷爷出去后我才发现我刚没夹起来'"
]
},
"execution_count": 8,
"metadata": {},

@ -168,8 +168,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -113,4 +113,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}

@ -2,6 +2,9 @@
"cells": [
{
"cell_type": "markdown",
"metadata": {
"collapsed": false
},
"source": [
"# Zep\n",
"> Recall, understand, and extract data from chat histories. Power personalized AI experiences.\n",
@ -29,10 +32,7 @@
"2. Running an agent and having message automatically added to the store.\n",
"3. Viewing the enriched messages.\n",
"4. Vector search over the conversation history."
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
@ -47,11 +47,12 @@
"source": [
"from uuid import uuid4\n",
"\n",
"from langchain.agents import AgentType, Tool, initialize_agent\n",
"from langchain.agents import AgentType, initialize_agent\n",
"from langchain.memory import ZepMemory\n",
"from langchain.retrievers import ZepRetriever\n",
"from langchain_community.retrievers import ZepRetriever\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.messages import AIMessage, HumanMessage\n",
"from langchain_core.tools import Tool\n",
"from langchain_openai import OpenAI\n",
"\n",
"# Set this to your Zep server URL\n",
@ -257,11 +258,11 @@
"text": [
"\n",
"\n",
"\u001B[1m> Entering new chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: Do I need to use a tool? No\n",
"AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001B[0m\n",
"\u001b[1m> Entering new chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: Do I need to use a tool? No\n",
"AI: Parable of the Sower is a prescient novel that speaks to the challenges facing contemporary society, such as climate change, inequality, and violence. It is a cautionary tale that warns of the dangers of unchecked greed and the need for individuals to take responsibility for their own lives and the lives of those around them.\u001b[0m\n",
"\n",
"\u001B[1m> Finished chain.\u001B[0m\n"
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{

@ -65,7 +65,8 @@
"import os\n",
"from datetime import datetime\n",
"\n",
"from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler\n",
"from langchain_community.callbacks import AimCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI"
]
},
@ -307,4 +308,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -55,8 +55,8 @@
},
"outputs": [],
"source": [
"from langchain.callbacks import ArthurCallbackHandler\n",
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain_community.callbacks import ArthurCallbackHandler\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import ChatOpenAI"
]

@ -88,7 +88,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks import ClearMLCallbackHandler"
"from langchain_community.callbacks import ClearMLCallbackHandler"
]
},
{
@ -105,7 +105,7 @@
}
],
"source": [
"from langchain.callbacks import StdOutCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI\n",
"\n",
"# Setup and use the ClearML Callback\n",

@ -121,7 +121,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI\n",
"\n",
"comet_callback = CometCallbackHandler(\n",
@ -152,8 +153,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
@ -191,7 +193,8 @@
"outputs": [],
"source": [
"from langchain.agents import initialize_agent, load_tools\n",
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI\n",
"\n",
"comet_callback = CometCallbackHandler(\n",
@ -249,8 +252,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler\n",
"from langchain.chains import LLMChain\n",
"from langchain_community.callbacks import CometCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"from rouge_score import rouge_scorer\n",
@ -339,7 +343,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.tracers.comet import CometTracer"
"from langchain_community.callbacks.tracers.comet import CometTracer"
]
}
],

@ -75,8 +75,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import SQLiteCache\n",
"from langchain.globals import set_llm_cache\n",
"from langchain_community.cache import SQLiteCache\n",
"from langchain_openai import OpenAI\n",
"\n",
"set_llm_cache(SQLiteCache(database_path=\"cache.db\"))\n",

@ -77,7 +77,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks import MlflowCallbackHandler\n",
"from langchain_community.callbacks import MlflowCallbackHandler\n",
"from langchain_openai import OpenAI"
]
},

@ -227,8 +227,7 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
},
"orig_nbformat": 4
}
},
"nbformat": 4,
"nbformat_minor": 2

@ -177,7 +177,7 @@
"outputs": [],
"source": [
"from langchain.chains import SimpleSequentialChain, TransformChain\n",
"from langchain.sql_database import SQLDatabase\n",
"from langchain_community.utilities import SQLDatabase\n",
"from langchain_experimental.sql import SQLDatabaseChain"
]
},

@ -592,13 +592,13 @@
},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"from langchain.chains.conversational_retrieval.prompts import (\n",
" CONDENSE_QUESTION_PROMPT,\n",
" QA_PROMPT,\n",
")\n",
"from langchain.chains.llm import LLMChain\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"from langchain_core.callbacks import StreamingStdOutCallbackHandler\n",
"\n",
"# Construct a ConversationalRetrievalChain with a streaming llm for combine docs\n",
"# and a separate, non-streaming llm for question generation\n",

@ -30,6 +30,8 @@
"source": [
"import os\n",
"\n",
"from langchain_community.callbacks import wandb_tracing_enabled\n",
"\n",
"os.environ[\"LANGCHAIN_WANDB_TRACING\"] = \"true\"\n",
"\n",
"# wandb documentation to configure wandb using env variables\n",
@ -38,7 +40,6 @@
"os.environ[\"WANDB_PROJECT\"] = \"langchain-tracing\"\n",
"\n",
"from langchain.agents import AgentType, initialize_agent, load_tools\n",
"from langchain.callbacks import wandb_tracing_enabled\n",
"from langchain_openai import OpenAI"
]
},

@ -62,7 +62,8 @@
"source": [
"from datetime import datetime\n",
"\n",
"from langchain.callbacks import StdOutCallbackHandler, WandbCallbackHandler\n",
"from langchain_community.callbacks import WandbCallbackHandler\n",
"from langchain_core.callbacks import StdOutCallbackHandler\n",
"from langchain_openai import OpenAI"
]
},

@ -83,7 +83,7 @@
},
"outputs": [],
"source": [
"from langchain.callbacks import WhyLabsCallbackHandler"
"from langchain_community.callbacks import WhyLabsCallbackHandler"
]
},
{

@ -151,11 +151,11 @@
"source": [
"import os\n",
"\n",
"from langchain.document_loaders import DirectoryLoader, TextLoader\n",
"from langchain.text_splitter import TokenTextSplitter\n",
"from langchain.vectorstores import AzureSearch\n",
"from langchain_community.document_loaders import DirectoryLoader, TextLoader\n",
"from langchain_community.retrievers import AzureAISearchRetriever\n",
"from langchain_community.vectorstores import AzureSearch\n",
"from langchain_openai import AzureOpenAIEmbeddings, OpenAIEmbeddings\n",
"from langchain_text_splitters import TokenTextSplitter\n",
"\n",
"os.environ[\"AZURE_AI_SEARCH_SERVICE_NAME\"] = \"<YOUR_SEARCH_SERVICE_NAME>\"\n",
"os.environ[\"AZURE_AI_SEARCH_INDEX_NAME\"] = \"langchain-vector-demo\"\n",

@ -32,6 +32,7 @@
"# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\n",
"\n",
"from langchain_community.document_loaders import CSVLoader\n",
"from langchain_core.documents import Document\n",
"\n",
"loader = CSVLoader(\n",
" file_path=\"../../document_loaders/examples/example_data/mlb_teams_2012.csv\"\n",
@ -45,8 +46,6 @@
"import json\n",
"from typing import List\n",
"\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def write_json(path: str, documents: List[Document]) -> None:\n",
" results = [{\"text\": doc.page_content} for doc in documents]\n",

@ -647,7 +647,7 @@
}
],
"source": [
"from langchain.retrievers import DocArrayRetriever\n",
"from langchain_community.retrievers import DocArrayRetriever\n",
"\n",
"# create a retriever\n",
"retriever = DocArrayRetriever(\n",
@ -687,7 +687,7 @@
}
],
"source": [
"from langchain.retrievers import DocArrayRetriever\n",
"from langchain_community.retrievers import DocArrayRetriever\n",
"\n",
"# create a retriever\n",
"retriever = DocArrayRetriever(\n",
@ -729,7 +729,7 @@
}
],
"source": [
"from langchain.retrievers import DocArrayRetriever\n",
"from langchain_community.retrievers import DocArrayRetriever\n",
"\n",
"# create a retriever\n",
"retriever = DocArrayRetriever(\n",

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save