Merge branch 'master' into bagatur/docs_vertex_params

pull/20167/head
Bagatur committed 3 weeks ago via GitHub
commit cfec390e1e

@@ -19,6 +19,7 @@ if __name__ == "__main__":
"test": set(),
"extended-test": set(),
}
docs_edited = False
if len(files) == 300:
# max diff length is 300 files - there are likely files missing
@@ -47,6 +48,17 @@ if __name__ == "__main__":
found = True
if found:
dirs_to_run["extended-test"].add(dir_)
elif file.startswith("libs/standard-tests"):
# TODO: update to include all packages that rely on standard-tests (all partner packages)
# note: won't run on external repo partners
dirs_to_run["lint"].add("libs/standard-tests")
dirs_to_run["test"].add("libs/partners/mistralai")
dirs_to_run["test"].add("libs/partners/openai")
dirs_to_run["test"].add("libs/partners/anthropic")
dirs_to_run["test"].add("libs/partners/ai21")
dirs_to_run["test"].add("libs/partners/fireworks")
dirs_to_run["test"].add("libs/partners/groq")
elif file.startswith("libs/cli"):
# todo: add cli makefile
pass
@@ -65,6 +77,8 @@ if __name__ == "__main__":
"an update for this new library!"
)
elif any(file.startswith(p) for p in ["docs/", "templates/", "cookbook/"]):
if file.startswith("docs/"):
docs_edited = True
dirs_to_run["lint"].add(".")
outputs = {
@@ -73,6 +87,7 @@ if __name__ == "__main__":
),
"dirs-to-test": list(dirs_to_run["test"] | dirs_to_run["extended-test"]),
"dirs-to-extended-test": list(dirs_to_run["extended-test"]),
"docs-edited": "true" if docs_edited else "",
}
for key, value in outputs.items():
json_output = json.dumps(value)
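For context, a minimal sketch (not part of the diff) of what the loop above presumably emits: one key=value line per output, in the format GitHub Actions reads from $GITHUB_OUTPUT. The sink itself isn't shown in this hunk, and the values below are hypothetical.

import json

# Assumed continuation of the loop above (hypothetical values):
outputs = {
    "dirs-to-test": ["libs/partners/openai", "libs/partners/mistralai"],
    "docs-edited": "true",  # empty string when no docs/ files changed
}
for key, value in outputs.items():
    print(f"{key}={json.dumps(value)}")
# prints:
# dirs-to-test=["libs/partners/openai", "libs/partners/mistralai"]
# docs-edited="true"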

@@ -13,13 +13,16 @@ MIN_VERSION_LIBS = [
def get_min_version(version: str) -> str:
# base regex for x.x.x with cases for rc/post/etc
# valid strings: https://peps.python.org/pep-0440/#public-version-identifiers
vstring = r"\d+(?:\.\d+){0,2}(?:(?:a|b|rc|\.post|\.dev)\d+)?"
# case ^x.x.x
_match = re.match(r"^\^(\d+(?:\.\d+){0,2})$", version)
_match = re.match(f"^\\^({vstring})$", version)
if _match:
return _match.group(1)
# case >=x.x.x,<y.y.y
_match = re.match(r"^>=(\d+(?:\.\d+){0,2}),<(\d+(?:\.\d+){0,2})$", version)
_match = re.match(f"^>=({vstring}),<({vstring})$", version)
if _match:
_min = _match.group(1)
_max = _match.group(2)
@@ -27,7 +30,7 @@ def get_min_version(version: str) -> str:
return _min
# case x.x.x
_match = re.match(r"^(\d+(?:\.\d+){0,2})$", version)
_match = re.match(f"^({vstring})$", version)
if _match:
return _match.group(1)
@@ -52,6 +55,9 @@ def get_min_version_from_toml(toml_path: str):
# Get the version string
version_string = dependencies[lib]
if isinstance(version_string, dict):
version_string = version_string["version"]
# Use parse_version to get the minimum supported version from version_string
min_version = get_min_version(version_string)
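A standalone spot-check of the widened pattern (a sketch reusing vstring from the hunk above): the rc/.post/.dev suffixes are exactly what the old digits-and-dots-only pattern failed to match. The second hunk separately unwraps dict-style dependency entries ({ version = "^0.1", ... }) before this parsing runs.

import re

# Same pattern and the same three cases handled by get_min_version above
vstring = r"\d+(?:\.\d+){0,2}(?:(?:a|b|rc|\.post|\.dev)\d+)?"

for spec in ["^0.1.0rc1", ">=0.5.14,<0.6.0", "1.2.3.post1"]:
    m = (
        re.match(f"^\\^({vstring})$", spec)  # case ^x.x.x
        or re.match(f"^>=({vstring}),<({vstring})$", spec)  # case >=x.x.x,<y.y.y
        or re.match(f"^({vstring})$", spec)  # case x.x.x
    )
    print(spec, "->", m.group(1))
# ^0.1.0rc1 -> 0.1.0rc1
# >=0.5.14,<0.6.0 -> 0.5.14
# 1.2.3.post1 -> 1.2.3.post1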

@@ -58,6 +58,7 @@ jobs:
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
NVIDIA_API_KEY: ${{ secrets.NVIDIA_API_KEY }}
GOOGLE_SEARCH_API_KEY: ${{ secrets.GOOGLE_SEARCH_API_KEY }}
GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
@@ -77,6 +78,7 @@ jobs:
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
run: |
make integration_tests

@@ -215,6 +215,7 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for airbyte
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
run: make integration_tests
working-directory: ${{ inputs.working-directory }}

@@ -36,6 +36,7 @@ jobs:
dirs-to-lint: ${{ steps.set-matrix.outputs.dirs-to-lint }}
dirs-to-test: ${{ steps.set-matrix.outputs.dirs-to-test }}
dirs-to-extended-test: ${{ steps.set-matrix.outputs.dirs-to-extended-test }}
docs-edited: ${{ steps.set-matrix.outputs.docs-edited }}
lint:
name: cd ${{ matrix.working-directory }}
needs: [ build ]
@@ -60,9 +61,9 @@ jobs:
working-directory: ${{ matrix.working-directory }}
secrets: inherit
test_doc_imports:
test-doc-imports:
needs: [ build ]
if: ${{ needs.build.outputs.dirs-to-test != '[]' }}
if: ${{ needs.build.outputs.dirs-to-test != '[]' || needs.build.outputs.docs-edited }}
uses: ./.github/workflows/_test_doc_imports.yml
secrets: inherit
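A note on the empty-string sentinel: docs-edited is emitted as "true" or "" rather than "true"/"false", presumably because workflow outputs are strings and any non-empty value, including "false", would satisfy the bare needs.build.outputs.docs-edited check above; only the empty string is falsy there.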
@@ -140,7 +141,7 @@ jobs:
echo "$STATUS" | grep 'nothing to commit, working tree clean'
ci_success:
name: "CI Success"
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests]
needs: [build, lint, test, compile-integration-tests, dependencies, extended-tests, test-doc-imports]
if: |
always()
runs-on: ubuntu-latest

@@ -10,19 +10,21 @@ env:
jobs:
build:
defaults:
run:
working-directory: libs/langchain
runs-on: ubuntu-latest
environment: Scheduled testing
strategy:
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
name: Python ${{ matrix.python-version }}
working-directory:
- "libs/partners/openai"
- "libs/partners/anthropic"
# - "libs/partners/ai21" # standard-tests broken
- "libs/partners/fireworks"
# - "libs/partners/groq" # rate-limited
- "libs/partners/mistralai"
# - "libs/partners/together" # rate-limited
name: Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}
steps:
- uses: actions/checkout@v4
@@ -31,7 +33,7 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
poetry-version: ${{ env.POETRY_VERSION }}
working-directory: libs/langchain
working-directory: ${{ matrix.working-directory }}
cache-key: scheduled
- name: 'Authenticate to Google Cloud'
@@ -40,26 +42,15 @@ jobs:
with:
credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}'
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.AWS_REGION }}
- name: Install dependencies
working-directory: libs/langchain
working-directory: ${{ matrix.working-directory }}
shell: bash
run: |
echo "Running scheduled tests, installing dependencies with poetry..."
poetry install --with=test_integration,test
- name: Install deps outside pyproject
if: ${{ startsWith(inputs.working-directory, 'libs/community/') }}
shell: bash
run: poetry run pip install "boto3<2" "google-cloud-aiplatform<2"
- name: Run tests
- name: Run integration tests
working-directory: ${{ matrix.working-directory }}
shell: bash
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
@@ -70,11 +61,16 @@ jobs:
AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
run: |
make scheduled_tests
make integration_test
- name: Ensure the tests did not create any additional files
working-directory: ${{ matrix.working-directory }}
shell: bash
run: |
set -eu
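Net effect of the matrix change above: instead of four jobs (one per Python version) against libs/langchain, the schedule now fans out python-version x working-directory. A quick Python stand-in for the YAML matrix expansion, with the partner list copied from the hunk (commented-out entries excluded); the 16-job total is derived, not stated in the diff:

from itertools import product

python_versions = ["3.8", "3.9", "3.10", "3.11"]
working_dirs = [  # ai21, groq, together are commented out in the workflow
    "libs/partners/openai",
    "libs/partners/anthropic",
    "libs/partners/fireworks",
    "libs/partners/mistralai",
]
# Each pair becomes one scheduled job, named like the workflow's
# "Python ${{ matrix.python-version }} - ${{ matrix.working-directory }}"
jobs = [f"Python {v} - {d}" for v, d in product(python_versions, working_dirs)]
print(len(jobs), "jobs")  # 16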

@@ -47,7 +47,7 @@ For these applications, LangChain simplifies the entire application lifecycle:
- **`langchain-community`**: Third party integrations.
- Some integrations have been further split into **partner packages** that only rely on **`langchain-core`**. Examples include **`langchain_openai`** and **`langchain_anthropic`**.
- **`langchain`**: Chains, agents, and retrieval strategies that make up an application's cognitive architecture.
- **`[LangGraph](https://python.langchain.com/docs/langgraph)`**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
- **[LangGraph](https://python.langchain.com/docs/langgraph)**: A library for building robust and stateful multi-actor applications with LLMs by modeling steps as edges and nodes in a graph.
### Productionization:
- **[LangSmith](https://python.langchain.com/docs/langsmith)**: A developer platform that lets you debug, test, evaluate, and monitor chains built on any LLM framework and seamlessly integrates with LangChain.

@@ -535,9 +535,9 @@
" print(f\"--Generated {len(all_clusters)} clusters--\")\n",
"\n",
" # Summarization\n",
" template = \"\"\"Here is a sub-set of LangChain Expression Langauge doc. \n",
" template = \"\"\"Here is a sub-set of LangChain Expression Language doc. \n",
" \n",
" LangChain Expression Langauge provides a way to compose chain in LangChain.\n",
" LangChain Expression Language provides a way to compose chain in LangChain.\n",
" \n",
" Give a detailed summary of the documentation provided.\n",
" \n",

@@ -59,7 +59,7 @@
},
"outputs": [],
"source": [
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=1.0)"
"llm = ChatOpenAI(model=\"gpt-4\", temperature=1.0)"
]
},
{

@@ -933,7 +933,7 @@
"**Answer**: The LangChain class includes various types of retrievers such as:\n",
"\n",
"- ArxivRetriever\n",
"- AzureCognitiveSearchRetriever\n",
"- AzureAISearchRetriever\n",
"- BM25Retriever\n",
"- ChaindeskRetriever\n",
"- ChatGPTPluginRetriever\n",
@@ -993,7 +993,7 @@
{
"data": {
"text/plain": [
"{'question': 'LangChain possesses a variety of retrievers including:\\n\\n1. ArxivRetriever\\n2. AzureCognitiveSearchRetriever\\n3. BM25Retriever\\n4. ChaindeskRetriever\\n5. ChatGPTPluginRetriever\\n6. ContextualCompressionRetriever\\n7. DocArrayRetriever\\n8. ElasticSearchBM25Retriever\\n9. EnsembleRetriever\\n10. GoogleVertexAISearchRetriever\\n11. AmazonKendraRetriever\\n12. KNNRetriever\\n13. LlamaIndexGraphRetriever\\n14. LlamaIndexRetriever\\n15. MergerRetriever\\n16. MetalRetriever\\n17. MilvusRetriever\\n18. MultiQueryRetriever\\n19. ParentDocumentRetriever\\n20. PineconeHybridSearchRetriever\\n21. PubMedRetriever\\n22. RePhraseQueryRetriever\\n23. RemoteLangChainRetriever\\n24. SelfQueryRetriever\\n25. SVMRetriever\\n26. TFIDFRetriever\\n27. TimeWeightedVectorStoreRetriever\\n28. VespaRetriever\\n29. WeaviateHybridSearchRetriever\\n30. WebResearchRetriever\\n31. WikipediaRetriever\\n32. ZepRetriever\\n33. ZillizRetriever\\n\\nIt also includes self query translators like:\\n\\n1. ChromaTranslator\\n2. DeepLakeTranslator\\n3. MyScaleTranslator\\n4. PineconeTranslator\\n5. QdrantTranslator\\n6. WeaviateTranslator\\n\\nAnd remote retrievers like:\\n\\n1. RemoteLangChainRetriever'}"
"{'question': 'LangChain possesses a variety of retrievers including:\\n\\n1. ArxivRetriever\\n2. AzureAISearchRetriever\\n3. BM25Retriever\\n4. ChaindeskRetriever\\n5. ChatGPTPluginRetriever\\n6. ContextualCompressionRetriever\\n7. DocArrayRetriever\\n8. ElasticSearchBM25Retriever\\n9. EnsembleRetriever\\n10. GoogleVertexAISearchRetriever\\n11. AmazonKendraRetriever\\n12. KNNRetriever\\n13. LlamaIndexGraphRetriever\\n14. LlamaIndexRetriever\\n15. MergerRetriever\\n16. MetalRetriever\\n17. MilvusRetriever\\n18. MultiQueryRetriever\\n19. ParentDocumentRetriever\\n20. PineconeHybridSearchRetriever\\n21. PubMedRetriever\\n22. RePhraseQueryRetriever\\n23. RemoteLangChainRetriever\\n24. SelfQueryRetriever\\n25. SVMRetriever\\n26. TFIDFRetriever\\n27. TimeWeightedVectorStoreRetriever\\n28. VespaRetriever\\n29. WeaviateHybridSearchRetriever\\n30. WebResearchRetriever\\n31. WikipediaRetriever\\n32. ZepRetriever\\n33. ZillizRetriever\\n\\nIt also includes self query translators like:\\n\\n1. ChromaTranslator\\n2. DeepLakeTranslator\\n3. MyScaleTranslator\\n4. PineconeTranslator\\n5. QdrantTranslator\\n6. WeaviateTranslator\\n\\nAnd remote retrievers like:\\n\\n1. RemoteLangChainRetriever'}"
]
},
"execution_count": 31,
@@ -1117,7 +1117,7 @@
"The LangChain class includes various types of retrievers such as:\n",
"\n",
"- ArxivRetriever\n",
"- AzureCognitiveSearchRetriever\n",
"- AzureAISearchRetriever\n",
"- BM25Retriever\n",
"- ChaindeskRetriever\n",
"- ChatGPTPluginRetriever\n",

@@ -84,7 +84,7 @@
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
"chain = ElasticsearchDatabaseChain.from_llm(llm=llm, database=db, verbose=True)"
]
},

@@ -229,7 +229,7 @@
" prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
" # LLM\n",
" llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
" llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0, streaming=True)\n",
"\n",
" # Post-processing\n",
" def format_docs(docs):\n",

@@ -236,7 +236,7 @@
" prompt = hub.pull(\"rlm/rag-prompt\")\n",
"\n",
" # LLM\n",
" llm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n",
" llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"\n",
" # Post-processing\n",
" def format_docs(docs):\n",

@@ -0,0 +1,818 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "70b333e6",
"metadata": {},
"source": [
"[![View Article](https://img.shields.io/badge/View%20Article-blue)](https://www.mongodb.com/developer/products/atlas/advanced-rag-langchain-mongodb/)\n"
]
},
{
"cell_type": "markdown",
"id": "d84a72ea",
"metadata": {},
"source": [
"# Adding Semantic Caching and Memory to your RAG Application using MongoDB and LangChain\n",
"\n",
"In this notebook, we will see how to use the new MongoDBCache and MongoDBChatMessageHistory in your RAG application.\n"
]
},
{
"cell_type": "markdown",
"id": "65527202",
"metadata": {},
"source": [
"## Step 1: Install required libraries\n",
"\n",
"- **datasets**: Python library to get access to datasets available on Hugging Face Hub\n",
"\n",
"- **langchain**: Python toolkit for LangChain\n",
"\n",
"- **langchain-mongodb**: Python package to use MongoDB as a vector store, semantic cache, chat history store etc. in LangChain\n",
"\n",
"- **langchain-openai**: Python package to use OpenAI models with LangChain\n",
"\n",
"- **pymongo**: Python toolkit for MongoDB\n",
"\n",
"- **pandas**: Python library for data analysis, exploration, and manipulation"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cbc22fa4",
"metadata": {},
"outputs": [],
"source": [
"! pip install -qU datasets langchain langchain-mongodb langchain-openai pymongo pandas"
]
},
{
"cell_type": "markdown",
"id": "39c41e87",
"metadata": {},
"source": [
"## Step 2: Setup pre-requisites\n",
"\n",
"* Set the MongoDB connection string. Follow the steps [here](https://www.mongodb.com/docs/manual/reference/connection-string/) to get the connection string from the Atlas UI.\n",
"\n",
"* Set the OpenAI API key. Steps to obtain an API key as [here](https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "b56412ae",
"metadata": {},
"outputs": [],
"source": [
"import getpass"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "16a20d7a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Enter your MongoDB connection string:········\n"
]
}
],
"source": [
"MONGODB_URI = getpass.getpass(\"Enter your MongoDB connection string:\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "978682d4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Enter your OpenAI API key:········\n"
]
}
],
"source": [
"OPENAI_API_KEY = getpass.getpass(\"Enter your OpenAI API key:\")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "606081c5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"········\n"
]
}
],
"source": [
"# Optional-- If you want to enable Langsmith -- good for debugging\n",
"import os\n",
"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "f6b8302c",
"metadata": {},
"source": [
"## Step 3: Download the dataset\n",
"\n",
"We will be using MongoDB's [embedded_movies](https://huggingface.co/datasets/MongoDB/embedded_movies) dataset"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "1a3433a6",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from datasets import load_dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aee5311b",
"metadata": {},
"outputs": [],
"source": [
"# Ensure you have an HF_TOKEN in your development enviornment:\n",
"# access tokens can be created or copied from the Hugging Face platform (https://huggingface.co/docs/hub/en/security-tokens)\n",
"\n",
"# Load MongoDB's embedded_movies dataset from Hugging Face\n",
"# https://huggingface.co/datasets/MongoDB/airbnb_embeddings\n",
"\n",
"data = load_dataset(\"MongoDB/embedded_movies\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1d630a26",
"metadata": {},
"outputs": [],
"source": [
"df = pd.DataFrame(data[\"train\"])"
]
},
{
"cell_type": "markdown",
"id": "a1f94f43",
"metadata": {},
"source": [
"## Step 4: Data analysis\n",
"\n",
"Make sure length of the dataset is what we expect, drop Nones etc."
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "b276df71",
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>fullplot</th>\n",
" <th>type</th>\n",
" <th>plot_embedding</th>\n",
" <th>num_mflix_comments</th>\n",
" <th>runtime</th>\n",
" <th>writers</th>\n",
" <th>imdb</th>\n",
" <th>countries</th>\n",
" <th>rated</th>\n",
" <th>plot</th>\n",
" <th>title</th>\n",
" <th>languages</th>\n",
" <th>metacritic</th>\n",
" <th>directors</th>\n",
" <th>awards</th>\n",
" <th>genres</th>\n",
" <th>poster</th>\n",
" <th>cast</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Young Pauline is left a lot of money when her ...</td>\n",
" <td>movie</td>\n",
" <td>[0.00072939653, -0.026834568, 0.013515796, -0....</td>\n",
" <td>0</td>\n",
" <td>199.0</td>\n",
" <td>[Charles W. Goddard (screenplay), Basil Dickey...</td>\n",
" <td>{'id': 4465, 'rating': 7.6, 'votes': 744}</td>\n",
" <td>[USA]</td>\n",
" <td>None</td>\n",
" <td>Young Pauline is left a lot of money when her ...</td>\n",
" <td>The Perils of Pauline</td>\n",
" <td>[English]</td>\n",
" <td>NaN</td>\n",
" <td>[Louis J. Gasnier, Donald MacKenzie]</td>\n",
" <td>{'nominations': 0, 'text': '1 win.', 'wins': 1}</td>\n",
" <td>[Action]</td>\n",
" <td>https://m.media-amazon.com/images/M/MV5BMzgxOD...</td>\n",
" <td>[Pearl White, Crane Wilbur, Paul Panzer, Edwar...</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" fullplot type \\\n",
"0 Young Pauline is left a lot of money when her ... movie \n",
"\n",
" plot_embedding num_mflix_comments \\\n",
"0 [0.00072939653, -0.026834568, 0.013515796, -0.... 0 \n",
"\n",
" runtime writers \\\n",
"0 199.0 [Charles W. Goddard (screenplay), Basil Dickey... \n",
"\n",
" imdb countries rated \\\n",
"0 {'id': 4465, 'rating': 7.6, 'votes': 744} [USA] None \n",
"\n",
" plot title \\\n",
"0 Young Pauline is left a lot of money when her ... The Perils of Pauline \n",
"\n",
" languages metacritic directors \\\n",
"0 [English] NaN [Louis J. Gasnier, Donald MacKenzie] \n",
"\n",
" awards genres \\\n",
"0 {'nominations': 0, 'text': '1 win.', 'wins': 1} [Action] \n",
"\n",
" poster \\\n",
"0 https://m.media-amazon.com/images/M/MV5BMzgxOD... \n",
"\n",
" cast \n",
"0 [Pearl White, Crane Wilbur, Paul Panzer, Edwar... "
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Previewing the contents of the data\n",
"df.head(1)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "22ab375d",
"metadata": {},
"outputs": [],
"source": [
"# Only keep records where the fullplot field is not null\n",
"df = df[df[\"fullplot\"].notna()]"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "fceed99a",
"metadata": {},
"outputs": [],
"source": [
"# Renaming the embedding field to \"embedding\" -- required by LangChain\n",
"df.rename(columns={\"plot_embedding\": \"embedding\"}, inplace=True)"
]
},
{
"cell_type": "markdown",
"id": "aedec13a",
"metadata": {},
"source": [
"## Step 5: Create a simple RAG chain using MongoDB as the vector store"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "11d292f3",
"metadata": {},
"outputs": [],
"source": [
"from langchain_mongodb import MongoDBAtlasVectorSearch\n",
"from pymongo import MongoClient\n",
"\n",
"# Initialize MongoDB python client\n",
"client = MongoClient(MONGODB_URI, appname=\"devrel.content.python\")\n",
"\n",
"DB_NAME = \"langchain_chatbot\"\n",
"COLLECTION_NAME = \"data\"\n",
"ATLAS_VECTOR_SEARCH_INDEX_NAME = \"vector_index\"\n",
"collection = client[DB_NAME][COLLECTION_NAME]"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "d8292d53",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"DeleteResult({'n': 1000, 'electionId': ObjectId('7fffffff00000000000000f6'), 'opTime': {'ts': Timestamp(1710523288, 1033), 't': 246}, 'ok': 1.0, '$clusterTime': {'clusterTime': Timestamp(1710523288, 1042), 'signature': {'hash': b\"i\\xa8\\xe9'\\x1ed\\xf2u\\xf3L\\xff\\xb1\\xf5\\xbfA\\x90\\xabJ\\x12\\x83\", 'keyId': 7299545392000008318}}, 'operationTime': Timestamp(1710523288, 1033)}, acknowledged=True)"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Delete any existing records in the collection\n",
"collection.delete_many({})"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "36c68914",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Data ingestion into MongoDB completed\n"
]
}
],
"source": [
"# Data Ingestion\n",
"records = df.to_dict(\"records\")\n",
"collection.insert_many(records)\n",
"\n",
"print(\"Data ingestion into MongoDB completed\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "cbfca0b8",
"metadata": {},
"outputs": [],
"source": [
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"# Using the text-embedding-ada-002 since that's what was used to create embeddings in the movies dataset\n",
"embeddings = OpenAIEmbeddings(\n",
" openai_api_key=OPENAI_API_KEY, model=\"text-embedding-ada-002\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "798e176c",
"metadata": {},
"outputs": [],
"source": [
"# Vector Store Creation\n",
"vector_store = MongoDBAtlasVectorSearch.from_connection_string(\n",
" connection_string=MONGODB_URI,\n",
" namespace=DB_NAME + \".\" + COLLECTION_NAME,\n",
" embedding=embeddings,\n",
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
" text_key=\"fullplot\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 49,
"id": "c71cd087",
"metadata": {},
"outputs": [],
"source": [
"# Using the MongoDB vector store as a retriever in a RAG chain\n",
"retriever = vector_store.as_retriever(search_type=\"similarity\", search_kwargs={\"k\": 5})"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "b6588cd3",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"# Generate context using the retriever, and pass the user question through\n",
"retrieve = {\n",
" \"context\": retriever | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs])),\n",
" \"question\": RunnablePassthrough(),\n",
"}\n",
"template = \"\"\"Answer the question based only on the following context: \\\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\"\"\"\n",
"# Defining the chat prompt\n",
"prompt = ChatPromptTemplate.from_template(template)\n",
"# Defining the model to be used for chat completion\n",
"model = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)\n",
"# Parse output as a string\n",
"parse_output = StrOutputParser()\n",
"\n",
"# Naive RAG chain\n",
"naive_rag_chain = retrieve | prompt | model | parse_output"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "aaae21f5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Once a Thief'"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
]
},
{
"cell_type": "markdown",
"id": "75f929ef",
"metadata": {},
"source": [
"## Step 6: Create a RAG chain with chat history"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "94e7bd4a",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import MessagesPlaceholder\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "5bb30860",
"metadata": {},
"outputs": [],
"source": [
"def get_session_history(session_id: str) -> MongoDBChatMessageHistory:\n",
" return MongoDBChatMessageHistory(\n",
" MONGODB_URI, session_id, database_name=DB_NAME, collection_name=\"history\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 50,
"id": "f51d0f35",
"metadata": {},
"outputs": [],
"source": [
"# Given a follow-up question and history, create a standalone question\n",
"standalone_system_prompt = \"\"\"\n",
"Given a chat history and a follow-up question, rephrase the follow-up question to be a standalone question. \\\n",
"Do NOT answer the question, just reformulate it if needed, otherwise return it as is. \\\n",
"Only return the final standalone question. \\\n",
"\"\"\"\n",
"standalone_question_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", standalone_system_prompt),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"\n",
"question_chain = standalone_question_prompt | model | parse_output"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "f3ef3354",
"metadata": {},
"outputs": [],
"source": [
"# Generate context by passing output of the question_chain i.e. the standalone question to the retriever\n",
"retriever_chain = RunnablePassthrough.assign(\n",
" context=question_chain\n",
" | retriever\n",
" | (lambda docs: \"\\n\\n\".join([d.page_content for d in docs]))\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "5afb7345",
"metadata": {},
"outputs": [],
"source": [
"# Create a prompt that includes the context, history and the follow-up question\n",
"rag_system_prompt = \"\"\"Answer the question based only on the following context: \\\n",
"{context}\n",
"\"\"\"\n",
"rag_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", rag_system_prompt),\n",
" MessagesPlaceholder(variable_name=\"history\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 56,
"id": "f95f47d0",
"metadata": {},
"outputs": [],
"source": [
"# RAG chain\n",
"rag_chain = retriever_chain | rag_prompt | model | parse_output"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "9618d395",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The best movie to watch when feeling down could be \"Last Action Hero.\" It\\'s a fun and action-packed film that blends reality and fantasy, offering an escape from the real world and providing an entertaining distraction.'"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# RAG chain with history\n",
"with_message_history = RunnableWithMessageHistory(\n",
" rag_chain,\n",
" get_session_history,\n",
" input_messages_key=\"question\",\n",
" history_messages_key=\"history\",\n",
")\n",
"with_message_history.invoke(\n",
" {\"question\": \"What is the best movie to watch when sad?\"},\n",
" {\"configurable\": {\"session_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 58,
"id": "6e3080d1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'I apologize for the confusion. Another movie that might lift your spirits when you\\'re feeling sad is \"Smilla\\'s Sense of Snow.\" It\\'s a mystery thriller that could engage your mind and distract you from your sadness with its intriguing plot and suspenseful storyline.'"
]
},
"execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with_message_history.invoke(\n",
" {\n",
" \"question\": \"Hmmm..I don't want to watch that one. Can you suggest something else?\"\n",
" },\n",
" {\"configurable\": {\"session_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 59,
"id": "daea2953",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'For a lighter movie option, you might enjoy \"Cousins.\" It\\'s a comedy film set in Barcelona with action and humor, offering a fun and entertaining escape from reality. The storyline is engaging and filled with comedic moments that could help lift your spirits.'"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"with_message_history.invoke(\n",
" {\"question\": \"How about something more light?\"},\n",
" {\"configurable\": {\"session_id\": \"1\"}},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0de23a88",
"metadata": {},
"source": [
"## Step 7: Get faster responses using Semantic Cache\n",
"\n",
"**NOTE:** Semantic cache only caches the input to the LLM. When using it in retrieval chains, remember that documents retrieved can change between runs resulting in cache misses for semantically similar queries."
]
},
{
"cell_type": "code",
"execution_count": 61,
"id": "5d6b6741",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.globals import set_llm_cache\n",
"from langchain_mongodb.cache import MongoDBAtlasSemanticCache\n",
"\n",
"set_llm_cache(\n",
" MongoDBAtlasSemanticCache(\n",
" connection_string=MONGODB_URI,\n",
" embedding=embeddings,\n",
" collection_name=\"semantic_cache\",\n",
" database_name=DB_NAME,\n",
" index_name=ATLAS_VECTOR_SEARCH_INDEX_NAME,\n",
" wait_until_ready=True, # Optional, waits until the cache is ready to be used\n",
" )\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 62,
"id": "9825bc7b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 87.8 ms, sys: 670 µs, total: 88.5 ms\n",
"Wall time: 1.24 s\n"
]
},
{
"data": {
"text/plain": [
"'Once a Thief'"
]
},
"execution_count": 62,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
]
},
{
"cell_type": "code",
"execution_count": 63,
"id": "a5e518cf",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 43.5 ms, sys: 4.16 ms, total: 47.7 ms\n",
"Wall time: 255 ms\n"
]
},
{
"data": {
"text/plain": [
"'Once a Thief'"
]
},
"execution_count": 63,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"naive_rag_chain.invoke(\"What is the best movie to watch when sad?\")"
]
},
{
"cell_type": "code",
"execution_count": 64,
"id": "3d3d3ad3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 115 ms, sys: 171 µs, total: 115 ms\n",
"Wall time: 1.38 s\n"
]
},
{
"data": {
"text/plain": [
"'I would recommend watching \"Last Action Hero\" when sad, as it is a fun and action-packed film that can help lift your spirits.'"
]
},
"execution_count": 64,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"%%time\n",
"naive_rag_chain.invoke(\"Which movie do I watch when sad?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "conda_pytorch_p310",
"language": "python",
"name": "conda_pytorch_p310"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@@ -84,7 +84,7 @@
"from langchain.retrievers import KayAiRetriever\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n",
"retriever = KayAiRetriever.create(\n",
" dataset_id=\"company\", data_types=[\"PressRelease\"], num_contexts=6\n",
")\n",

@@ -274,7 +274,7 @@
"db = SQLDatabase.from_uri(\n",
" CONNECTION_STRING\n",
") # We reconnect to db so the new columns are loaded as well.\n",
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)\n",
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n",
"\n",
"sql_query_chain = (\n",
" RunnablePassthrough.assign(schema=get_schema)\n",

@@ -22,7 +22,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n",
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, Tool, ZeroShotAgent, create_react_agent\n",
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n",
"from langchain.prompts import PromptTemplate\n",
@@ -84,19 +85,7 @@
"metadata": {},
"outputs": [],
"source": [
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")"
"prompt = hub.pull(\"hwchase17/react\")"
]
},
{
@@ -114,16 +103,14 @@
"metadata": {},
"outputs": [],
"source": [
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
"model = OpenAI()\n",
"agent = create_react_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 36,
"id": "ca4bc1fb",
"metadata": {},
"outputs": [
@@ -133,15 +120,15 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I should research ChatGPT to answer this question.\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I should research ChatGPT to answer this question.\n",
"Action: Search\n",
"Action Input: \"ChatGPT\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001b[0m\n",
"Action Input: \"ChatGPT\"\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001B[1m> Finished chain.\u001B[0m\n"
]
},
{
@@ -153,10 +140,40 @@
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
"\u001B[0;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
"Cell \u001B[0;32mIn[36], line 1\u001B[0m\n\u001B[0;32m----> 1\u001B[0m \u001B[43magent_executor\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43minvoke\u001B[49m\u001B[43m(\u001B[49m\u001B[43m{\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43minput\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m:\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mWhat is ChatGPT?\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m}\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/chains/base.py:163\u001B[0m, in \u001B[0;36mChain.invoke\u001B[0;34m(self, input, config, **kwargs)\u001B[0m\n\u001B[1;32m 161\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mBaseException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 162\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_chain_error(e)\n\u001B[0;32m--> 163\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m e\n\u001B[1;32m 164\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_chain_end(outputs)\n\u001B[1;32m 166\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m include_run_info:\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/chains/base.py:153\u001B[0m, in \u001B[0;36mChain.invoke\u001B[0;34m(self, input, config, **kwargs)\u001B[0m\n\u001B[1;32m 150\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 151\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_validate_inputs(inputs)\n\u001B[1;32m 152\u001B[0m outputs \u001B[38;5;241m=\u001B[39m (\n\u001B[0;32m--> 153\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call\u001B[49m\u001B[43m(\u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 154\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_arg_supported\n\u001B[1;32m 155\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_call(inputs)\n\u001B[1;32m 156\u001B[0m )\n\u001B[1;32m 158\u001B[0m final_outputs: Dict[\u001B[38;5;28mstr\u001B[39m, Any] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mprep_outputs(\n\u001B[1;32m 159\u001B[0m inputs, outputs, return_only_outputs\n\u001B[1;32m 160\u001B[0m )\n\u001B[1;32m 161\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mBaseException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1432\u001B[0m, in \u001B[0;36mAgentExecutor._call\u001B[0;34m(self, inputs, run_manager)\u001B[0m\n\u001B[1;32m 1430\u001B[0m \u001B[38;5;66;03m# We now enter the agent loop (until it returns something).\u001B[39;00m\n\u001B[1;32m 1431\u001B[0m \u001B[38;5;28;01mwhile\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_should_continue(iterations, time_elapsed):\n\u001B[0;32m-> 1432\u001B[0m next_step_output \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_take_next_step\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1433\u001B[0m \u001B[43m \u001B[49m\u001B[43mname_to_tool_map\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1434\u001B[0m \u001B[43m \u001B[49m\u001B[43mcolor_mapping\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1435\u001B[0m \u001B[43m \u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1436\u001B[0m \u001B[43m \u001B[49m\u001B[43mintermediate_steps\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1437\u001B[0m \u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1438\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1439\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(next_step_output, AgentFinish):\n\u001B[1;32m 1440\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_return(\n\u001B[1;32m 1441\u001B[0m next_step_output, intermediate_steps, run_manager\u001B[38;5;241m=\u001B[39mrun_manager\n\u001B[1;32m 1442\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1138\u001B[0m, in \u001B[0;36mAgentExecutor._take_next_step\u001B[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001B[0m\n\u001B[1;32m 1129\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_take_next_step\u001B[39m(\n\u001B[1;32m 1130\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 1131\u001B[0m name_to_tool_map: Dict[\u001B[38;5;28mstr\u001B[39m, BaseTool],\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 1135\u001B[0m run_manager: Optional[CallbackManagerForChainRun] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 1136\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Union[AgentFinish, List[Tuple[AgentAction, \u001B[38;5;28mstr\u001B[39m]]]:\n\u001B[1;32m 1137\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_consume_next_step(\n\u001B[0;32m-> 1138\u001B[0m [\n\u001B[1;32m 1139\u001B[0m a\n\u001B[1;32m 1140\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m a \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_iter_next_step(\n\u001B[1;32m 1141\u001B[0m name_to_tool_map,\n\u001B[1;32m 1142\u001B[0m color_mapping,\n\u001B[1;32m 1143\u001B[0m inputs,\n\u001B[1;32m 1144\u001B[0m intermediate_steps,\n\u001B[1;32m 1145\u001B[0m run_manager,\n\u001B[1;32m 1146\u001B[0m )\n\u001B[1;32m 1147\u001B[0m ]\n\u001B[1;32m 1148\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1138\u001B[0m, in \u001B[0;36m<listcomp>\u001B[0;34m(.0)\u001B[0m\n\u001B[1;32m 1129\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21m_take_next_step\u001B[39m(\n\u001B[1;32m 1130\u001B[0m \u001B[38;5;28mself\u001B[39m,\n\u001B[1;32m 1131\u001B[0m name_to_tool_map: Dict[\u001B[38;5;28mstr\u001B[39m, BaseTool],\n\u001B[0;32m (...)\u001B[0m\n\u001B[1;32m 1135\u001B[0m run_manager: Optional[CallbackManagerForChainRun] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 1136\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Union[AgentFinish, List[Tuple[AgentAction, \u001B[38;5;28mstr\u001B[39m]]]:\n\u001B[1;32m 1137\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_consume_next_step(\n\u001B[0;32m-> 1138\u001B[0m [\n\u001B[1;32m 1139\u001B[0m a\n\u001B[1;32m 1140\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m a \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_iter_next_step(\n\u001B[1;32m 1141\u001B[0m name_to_tool_map,\n\u001B[1;32m 1142\u001B[0m color_mapping,\n\u001B[1;32m 1143\u001B[0m inputs,\n\u001B[1;32m 1144\u001B[0m intermediate_steps,\n\u001B[1;32m 1145\u001B[0m run_manager,\n\u001B[1;32m 1146\u001B[0m )\n\u001B[1;32m 1147\u001B[0m ]\n\u001B[1;32m 1148\u001B[0m )\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1223\u001B[0m, in \u001B[0;36mAgentExecutor._iter_next_step\u001B[0;34m(self, name_to_tool_map, color_mapping, inputs, intermediate_steps, run_manager)\u001B[0m\n\u001B[1;32m 1221\u001B[0m \u001B[38;5;28;01myield\u001B[39;00m agent_action\n\u001B[1;32m 1222\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m agent_action \u001B[38;5;129;01min\u001B[39;00m actions:\n\u001B[0;32m-> 1223\u001B[0m \u001B[38;5;28;01myield\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_perform_agent_action\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1224\u001B[0m \u001B[43m \u001B[49m\u001B[43mname_to_tool_map\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcolor_mapping\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43magent_action\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\n\u001B[1;32m 1225\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/libs/langchain/langchain/agents/agent.py:1245\u001B[0m, in \u001B[0;36mAgentExecutor._perform_agent_action\u001B[0;34m(self, name_to_tool_map, color_mapping, agent_action, run_manager)\u001B[0m\n\u001B[1;32m 1243\u001B[0m tool_run_kwargs[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mllm_prefix\u001B[39m\u001B[38;5;124m\"\u001B[39m] \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 1244\u001B[0m \u001B[38;5;66;03m# We then call the tool on the tool input to get an observation\u001B[39;00m\n\u001B[0;32m-> 1245\u001B[0m observation \u001B[38;5;241m=\u001B[39m \u001B[43mtool\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrun\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1246\u001B[0m \u001B[43m \u001B[49m\u001B[43magent_action\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtool_input\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1247\u001B[0m \u001B[43m \u001B[49m\u001B[43mverbose\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mverbose\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1248\u001B[0m \u001B[43m \u001B[49m\u001B[43mcolor\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mcolor\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1249\u001B[0m \u001B[43m \u001B[49m\u001B[43mcallbacks\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mget_child\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mif\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01melse\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[1;32m 1250\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_run_kwargs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1251\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1252\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 1253\u001B[0m tool_run_kwargs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39magent\u001B[38;5;241m.\u001B[39mtool_run_logging_kwargs()\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:422\u001B[0m, in \u001B[0;36mBaseTool.run\u001B[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001B[0m\n\u001B[1;32m 420\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m (\u001B[38;5;167;01mException\u001B[39;00m, \u001B[38;5;167;01mKeyboardInterrupt\u001B[39;00m) \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 421\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_tool_error(e)\n\u001B[0;32m--> 422\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m e\n\u001B[1;32m 423\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[1;32m 424\u001B[0m run_manager\u001B[38;5;241m.\u001B[39mon_tool_end(observation, color\u001B[38;5;241m=\u001B[39mcolor, name\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mname, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:381\u001B[0m, in \u001B[0;36mBaseTool.run\u001B[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001B[0m\n\u001B[1;32m 378\u001B[0m parsed_input \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_parse_input(tool_input)\n\u001B[1;32m 379\u001B[0m tool_args, tool_kwargs \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_to_args_and_kwargs(parsed_input)\n\u001B[1;32m 380\u001B[0m observation \u001B[38;5;241m=\u001B[39m (\n\u001B[0;32m--> 381\u001B[0m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_run\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_args\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrun_manager\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mrun_manager\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mtool_kwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 382\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_arg_supported\n\u001B[1;32m 383\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_run(\u001B[38;5;241m*\u001B[39mtool_args, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mtool_kwargs)\n\u001B[1;32m 384\u001B[0m )\n\u001B[1;32m 385\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m ValidationError \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 386\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhandle_validation_error:\n",
"File \u001B[0;32m~/code/langchain/libs/core/langchain_core/tools.py:588\u001B[0m, in \u001B[0;36mTool._run\u001B[0;34m(self, run_manager, *args, **kwargs)\u001B[0m\n\u001B[1;32m 579\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc:\n\u001B[1;32m 580\u001B[0m new_argument_supported \u001B[38;5;241m=\u001B[39m signature(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc)\u001B[38;5;241m.\u001B[39mparameters\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcallbacks\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m 581\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m (\n\u001B[1;32m 582\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfunc(\n\u001B[1;32m 583\u001B[0m \u001B[38;5;241m*\u001B[39margs,\n\u001B[1;32m 584\u001B[0m callbacks\u001B[38;5;241m=\u001B[39mrun_manager\u001B[38;5;241m.\u001B[39mget_child() \u001B[38;5;28;01mif\u001B[39;00m run_manager \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m,\n\u001B[1;32m 585\u001B[0m \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs,\n\u001B[1;32m 586\u001B[0m )\n\u001B[1;32m 587\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m new_argument_supported\n\u001B[0;32m--> 588\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 589\u001B[0m )\n\u001B[1;32m 590\u001B[0m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mNotImplementedError\u001B[39;00m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mTool does not support sync\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
"File \u001B[0;32m~/code/langchain/libs/community/langchain_community/utilities/google_search.py:94\u001B[0m, in \u001B[0;36mGoogleSearchAPIWrapper.run\u001B[0;34m(self, query)\u001B[0m\n\u001B[1;32m 92\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Run query through GoogleSearch and parse result.\"\"\"\u001B[39;00m\n\u001B[1;32m 93\u001B[0m snippets \u001B[38;5;241m=\u001B[39m []\n\u001B[0;32m---> 94\u001B[0m results \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_google_search_results\u001B[49m\u001B[43m(\u001B[49m\u001B[43mquery\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mnum\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mk\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 95\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mlen\u001B[39m(results) \u001B[38;5;241m==\u001B[39m \u001B[38;5;241m0\u001B[39m:\n\u001B[1;32m 96\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mNo good Google Search Result was found\u001B[39m\u001B[38;5;124m\"\u001B[39m\n",
"File \u001B[0;32m~/code/langchain/libs/community/langchain_community/utilities/google_search.py:62\u001B[0m, in \u001B[0;36mGoogleSearchAPIWrapper._google_search_results\u001B[0;34m(self, search_term, **kwargs)\u001B[0m\n\u001B[1;32m 60\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msiterestrict:\n\u001B[1;32m 61\u001B[0m cse \u001B[38;5;241m=\u001B[39m cse\u001B[38;5;241m.\u001B[39msiterestrict()\n\u001B[0;32m---> 62\u001B[0m res \u001B[38;5;241m=\u001B[39m \u001B[43mcse\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mlist\u001B[49m\u001B[43m(\u001B[49m\u001B[43mq\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43msearch_term\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcx\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgoogle_cse_id\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mexecute\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 63\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m res\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mitems\u001B[39m\u001B[38;5;124m\"\u001B[39m, [])\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/_helpers.py:130\u001B[0m, in \u001B[0;36mpositional.<locals>.positional_decorator.<locals>.positional_wrapper\u001B[0;34m(*args, **kwargs)\u001B[0m\n\u001B[1;32m 128\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m positional_parameters_enforcement \u001B[38;5;241m==\u001B[39m POSITIONAL_WARNING:\n\u001B[1;32m 129\u001B[0m logger\u001B[38;5;241m.\u001B[39mwarning(message)\n\u001B[0;32m--> 130\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mwrapped\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/http.py:923\u001B[0m, in \u001B[0;36mHttpRequest.execute\u001B[0;34m(self, http, num_retries)\u001B[0m\n\u001B[1;32m 920\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mheaders[\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mcontent-length\u001B[39m\u001B[38;5;124m\"\u001B[39m] \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mstr\u001B[39m(\u001B[38;5;28mlen\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mbody))\n\u001B[1;32m 922\u001B[0m \u001B[38;5;66;03m# Handle retries for server-side errors.\u001B[39;00m\n\u001B[0;32m--> 923\u001B[0m resp, content \u001B[38;5;241m=\u001B[39m \u001B[43m_retry_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 924\u001B[0m \u001B[43m \u001B[49m\u001B[43mhttp\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 925\u001B[0m \u001B[43m \u001B[49m\u001B[43mnum_retries\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 926\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mrequest\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m,\u001B[49m\n\u001B[1;32m 927\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_sleep\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 928\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_rand\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 929\u001B[0m \u001B[43m \u001B[49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43muri\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 930\u001B[0m \u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mstr\u001B[39;49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmethod\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 931\u001B[0m \u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 932\u001B[0m \u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 933\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 935\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m callback \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mresponse_callbacks:\n\u001B[1;32m 936\u001B[0m callback(resp)\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/googleapiclient/http.py:191\u001B[0m, in \u001B[0;36m_retry_request\u001B[0;34m(http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs)\u001B[0m\n\u001B[1;32m 189\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 190\u001B[0m exception \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[0;32m--> 191\u001B[0m resp, content \u001B[38;5;241m=\u001B[39m \u001B[43mhttp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrequest\u001B[49m\u001B[43m(\u001B[49m\u001B[43muri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 192\u001B[0m \u001B[38;5;66;03m# Retry on SSL errors and socket timeout errors.\u001B[39;00m\n\u001B[1;32m 193\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m _ssl_SSLError \u001B[38;5;28;01mas\u001B[39;00m ssl_error:\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1724\u001B[0m, in \u001B[0;36mHttp.request\u001B[0;34m(self, uri, method, body, headers, redirections, connection_type)\u001B[0m\n\u001B[1;32m 1722\u001B[0m content \u001B[38;5;241m=\u001B[39m \u001B[38;5;124mb\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m 1723\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1724\u001B[0m (response, content) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_request\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m 1725\u001B[0m \u001B[43m \u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mauthority\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43muri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrequest_uri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mredirections\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcachekey\u001B[49m\u001B[43m,\u001B[49m\n\u001B[1;32m 1726\u001B[0m \u001B[43m \u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1727\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m 1728\u001B[0m is_timeout \u001B[38;5;241m=\u001B[39m \u001B[38;5;28misinstance\u001B[39m(e, socket\u001B[38;5;241m.\u001B[39mtimeout)\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1444\u001B[0m, in \u001B[0;36mHttp._request\u001B[0;34m(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey)\u001B[0m\n\u001B[1;32m 1441\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth:\n\u001B[1;32m 1442\u001B[0m auth\u001B[38;5;241m.\u001B[39mrequest(method, request_uri, headers, body)\n\u001B[0;32m-> 1444\u001B[0m (response, content) \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_conn_request\u001B[49m\u001B[43m(\u001B[49m\u001B[43mconn\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mrequest_uri\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmethod\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbody\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mheaders\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1446\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth:\n\u001B[1;32m 1447\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m auth\u001B[38;5;241m.\u001B[39mresponse(response, body):\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1366\u001B[0m, in \u001B[0;36mHttp._conn_request\u001B[0;34m(self, conn, request_uri, method, body, headers)\u001B[0m\n\u001B[1;32m 1364\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m 1365\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m conn\u001B[38;5;241m.\u001B[39msock \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m-> 1366\u001B[0m \u001B[43mconn\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1367\u001B[0m conn\u001B[38;5;241m.\u001B[39mrequest(method, request_uri, body, headers)\n\u001B[1;32m 1368\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m socket\u001B[38;5;241m.\u001B[39mtimeout:\n",
"File \u001B[0;32m~/code/langchain/.venv/lib/python3.10/site-packages/httplib2/__init__.py:1156\u001B[0m, in \u001B[0;36mHTTPSConnectionWithTimeout.connect\u001B[0;34m(self)\u001B[0m\n\u001B[1;32m 1154\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m has_timeout(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout):\n\u001B[1;32m 1155\u001B[0m sock\u001B[38;5;241m.\u001B[39msettimeout(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtimeout)\n\u001B[0;32m-> 1156\u001B[0m \u001B[43msock\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mconnect\u001B[49m\u001B[43m(\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mhost\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mport\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m 1158\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msock \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_context\u001B[38;5;241m.\u001B[39mwrap_socket(sock, server_hostname\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mhost)\n\u001B[1;32m 1160\u001B[0m \u001B[38;5;66;03m# Python 3.3 compatibility: emulate the check_hostname behavior\u001B[39;00m\n",
"\u001B[0;31mKeyboardInterrupt\u001B[0m: "
]
}
],
"source": [
"agent_chain.run(input=\"What is ChatGPT?\")"
"agent_executor.invoke({\"input\": \"What is ChatGPT?\"})"
]
},
{
@ -179,15 +196,15 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out who developed ChatGPT\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to find out who developed ChatGPT\n",
"Action: Search\n",
"Action Input: Who developed ChatGPT\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001b[0m\n",
"Action Input: Who developed ChatGPT\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001B[1m> Finished chain.\u001B[0m\n"
]
},
{
@ -202,7 +219,7 @@
}
],
"source": [
"agent_chain.run(input=\"Who developed it?\")"
"agent_executor.invoke({\"input\": \"Who developed it?\"})"
]
},
{
@ -217,14 +234,14 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"Action: Summary\n",
"Action Input: My daughter 5 years old\u001b[0m\n",
"Action Input: My daughter 5 years old\u001B[0m\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThis is a conversation between a human and a bot:\n",
"\u001B[32;1m\u001B[1;3mThis is a conversation between a human and a bot:\n",
"\n",
"Human: What is ChatGPT?\n",
"AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\n",
@ -232,16 +249,16 @@
"AI: ChatGPT was developed by OpenAI.\n",
"\n",
"Write a summary of the conversation for My daughter 5 years old:\n",
"\u001b[0m\n",
"\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001B[1m> Finished chain.\u001B[0m\n",
"\n",
"Observation: \u001b[33;1m\u001b[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot. It was created by OpenAI and can send and receive images while chatting.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.\u001b[0m\n",
"Observation: \u001B[33;1m\u001B[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot. It was created by OpenAI and can send and receive images while chatting.\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot created by OpenAI that can send and receive images while chatting.\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001B[1m> Finished chain.\u001B[0m\n"
]
},
{
@ -256,8 +273,8 @@
}
],
"source": [
"agent_chain.run(\n",
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
"agent_executor.invoke(\n",
" {\"input\": \"Thanks. Summarize the conversation, for my daughter 5 years old.\"}\n",
")"
]
},
@ -289,9 +306,17 @@
}
],
"source": [
"print(agent_chain.memory.buffer)"
"print(agent_executor.memory.buffer)"
]
},
{
"cell_type": "markdown",
"id": "cc3d0aa4",
@ -340,25 +365,9 @@
" ),\n",
"]\n",
"\n",
"prefix = \"\"\"Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:\"\"\"\n",
"suffix = \"\"\"Begin!\"\n",
"\n",
"{chat_history}\n",
"Question: {input}\n",
"{agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")"
"prompt = hub.pull(\"hwchase17/react\")\n",
"agent = create_react_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)"
]
},
{
@ -373,15 +382,15 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I should research ChatGPT to answer this question.\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I should research ChatGPT to answer this question.\n",
"Action: Search\n",
"Action Input: \"ChatGPT\"\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001b[0m\n",
"Action Input: \"ChatGPT\"\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mNov 30, 2022 ... We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... ChatGPT. We've trained a model called ChatGPT which interacts in a conversational way. The dialogue format makes it possible for ChatGPT to answer ... Feb 2, 2023 ... ChatGPT, the popular chatbot from OpenAI, is estimated to have reached 100 million monthly active users in January, just two months after ... 2 days ago ... ChatGPT recently launched a new version of its own plagiarism detection tool, with hopes that it will squelch some of the criticism around how ... An API for accessing new AI models developed by OpenAI. Feb 19, 2023 ... ChatGPT is an AI chatbot system that OpenAI released in November to show off and test what a very large, powerful AI system can accomplish. You ... ChatGPT is fine-tuned from GPT-3.5, a language model trained to produce text. ChatGPT was optimized for dialogue by using Reinforcement Learning with Human ... 3 days ago ... Visual ChatGPT connects ChatGPT and a series of Visual Foundation Models to enable sending and receiving images during chatting. Dec 1, 2022 ... ChatGPT is a natural language processing tool driven by AI technology that allows you to have human-like conversations and much more with a ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001B[1m> Finished chain.\u001B[0m\n"
]
},
{
@ -396,7 +405,7 @@
}
],
"source": [
"agent_chain.run(input=\"What is ChatGPT?\")"
"agent_executor.invoke({\"input\": \"What is ChatGPT?\"})"
]
},
{
@ -411,15 +420,15 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to find out who developed ChatGPT\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to find out who developed ChatGPT\n",
"Action: Search\n",
"Action Input: Who developed ChatGPT\u001b[0m\n",
"Observation: \u001b[36;1m\u001b[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001b[0m\n",
"Action Input: Who developed ChatGPT\u001B[0m\n",
"Observation: \u001B[36;1m\u001B[1;3mChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large ... Feb 15, 2023 ... Who owns Chat GPT? Chat GPT is owned and developed by AI research and deployment company, OpenAI. The organization is headquartered in San ... Feb 8, 2023 ... ChatGPT is an AI chatbot developed by San Francisco-based startup OpenAI. OpenAI was co-founded in 2015 by Elon Musk and Sam Altman and is ... Dec 7, 2022 ... ChatGPT is an AI chatbot designed and developed by OpenAI. The bot works by generating text responses based on human-user input, like questions ... Jan 12, 2023 ... In 2019, Microsoft invested $1 billion in OpenAI, the tiny San Francisco company that designed ChatGPT. And in the years since, it has quietly ... Jan 25, 2023 ... The inside story of ChatGPT: How OpenAI founder Sam Altman built the world's hottest technology with billions from Microsoft. Dec 3, 2022 ... ChatGPT went viral on social media for its ability to do anything from code to write essays. · The company that created the AI chatbot has a ... Jan 17, 2023 ... While many Americans were nursing hangovers on New Year's Day, 22-year-old Edward Tian was working feverishly on a new app to combat misuse ... ChatGPT is a language model created by OpenAI, an artificial intelligence research laboratory consisting of a team of researchers and engineers focused on ... 1 day ago ... Everyone is talking about ChatGPT, developed by OpenAI. This is such a great tool that has helped to make AI more accessible to a wider ...\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer\n",
"Final Answer: ChatGPT was developed by OpenAI.\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001B[1m> Finished chain.\u001B[0m\n"
]
},
{
@ -434,7 +443,7 @@
}
],
"source": [
"agent_chain.run(input=\"Who developed it?\")"
"agent_executor.invoke({\"input\": \"Who developed it?\"})"
]
},
{
@ -449,14 +458,14 @@
"text": [
"\n",
"\n",
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
"\u001b[32;1m\u001b[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n",
"\u001B[32;1m\u001B[1;3mThought: I need to simplify the conversation for a 5 year old.\n",
"Action: Summary\n",
"Action Input: My daughter 5 years old\u001b[0m\n",
"Action Input: My daughter 5 years old\u001B[0m\n",
"\n",
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
"\u001B[1m> Entering new LLMChain chain...\u001B[0m\n",
"Prompt after formatting:\n",
"\u001b[32;1m\u001b[1;3mThis is a conversation between a human and a bot:\n",
"\u001B[32;1m\u001B[1;3mThis is a conversation between a human and a bot:\n",
"\n",
"Human: What is ChatGPT?\n",
"AI: ChatGPT is an artificial intelligence chatbot developed by OpenAI and launched in November 2022. It is built on top of OpenAI's GPT-3 family of large language models and is optimized for dialogue by using Reinforcement Learning with Human-in-the-Loop. It is also capable of sending and receiving images during chatting.\n",
@ -464,16 +473,16 @@
"AI: ChatGPT was developed by OpenAI.\n",
"\n",
"Write a summary of the conversation for My daughter 5 years old:\n",
"\u001b[0m\n",
"\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"\u001B[1m> Finished chain.\u001B[0m\n",
"\n",
"Observation: \u001b[33;1m\u001b[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.\u001b[0m\n",
"Thought:\u001b[32;1m\u001b[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.\u001b[0m\n",
"Observation: \u001B[33;1m\u001B[1;3m\n",
"The conversation was about ChatGPT, an artificial intelligence chatbot developed by OpenAI. It is designed to have conversations with humans and can also send and receive images.\u001B[0m\n",
"Thought:\u001B[32;1m\u001B[1;3m I now know the final answer.\n",
"Final Answer: ChatGPT is an artificial intelligence chatbot developed by OpenAI that can have conversations with humans and send and receive images.\u001B[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
"\u001B[1m> Finished chain.\u001B[0m\n"
]
},
{
@ -488,8 +497,8 @@
}
],
"source": [
"agent_chain.run(\n",
" input=\"Thanks. Summarize the conversation, for my daughter 5 years old.\"\n",
"agent_executor.invoke(\n",
" {\"input\": \"Thanks. Summarize the conversation, for my daughter 5 years old.\"}\n",
")"
]
},
@ -524,7 +533,7 @@
}
],
"source": [
"print(agent_chain.memory.buffer)"
"print(agent_executor.memory.buffer)"
]
}
],

@ -0,0 +1,199 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "c48812ed-35bd-4fbe-9a2c-6c7335e5645e",
"metadata": {},
"outputs": [],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"\n",
"@tool\n",
"def multiply(x: float, y: float) -> float:\n",
" \"\"\"Multiply 'x' times 'y'.\"\"\"\n",
" return x * y\n",
"\n",
"\n",
"@tool\n",
"def exponentiate(x: float, y: float) -> float:\n",
" \"\"\"Raise 'x' to the 'y'.\"\"\"\n",
" return x**y\n",
"\n",
"\n",
"@tool\n",
"def add(x: float, y: float) -> float:\n",
" \"\"\"Add 'x' and 'y'.\"\"\"\n",
" return x + y\n",
"\n",
"\n",
"tools = [multiply, exponentiate, add]\n",
"\n",
"gpt35 = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0).bind_tools(tools)\n",
"claude3 = ChatAnthropic(model=\"claude-3-sonnet-20240229\").bind_tools(tools)\n",
"llm_with_tools = gpt35.configurable_alternatives(\n",
" ConfigurableField(id=\"llm\"), default_key=\"gpt35\", claude3=claude3\n",
")"
]
},
{
"cell_type": "markdown",
"id": "9c186263-1b98-4cb2-b6d1-71f65eb0d811",
"metadata": {},
"source": [
"# LangGraph"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "28fc2c60-7dbc-428a-8983-1a6a15ea30d2",
"metadata": {},
"outputs": [],
"source": [
"import operator\n",
"from typing import Annotated, Sequence, TypedDict\n",
"\n",
"from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage\n",
"from langchain_core.runnables import RunnableLambda\n",
"from langgraph.graph import END, StateGraph\n",
"\n",
"\n",
"class AgentState(TypedDict):\n",
" messages: Annotated[Sequence[BaseMessage], operator.add]\n",
"\n",
"\n",
"def should_continue(state):\n",
" return \"continue\" if state[\"messages\"][-1].tool_calls else \"end\"\n",
"\n",
"\n",
"def call_model(state, config):\n",
" return {\"messages\": [llm_with_tools.invoke(state[\"messages\"], config=config)]}\n",
"\n",
"\n",
"def _invoke_tool(tool_call):\n",
" tool = {tool.name: tool for tool in tools}[tool_call[\"name\"]]\n",
" return ToolMessage(tool.invoke(tool_call[\"args\"]), tool_call_id=tool_call[\"id\"])\n",
"\n",
"\n",
"tool_executor = RunnableLambda(_invoke_tool)\n",
"\n",
"\n",
"def call_tools(state):\n",
" last_message = state[\"messages\"][-1]\n",
" return {\"messages\": tool_executor.batch(last_message.tool_calls)}\n",
"\n",
"\n",
"workflow = StateGraph(AgentState)\n",
"workflow.add_node(\"agent\", call_model)\n",
"workflow.add_node(\"action\", call_tools)\n",
"workflow.set_entry_point(\"agent\")\n",
"workflow.add_conditional_edges(\n",
" \"agent\",\n",
" should_continue,\n",
" {\n",
" \"continue\": \"action\",\n",
" \"end\": END,\n",
" },\n",
")\n",
"workflow.add_edge(\"action\", \"agent\")\n",
"graph = workflow.compile()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "3710e724-2595-4625-ba3a-effb81e66e4a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'messages': [HumanMessage(content=\"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_6yMU2WsS4Bqgi1WxFHxtfJRc', 'function': {'arguments': '{\"x\": 8, \"y\": 2.743}', 'name': 'exponentiate'}, 'type': 'function'}, {'id': 'call_GAL3dQiKFF9XEV0RrRLPTvVp', 'function': {'arguments': '{\"x\": 17.24, \"y\": -918.1241}', 'name': 'add'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 58, 'prompt_tokens': 168, 'total_tokens': 226}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-528302fc-7acf-4c11-82c4-119ccf40c573-0', tool_calls=[{'name': 'exponentiate', 'args': {'x': 8, 'y': 2.743}, 'id': 'call_6yMU2WsS4Bqgi1WxFHxtfJRc'}, {'name': 'add', 'args': {'x': 17.24, 'y': -918.1241}, 'id': 'call_GAL3dQiKFF9XEV0RrRLPTvVp'}]),\n",
" ToolMessage(content='300.03770462067547', tool_call_id='call_6yMU2WsS4Bqgi1WxFHxtfJRc'),\n",
" ToolMessage(content='-900.8841', tool_call_id='call_GAL3dQiKFF9XEV0RrRLPTvVp'),\n",
" AIMessage(content='The result of \\\\(3 + 5^{2.743}\\\\) is approximately 300.04, and the result of \\\\(17.24 - 918.1241\\\\) is approximately -900.88.', response_metadata={'token_usage': {'completion_tokens': 44, 'prompt_tokens': 251, 'total_tokens': 295}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'stop', 'logprobs': None}, id='run-d1161669-ed09-4b18-94bd-6d8530df5aa8-0')]}"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"graph.invoke(\n",
" {\n",
" \"messages\": [\n",
" HumanMessage(\n",
" \"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"\n",
" )\n",
" ]\n",
" }\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "073c074e-d722-42e0-85ec-c62c079207e4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'messages': [HumanMessage(content=\"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"),\n",
" AIMessage(content=[{'text': \"Okay, let's break this down into two parts:\", 'type': 'text'}, {'id': 'toolu_01DEhqcXkXTtzJAiZ7uMBeDC', 'input': {'x': 3, 'y': 5}, 'name': 'add', 'type': 'tool_use'}], response_metadata={'id': 'msg_01AkLGH8sxMHaH15yewmjwkF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 450, 'output_tokens': 81}}, id='run-f35bfae8-8ded-4f8a-831b-0940d6ad16b6-0', tool_calls=[{'name': 'add', 'args': {'x': 3, 'y': 5}, 'id': 'toolu_01DEhqcXkXTtzJAiZ7uMBeDC'}]),\n",
" ToolMessage(content='8.0', tool_call_id='toolu_01DEhqcXkXTtzJAiZ7uMBeDC'),\n",
" AIMessage(content=[{'id': 'toolu_013DyMLrvnrto33peAKMGMr1', 'input': {'x': 8.0, 'y': 2.743}, 'name': 'exponentiate', 'type': 'tool_use'}], response_metadata={'id': 'msg_015Fmp8aztwYcce2JDAFfce3', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 545, 'output_tokens': 75}}, id='run-48aaeeeb-a1e5-48fd-a57a-6c3da2907b47-0', tool_calls=[{'name': 'exponentiate', 'args': {'x': 8.0, 'y': 2.743}, 'id': 'toolu_013DyMLrvnrto33peAKMGMr1'}]),\n",
" ToolMessage(content='300.03770462067547', tool_call_id='toolu_013DyMLrvnrto33peAKMGMr1'),\n",
" AIMessage(content=[{'text': 'So 3 plus 5 raised to the 2.743 power is 300.04.\\n\\nFor the second part:', 'type': 'text'}, {'id': 'toolu_01UTmMrGTmLpPrPCF1rShN46', 'input': {'x': 17.24, 'y': -918.1241}, 'name': 'add', 'type': 'tool_use'}], response_metadata={'id': 'msg_015TkhfRBENPib2RWAxkieH6', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 638, 'output_tokens': 105}}, id='run-45fb62e3-d102-4159-881d-241c5dbadeed-0', tool_calls=[{'name': 'add', 'args': {'x': 17.24, 'y': -918.1241}, 'id': 'toolu_01UTmMrGTmLpPrPCF1rShN46'}]),\n",
" ToolMessage(content='-900.8841', tool_call_id='toolu_01UTmMrGTmLpPrPCF1rShN46'),\n",
" AIMessage(content='Therefore, 17.24 - 918.1241 = -900.8841', response_metadata={'id': 'msg_01LgKnRuUcSyADCpxv9tPoYD', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 759, 'output_tokens': 24}}, id='run-1008254e-ccd1-497c-8312-9550dd77bd08-0')]}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"graph.invoke(\n",
" {\n",
" \"messages\": [\n",
" HumanMessage(\n",
" \"what's 3 plus 5 raised to the 2.743. also what's 17.24 - 918.1241\"\n",
" )\n",
" ]\n",
" },\n",
" config={\"configurable\": {\"llm\": \"claude3\"}},\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -3811,7 +3811,7 @@
"from langchain.chains import ConversationalRetrievalChain\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n",
"qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)"
]
},

@ -424,7 +424,7 @@
" DialogueAgentWithTools(\n",
" name=name,\n",
" system_message=SystemMessage(content=system_message),\n",
" model=ChatOpenAI(model_name=\"gpt-4\", temperature=0.2),\n",
" model=ChatOpenAI(model=\"gpt-4\", temperature=0.2),\n",
" tool_names=tools,\n",
" top_k_results=2,\n",
" )\n",

@ -601,7 +601,7 @@
"source": [
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)"
"llm = ChatOpenAI(model=\"gpt-4\", temperature=0)"
]
},
{


@ -9,6 +9,10 @@
## Tutorials
### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)
### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)
### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)
### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
@ -35,6 +39,7 @@
- [Udacity](https://www.udacity.com/catalog/all/any-price/any-school/any-skill/any-difficulty/any-duration/any-type/relevance/page-1?searchValue=langchain)
- [LinkedIn Learning](https://www.linkedin.com/search/results/learning/?keywords=langchain)
- [edX](https://www.edx.org/search?q=langchain)
- [freeCodeCamp](https://www.youtube.com/@freecodecamp/search?query=langchain)
## Short Tutorials

@ -190,12 +190,9 @@ Maintainer steps (Contributors should **not** do these):
## Partner package in external repo
If you are creating a partner package in an external repo, you should follow the same steps as above,
but you will need to set up your own CI/CD and package management.
Partner packages in external repos must be coordinated between the LangChain team and
the partner organization to ensure that they are maintained and updated.
Name your package as `langchain-{partner}-{integration}`.
Still, you have to create the `libs/partners/{partner}-{integration}` folder in the `LangChain` monorepo
and add a `README.md` file with a link to the external repo.
See this [example](https://github.com/langchain-ai/langchain/tree/master/libs/partners/google-genai).
This allows keeping track of all the partner packages in the `LangChain` documentation.
If you're interested in creating a partner package in an external repo, please start
with one in the LangChain repo, and then reach out to the LangChain team to discuss
how to move it to an external repo.

@ -440,7 +440,7 @@
"id": "e6833844-f1c4-444c-a3d2-31b3c6b31d46",
"metadata": {},
"source": [
"We then use the `RunnableParallel` to prepare the expected inputs into the prompt by using the entries for the retrieved documents as well as the original user question, using the retriever for document search, and RunnablePassthrough to pass the users question:"
"We then use the `RunnableParallel` to prepare the expected inputs into the prompt by using the entries for the retrieved documents as well as the original user question, using the retriever for document search, and `RunnablePassthrough` to pass the users question:"
]
},
{

@ -29,9 +29,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
]

@ -63,7 +63,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
@ -285,8 +285,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.chat_models import ChatAnthropic\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.runnables import ConfigurableField\n",
"from langchain_openai import ChatOpenAI"
]

@ -94,12 +94,12 @@ from langchain_openai import ChatOpenAI
llm = ChatOpenAI()
```
If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:
If you'd prefer not to set an environment variable, you can pass the key in directly via the `api_key` named parameter when initializing the OpenAI LLM class:
```python
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(openai_api_key="...")
llm = ChatOpenAI(api_key="...")
```
</TabItem>
@ -141,10 +141,10 @@ from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0.2, max_tokens=1024)
```
If you'd prefer not to set an environment variable you can pass the key in directly via the `anthropic_api_key` named parameter when initiating the Anthropic Chat Model class:
If you'd prefer not to set an environment variable, you can pass the key in directly via the `api_key` named parameter when initializing the Anthropic Chat Model class:
```python
llm = ChatAnthropic(anthropic_api_key="...")
llm = ChatAnthropic(api_key="...")
```
</TabItem>
@ -293,7 +293,7 @@ embeddings = OllamaEmbeddings()
Make sure you have the `cohere` package installed and the appropriate environment variables set (these are the same as needed for the LLM).
```python
from langchain_community.embeddings import CohereEmbeddings
from langchain_cohere.embeddings import CohereEmbeddings
embeddings = CohereEmbeddings()
```
@ -509,7 +509,7 @@ from langchain.agents import AgentExecutor
# Get the prompt to use - you can modify this!
prompt = hub.pull("hwchase17/openai-functions-agent")
# You need to set OPENAI_API_KEY environment variable or pass it as argument `openai_api_key`.
# You need to set the OPENAI_API_KEY environment variable or pass it as the `api_key` argument.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
agent = create_openai_functions_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

@ -27,7 +27,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
llm = ChatOpenAI(model="gpt-4", temperature=0)
tools = load_tools(["ddg-search", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
```

@ -204,7 +204,7 @@
" ]\n",
")\n",
"# Here we're going to use a bad model name to easily create a chain that will error\n",
"chat_model = ChatOpenAI(model_name=\"gpt-fake\")\n",
"chat_model = ChatOpenAI(model=\"gpt-fake\")\n",
"bad_chain = chat_prompt | chat_model | StrOutputParser()"
]
},

@ -137,7 +137,7 @@
}
],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts.prompt import PromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"anonymizer = PresidioAnonymizer()\n",

@ -878,8 +878,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts import format_document\n",
"from langchain_core.prompts.prompt import PromptTemplate\n",
"\n",
"DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template=\"{page_content}\")\n",
"\n",

@ -207,7 +207,7 @@
}
],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts.prompt import PromptTemplate\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"anonymizer = PresidioReversibleAnonymizer()\n",

@ -278,8 +278,8 @@
],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.callbacks.stdout import StdOutCallbackHandler\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"\n",
"argilla_callback = ArgillaCallbackHandler(\n",

@ -42,7 +42,7 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai deepeval"
"%pip install --upgrade --quiet langchain langchain-openai deepeval langchain-chroma"
]
},
{
@ -215,8 +215,8 @@
"source": [
"import requests\n",
"from langchain.chains import RetrievalQA\n",
"from langchain_chroma import Chroma\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter\n",
"\n",

@ -170,8 +170,8 @@
"import os\n",
"\n",
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain.prompts.chat import (\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
")\n",

@ -151,7 +151,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import (\n",
"from langchain_core.prompts import (\n",
" ChatPromptTemplate,\n",
" FewShotChatMessagePromptTemplate,\n",
")\n",

@ -91,7 +91,7 @@
"source": [
"from langchain.agents import initialize_agent, load_tools\n",
"from langchain.chains import LLMChain, SimpleSequentialChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import OpenAI\n",
"from sagemaker.analytics import ExperimentAnalytics\n",
"from sagemaker.experiments.run import Run\n",

@ -0,0 +1,503 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"<a target=\"_blank\" href=\"https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/integrations/callbacks/uptrain.ipynb\">\n",
" <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
"</a>"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# UpTrain\n",
"\n",
"> UpTrain [[github](https://github.com/uptrain-ai/uptrain) || [website](https://uptrain.ai/) || [docs](https://docs.uptrain.ai/getting-started/introduction)] is an open-source platform to evaluate and improve LLM applications. It provides grades for 20+ preconfigured checks (covering language, code, embedding use cases), performs root cause analyses on instances of failure cases and provides guidance for resolving them."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## UpTrain Callback Handler\n",
"\n",
"This notebook showcases the UpTrain callback handler seamlessly integrating into your pipeline, facilitating diverse evaluations. We have chosen a few evaluations that we deemed apt for evaluating the chains. These evaluations run automatically, with results displayed in the output. More details on UpTrain's evaluations can be found [here](https://github.com/uptrain-ai/uptrain?tab=readme-ov-file#pre-built-evaluations-we-offer-). \n",
"\n",
"Selected retievers from Langchain are highlighted for demonstration:\n",
"\n",
"### 1. **Vanilla RAG**:\n",
"RAG plays a crucial role in retrieving context and generating responses. To ensure its performance and response quality, we conduct the following evaluations:\n",
"\n",
"- **[Context Relevance](https://docs.uptrain.ai/predefined-evaluations/context-awareness/context-relevance)**: Determines if the context extracted from the query is relevant to the response.\n",
"- **[Factual Accuracy](https://docs.uptrain.ai/predefined-evaluations/context-awareness/factual-accuracy)**: Assesses if the LLM is hallcuinating or providing incorrect information.\n",
"- **[Response Completeness](https://docs.uptrain.ai/predefined-evaluations/response-quality/response-completeness)**: Checks if the response contains all the information requested by the query.\n",
"\n",
"### 2. **Multi Query Generation**:\n",
"MultiQueryRetriever creates multiple variants of a question having a similar meaning to the original question. Given the complexity, we include the previous evaluations and add:\n",
"\n",
"- **[Multi Query Accuracy](https://docs.uptrain.ai/predefined-evaluations/query-quality/multi-query-accuracy)**: Assures that the multi-queries generated mean the same as the original query.\n",
"\n",
"### 3. **Context Compression and Reranking**:\n",
"Re-ranking involves reordering nodes based on relevance to the query and choosing top n nodes. Since the number of nodes can reduce once the re-ranking is complete, we perform the following evaluations:\n",
"\n",
"- **[Context Reranking](https://docs.uptrain.ai/predefined-evaluations/context-awareness/context-reranking)**: Checks if the order of re-ranked nodes is more relevant to the query than the original order.\n",
"- **[Context Conciseness](https://docs.uptrain.ai/predefined-evaluations/context-awareness/context-conciseness)**: Examines whether the reduced number of nodes still provides all the required information.\n",
"\n",
"These evaluations collectively ensure the robustness and effectiveness of the RAG, MultiQueryRetriever, and the Reranking process in the chain."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Install Dependencies"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
"To disable this warning, you can either:\n",
"\t- Avoid using `tokenizers` before the fork if possible\n",
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: There was an error checking the latest version of pip.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install -qU langchain langchain_openai uptrain faiss-cpu flashrank"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"NOTE: that you can also install `faiss-gpu` instead of `faiss-cpu` if you want to use the GPU enabled version of the library."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Import Libraries"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"from getpass import getpass\n",
"\n",
"from langchain.chains import RetrievalQA\n",
"from langchain.retrievers import ContextualCompressionRetriever\n",
"from langchain.retrievers.document_compressors import FlashrankRerank\n",
"from langchain.retrievers.multi_query import MultiQueryRetriever\n",
"from langchain_community.callbacks.uptrain_callback import UpTrainCallbackHandler\n",
"from langchain_community.document_loaders import TextLoader\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_core.output_parsers.string import StrOutputParser\n",
"from langchain_core.prompts.chat import ChatPromptTemplate\n",
"from langchain_core.runnables.passthrough import RunnablePassthrough\n",
"from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import (\n",
" RecursiveCharacterTextSplitter,\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Load the documents"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
"loader = TextLoader(\"../../modules/state_of_the_union.txt\")\n",
"documents = loader.load()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Split the document into chunks"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
"chunks = text_splitter.split_documents(documents)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Create the retriever"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [],
"source": [
"embeddings = OpenAIEmbeddings()\n",
"db = FAISS.from_documents(chunks, embeddings)\n",
"retriever = db.as_retriever()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Define the LLM"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [],
"source": [
"llm = ChatOpenAI(temperature=0, model=\"gpt-4\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set the openai API key\n",
"This key is required to perform the evaluations. UpTrain uses the GPT models to evaluate the responses generated by the LLM."
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"OPENAI_API_KEY = getpass()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"For each of the retrievers below, it is better to define the callback handler again to avoid interference. You can choose between the following options for evaluating using UpTrain:\n",
"\n",
"### 1. **UpTrain's Open-Source Software (OSS)**: \n",
"You can use the open-source evaluation service to evaluate your model.\n",
"In this case, you will need to provie an OpenAI API key. You can get yours [here](https://platform.openai.com/account/api-keys).\n",
"\n",
"Parameters:\n",
"- key_type=\"openai\"\n",
"- api_key=\"OPENAI_API_KEY\"\n",
"- project_name_prefix=\"PROJECT_NAME_PREFIX\"\n",
"\n",
"\n",
"### 2. **UpTrain Managed Service and Dashboards**: \n",
"You can create a free UpTrain account [here](https://uptrain.ai/) and get free trial credits. If you want more trial credits, [book a call with the maintainers of UpTrain here](https://calendly.com/uptrain-sourabh/30min).\n",
"\n",
"UpTrain Managed service provides:\n",
"1. Dashboards with advanced drill-down and filtering options\n",
"1. Insights and common topics among failing cases\n",
"1. Observability and real-time monitoring of production data\n",
"1. Regression testing via seamless integration with your CI/CD pipelines\n",
"\n",
"The notebook contains some screenshots of the dashboards and the insights that you can get from the UpTrain managed service.\n",
"\n",
"Parameters:\n",
"- key_type=\"uptrain\"\n",
"- api_key=\"UPTRAIN_API_KEY\"\n",
"- project_name_prefix=\"PROJECT_NAME_PREFIX\"\n",
"\n",
"\n",
"**Note:** The `project_name_prefix` will be used as prefix for the project names in the UpTrain dashboard. These will be different for different types of evals. For example, if you set project_name_prefix=\"langchain\" and perform the multi_query evaluation, the project name will be \"langchain_multi_query\"."
]
},
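{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a minimal sketch (kept commented out so the notebook still runs without an UpTrain account), option 2 only differs in the `key_type` and key passed to the handler; `UPTRAIN_API_KEY` below is an assumed placeholder for your own key:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch of option 2 (assumes UPTRAIN_API_KEY holds your UpTrain key):\n",
"# UPTRAIN_API_KEY = getpass()\n",
"# uptrain_callback = UpTrainCallbackHandler(\n",
"#     key_type=\"uptrain\", api_key=UPTRAIN_API_KEY, project_name_prefix=\"langchain\"\n",
"# )"
]
},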
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 1. Vanilla RAG"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"UpTrain callback handler will automatically capture the query, context and response once generated and will run the following three evaluations *(Graded from 0 to 1)* on the response:\n",
"- **[Context Relevance](https://docs.uptrain.ai/predefined-evaluations/context-awareness/context-relevance)**: Check if the context extractedfrom the query is relevant to the response.\n",
"- **[Factual Accuracy](https://docs.uptrain.ai/predefined-evaluations/context-awareness/factual-accuracy)**: Check how factually accurate the response is.\n",
"- **[Response Completeness](https://docs.uptrain.ai/predefined-evaluations/response-quality/response-completeness)**: Check if the response contains all the information that the query is asking for."
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-04-17 17:03:44.969\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate_on_server\u001b[0m:\u001b[36m378\u001b[0m - \u001b[1mSending evaluation request for rows 0 to <50 to the Uptrain\u001b[0m\n",
"\u001b[32m2024-04-17 17:04:05.809\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate\u001b[0m:\u001b[36m367\u001b[0m - \u001b[1mLocal server not running, start the server to log data and visualize in the dashboard!\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Question: What did the president say about Ketanji Brown Jackson\n",
"Response: The president mentioned that he had nominated Ketanji Brown Jackson to serve on the United States Supreme Court 4 days ago. He described her as one of the nation's top legal minds who will continue Justice Breyers legacy of excellence. He also mentioned that she is a former top litigator in private practice, a former federal public defender, and comes from a family of public school educators and police officers. He described her as a consensus builder and noted that since her nomination, she has received a broad range of support from various groups, including the Fraternal Order of Police and former judges appointed by both Democrats and Republicans.\n",
"\n",
"Context Relevance Score: 1.0\n",
"Factual Accuracy Score: 1.0\n",
"Response Completeness Score: 1.0\n"
]
}
],
"source": [
"# Create the RAG prompt\n",
"template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",
"{context}\n",
"Question: {question}\n",
"\"\"\"\n",
"rag_prompt_text = ChatPromptTemplate.from_template(template)\n",
"\n",
"# Create the chain\n",
"chain = (\n",
" {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
" | rag_prompt_text\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"# Create the uptrain callback handler\n",
"uptrain_callback = UpTrainCallbackHandler(key_type=\"openai\", api_key=OPENAI_API_KEY)\n",
"config = {\"callbacks\": [uptrain_callback]}\n",
"\n",
"# Invoke the chain with a query\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = chain.invoke(query, config=config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 2. Multi Query Generation\n",
"\n",
"The **MultiQueryRetriever** is used to tackle the problem that the RAG pipeline might not return the best set of documents based on the query. It generates multiple queries that mean the same as the original query and then fetches documents for each.\n",
"\n",
"To evluate this retriever, UpTrain will run the following evaluation:\n",
"- **[Multi Query Accuracy](https://docs.uptrain.ai/predefined-evaluations/query-quality/multi-query-accuracy)**: Checks if the multi-queries generated mean the same as the original query."
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-04-17 17:04:10.675\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate_on_server\u001b[0m:\u001b[36m378\u001b[0m - \u001b[1mSending evaluation request for rows 0 to <50 to the Uptrain\u001b[0m\n",
"\u001b[32m2024-04-17 17:04:16.804\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate\u001b[0m:\u001b[36m367\u001b[0m - \u001b[1mLocal server not running, start the server to log data and visualize in the dashboard!\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Question: What did the president say about Ketanji Brown Jackson\n",
"Multi Queries:\n",
" - How did the president comment on Ketanji Brown Jackson?\n",
" - What were the president's remarks regarding Ketanji Brown Jackson?\n",
" - What statements has the president made about Ketanji Brown Jackson?\n",
"\n",
"Multi Query Accuracy Score: 0.5\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-04-17 17:04:22.027\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate_on_server\u001b[0m:\u001b[36m378\u001b[0m - \u001b[1mSending evaluation request for rows 0 to <50 to the Uptrain\u001b[0m\n",
"\u001b[32m2024-04-17 17:04:44.033\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate\u001b[0m:\u001b[36m367\u001b[0m - \u001b[1mLocal server not running, start the server to log data and visualize in the dashboard!\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Question: What did the president say about Ketanji Brown Jackson\n",
"Response: The president mentioned that he had nominated Circuit Court of Appeals Judge Ketanji Brown Jackson to serve on the United States Supreme Court 4 days ago. He described her as one of the nation's top legal minds who will continue Justice Breyers legacy of excellence. He also mentioned that since her nomination, she has received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans.\n",
"\n",
"Context Relevance Score: 1.0\n",
"Factual Accuracy Score: 1.0\n",
"Response Completeness Score: 1.0\n"
]
}
],
"source": [
"# Create the retriever\n",
"multi_query_retriever = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)\n",
"\n",
"# Create the uptrain callback\n",
"uptrain_callback = UpTrainCallbackHandler(key_type=\"openai\", api_key=OPENAI_API_KEY)\n",
"config = {\"callbacks\": [uptrain_callback]}\n",
"\n",
"# Create the RAG prompt\n",
"template = \"\"\"Answer the question based only on the following context, which can include text and tables:\n",
"{context}\n",
"Question: {question}\n",
"\"\"\"\n",
"rag_prompt_text = ChatPromptTemplate.from_template(template)\n",
"\n",
"chain = (\n",
" {\"context\": multi_query_retriever, \"question\": RunnablePassthrough()}\n",
" | rag_prompt_text\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"# Invoke the chain with a query\n",
"question = \"What did the president say about Ketanji Brown Jackson\"\n",
"docs = chain.invoke(question, config=config)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# 3. Context Compression and Reranking\n",
"\n",
"The reranking process involves reordering nodes based on relevance to the query and choosing the top n nodes. Since the number of nodes can reduce once the reranking is complete, we perform the following evaluations:\n",
"- **[Context Reranking](https://docs.uptrain.ai/predefined-evaluations/context-awareness/context-reranking)**: Check if the order of re-ranked nodes is more relevant to the query than the original order.\n",
"- **[Context Conciseness](https://docs.uptrain.ai/predefined-evaluations/context-awareness/context-conciseness)**: Check if the reduced number of nodes still provides all the required information."
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-04-17 17:04:46.462\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate_on_server\u001b[0m:\u001b[36m378\u001b[0m - \u001b[1mSending evaluation request for rows 0 to <50 to the Uptrain\u001b[0m\n",
"\u001b[32m2024-04-17 17:04:53.561\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate\u001b[0m:\u001b[36m367\u001b[0m - \u001b[1mLocal server not running, start the server to log data and visualize in the dashboard!\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Question: What did the president say about Ketanji Brown Jackson\n",
"\n",
"Context Conciseness Score: 0.0\n",
"Context Reranking Score: 1.0\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m2024-04-17 17:04:56.947\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate_on_server\u001b[0m:\u001b[36m378\u001b[0m - \u001b[1mSending evaluation request for rows 0 to <50 to the Uptrain\u001b[0m\n",
"\u001b[32m2024-04-17 17:05:16.551\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36muptrain.framework.evalllm\u001b[0m:\u001b[36mevaluate\u001b[0m:\u001b[36m367\u001b[0m - \u001b[1mLocal server not running, start the server to log data and visualize in the dashboard!\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Question: What did the president say about Ketanji Brown Jackson\n",
"Response: The President mentioned that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson to serve on the United States Supreme Court 4 days ago. He described her as one of the nation's top legal minds who will continue Justice Breyers legacy of excellence.\n",
"\n",
"Context Relevance Score: 1.0\n",
"Factual Accuracy Score: 1.0\n",
"Response Completeness Score: 0.5\n"
]
}
],
"source": [
"# Create the retriever\n",
"compressor = FlashrankRerank()\n",
"compression_retriever = ContextualCompressionRetriever(\n",
" base_compressor=compressor, base_retriever=retriever\n",
")\n",
"\n",
"# Create the chain\n",
"chain = RetrievalQA.from_chain_type(llm=llm, retriever=compression_retriever)\n",
"\n",
"# Create the uptrain callback\n",
"uptrain_callback = UpTrainCallbackHandler(key_type=\"openai\", api_key=OPENAI_API_KEY)\n",
"config = {\"callbacks\": [uptrain_callback]}\n",
"\n",
"# Invoke the chain with a query\n",
"query = \"What did the president say about Ketanji Brown Jackson\"\n",
"result = chain.invoke(query, config=config)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long

@ -19,59 +19,85 @@
"\n",
">[Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview) provides REST API access to OpenAI's powerful language models including the GPT-4, GPT-3.5-Turbo, and Embeddings model series. These models can be easily adapted to your specific task including but not limited to content generation, summarization, semantic search, and natural language to code translation. Users can access the service through REST APIs, Python SDK, or a web-based interface in the Azure OpenAI Studio.\n",
"\n",
"This notebook goes over how to connect to an Azure-hosted OpenAI endpoint. We recommend having version `openai>=1` installed."
"This notebook goes over how to connect to an Azure-hosted OpenAI endpoint. First, we need to install the `langchain-openai` package."
]
},
{
"cell_type": "raw",
"id": "d83ba7de",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"%pip install -qU langchain-openai"
]
},
{
"cell_type": "markdown",
"id": "e39133c8",
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"Next, let's set some environment variables to help us connect to the Azure OpenAI service. You can find these values in the Azure portal."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "96164b42",
"execution_count": null,
"id": "1d8d73bd",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import AzureChatOpenAI"
"os.environ[\"AZURE_OPENAI_API_KEY\"] = \"...\"\n",
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://<your-endpoint>.openai.azure.com/\"\n",
"os.environ[\"AZURE_OPENAI_API_VERSION\"] = \"2023-06-01-preview\"\n",
"os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"] = \"chat\""
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "cbe4bb58-ba13-4355-8af9-cd990dc47a64",
"cell_type": "markdown",
"id": "e7b160f8",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"AZURE_OPENAI_API_KEY\"] = \"...\"\n",
"os.environ[\"AZURE_OPENAI_ENDPOINT\"] = \"https://<your-endpoint>.openai.azure.com/\""
"Next, let's construct our model and chat with it:"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "8161278f",
"execution_count": 3,
"id": "cbe4bb58-ba13-4355-8af9-cd990dc47a64",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage\n",
"from langchain_openai import AzureChatOpenAI\n",
"\n",
"model = AzureChatOpenAI(\n",
" openai_api_version=\"2023-05-15\",\n",
" azure_deployment=\"your-deployment-name\",\n",
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
" azure_deployment=os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 4,
"id": "99509140",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\")"
"AIMessage(content=\"J'adore programmer.\", response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 19, 'total_tokens': 25}, 'model_name': 'gpt-35-turbo', 'system_fingerprint': None, 'prompt_filter_results': [{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}], 'finish_reason': 'stop', 'logprobs': None, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}, id='run-25ed88db-38f2-4b0c-a943-a03f217711a9-0')"
]
},
"execution_count": 15,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@ -80,7 +106,7 @@
"message = HumanMessage(\n",
" content=\"Translate this sentence from English to French. I love programming.\"\n",
")\n",
"model([message])"
"model.invoke([message])"
]
},
{
@ -96,7 +122,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 5,
"id": "0531798a",
"metadata": {},
"outputs": [],
@ -106,19 +132,29 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 7,
"id": "aceddb72",
"metadata": {
"scrolled": true
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Total Cost (USD): $0.000041\n"
]
}
],
"source": [
"model = AzureChatOpenAI(\n",
" openai_api_version=\"2023-05-15\",\n",
" azure_deployment=\"gpt-35-turbo\", # in Azure, this deployment has version 0613 - input and output tokens are counted separately\n",
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
" azure_deployment=os.environ[\n",
" \"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"\n",
" ], # in Azure, this deployment has version 0613 - input and output tokens are counted separately\n",
")\n",
"with get_openai_callback() as cb:\n",
" model([message])\n",
" model.invoke([message])\n",
" print(\n",
" f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\"\n",
" ) # without specifying the model version, flat-rate 0.002 USD per 1k input and output tokens is used"
@ -134,7 +170,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 11,
"id": "8d5e54e9",
"metadata": {},
"outputs": [
@ -147,13 +183,13 @@
}
],
"source": [
"model0613 = AzureChatOpenAI(\n",
" openai_api_version=\"2023-05-15\",\n",
" deployment_name=\"gpt-35-turbo\",\n",
" model_version=\"0613\",\n",
"model0301 = AzureChatOpenAI(\n",
" openai_api_version=os.environ[\"AZURE_OPENAI_API_VERSION\"],\n",
" azure_deployment=os.environ[\"AZURE_OPENAI_CHAT_DEPLOYMENT_NAME\"],\n",
" model_version=\"0301\",\n",
")\n",
"with get_openai_callback() as cb:\n",
" model0613([message])\n",
" model0301.invoke([message])\n",
" print(f\"Total Cost (USD): ${format(cb.total_cost, '.6f')}\")"
]
}
@ -174,7 +210,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -3,10 +3,14 @@
{
"cell_type": "raw",
"id": "fbc66410",
"metadata": {},
"metadata": {
"vscode": {
"languageId": "raw"
}
},
"source": [
"---\n",
"sidebar_label: Bedrock Chat\n",
"sidebar_label: Bedrock\n",
"---"
]
},
@ -15,7 +19,7 @@
"id": "bf733a38-db84-4363-89e2-de6735c37230",
"metadata": {},
"source": [
"# BedrockChat\n",
"# ChatBedrock\n",
"\n",
">[Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that offers a choice of \n",
"> high-performing foundation models (FMs) from leading AI companies like `AI21 Labs`, `Anthropic`, `Cohere`, \n",
@ -30,42 +34,53 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "d51edc81",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install --upgrade --quiet boto3"
"%pip install --upgrade --quiet langchain-aws"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"id": "d4a7c55d-b235-4ca4-a579-c90cc9570da9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.chat_models import BedrockChat\n",
"from langchain_aws import ChatBedrock\n",
"from langchain_core.messages import HumanMessage"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 11,
"id": "70cf04e8-423a-4ff6-8b09-f11fb711c817",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = BedrockChat(model_id=\"anthropic.claude-v2\", model_kwargs={\"temperature\": 0.1})"
"chat = ChatBedrock(\n",
" model_id=\"anthropic.claude-3-sonnet-20240229-v1:0\",\n",
" model_kwargs={\"temperature\": 0.1},\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 12,
"id": "8199ef8f-eb8b-4253-9ea0-6c24a013ca4c",
"metadata": {
"tags": []
@ -74,10 +89,10 @@
{
"data": {
"text/plain": [
"AIMessage(content=\" Voici la traduction en français : J'adore programmer.\", additional_kwargs={}, example=False)"
"AIMessage(content=\"Voici la traduction en français :\\n\\nJ'aime la programmation.\", additional_kwargs={'usage': {'prompt_tokens': 20, 'completion_tokens': 21, 'total_tokens': 41}}, response_metadata={'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0', 'usage': {'prompt_tokens': 20, 'completion_tokens': 21, 'total_tokens': 41}}, id='run-994f0362-0e50-4524-afad-3c4f5bb11328-0')"
]
},
"execution_count": 3,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@ -88,7 +103,7 @@
" content=\"Translate this sentence from English to French. I love programming.\"\n",
" )\n",
"]\n",
"chat(messages)"
"chat.invoke(messages)"
]
},
{
@ -97,39 +112,30 @@
"id": "a4a4f4d4",
"metadata": {},
"source": [
"### For BedrockChat with Streaming"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c253883f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
"### Streaming\n",
"\n",
"chat = BedrockChat(\n",
" model_id=\"anthropic.claude-v2\",\n",
" streaming=True,\n",
" callbacks=[StreamingStdOutCallbackHandler()],\n",
" model_kwargs={\"temperature\": 0.1},\n",
")"
"To stream responses, you can use the runnable `.stream()` method."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "d9e52838",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Voici la traduction en français :\n",
"\n",
"J'aime la programmation."
]
}
],
"source": [
"messages = [\n",
" HumanMessage(\n",
" content=\"Translate this sentence from English to French. I love programming.\"\n",
" )\n",
"]\n",
"chat(messages)"
"for chunk in chat.stream(messages):\n",
" print(chunk.content, end=\"\", flush=True)"
]
}
],
@ -149,7 +155,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -51,7 +51,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@ -259,31 +259,46 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from pprint import pprint\n",
"\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import HarmBlockThreshold, HarmCategory"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'is_blocked': False,\n",
" 'safety_ratings': [{'category': 'HARM_CATEGORY_HARASSMENT',\n",
"{'citation_metadata': None,\n",
" 'is_blocked': False,\n",
" 'safety_ratings': [{'blocked': False,\n",
" 'category': 'HARM_CATEGORY_HATE_SPEECH',\n",
" 'probability_label': 'NEGLIGIBLE'},\n",
" {'category': 'HARM_CATEGORY_HATE_SPEECH',\n",
" {'blocked': False,\n",
" 'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',\n",
" 'probability_label': 'NEGLIGIBLE'},\n",
" {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n",
" {'blocked': False,\n",
" 'category': 'HARM_CATEGORY_HARASSMENT',\n",
" 'probability_label': 'NEGLIGIBLE'},\n",
" {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT',\n",
" 'probability_label': 'NEGLIGIBLE'}]}\n"
" {'blocked': False,\n",
" 'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT',\n",
" 'probability_label': 'NEGLIGIBLE'}],\n",
" 'usage_metadata': {'candidates_token_count': 6,\n",
" 'prompt_token_count': 12,\n",
" 'total_token_count': 18}}\n"
]
}
],
"source": [
"from pprint import pprint\n",
"\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI, HarmBlockThreshold, HarmCategory\n",
"\n",
"human = \"Translate this sentence from English to French. I love programming.\"\n",
"messages = [HumanMessage(content=human)]\n",
"\n",
@ -313,18 +328,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'is_blocked': False,\n",
" 'safety_attributes': {'Derogatory': 0.1,\n",
" 'Finance': 0.3,\n",
" 'Insult': 0.1,\n",
" 'Sexual': 0.1}}\n"
"{'errors': (),\n",
" 'grounding_metadata': {'citations': [], 'search_queries': []},\n",
" 'is_blocked': False,\n",
" 'safety_attributes': [{'Derogatory': 0.1, 'Insult': 0.1, 'Sexual': 0.2}],\n",
" 'usage_metadata': {'candidates_billable_characters': 88.0,\n",
" 'candidates_token_count': 24.0,\n",
" 'prompt_billable_characters': 58.0,\n",
" 'prompt_token_count': 12.0}}\n"
]
}
],
@ -339,40 +357,149 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Function Calling with Gemini\n",
"## Tool calling (a.k.a. function calling) with Gemini\n",
"\n",
"We can pass tool definitions to Gemini models to get the model to invoke those tools when appropriate. This is useful not only for LLM-powered tool use but also for getting structured outputs out of models more generally.\n",
"\n",
"With `ChatVertexAI.bind_tools()`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to a Gemini tool schema, which looks like:\n",
"```python\n",
"{\n",
" \"name\": \"...\", # tool name\n",
" \"description\": \"...\", # tool description\n",
" \"parameters\": {...} # tool input schema as JSONSchema\n",
"}\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'function_call': {'name': 'GetWeather', 'arguments': '{\"location\": \"San Francisco, CA\"}'}}, response_metadata={'is_blocked': False, 'safety_ratings': [{'category': 'HARM_CATEGORY_HATE_SPEECH', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_HARASSMENT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'probability_label': 'NEGLIGIBLE', 'blocked': False}], 'citation_metadata': None, 'usage_metadata': {'prompt_token_count': 41, 'candidates_token_count': 7, 'total_token_count': 48}}, id='run-05e760dc-0682-4286-88e1-5b23df69b083-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco, CA'}, 'id': 'cd2499c4-4513-4059-bfff-5321b6e922d0'}])"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
" \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"We can call Gemini models with tools."
"\n",
"llm = ChatVertexAI(model=\"gemini-pro\", temperature=0)\n",
"llm_with_tools = llm.bind_tools([GetWeather])\n",
"ai_msg = llm_with_tools.invoke(\n",
" \"what is the weather like in San Francisco\",\n",
")\n",
"ai_msg"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The tool calls can be access via the `AIMessage.tool_calls` attribute, where they are extracted in a model-agnostic format:"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"MyModel(name='Erick', age=27)"
"[{'name': 'GetWeather',\n",
" 'args': {'location': 'San Francisco, CA'},\n",
" 'id': 'cd2499c4-4513-4059-bfff-5321b6e922d0'}]"
]
},
"execution_count": null,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_google_vertexai import create_structured_runnable\n",
"ai_msg.tool_calls"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"For a complete guide on tool calling [head here](/docs/modules/model_io/chat/function_calling/)."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Structured outputs\n",
"\n",
"Many applications require structured model outputs. Tool calling makes it much easier to do this reliably. The [with_structured_outputs](https://api.python.langchain.com/en/latest/chat_models/langchain_google_vertexai.chat_models.ChatVertexAI.html) constructor provides a simple interface built on top of tool calling for getting structured outputs out of a model. For a complete guide on structured outputs [head here](/docs/modules/model_io/chat/structured_output/).\n",
"\n",
"### ChatVertexAI.with_structured_outputs()\n",
"\n",
"To get structured outputs from our Gemini model all we need to do is to specify a desired schema, either as a Pydantic class or as a JSON schema, "
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Person(name='Stefan', age=13)"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"class Person(BaseModel):\n",
" \"\"\"Save information about a person.\"\"\"\n",
"\n",
"llm = ChatVertexAI(model=\"gemini-pro\")\n",
" name: str = Field(..., description=\"The person's name.\")\n",
" age: int = Field(..., description=\"The person's age.\")\n",
"\n",
"\n",
"class MyModel(BaseModel):\n",
" name: str\n",
" age: int\n",
"structured_llm = llm.with_structured_output(Person)\n",
"structured_llm.invoke(\"Stefan is already 13 years old\")"
]
},
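{
"cell_type": "markdown",
"metadata": {},
"source": [
"As stated above, a JSON schema can be passed instead of a Pydantic class. The sketch below assumes `with_structured_output` accepts a JSONSchema-style dict equivalent to the `Person` class (the exact dict format accepted may vary by version):\n",
"\n",
"```python\n",
"# Sketch: the same structured output request expressed as a JSONSchema dict\n",
"json_schema = {\n",
"    \"title\": \"Person\",\n",
"    \"description\": \"Save information about a person.\",\n",
"    \"type\": \"object\",\n",
"    \"properties\": {\n",
"        \"name\": {\"type\": \"string\", \"description\": \"The person's name.\"},\n",
"        \"age\": {\"type\": \"integer\", \"description\": \"The person's age.\"},\n",
"    },\n",
"    \"required\": [\"name\", \"age\"],\n",
"}\n",
"structured_llm = llm.with_structured_output(json_schema)\n",
"structured_llm.invoke(\"Stefan is already 13 years old\")\n",
"```"
]
},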
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### [Legacy] Using `create_structured_runnable()`\n",
"\n",
"The legacy wasy to get structured outputs is using the `create_structured_runnable` constructor:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_google_vertexai import create_structured_runnable\n",
"\n",
"chain = create_structured_runnable(MyModel, llm)\n",
"chain = create_structured_runnable(Person, llm)\n",
"chain.invoke(\"My name is Erick and I'm 27 years old\")"
]
},
@ -482,11 +609,21 @@
],
"metadata": {
"kernelspec": {
"display_name": "",
"name": ""
"display_name": "poetry-venv-2",
"language": "python",
"name": "poetry-venv-2"
},
"language_info": {
"name": "python"
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,

@ -19,13 +19,13 @@
},
"outputs": [],
"source": [
"from langchain.prompts.chat import (\n",
"from langchain_community.chat_models import JinaChat\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain_community.chat_models import JinaChat\n",
"from langchain_core.messages import HumanMessage, SystemMessage"
")"
]
},
{

@ -49,12 +49,12 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.chat import (\n",
"from langchain_core.messages import SystemMessage\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" MessagesPlaceholder,\n",
")\n",
"from langchain_core.messages import SystemMessage\n",
"\n",
"template_messages = [\n",
" SystemMessage(content=\"You are a helpful assistant.\"),\n",

@ -60,9 +60,9 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.chat import ChatPromptTemplate\n",
"from langchain_community.chat_models import ChatMaritalk\n",
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts.chat import ChatPromptTemplate\n",
"\n",
"llm = ChatMaritalk(\n",
" model=\"sabia-2-medium\", # Available models: sabia-2-small and sabia-2-medium\n",

@ -48,7 +48,7 @@
"source": [
"import getpass\n",
"\n",
"mistral_api_key = getpass.getpass()"
"api_key = getpass.getpass()"
]
},
{
@ -81,8 +81,8 @@
},
"outputs": [],
"source": [
"# If mistral_api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.\n",
"chat = ChatMistralAI(mistral_api_key=mistral_api_key)"
"# If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.\n",
"chat = ChatMistralAI(api_key=api_key)"
]
},
{

@ -0,0 +1,217 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# MLX\n",
"\n",
"This notebook shows how to get started using `MLX` LLM's as chat models.\n",
"\n",
"In particular, we will:\n",
"1. Utilize the [MLXPipeline](https://github.com/langchain-ai/langchain/blob/master/libs/langchain/langchain/llms/mlx_pipelines.py), \n",
"2. Utilize the `ChatMLX` class to enable any of these LLMs to interface with LangChain's [Chat Messages](https://python.langchain.com/docs/modules/model_io/chat/#messages) abstraction.\n",
"3. Demonstrate how to use an open-source LLM to power an `ChatAgent` pipeline\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet mlx-lm transformers huggingface_hub"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. Instantiate an LLM\n",
"\n",
"There are three LLM options to choose from."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms.mlx_pipeline import MLXPipeline\n",
"\n",
"llm = MLXPipeline.from_model_id(\n",
" \"mlx-community/quantized-gemma-2b-it\",\n",
" pipeline_kwargs={\"max_tokens\": 10, \"temp\": 0.1},\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. Instantiate the `ChatMLX` to apply chat templates"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Instantiate the chat model and some messages to pass."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema import (\n",
" HumanMessage,\n",
")\n",
"from langchain_community.chat_models.mlx import ChatMLX\n",
"\n",
"messages = [\n",
" HumanMessage(\n",
" content=\"What happens when an unstoppable force meets an immovable object?\"\n",
" ),\n",
"]\n",
"\n",
"chat_model = ChatMLX(llm=llm)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Inspect how the chat messages are formatted for the LLM call."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chat_model._to_chat_prompt(messages)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = chat_model.invoke(messages)\n",
"print(res.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. Take it for a spin as an agent!\n",
"\n",
"Here we'll test out `gemma-2b-it` as a zero-shot `ReAct` Agent. The example below is taken from [here](https://python.langchain.com/docs/modules/agents/agent_types/react#using-chat-models).\n",
"\n",
"> Note: To run this section, you'll need to have a [SerpAPI Token](https://serpapi.com/) saved as an environment variable: `SERPAPI_API_KEY`"
]
},
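{
"cell_type": "markdown",
"metadata": {},
"source": [
"For example, you could set the token for the current session like this (the value below is a placeholder; substitute your own key):\n",
"\n",
"```python\n",
"import os\n",
"\n",
"# Placeholder: replace with your real SerpAPI token\n",
"os.environ[\"SERPAPI_API_KEY\"] = \"your-serpapi-api-key\"\n",
"```"
]
},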
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, load_tools\n",
"from langchain.agents.format_scratchpad import format_log_to_str\n",
"from langchain.agents.output_parsers import (\n",
" ReActJsonSingleInputOutputParser,\n",
")\n",
"from langchain.tools.render import render_text_description\n",
"from langchain_community.utilities import SerpAPIWrapper"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Configure the agent with a `react-json` style prompt and access to a search engine and calculator."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# setup tools\n",
"tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n",
"\n",
"# setup ReAct style prompt\n",
"prompt = hub.pull(\"hwchase17/react-json\")\n",
"prompt = prompt.partial(\n",
" tools=render_text_description(tools),\n",
" tool_names=\", \".join([t.name for t in tools]),\n",
")\n",
"\n",
"# define the agent\n",
"chat_model_with_stop = chat_model.bind(stop=[\"\\nObservation\"])\n",
"agent = (\n",
" {\n",
" \"input\": lambda x: x[\"input\"],\n",
" \"agent_scratchpad\": lambda x: format_log_to_str(x[\"intermediate_steps\"]),\n",
" }\n",
" | prompt\n",
" | chat_model_with_stop\n",
" | ReActJsonSingleInputOutputParser()\n",
")\n",
"\n",
"# instantiate AgentExecutor\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"agent_executor.invoke(\n",
" {\n",
" \"input\": \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n",
" }\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@ -0,0 +1,112 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# ChatOctoAI\n",
"\n",
"[OctoAI](https://docs.octoai.cloud/docs) offers easy access to efficient compute and enables users to integrate their choice of AI models into applications. The `OctoAI` compute service helps you run, tune, and scale AI applications easily.\n",
"\n",
"This notebook demonstrates the use of `langchain.chat_models.ChatOctoAI` for [OctoAI endpoints](https://octoai.cloud/text).\n",
"\n",
"## Setup\n",
"\n",
"To run our example app, there are two simple steps to take:\n",
"\n",
"1. Get an API Token from [your OctoAI account page](https://octoai.cloud/settings).\n",
" \n",
"2. Paste your API token in in the code cell below or use the `octoai_api_token` keyword argument.\n",
"\n",
"Note: If you want to use a different model than the [available models](https://octoai.cloud/text?selectedTags=Chat), you can containerize the model and make a custom OctoAI endpoint yourself, by following [Build a Container from Python](https://octo.ai/docs/bring-your-own-model/advanced-build-a-container-from-scratch-in-python) and [Create a Custom Endpoint from a Container](https://octo.ai/docs/bring-your-own-model/create-custom-endpoints-from-a-container/create-custom-endpoints-from-a-container) and then updating your `OCTOAI_API_BASE` environment variable.\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OCTOAI_API_TOKEN\"] = \"OCTOAI_API_TOKEN\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOctoAI\n",
"from langchain_core.messages import HumanMessage, SystemMessage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Example"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"chat = ChatOctoAI(max_tokens=300, model_name=\"mixtral-8x7b-instruct\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" SystemMessage(content=\"You are a helpful assistant.\"),\n",
" HumanMessage(content=\"Tell me about Leonardo da Vinci briefly.\"),\n",
"]\n",
"print(chat(messages).content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Leonardo da Vinci (1452-1519) was an Italian polymath who is often considered one of the greatest painters in history. However, his genius extended far beyond art. He was also a scientist, inventor, mathematician, engineer, anatomist, geologist, and cartographer.\n",
"\n",
"Da Vinci is best known for his paintings such as the Mona Lisa, The Last Supper, and The Virgin of the Rocks. His scientific studies were ahead of his time, and his notebooks contain detailed drawings and descriptions of various machines, human anatomy, and natural phenomena.\n",
"\n",
"Despite never receiving a formal education, da Vinci's insatiable curiosity and observational skills made him a pioneer in many fields. His work continues to inspire and influence artists, scientists, and thinkers today."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
},
"vscode": {
"interpreter": {
"hash": "97697b63fdcee0a640856f91cb41326ad601964008c341809e43189d1cab1047"
}
}
},
"nbformat": 4,
"nbformat_minor": 4
}

@ -30,7 +30,7 @@
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
" * e.g., for `Llama-7b`: `ollama pull llama2`\n",
" * e.g., `ollama pull llama3`\n",
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
"\n",
"> On Mac, the models will be download to `~/.ollama/models`\n",
@ -46,7 +46,7 @@
"\n",
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
"\n",
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama2:7b-chat`) then you can use the `ChatOllama` interface.\n",
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
"\n",
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
"\n",
@ -65,7 +65,7 @@
"\n",
"```bash\n",
"curl http://localhost:11434/api/generate -d '{\n",
" \"model\": \"llama2\",\n",
" \"model\": \"llama3\",\n",
" \"prompt\":\"Why is the sky blue?\"\n",
"}'\n",
"```\n",
@ -86,11 +86,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
" Sure, here's a fun space-themed joke for you:\n",
"Why did the astronaut break up with his girlfriend?\n",
"\n",
"Why don't astronauts like broccoli? \n",
"Because it has too many \"crisps\" in it!\n",
"\n"
"Because he needed space!\n"
]
}
],
@ -102,7 +100,7 @@
"\n",
"# supports many more optional parameters. Hover on your `ChatOllama(...)`\n",
"# class to view the latest available supported parameters\n",
"llm = ChatOllama(model=\"llama2\")\n",
"llm = ChatOllama(model=\"llama3\")\n",
"prompt = ChatPromptTemplate.from_template(\"Tell me a short joke about {topic}\")\n",
"\n",
"# using LangChain Expressive Language chain syntax\n",
@ -125,21 +123,14 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure\n",
",\n",
" here\n",
"'s\n",
" a\n",
" joke\n",
":\n",
" Why\n",
"Why\n",
" did\n",
" the\n",
" astronaut\n",
@ -148,17 +139,18 @@
" with\n",
" his\n",
" girlfriend\n",
" before\n",
" going\n",
" to\n",
" Mars\n",
"?\n",
" Because\n",
"\n",
"\n",
"Because\n",
" he\n",
" needed\n",
" more\n",
" space\n",
" to\n",
" explore\n",
".\n",
"\n",
"\n",
"!\n",
"\n"
]
}
@ -179,51 +171,9 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure\n",
",\n",
" here\n",
"'s\n",
" a\n",
" little\n",
" one\n",
":\n",
" Why\n",
" did\n",
" the\n",
" rocket\n",
" scientist\n",
" break\n",
" up\n",
" with\n",
" her\n",
" partner\n",
"?\n",
" Because\n",
" he\n",
" couldn\n",
"'t\n",
" handle\n",
" all\n",
" her\n",
" \"\n",
"space\n",
"y\n",
"\"\n",
" jokes\n",
".\n",
"\n",
"\n",
"\n"
]
}
],
"outputs": [],
"source": [
"topic = {\"topic\": \"Space travel\"}\n",
"\n",
@ -255,13 +205,13 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models import ChatOllama\n",
"\n",
"llm = ChatOllama(model=\"llama2\", format=\"json\", temperature=0)"
"llm = ChatOllama(model=\"llama3\", format=\"json\", temperature=0)"
]
},
{
@ -273,7 +223,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"content='{\\n\"morning\": {\\n\"color\": \"light blue\"\\n},\\n\"noon\": {\\n\"color\": \"blue\"\\n},\\n\"afternoon\": {\\n\"color\": \"grayish-blue\"\\n},\\n\"evening\": {\\n\"color\": \"pinkish-orange\"\\n}\\n}'\n"
"content='{ \"morning\": \"blue\", \"noon\": \"clear blue\", \"afternoon\": \"hazy yellow\", \"evening\": \"orange-red\" }\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n \\n\\n\\n\\n\\n\\n ' id='run-e893700f-e2d0-4df8-ad86-17525dcee318-0'\n"
]
}
],
@ -292,7 +242,7 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 8,
"metadata": {},
"outputs": [
{
@ -300,13 +250,9 @@
"output_type": "stream",
"text": [
"\n",
"{\n",
"\"name\": \"John\",\n",
"\"age\": 35,\n",
"\"interests\": [\n",
"\"pizza\"\n",
"]\n",
"}\n"
"Name: John\n",
"Age: 35\n",
"Likes: Pizza\n"
]
}
],
@ -516,7 +462,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.8"
}
},
"nbformat": 4,

@ -22,7 +22,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "522686de",
"metadata": {
"tags": []
@ -30,24 +30,20 @@
"outputs": [],
"source": [
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "62e0dbc3",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"chat = ChatOpenAI(temperature=0)"
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
@ -58,14 +54,14 @@
"The above cell assumes that your OpenAI API key is set in your environment variables. If you would rather manually specify your API key and/or organization ID, use the following code:\n",
"\n",
"```python\n",
"chat = ChatOpenAI(temperature=0, openai_api_key=\"YOUR_API_KEY\", openai_organization=\"YOUR_ORGANIZATION_ID\")\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0, api_key=\"YOUR_API_KEY\", openai_organization=\"YOUR_ORGANIZATION_ID\")\n",
"```\n",
"Remove the openai_organization parameter should it not apply to you."
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "ce16ad78-8e6f-48cd-954e-98be75eb5836",
"metadata": {
"tags": []
@ -74,24 +70,20 @@
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, example=False)"
"AIMessage(content=\"J'adore programmer.\", response_metadata={'token_usage': {'completion_tokens': 6, 'prompt_tokens': 34, 'total_tokens': 40}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'stop', 'logprobs': None}, id='run-8591eae1-b42b-402b-a23a-dfdb0cd151bd-0')"
]
},
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"messages = [\n",
" SystemMessage(\n",
" content=\"You are a helpful assistant that translates English to French.\"\n",
" ),\n",
" HumanMessage(\n",
" content=\"Translate this sentence from English to French. I love programming.\"\n",
" ),\n",
" (\"system\", \"You are a helpful assistant that translates English to French.\"),\n",
" (\"human\", \"Translate this sentence from English to French. I love programming.\"),\n",
"]\n",
"chat.invoke(messages)"
"llm.invoke(messages)"
]
},
{
@ -99,56 +91,154 @@
"id": "778f912a-66ea-4a5d-b3de-6c7db4baba26",
"metadata": {},
"source": [
"You can make use of templating by using a `MessagePromptTemplate`. You can build a `ChatPromptTemplate` from one or more `MessagePromptTemplates`. You can use `ChatPromptTemplate`'s `format_prompt` -- this returns a `PromptValue`, which you can convert to a string or Message object, depending on whether you want to use the formatted value as input to an llm or chat model.\n",
"## Chaining\n",
"\n",
"For convenience, there is a `from_template` method exposed on the template. If you were to use this template, this is what it would look like:"
"We can chain our model with a prompt template like so:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "180c5cc8",
"execution_count": 8,
"id": "fbb043e6",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='Ich liebe Programmieren.', response_metadata={'token_usage': {'completion_tokens': 5, 'prompt_tokens': 26, 'total_tokens': 31}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'stop', 'logprobs': None}, id='run-94fa6741-c99b-4513-afce-c3f562631c79-0')"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n",
" ),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"chain = prompt | llm\n",
"chain.invoke(\n",
" {\n",
" \"input_language\": \"English\",\n",
" \"output_language\": \"German\",\n",
" \"input\": \"I love programming.\",\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "0b1b52a5-b58d-40c9-bcdd-88eb8fb351e2",
"metadata": {},
"source": [
"## Tool calling\n",
"\n",
"OpenAI has a [tool calling](https://platform.openai.com/docs/guides/function-calling) (we use \"tool calling\" and \"function calling\" interchangeably here) API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool. tool-calling is extremely useful for building tool-using chains and agents, and for getting structured outputs from models more generally.\n",
"\n",
"### ChatOpenAI.bind_tools()\n",
"\n",
"With `ChatAnthropic.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an Anthropic tool schemas, which looks like:\n",
"```\n",
"{\n",
" \"name\": \"...\",\n",
" \"description\": \"...\",\n",
" \"parameters\": {...} # JSONSchema\n",
"}\n",
"```\n",
"and passed in every model invocation."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "b7ea7690-ec7a-4337-b392-e87d1f39a6ec",
"metadata": {},
"outputs": [],
"source": [
"template = (\n",
" \"You are a helpful assistant that translates {input_language} to {output_language}.\"\n",
")\n",
"system_message_prompt = SystemMessagePromptTemplate.from_template(template)\n",
"human_template = \"{text}\"\n",
"human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)"
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class GetWeather(BaseModel):\n",
" \"\"\"Get the current weather in a given location\"\"\"\n",
"\n",
" location: str = Field(..., description=\"The city and state, e.g. San Francisco, CA\")\n",
"\n",
"\n",
"llm_with_tools = llm.bind_tools([GetWeather])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "fbb043e6",
"metadata": {
"tags": []
},
"execution_count": 10,
"id": "1d1ab955-6a68-42f8-bb5d-86eb1111478a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content=\"J'adore la programmation.\", additional_kwargs={}, example=False)"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_H7fABDuzEau48T10Qn0Lsh0D', 'function': {'arguments': '{\"location\":\"San Francisco\"}', 'name': 'GetWeather'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 15, 'prompt_tokens': 70, 'total_tokens': 85}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': 'fp_b28b39ffa8', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b469135e-2718-446a-8164-eef37e672ba2-0', tool_calls=[{'name': 'GetWeather', 'args': {'location': 'San Francisco'}, 'id': 'call_H7fABDuzEau48T10Qn0Lsh0D'}])"
]
},
"execution_count": 5,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chat_prompt = ChatPromptTemplate.from_messages(\n",
" [system_message_prompt, human_message_prompt]\n",
"ai_msg = llm_with_tools.invoke(\n",
" \"what is the weather like in San Francisco\",\n",
")\n",
"\n",
"# get a chat completion from the formatted messages\n",
"chat.invoke(\n",
" chat_prompt.format_prompt(\n",
" input_language=\"English\", output_language=\"French\", text=\"I love programming.\"\n",
" ).to_messages()\n",
")"
"ai_msg"
]
},
{
"cell_type": "markdown",
"id": "768d1ae4-4b1a-48eb-a329-c8d5051067a3",
"metadata": {},
"source": [
"### AIMessage.tool_calls\n",
"Notice that the AIMessage has a `tool_calls` attribute. This contains in a standardized ToolCall format that is model-provider agnostic."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "166cb7ce-831d-4a7c-9721-abc107f11084",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetWeather',\n",
" 'args': {'location': 'San Francisco'},\n",
" 'id': 'call_H7fABDuzEau48T10Qn0Lsh0D'}]"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ai_msg.tool_calls"
]
},
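{
"cell_type": "markdown",
"metadata": {},
"source": [
"As noted above, `bind_tools` also accepts dict schemas. A minimal sketch of binding the same tool as a raw dict in the tool-schema format shown earlier (assuming the dict form mirrors the `GetWeather` class):\n",
"\n",
"```python\n",
"# Sketch: binding an OpenAI-style tool schema dict instead of a Pydantic class\n",
"get_weather_schema = {\n",
"    \"name\": \"GetWeather\",\n",
"    \"description\": \"Get the current weather in a given location\",\n",
"    \"parameters\": {\n",
"        \"type\": \"object\",\n",
"        \"properties\": {\n",
"            \"location\": {\n",
"                \"type\": \"string\",\n",
"                \"description\": \"The city and state, e.g. San Francisco, CA\",\n",
"            }\n",
"        },\n",
"        \"required\": [\"location\"],\n",
"    },\n",
"}\n",
"\n",
"llm_with_dict_tool = llm.bind_tools([get_weather_schema])\n",
"llm_with_dict_tool.invoke(\"what is the weather like in San Francisco\")\n",
"```"
]
},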
{
"cell_type": "markdown",
"id": "e082c9ac-c7c7-4aff-a8ec-8e220262a59c",
"metadata": {},
"source": [
"For more on binding tools and tool call outputs, head to the [tool calling](/docs/modules/model_io/chat/function_calling/) docs."
]
},
{
@ -205,7 +295,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -1,80 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"id": "a9667088-04e1-4f67-8221-a0072a2d635f",
"metadata": {
"execution": {
"iopub.execute_input": "2024-03-06T17:04:59.273702Z",
"iopub.status.busy": "2024-03-06T17:04:59.272602Z",
"iopub.status.idle": "2024-03-06T17:05:00.129177Z",
"shell.execute_reply": "2024-03-06T17:05:00.124594Z",
"shell.execute_reply.started": "2024-03-06T17:04:59.273646Z"
}
},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='저는 대형 언어 모델 프로젝트를 구축하고 싶습니다.')"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import os\n",
"\n",
"os.environ[\"SOLAR_API_KEY\"] = \"SOLAR_API_KEY\"\n",
"\n",
"from langchain_community.chat_models.solar import SolarChat\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"\n",
"chat = SolarChat(max_tokens=1024)\n",
"\n",
"messages = [\n",
" SystemMessage(\n",
" content=\"You are a helpful assistant who translates English to Korean.\"\n",
" ),\n",
" HumanMessage(\n",
" content=\"Translate this sentence from English to Korean. I want to build a project of large language model.\"\n",
" ),\n",
"]\n",
"\n",
"chat.invoke(messages)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8cb792fe-2844-4969-a9e9-f4c0f97b1699",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,157 @@
{
"cells": [
{
"cell_type": "raw",
"id": "910f5772b6af13c9",
"metadata": {
"collapsed": false
},
"source": [
"---\n",
"sidebar_label: Upstage\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "433f5422ad8e1efa",
"metadata": {
"collapsed": false
},
"source": [
"# ChatUpstage\n",
"\n",
"This notebook covers how to get started with Upstage chat models.\n",
"\n",
"## Installation\n",
"\n",
"Install `langchain-upstage` package.\n",
"\n",
"```bash\n",
"pip install -U langchain-upstage\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "b3c5c4627fe95eae",
"metadata": {
"collapsed": false
},
"source": [
"## Environment Setup\n",
"\n",
"Make sure to set the following environment variables:\n",
"\n",
"- `UPSTAGE_API_KEY`: Your Upstage API key from [Upstage console](https://console.upstage.ai/).\n",
"\n",
"## Usage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "20a0067b",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"UPSTAGE_API_KEY\"] = \"YOUR_API_KEY\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8a4d650d76a33494",
"metadata": {
"collapsed": false,
"is_executing": true
},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_upstage import ChatUpstage\n",
"\n",
"chat = ChatUpstage()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a1679b5cafaf88b9",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# using chat invoke\n",
"chat.invoke(\"Hello, how are you?\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "698a788a63b5c3e5",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# using chat stream\n",
"for m in chat.stream(\"Hello, how are you?\"):\n",
" print(m)"
]
},
{
"cell_type": "markdown",
"id": "36f8a703",
"metadata": {},
"source": [
"## Chaining"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "efa06617e5d4f6b2",
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# using chain\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant that translates English to French.\"),\n",
" (\"human\", \"Translate this sentence from English to French. {english_text}.\"),\n",
" ]\n",
")\n",
"chain = prompt | chat\n",
"\n",
"chain.invoke({\"english_text\": \"Hello, how are you?\"})"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "3.9.13"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -31,12 +31,12 @@
},
"outputs": [],
"source": [
"from langchain.prompts.chat import (\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain_core.messages import HumanMessage, SystemMessage\n",
"from langchain_openai import ChatOpenAI"
]
},

@ -348,7 +348,7 @@
"outputs": [],
"source": [
"async def ainvoke_with_prompt_template():\n",
" from langchain.prompts.chat import (\n",
" from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" )\n",
"\n",

@ -17,9 +17,7 @@
"\n",
"This notebook shows how to use [ZHIPU AI API](https://open.bigmodel.cn/dev/api) in LangChain with the langchain.chat_models.ChatZhipuAI.\n",
"\n",
">[*ZHIPU AI*](https://open.bigmodel.cn/) is a multi-lingual large language model aligned with human intent, featuring capabilities in Q&A, multi-turn dialogue, and code generation, developed on the foundation of the ChatGLM3. \n",
"\n",
">It's co-developed with Tsinghua University's KEG Laboratory under the ChatGLM3 project, signifying a new era in dialogue pre-training models. The open-source [ChatGLM3](https://github.com/THUDM/ChatGLM3) variant boasts a robust foundation, comprehensive functional support, and widespread availability for both academic and commercial uses. \n",
">[*GLM-4*](https://open.bigmodel.cn/) is a multi-lingual large language model aligned with human intent, featuring capabilities in Q&A, multi-turn dialogue, and code generation. Compared with the previous generation, GLM-4 significantly improves overall performance, supports longer contexts and stronger multimodality, and offers faster inference with higher concurrency at a greatly reduced inference cost. GLM-4 also enhances the capabilities of intelligent agents.\n",
"\n",
"## Getting started\n",
"### Installation\n",
@ -28,11 +26,11 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --quiet httpx[socks]==0.24.1 httpx-sse PyJWT"
"#!pip install --upgrade httpx httpx-sse PyJWT"
]
},
{
@ -45,7 +43,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@ -63,11 +61,13 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"zhipuai_api_key = \"your_api_key\""
"import os\n",
"\n",
"os.environ[\"ZHIPUAI_API_KEY\"] = \"zhipuai_api_key\""
]
},
{
@ -80,12 +80,11 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chat = ChatZhipuAI(\n",
" api_key=zhipuai_api_key,\n",
" model=\"glm-4\",\n",
" temperature=0.5,\n",
")"
@ -101,7 +100,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {
"scrolled": true
},
@ -116,17 +115,9 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\" Formed from bits and bytes,\\nA virtual mind takes flight,\\nConversing, learning fast,\\nEmpathy and wisdom sought.\"\n"
]
}
],
"outputs": [],
"source": [
"response = chat(messages)\n",
"print(response.content) # Displays the AI-generated poem"
@ -143,7 +134,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@ -153,12 +144,11 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"streaming_chat = ChatZhipuAI(\n",
" api_key=zhipuai_api_key,\n",
" model=\"glm-4\",\n",
" temperature=0.5,\n",
" streaming=True,\n",
@ -168,30 +158,9 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Formed from data's embrace,\n",
"A digital soul to grace,\n",
"AI, our trusted guide,\n",
"Shaping minds, sides by side."
]
},
{
"data": {
"text/plain": [
"AIMessage(content=\" Formed from data's embrace,\\nA digital soul to grace,\\nAI, our trusted guide,\\nShaping minds, sides by side.\")"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"streaming_chat(messages)"
]
@ -206,12 +175,11 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"async_chat = ChatZhipuAI(\n",
" api_key=zhipuai_api_key,\n",
" model=\"glm-4\",\n",
" temperature=0.5,\n",
")"
@ -219,19 +187,11 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"generations=[[ChatGeneration(text=\" Formed from data's embrace,\\nA digital soul to grace,\\nAutomation's tender touch,\\nHarmony of man and machine.\", message=AIMessage(content=\" Formed from data's embrace,\\nA digital soul to grace,\\nAutomation's tender touch,\\nHarmony of man and machine.\"))]] llm_output={} run=[RunInfo(run_id=UUID('25fa687f-3961-4c63-b370-22f7647a4d42'))]\n"
]
}
],
"outputs": [],
"source": [
"response = await async_chat.agenerate([messages])\n",
"print(response)"
@ -239,47 +199,58 @@
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Role Play Model\n",
"Supports character role-playing based on personas, ultra-long multi-turn memory, and personalized dialogues for thousands of unique characters, widely applied in emotional companionship, game intelligent NPCs, virtual avatars for celebrities/stars/movie and TV IPs, digital humans/virtual anchors, text adventure games, and other anthropomorphic dialogue or gaming scenarios."
]
"### Using With Functions Call\n",
"\n",
"GLM-4 Model can be used with the function call as welluse the following code to run a simple LangChain json_chat_agent."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"meta = {\n",
" \"user_info\": \"My name is Lu Xingchen, a male, and a renowned director. I am also the collaborative director with Su Mengyuan. I specialize in directing movies with musical themes. Su Mengyuan respects me and regards me as a mentor and good friend.\",\n",
" \"bot_info\": \"Su Mengyuan, whose real name is Su Yuanxin, is a popular domestic female singer and actress. She rose to fame quickly with her unique voice and exceptional stage presence after participating in a talent show, making her way into the entertainment industry. She is beautiful and charming, but her real allure lies in her talent and diligence. Su Mengyuan is a distinguished graduate of a music academy, skilled in songwriting, and has several popular original songs. Beyond her musical achievements, she is passionate about charity work, actively participating in public welfare activities, and spreading positive energy through her actions. In her work, she is very dedicated and immerses herself fully in her roles during filming, earning praise from industry professionals and love from fans. Despite being in the entertainment industry, she always maintains a low profile and a humble attitude, earning respect from her peers. In expression, Su Mengyuan likes to use 'we' and 'together,' emphasizing team spirit.\",\n",
" \"bot_name\": \"Su Mengyuan\",\n",
" \"user_name\": \"Lu Xingchen\",\n",
"}"
]
"os.environ[\"TAVILY_API_KEY\"] = \"tavily_api_key\""
],
"metadata": {
"collapsed": false
},
"execution_count": null
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" AIMessage(\n",
" content=\"(Narration: Su Mengyuan stars in a music-themed movie directed by Lu Xingchen. During filming, they have a disagreement over the performance of a particular scene.) Director, about this scene, I think we can try to start from the character's inner emotions to make the performance more authentic.\"\n",
" ),\n",
" HumanMessage(\n",
" content=\"I understand your idea, but I believe that if we emphasize the inner emotions too much, it might overshadow the musical elements.\"\n",
" ),\n",
" AIMessage(\n",
" content=\"Hmm, I understand. But the key to this scene is the character's emotional transformation. Could we try to express these emotions through music, so the audience can better feel the character's growth?\"\n",
" ),\n",
" HumanMessage(\n",
" content=\"That sounds good. Let's try to combine the character's emotional transformation with the musical elements and see if we can achieve a better effect.\"\n",
" ),\n",
"]"
]
"from langchain import hub\n",
"from langchain.agents import AgentExecutor, create_json_chat_agent\n",
"from langchain_community.tools.tavily_search import TavilySearchResults\n",
"\n",
"tools = [TavilySearchResults(max_results=1)]\n",
"prompt = hub.pull(\"hwchase17/react-chat-json\")\n",
"llm = ChatZhipuAI(temperature=0.01, model=\"glm-4\")\n",
"\n",
"agent = create_json_chat_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n",
")"
],
"metadata": {
"collapsed": false
},
"execution_count": null
},
{
"cell_type": "code",
"outputs": [],
"source": [
"agent_executor.invoke({\"input\": \"what is LangChain?\"})"
],
"metadata": {
"collapsed": false
},
"execution_count": null
}
],
"metadata": {

@ -258,7 +258,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.adapters.openai import convert_messages_for_finetuning"
"from langchain_community.adapters.openai import convert_messages_for_finetuning"
]
},
{

@ -173,7 +173,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.adapters.openai import convert_messages_for_finetuning"
"from langchain_community.adapters.openai import convert_messages_for_finetuning"
]
},
{

@ -150,7 +150,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.adapters.openai import convert_messages_for_finetuning\n",
"from langchain_community.adapters.openai import convert_messages_for_finetuning\n",
"\n",
"training_data = convert_messages_for_finetuning(chat_sessions)"
]

@ -285,7 +285,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.adapters.openai import convert_messages_for_finetuning\n",
"from langchain_community.adapters.openai import convert_messages_for_finetuning\n",
"\n",
"training_data = convert_messages_for_finetuning(chat_sessions)"
]

@ -21,7 +21,7 @@
"source": [
"import json\n",
"\n",
"from langchain.adapters.openai import convert_message_to_dict\n",
"from langchain_community.adapters.openai import convert_message_to_dict\n",
"from langchain_core.messages import AIMessage"
]
},

@ -166,7 +166,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -149,7 +149,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -151,7 +151,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -156,7 +156,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -152,7 +152,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -149,7 +149,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -152,7 +152,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -153,7 +153,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"\n",
"def handle_record(record, id):\n",

@ -100,8 +100,8 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain_community.docstore.document import Document\n",
"from langchain_community.document_loaders import ApifyDatasetLoader"
]
},

@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document"
"from langchain_community.docstore.document import Document"
]
},
{

@ -24,12 +24,12 @@
"import os\n",
"\n",
"from langchain.indexes import VectorstoreIndexCreator\n",
"from langchain.prompts.chat import (\n",
"from langchain_community.document_loaders.figma import FigmaFileLoader\n",
"from langchain_core.prompts.chat import (\n",
" ChatPromptTemplate,\n",
" HumanMessagePromptTemplate,\n",
" SystemMessagePromptTemplate,\n",
")\n",
"from langchain_community.document_loaders.figma import FigmaFileLoader\n",
"from langchain_openai import ChatOpenAI"
]
},

File diff suppressed because one or more lines are too long

@ -0,0 +1,118 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "MwTWzDxYgbrR"
},
"source": [
"# Glue Catalog\n",
"\n",
"\n",
"The [AWS Glue Data Catalog](https://docs.aws.amazon.com/en_en/glue/latest/dg/catalog-and-crawler.html) is a centralized metadata repository that allows you to manage, access, and share metadata about your data stored in AWS. It acts as a metadata store for your data assets, enabling various AWS services and your applications to query and connect to the data they need efficiently.\n",
"\n",
"When you define data sources, transformations, and targets in AWS Glue, the metadata about these elements is stored in the Data Catalog. This includes information about data locations, schema definitions, runtime metrics, and more. It supports various data store types, such as Amazon S3, Amazon RDS, Amazon Redshift, and external databases compatible with JDBC. It is also directly integrated with Amazon Athena, Amazon Redshift Spectrum, and Amazon EMR, allowing these services to directly access and query the data.\n",
"\n",
"The Langchain GlueCatalogLoader will get the schema of all tables inside the given Glue database in the same format as Pandas dtype."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setting up\n",
"\n",
"- Follow [instructions to set up an AWS accoung](https://docs.aws.amazon.com/athena/latest/ug/setting-up.html).\n",
"- Install the boto3 library: `pip install boto3`\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Example"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "076NLjfngoWJ"
},
"outputs": [],
"source": [
"from langchain_community.document_loaders.glue_catalog import GlueCatalogLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "XpMRQwU9gu44"
},
"outputs": [],
"source": [
"database_name = \"my_database\"\n",
"profile_name = \"my_profile\"\n",
"\n",
"loader = GlueCatalogLoader(\n",
" database=database_name,\n",
" profile_name=profile_name,\n",
")\n",
"\n",
"schemas = loader.load()\n",
"print(schemas)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Example with table filtering\n",
"\n",
"Table filtering allows you to selectively retrieve schema information for a specific subset of tables within a Glue database. Instead of loading the schemas for all tables, you can use the `table_filter` argument to specify exactly which tables you're interested in."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders.glue_catalog import GlueCatalogLoader"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"database_name = \"my_database\"\n",
"profile_name = \"my_profile\"\n",
"table_filter = [\"table1\", \"table2\", \"table3\"]\n",
"\n",
"loader = GlueCatalogLoader(\n",
" database=database_name, profile_name=profile_name, table_filter=table_filter\n",
")\n",
"\n",
"schemas = loader.load()\n",
"print(schemas)"
]
}
],
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}

@ -322,6 +322,52 @@
" print(doc.page_content.strip()[:60] + \"...\")"
]
},
{
"cell_type": "markdown",
"id": "7bde486a",
"metadata": {},
"source": [
"### Loading auth Identities\n",
"\n",
"Authorized identities for each file ingested by Google Drive Loader can be loaded along with metadata per Document."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e1d91045",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.document_loaders import GoogleDriveLoader\n",
"\n",
"loader = GoogleDriveLoader(\n",
" folder_id=folder_id,\n",
" load_auth=True,\n",
" # Optional: configure whether to load authorized identities for each Document.\n",
")\n",
"\n",
"doc = loader.load()"
]
},
{
"cell_type": "markdown",
"id": "83557b75",
"metadata": {},
"source": [
"You can pass load_auth=True, to add Google Drive document access identities to metadata."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7ac1a43b",
"metadata": {},
"outputs": [],
"source": [
"doc[0].metadata"
]
},
{
"cell_type": "markdown",
"id": "cd13d7d1-db7a-498d-ac98-76ccd9ad9019",
@ -385,7 +431,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts.prompt import PromptTemplate\n",
"\n",
"loader = GoogleDriveLoader(\n",
" folder_id=folder_id,\n",
@ -530,7 +576,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.11.5"
}
},
"nbformat": 4,

@ -21,7 +21,7 @@
"7. To find your `Tenant Name` follow the instructions at this [document](https://learn.microsoft.com/en-us/azure/active-directory-b2c/tenant-management-read-tenant-name). Once you got this, just remove `.onmicrosoft.com` from the value and hold the rest as your `Tenant Name`.\n",
"8. To obtain your `Collection ID` and `Subsite ID`, you will need your **SharePoint** `site-name`. Your `SharePoint` site URL has the following format `https://<tenant-name>.sharepoint.com/sites/<site-name>`. The last part of this URL is the `site-name`.\n",
"9. To Get the Site `Collection ID`, hit this URL in the browser: `https://<tenant>.sharepoint.com/sites/<site-name>/_api/site/id` and copy the value of the `Edm.Guid` property.\n",
"10. To get the `Subsite ID` (or web ID) use: `https://<tenant>.sharepoint.com/<site-name>/_api/web/id` and copy the value of the `Edm.Guid` property.\n",
"10. To get the `Subsite ID` (or web ID) use: `https://<tenant>.sharepoint.com/sites/<site-name>/_api/web/id` and copy the value of the `Edm.Guid` property.\n",
"11. The `SharePoint site ID` has the following format: `<tenant-name>.sharepoint.com,<Collection ID>,<subsite ID>`. You can hold that value to use in the next step.\n",
"12. Visit the [Graph Explorer Playground](https://developer.microsoft.com/en-us/graph/graph-explorer) to obtain your `Document Library ID`. The first step is to ensure you are logged in with the account associated with your **SharePoint** site. Then you need to make a request to `https://graph.microsoft.com/v1.0/sites/<SharePoint site ID>/drive` and the response will return a payload with a field `id` that holds the ID of your `Document Library ID`.\n",
"\n",
@ -65,6 +65,30 @@
"documents = loader.load()\n",
"```\n",
"\n",
"If you are receiving the error `Resource not found for the segment`, try using the `folder_id` instead of the folder path, which can be obtained from the [Microsoft Graph API](https://developer.microsoft.com/en-us/graph/graph-explorer)\n",
"\n",
"```python\n",
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", auth_with_token=True\n",
" folder_id=\"<folder-id>\")\n",
"documents = loader.load()\n",
"```\n",
"\n",
"If you wish to load documents from the root directory, you can omit `folder_id`, `folder_path` and `documents_ids` and loader will load root directory.\n",
"```python\n",
"# loads documents from root directory\n",
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\", auth_with_token=True)\n",
"documents = loader.load()\n",
"```\n",
"\n",
"Combined with `recursive=True` you can simply load all documents from whole SharePoint:\n",
"```python\n",
"# loads documents from root directory\n",
"loader = SharePointLoader(document_library_id=\"YOUR DOCUMENT LIBRARY ID\",\n",
" recursive=True,\n",
" auth_with_token=True)\n",
"documents = loader.load()\n",
"```\n",
"\n",
"#### 📑 Loading documents from a list of Documents IDs\n",
"\n",
"Another possibility is to provide a list of `object_id` for each document you want to load. For that, you will need to query the [Microsoft Graph API](https://developer.microsoft.com/en-us/graph/graph-explorer) to find all the documents ID that you are interested in. This [link](https://learn.microsoft.com/en-us/graph/api/resources/onedrive?view=graph-rest-1.0#commonly-accessed-resources) provides a list of endpoints that will be helpful to retrieve the documents ID.\n",

@ -62,6 +62,35 @@
"documents = loader.load()\n",
"print(documents)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Send semantic topics and identities to Pebblo cloud server\n",
"\n",
"To send semantic data to pebblo-cloud, pass api-key to PebbloSafeLoader as an argument or alternatively, put the api-ket in `PEBBLO_API_KEY` environment variable."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders.csv_loader import CSVLoader\n",
"from langchain_community.document_loaders import PebbloSafeLoader\n",
"\n",
"loader = PebbloSafeLoader(\n",
" CSVLoader(\"data/corp_sens_data.csv\"),\n",
" name=\"acme-corp-rag-1\", # App name (Mandatory)\n",
" owner=\"Joe Smith\", # Owner (Optional)\n",
" description=\"Support productivity RAG application\", # Description (Optional)\n",
" api_key=\"my-api-key\", # API key (Optional, can be set in the environment variable PEBBLO_API_KEY)\n",
")\n",
"documents = loader.load()\n",
"print(documents)"
]
}
],
"metadata": {

@ -39,7 +39,7 @@
],
"source": [
"# Uncomment this to install psychicapi if you don't already have it installed\n",
"!poetry run pip -q install psychicapi"
"!poetry run pip -q install psychicapi langchain-chroma"
]
},
{
@ -78,7 +78,7 @@
"outputs": [],
"source": [
"from langchain.chains import RetrievalQAWithSourcesChain\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_chroma import Chroma\n",
"from langchain_openai import OpenAI, OpenAIEmbeddings\n",
"from langchain_text_splitters import CharacterTextSplitter"
]

@ -7,7 +7,9 @@
"source": [
"# WebBaseLoader\n",
"\n",
"This covers how to use `WebBaseLoader` to load all text from `HTML` webpages into a document format that we can use downstream. For more custom logic for loading webpages look at some child class examples such as `IMSDbLoader`, `AZLyricsLoader`, and `CollegeConfidentialLoader`"
"This covers how to use `WebBaseLoader` to load all text from `HTML` webpages into a document format that we can use downstream. For more custom logic for loading webpages look at some child class examples such as `IMSDbLoader`, `AZLyricsLoader`, and `CollegeConfidentialLoader`. \n",
"\n",
"If you don't want to worry about website crawling, bypassing JS-blocking sites, and data cleaning, consider using `FireCrawlLoader`.\n"
]
},
{
@ -277,4 +279,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

@ -218,7 +218,7 @@
"source": [
"# Build a QA chain\n",
"qa_chain = RetrievalQA.from_chain_type(\n",
" llm=ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0),\n",
" llm=ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0),\n",
" chain_type=\"stuff\",\n",
" retriever=vectordb.as_retriever(),\n",
")"

@ -175,7 +175,7 @@
"source": [
"## Uploading Hugging Face model to SageMaker endpoint\n",
"\n",
"Refer to [this article](https://www.philschmid.de/custom-inference-huggingface-sagemaker) for general guideline. Here is a simple `inference.py` for creating an endpoint that works with `SagemakerEndpointCrossEncoder`.\n",
"Here is a sample `inference.py` for creating an endpoint that works with `SagemakerEndpointCrossEncoder`. For more details with step-by-step guidance, refer to [this article](https://huggingface.co/blog/kchoe/deploy-any-huggingface-model-to-sagemaker). \n",
"\n",
"It downloads Hugging Face model on the fly, so you do not need to keep the model artifacts such as `pytorch_model.bin` in your `model.tar.gz`."
]

@ -18,7 +18,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
@ -28,42 +28,7 @@
"is_executing": true
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
"To disable this warning, you can either:\n",
"\t- Avoid using `tokenizers` before the fork if possible\n",
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
"To disable this warning, you can either:\n",
"\t- Avoid using `tokenizers` before the fork if possible\n",
"\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"%pip install --upgrade-strategy eager \"optimum[openvino,nncf]\" --quiet\n",
"%pip install --upgrade --quiet faiss-cpu"
@ -404,46 +369,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Framework not specified. Using pt to export the model.\n",
"Using the export variant default. Available variants are:\n",
" - default: The default ONNX variant.\n",
"Using framework PyTorch: 2.2.1+cu121\n",
"Overriding 1 configuration item(s)\n",
"\t- use_cache -> False\n",
"/home/ethan/intel/langchain_test/lib/python3.10/site-packages/transformers/modeling_utils.py:4193: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead\n",
" warnings.warn(\n",
"Compiling the model to CPU ...\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[0, 16, 18, 6]\n"
]
}
],
"outputs": [],
"source": [
"from langchain.retrievers import ContextualCompressionRetriever\n",
"from langchain_community.document_compressors.openvino_rerank import OpenVINOReranker\n",
"\n",
"model_name = \"BAAI/bge-reranker-large\"\n",
"\n",
"compressor = OpenVINOReranker(model_name_or_path=model_name)\n",
"ov_compressor = OpenVINOReranker(model_name_or_path=model_name, top_n=4)\n",
"compression_retriever = ContextualCompressionRetriever(\n",
" base_compressor=compressor, base_retriever=retriever\n",
" base_compressor=ov_compressor, base_retriever=retriever\n",
")\n",
"\n",
"compressed_docs = compression_retriever.get_relevant_documents(\n",
@ -461,7 +403,7 @@
}
},
"source": [
"After reranking, the top 3 documents are different from the top 3 documents retrieved by the base retriever."
"After reranking, the top 4 documents are different from the top 4 documents retrieved by the base retriever."
]
},
{
@ -532,37 +474,13 @@
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Framework not specified. Using pt to export the model.\n",
"Using the export variant default. Available variants are:\n",
" - default: The default ONNX variant.\n",
"Using framework PyTorch: 2.2.1+cu121\n",
"Overriding 1 configuration item(s)\n",
"\t- use_cache -> False\n",
"/home/ethan/intel/langchain_test/lib/python3.10/site-packages/transformers/modeling_utils.py:4193: FutureWarning: `_is_quantized_training_enabled` is going to be deprecated in transformers 4.39.0. Please use `model.hf_quantizer.is_trainable` instead\n",
" warnings.warn(\n"
]
}
],
"outputs": [],
"source": [
"from pathlib import Path\n",
"\n",
"ov_model_dir = \"bge-reranker-large-ov\"\n",
"if not Path(ov_model_dir).exists():\n",
" from optimum.intel.openvino import OVModelForSequenceClassification\n",
" from transformers import AutoTokenizer\n",
"\n",
" ov_model = OVModelForSequenceClassification.from_pretrained(\n",
" model_name, compile=False, export=True\n",
" )\n",
" tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
" ov_model.half()\n",
" ov_model.save_pretrained(ov_model_dir)\n",
" tokenizer.save_pretrained(ov_model_dir)"
" ov_compressor.save_model(ov_model_dir)"
]
},
{
@ -579,7 +497,7 @@
}
],
"source": [
"compressor = OpenVINOReranker(model_name_or_path=ov_model_dir)"
"ov_compressor = OpenVINOReranker(model_name_or_path=ov_model_dir)"
]
},
{
@ -594,7 +512,7 @@
"\n",
"* [OpenVINO Get Started Guide](https://www.intel.com/content/www/us/en/content-details/819067/openvino-get-started-guide.html).\n",
"\n",
"* [RAG Notebook with LangChain](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/notebooks/llm-chatbot/rag-chatbot.ipynb)."
"* [RAG Notebook with LangChain](https://github.com/openvinotoolkit/openvino_notebooks/tree/latest/notebooks/llm-rag-langchain)."
]
}
],

@ -12,12 +12,23 @@
">\n",
">[Cypher](https://en.wikipedia.org/wiki/Cypher_(query_language)) is a declarative graph query language that allows for expressive and efficient data querying in a property graph.\n",
">\n",
">[openCypher](https://opencypher.org/) is an open-source implementation of Cypher."
">[openCypher](https://opencypher.org/) is an open-source implementation of Cypher.",
"# Neptune Open Cypher QA Chain\n",
"This QA chain queries Amazon Neptune using openCypher and returns human readable response\n",
"\n",
"LangChain supports both [Neptune Database](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html) and [Neptune Analytics](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html) with `NeptuneOpenCypherQAChain` \n",
"\n",
"\n",
"Neptune Database is a serverless graph database designed for optimal scalability and availability. It provides a solution for graph database workloads that need to scale to 100,000 queries per second, Multi-AZ high availability, and multi-Region deployments. You can use Neptune Database for social networking, fraud alerting, and Customer 360 applications.\n",
"\n",
"Neptune Analytics is an analytics database engine that can quickly analyze large amounts of graph data in memory to get insights and find trends. Neptune Analytics is a solution for quickly analyzing existing graph databases or graph datasets stored in a data lake. It uses popular graph analytic algorithms and low-latency analytic queries.\n",
"\n",
"## Using Neptune Database"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@ -30,9 +41,36 @@
"graph = NeptuneGraph(host=host, port=port, use_https=use_https)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using Neptune Analytics"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.graphs import NeptuneAnalyticsGraph\n",
"\n",
"graph = NeptuneAnalyticsGraph(graph_identifier=\"<neptune-analytics-graph-id>\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Using NeptuneOpenCypherQAChain\n",
"\n",
"This QA chain queries Neptune graph database using openCypher and returns human readable response."
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [
{
@ -54,7 +92,7 @@
"\n",
"chain = NeptuneOpenCypherQAChain.from_llm(llm=llm, graph=graph)\n",
"\n",
"chain.run(\"how many outgoing routes does the Austin airport have?\")"
"chain.invoke(\"how many outgoing routes does the Austin airport have?\")"
]
}
],

@ -118,25 +118,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install --upgrade --force-reinstall langchain"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install --upgrade --force-reinstall langchain-core"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install --upgrade --force-reinstall langchain-community"
"!pip install --upgrade --quiet langchain langchain-community langchain-aws"
]
},
{
@ -264,7 +246,7 @@
"source": [
"import boto3\n",
"from langchain.chains.graph_qa.neptune_sparql import NeptuneSparqlQAChain\n",
"from langchain_community.chat_models import BedrockChat\n",
"from langchain_aws import ChatBedrock\n",
"from langchain_community.graphs import NeptuneRdfGraph\n",
"\n",
"host = \"<your host>\"\n",
@ -279,7 +261,7 @@
"\n",
"MODEL_ID = \"anthropic.claude-v2\"\n",
"bedrock_client = boto3.client(\"bedrock-runtime\")\n",
"llm = BedrockChat(model_id=MODEL_ID, client=bedrock_client)\n",
"llm = ChatBedrock(model_id=MODEL_ID, client=bedrock_client)\n",
"\n",
"chain = NeptuneSparqlQAChain.from_llm(\n",
" llm=llm,\n",

@ -0,0 +1,689 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "c94240f5",
"metadata": {},
"source": [
"# Apache AGE\n",
"\n",
">[Apache AGE](https://age.apache.org/) is a PostgreSQL extension that provides graph database functionality. AGE is an acronym for A Graph Extension, and is inspired by Bitnines fork of PostgreSQL 10, AgensGraph, which is a multi-model database. The goal of the project is to create single storage that can handle both relational and graph model data so that users can use standard ANSI SQL along with openCypher, the Graph query language. The data elements `Apache AGE` stores are nodes, edges connecting them, and attributes of nodes and edges.\n",
"\n",
">This notebook shows how to use LLMs to provide a natural language interface to a graph database you can query with the `Cypher` query language.\n",
"\n",
">[Cypher](https://en.wikipedia.org/wiki/Cypher_(query_language)) is a declarative graph query language that allows for expressive and efficient data querying in a property graph.\n"
]
},
{
"cell_type": "markdown",
"id": "dbc0ee68",
"metadata": {},
"source": [
"## Settin up\n",
"\n",
"You will need to have a running `Postgre` instance with the AGE extension installed. One option for testing is to run a docker container using the official AGE docker image.\n",
"You can run a local docker container by running the executing the following script:\n",
"\n",
"```\n",
"docker run \\\n",
" --name age \\\n",
" -p 5432:5432 \\\n",
" -e POSTGRES_USER=postgresUser \\\n",
" -e POSTGRES_PASSWORD=postgresPW \\\n",
" -e POSTGRES_DB=postgresDB \\\n",
" -d \\\n",
" apache/age\n",
"```\n",
"\n",
"Additional instructions on running in docker can be found [here](https://hub.docker.com/r/apache/age)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "62812aad",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import GraphCypherQAChain\n",
"from langchain_community.graphs.age_graph import AGEGraph\n",
"from langchain_openai import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "0928915d",
"metadata": {},
"outputs": [],
"source": [
"conf = {\n",
" \"database\": \"postgresDB\",\n",
" \"user\": \"postgresUser\",\n",
" \"password\": \"postgresPW\",\n",
" \"host\": \"localhost\",\n",
" \"port\": 5432,\n",
"}\n",
"\n",
"graph = AGEGraph(graph_name=\"age_test\", conf=conf)"
]
},
{
"cell_type": "markdown",
"id": "995ea9b9",
"metadata": {},
"source": [
"## Seeding the database\n",
"\n",
"Assuming your database is empty, you can populate it using Cypher query language. The following Cypher statement is idempotent, which means the database information will be the same if you run it one or multiple times."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "fedd26b9",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"graph.query(\n",
" \"\"\"\n",
"MERGE (m:Movie {name:\"Top Gun\"})\n",
"WITH m\n",
"UNWIND [\"Tom Cruise\", \"Val Kilmer\", \"Anthony Edwards\", \"Meg Ryan\"] AS actor\n",
"MERGE (a:Actor {name:actor})\n",
"MERGE (a)-[:ACTED_IN]->(m)\n",
"\"\"\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "58c1a8ea",
"metadata": {},
"source": [
"## Refresh graph schema information\n",
"If the schema of database changes, you can refresh the schema information needed to generate Cypher statements."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "4e3de44f",
"metadata": {},
"outputs": [],
"source": [
"graph.refresh_schema()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1fe76ccd",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
" Node properties are the following:\n",
" [{'properties': [{'property': 'name', 'type': 'STRING'}], 'labels': 'Actor'}, {'properties': [{'property': 'property_a', 'type': 'STRING'}], 'labels': 'LabelA'}, {'properties': [], 'labels': 'LabelB'}, {'properties': [], 'labels': 'LabelC'}, {'properties': [{'property': 'name', 'type': 'STRING'}], 'labels': 'Movie'}]\n",
" Relationship properties are the following:\n",
" [{'properties': [], 'type': 'ACTED_IN'}, {'properties': [{'property': 'rel_prop', 'type': 'STRING'}], 'type': 'REL_TYPE'}]\n",
" The relationships are the following:\n",
" ['(:`Actor`)-[:`ACTED_IN`]->(:`Movie`)', '(:`LabelA`)-[:`REL_TYPE`]->(:`LabelB`)', '(:`LabelA`)-[:`REL_TYPE`]->(:`LabelC`)']\n",
" \n"
]
}
],
"source": [
"print(graph.schema)"
]
},
{
"cell_type": "markdown",
"id": "68a3c677",
"metadata": {},
"source": [
"## Querying the graph\n",
"\n",
"We can now use the graph cypher QA chain to ask question of the graph"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "7476ce98",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" ChatOpenAI(temperature=0), graph=graph, verbose=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "ef8ee27b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
"WHERE m.name = 'Top Gun'\n",
"RETURN a.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'query': 'Who played in Top Gun?',\n",
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, Meg Ryan played in Top Gun.'}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"Who played in Top Gun?\")"
]
},
{
"cell_type": "markdown",
"id": "2d28c4df",
"metadata": {},
"source": [
"## Limit the number of results\n",
"You can limit the number of results from the Cypher QA Chain using the `top_k` parameter.\n",
"The default is 10."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "df230946",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "3f1600ee",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
"RETURN a.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'query': 'Who played in Top Gun?',\n",
" 'result': 'Tom Cruise, Val Kilmer played in Top Gun.'}"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"Who played in Top Gun?\")"
]
},
{
"cell_type": "markdown",
"id": "88c16206",
"metadata": {},
"source": [
"## Return intermediate results\n",
"You can return intermediate steps from the Cypher QA Chain using the `return_intermediate_steps` parameter"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "e412f36b",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "4f4699dc",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
"WHERE m.name = 'Top Gun'\n",
"RETURN a.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n",
"Intermediate steps: [{'query': \"MATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\\nWHERE m.name = 'Top Gun'\\nRETURN a.name\"}, {'context': [{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]}]\n",
"Final answer: Tom Cruise, Val Kilmer, Anthony Edwards, Meg Ryan played in Top Gun.\n"
]
}
],
"source": [
"result = chain(\"Who played in Top Gun?\")\n",
"print(f\"Intermediate steps: {result['intermediate_steps']}\")\n",
"print(f\"Final answer: {result['result']}\")"
]
},
{
"cell_type": "markdown",
"id": "d6e1b054",
"metadata": {},
"source": [
"## Return direct results\n",
"You can return direct results from the Cypher QA Chain using the `return_direct` parameter"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "2d3acf10",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "b0a9d143",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n",
"RETURN a.name\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'query': 'Who played in Top Gun?',\n",
" 'result': [{'name': 'Tom Cruise'},\n",
" {'name': 'Val Kilmer'},\n",
" {'name': 'Anthony Edwards'},\n",
" {'name': 'Meg Ryan'}]}"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"Who played in Top Gun?\")"
]
},
{
"cell_type": "markdown",
"id": "f01dfb72-24ec-4ae7-883a-ee6646889b59",
"metadata": {},
"source": [
"## Add examples in the Cypher generation prompt\n",
"You can define the Cypher statement you want the LLM to generate for particular questions"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "59baeb88-adfa-4c26-8334-fcbff3a98efb",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts.prompt import PromptTemplate\n",
"\n",
"CYPHER_GENERATION_TEMPLATE = \"\"\"Task:Generate Cypher statement to query a graph database.\n",
"Instructions:\n",
"Use only the provided relationship types and properties in the schema.\n",
"Do not use any other relationship types or properties that are not provided.\n",
"Schema:\n",
"{schema}\n",
"Note: Do not include any explanations or apologies in your responses.\n",
"Do not respond to any questions that might ask anything else than for you to construct a Cypher statement.\n",
"Do not include any text except the generated Cypher statement.\n",
"Examples: Here are a few examples of generated Cypher statements for particular questions:\n",
"# How many people played in Top Gun?\n",
"MATCH (m:Movie {{title:\"Top Gun\"}})<-[:ACTED_IN]-()\n",
"RETURN count(*) AS numberOfActors\n",
"\n",
"The question is:\n",
"{question}\"\"\"\n",
"\n",
"CYPHER_GENERATION_PROMPT = PromptTemplate(\n",
" input_variables=[\"schema\", \"question\"], template=CYPHER_GENERATION_TEMPLATE\n",
")\n",
"\n",
"chain = GraphCypherQAChain.from_llm(\n",
" ChatOpenAI(temperature=0),\n",
" graph=graph,\n",
" verbose=True,\n",
" cypher_prompt=CYPHER_GENERATION_PROMPT,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "47c64027-cf42-493a-9c76-2d10ba753728",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (:Movie {name:\"Top Gun\"})<-[:ACTED_IN]-(:Actor)\n",
"RETURN count(*) AS numberOfActors\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'numberofactors': 4}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'query': 'How many people played in Top Gun?',\n",
" 'result': \"I don't know the answer.\"}"
]
},
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"How many people played in Top Gun?\")"
]
},
{
"cell_type": "markdown",
"id": "3e721cad-aa87-4526-9231-2dfc0e365939",
"metadata": {},
"source": [
"## Use separate LLMs for Cypher and answer generation\n",
"You can use the `cypher_llm` and `qa_llm` parameters to define different llms"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "6f9becc2-f579-45bf-9b50-2ce02bde92da",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" graph=graph,\n",
" cypher_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
" qa_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-16k\"),\n",
" verbose=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "ff18e3e3-3402-4683-aec4-a19898f23ca1",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
"WHERE m.name = 'Top Gun'\n",
"RETURN a.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'query': 'Who played in Top Gun?',\n",
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'}"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"Who played in Top Gun?\")"
]
},
{
"cell_type": "markdown",
"id": "eefea16b-508f-4552-8942-9d5063ed7d37",
"metadata": {},
"source": [
"## Ignore specified node and relationship types\n",
"\n",
"You can use `include_types` or `exclude_types` to ignore parts of the graph schema when generating Cypher statements."
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "a20fa21e-fb85-41c4-aac0-53fb25e34604",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" graph=graph,\n",
" cypher_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
" qa_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-16k\"),\n",
" verbose=True,\n",
" exclude_types=[\"Movie\"],\n",
")"
]
},
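{
"cell_type": "markdown",
"id": "c7e9a2b1-include-types-note",
"metadata": {},
"source": [
"Equivalently, a sketch using `include_types` to keep only the listed types (in the version used here, `include_types` and `exclude_types` are typically mutually exclusive, so pass one or the other):\n",
"\n",
"```python\n",
"chain = GraphCypherQAChain.from_llm(\n",
"    graph=graph,\n",
"    cypher_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
"    qa_llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-16k\"),\n",
"    verbose=True,\n",
"    include_types=[\"Actor\", \"Movie\", \"ACTED_IN\"],\n",
")\n",
"```"
]
},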
{
"cell_type": "code",
"execution_count": 19,
"id": "3ad7f6b8-543e-46e4-a3b2-40fa3e66e895",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Node properties are the following:\n",
"Actor {name: STRING},LabelA {property_a: STRING},LabelB {},LabelC {}\n",
"Relationship properties are the following:\n",
"ACTED_IN {},REL_TYPE {rel_prop: STRING}\n",
"The relationships are the following:\n",
"(:LabelA)-[:REL_TYPE]->(:LabelB),(:LabelA)-[:REL_TYPE]->(:LabelC)\n"
]
}
],
"source": [
"# Inspect graph schema\n",
"print(chain.graph_schema)"
]
},
{
"cell_type": "markdown",
"id": "f0202e88-d700-40ed-aef9-0c969c7bf951",
"metadata": {},
"source": [
"## Validate generated Cypher statements\n",
"You can use the `validate_cypher` parameter to validate and correct relationship directions in generated Cypher statements"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "53665d03-7afd-433c-bdd5-750127bfb152",
"metadata": {},
"outputs": [],
"source": [
"chain = GraphCypherQAChain.from_llm(\n",
" llm=ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo\"),\n",
" graph=graph,\n",
" verbose=True,\n",
" validate_cypher=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "19e1a591-9c10-4d7b-aa36-a5e1b778a97b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"\n",
"\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n",
"Generated Cypher:\n",
"\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie)\n",
"WHERE m.name = 'Top Gun'\n",
"RETURN a.name\u001b[0m\n",
"Full Context:\n",
"\u001b[32;1m\u001b[1;3m[{'name': 'Tom Cruise'}, {'name': 'Val Kilmer'}, {'name': 'Anthony Edwards'}, {'name': 'Meg Ryan'}]\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"{'query': 'Who played in Top Gun?',\n",
" 'result': 'Tom Cruise, Val Kilmer, Anthony Edwards, Meg Ryan played in Top Gun.'}"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"Who played in Top Gun?\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -22,7 +22,7 @@
"You can run the `falkordb` Docker container locally:\n",
"\n",
"```bash\n",
"docker run -p 6379:6379 -it --rm falkordb/falkordb:edge\n",
"docker run -p 6379:6379 -it --rm falkordb/falkordb\n",
"```\n",
"\n",
"Once launched, you create a database on the local machine and connect to it."

@ -79,8 +79,8 @@
"\n",
"from gqlalchemy import Memgraph\n",
"from langchain.chains import GraphCypherQAChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.graphs import MemgraphGraph\n",
"from langchain_core.prompts import PromptTemplate\n",
"from langchain_openai import ChatOpenAI"
]
},

@ -389,7 +389,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts.prompt import PromptTemplate\n",
"from langchain_core.prompts.prompt import PromptTemplate\n",
"\n",
"CYPHER_GENERATION_TEMPLATE = \"\"\"Task:Generate Cypher statement to query a graph database.\n",
"Instructions:\n",

@ -16,8 +16,8 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate\n",
"from langchain_community.llms.pai_eas_endpoint import PaiEasEndpoint\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",

@ -136,45 +136,29 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain import hub\n",
"from langchain.agents import (\n",
" AgentExecutor,\n",
" ZeroShotAgent,\n",
" create_react_agent,\n",
")\n",
"from langchain.chains import LLMChain\n",
"from langchain.memory import ConversationBufferMemory\n",
"from langchain_community.llms import NIBittensorLLM\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",
"tools = [tool]\n",
"prefix = \"\"\"Answer prompt based on LLM if there is need to search something then use internet and observe internet result and give accurate reply of user questions also try to use authenticated sources\"\"\"\n",
"suffix = \"\"\"Begin!\n",
" {chat_history}\n",
" Question: {input}\n",
" {agent_scratchpad}\"\"\"\n",
"\n",
"prompt = ZeroShotAgent.create_prompt(\n",
" tools=tools,\n",
" prefix=prefix,\n",
" suffix=suffix,\n",
" input_variables=[\"input\", \"chat_history\", \"agent_scratchpad\"],\n",
")\n",
"\n",
"prompt = hub.pull(\"hwchase17/react\")\n",
"\n",
"\n",
"llm = NIBittensorLLM(\n",
" system_prompt=\"Your task is to determine a response based on user prompt\"\n",
")\n",
"\n",
"llm_chain = LLMChain(llm=llm, prompt=prompt)\n",
"\n",
"memory = ConversationBufferMemory(memory_key=\"chat_history\")\n",
"\n",
"agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)\n",
"agent_chain = AgentExecutor.from_agent_and_tools(\n",
" agent=agent, tools=tools, verbose=True, memory=memory\n",
")\n",
"agent = create_react_agent(llm, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory)\n",
"\n",
"response = agent_chain.run(input=prompt)"
"response = agent_executor.invoke({\"input\": prompt})"
]
}
],

@ -347,7 +347,7 @@
"from langchain_core.messages import HumanMessage\n",
"from langchain_google_vertexai import ChatVertexAI\n",
"\n",
"llm = ChatVertexAI(model=\"gemini-ultra-vision\")\n",
"llm = ChatVertexAI(model=\"gemini-pro-vision\")\n",
"\n",
"image_message = {\n",
" \"type\": \"image_url\",\n",

@ -93,7 +93,7 @@
"outputs": [],
"source": [
"from langchain.chains import LLMChain\n",
"from langchain.prompts import PromptTemplate"
"from langchain_core.prompts import PromptTemplate"
]
},
{

@ -124,7 +124,7 @@
"In this example, well use the `project_id` and Dallas url.\n",
"\n",
"\n",
"You need to specify `model_id` that will be used for inferencing. All avaliable models you can find in [documentation](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#ibm_watsonx_ai.foundation_models.utils.enums.ModelTypes)."
"You need to specify `model_id` that will be used for inferencing. All available models you can find in [documentation](https://ibm.github.io/watsonx-ai-python-sdk/fm_model.html#ibm_watsonx_ai.foundation_models.utils.enums.ModelTypes)."
]
},
{
@ -210,7 +210,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"Generate a random question about {topic}: Question: \"\n",
"prompt = PromptTemplate.from_template(template)"

@ -1633,7 +1633,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.docstore.document import Document\n",
"from langchain_community.docstore.document import Document\n",
"\n",
"docs = [Document(page_content=t) for t in texts[:3]]\n",
"from langchain.chains.summarize import load_summarize_chain"

@ -0,0 +1,142 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "959300d4",
"metadata": {},
"source": [
"# MLX Local Pipelines\n",
"\n",
"MLX models can be run locally through the `MLXPipeline` class.\n",
"\n",
"The [MLX Community](https://huggingface.co/mlx-community) hosts over 150 models, all open source and publicly available on Hugging Face Model Hub a online platform where people can easily collaborate and build ML together.\n",
"\n",
"These can be called from LangChain either through this local pipeline wrapper or by calling their hosted inference endpoints through the MlXPipeline class. For more information on mlx, see the [examples repo](https://github.com/ml-explore/mlx-examples/tree/main/llms) notebook."
]
},
{
"cell_type": "markdown",
"id": "4c1b8450-5eaf-4d34-8341-2d785448a1ff",
"metadata": {
"tags": []
},
"source": [
"To use, you should have the ``mlx-lm`` python [package installed](https://pypi.org/project/mlx-lm/), as well as [transformers](https://pypi.org/project/transformers/). You can also install `huggingface_hub`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d772b637-de00-4663-bd77-9bc96d798db2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet mlx-lm transformers huggingface_hub"
]
},
{
"cell_type": "markdown",
"id": "91ad075f-71d5-4bc8-ab91-cc0ad5ef16bb",
"metadata": {},
"source": [
"### Model Loading\n",
"\n",
"Models can be loaded by specifying the model parameters using the `from_model_id` method."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "165ae236-962a-4763-8052-c4836d78a5d2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain_community.llms.mlx_pipeline import MLXPipeline\n",
"\n",
"pipe = MLXPipeline.from_model_id(\n",
" \"mlx-community/quantized-gemma-2b-it\",\n",
" pipeline_kwargs={\"max_tokens\": 10, \"temp\": 0.1},\n",
")"
]
},
{
"cell_type": "markdown",
"id": "00104b27-0c15-4a97-b198-4512337ee211",
"metadata": {},
"source": [
"They can also be loaded by passing in an existing `transformers` pipeline directly"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f426a4f",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline\n",
"from mlx_lm import load\n",
"\n",
"model, tokenizer = load(\"mlx-community/quantized-gemma-2b-it\")\n",
"pipe = MLXPipeline(model=model, tokenizer=tokenizer)"
]
},
{
"cell_type": "markdown",
"id": "60e7ba8d",
"metadata": {},
"source": [
"### Create Chain\n",
"\n",
"With the model loaded into memory, you can compose it with a prompt to\n",
"form a chain."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3acf0069",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"prompt = PromptTemplate.from_template(template)\n",
"\n",
"chain = prompt | pipe\n",
"\n",
"question = \"What is electroencephalography?\"\n",
"\n",
"print(chain.invoke({\"question\": question}))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -18,7 +18,7 @@
" \n",
"2. Paste your API key in in the code cell below.\n",
"\n",
"Note: If you want to use a different LLM model, you can containerize the model and make a custom OctoAI endpoint yourself, by following [Build a Container from Python](https://octo.ai/docs/bring-your-own-model/advanced-build-a-container-from-scratch-in-python) and [Create a Custom Endpoint from a Container](https://octo.ai/docs/bring-your-own-model/create-custom-endpoints-from-a-container/create-custom-endpoints-from-a-container) and then update your Endpoint URL in the code cell below.\n"
"Note: If you want to use a different LLM model, you can containerize the model and make a custom OctoAI endpoint yourself, by following [Build a Container from Python](https://octo.ai/docs/bring-your-own-model/advanced-build-a-container-from-scratch-in-python) and [Create a Custom Endpoint from a Container](https://octo.ai/docs/bring-your-own-model/create-custom-endpoints-from-a-container/create-custom-endpoints-from-a-container) and then updating your `OCTOAI_API_BASE` environment variable.\n"
]
},
{
@ -29,8 +29,7 @@
"source": [
"import os\n",
"\n",
"os.environ[\"OCTOAI_API_TOKEN\"] = \"OCTOAI_API_TOKEN\"\n",
"os.environ[\"ENDPOINT_URL\"] = \"https://text.octoai.run/v1/chat/completions\""
"os.environ[\"OCTOAI_API_TOKEN\"] = \"OCTOAI_API_TOKEN\""
]
},
{
@ -68,44 +67,33 @@
"outputs": [],
"source": [
"llm = OctoAIEndpoint(\n",
" model_kwargs={\n",
" \"model\": \"llama-2-13b-chat-fp16\",\n",
" \"max_tokens\": 128,\n",
" \"presence_penalty\": 0,\n",
" \"temperature\": 0.1,\n",
" \"top_p\": 0.9,\n",
" \"messages\": [\n",
" {\n",
" \"role\": \"system\",\n",
" \"content\": \"You are a helpful assistant. Keep your responses limited to one short paragraph if possible.\",\n",
" },\n",
" ],\n",
" },\n",
" model=\"llama-2-13b-chat-fp16\",\n",
" max_tokens=200,\n",
" presence_penalty=0,\n",
" temperature=0.1,\n",
" top_p=0.9,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Sure thing! Here's my response:\n",
"\n",
"Leonardo da Vinci was a true Renaissance man - an Italian polymath who excelled in various fields, including painting, sculpture, engineering, mathematics, anatomy, and geology. He is widely considered one of the greatest painters of all time, and his inventive and innovative works continue to inspire and influence artists and thinkers to this day. Some of his most famous works include the Mona Lisa, The Last Supper, and Vitruvian Man. \n"
]
}
],
"outputs": [],
"source": [
"question = \"Who was leonardo davinci?\"\n",
"question = \"Who was Leonardo da Vinci?\"\n",
"\n",
"llm_chain = LLMChain(prompt=prompt, llm=llm)\n",
"\n",
"print(llm_chain.run(question))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Leonardo da Vinci was a true Renaissance man. He was born in 1452 in Vinci, Italy and was known for his work in various fields, including art, science, engineering, and mathematics. He is considered one of the greatest painters of all time, and his most famous works include the Mona Lisa and The Last Supper. In addition to his art, da Vinci made significant contributions to engineering and anatomy, and his designs for machines and inventions were centuries ahead of his time. He is also known for his extensive journals and drawings, which provide valuable insights into his thoughts and ideas. Da Vinci's legacy continues to inspire and influence artists, scientists, and thinkers around the world today."
]
}
],
"metadata": {

@ -21,7 +21,7 @@
"* [Download](https://ollama.ai/download) and install Ollama onto the available supported platforms (including Windows Subsystem for Linux)\n",
"* Fetch available LLM model via `ollama pull <name-of-model>`\n",
" * View a list of available models via the [model library](https://ollama.ai/library)\n",
" * e.g., for `Llama-7b`: `ollama pull llama2`\n",
" * e.g., `ollama pull llama3`\n",
"* This will download the default tagged version of the model. Typically, the default points to the latest, smallest sized-parameter model.\n",
"\n",
 On Mac, the models will be download">
"> On Mac, the models will be downloaded to `~/.ollama/models`\n",
@ -37,7 +37,7 @@
"\n",
"You can see a full list of supported parameters on the [API reference page](https://api.python.langchain.com/en/latest/llms/langchain.llms.ollama.Ollama.html).\n",
"\n",
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama2:7b-chat`) then you can use the `ChatOllama` interface.\n",
"If you are using a LLaMA `chat` model (e.g., `ollama pull llama3`) then you can use the `ChatOllama` interface.\n",
"\n",
"This includes [special tokens](https://huggingface.co/blog/llama2#how-to-prompt-llama-2) for system message and user input.\n",
"\n",
@ -56,7 +56,7 @@
"\n",
"```bash\n",
"curl http://localhost:11434/api/generate -d '{\n",
" \"model\": \"llama2\",\n",
" \"model\": \"llama3\",\n",
" \"prompt\":\"Why is the sky blue?\"\n",
"}'\n",
"```\n",
@ -70,16 +70,16 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"Sure! Here's a quick one:\\n\\nWhy don't scientists trust atoms?\\nBecause they make up everything!\\n\\nI hope that brought a smile to your face!\""
"\"Here's one:\\n\\nWhy don't scientists trust atoms?\\n\\nBecause they make up everything!\\n\\nHope that made you smile! Do you want to hear another one?\""
]
},
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
@ -87,7 +87,7 @@
"source": [
"from langchain_community.llms import Ollama\n",
"\n",
"llm = Ollama(model=\"llama2\")\n",
"llm = Ollama(model=\"llama3\")\n",
"\n",
"llm.invoke(\"Tell me a joke\")"
]
@ -298,7 +298,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.8"
}
},
"nbformat": 4,

@ -90,7 +90,7 @@
"device = \"CPU\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
"ov_model = OVModelForCausalLM.from_pretrained(\n",
" model_id, device=device, ov_config=ov_config\n",
" model_id, export=True, device=device, ov_config=ov_config\n",
")\n",
"ov_pipe = pipeline(\n",
" \"text-generation\", model=ov_model, tokenizer=tokenizer, max_new_tokens=10\n",
@ -116,7 +116,7 @@
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"template = \"\"\"Question: {question}\n",
"\n",
@ -185,11 +185,11 @@
" pipeline_kwargs={\"max_new_tokens\": 10},\n",
")\n",
"\n",
"ov_chain = prompt | ov_llm\n",
"chain = prompt | ov_llm\n",
"\n",
"question = \"What is electroencephalography?\"\n",
"\n",
"print(ov_chain.invoke({\"question\": question}))"
"print(chain.invoke({\"question\": question}))"
]
},
{
@ -229,7 +229,7 @@
"\n",
"* [OpenVINO Get Started Guide](https://www.intel.com/content/www/us/en/content-details/819067/openvino-get-started-guide.html).\n",
" \n",
"* [RAG Notebook with LangChain](https://github.com/openvinotoolkit/openvino_notebooks/tree/latest/notebooks/llm-chatbot)."
"* [RAG Notebook with LangChain](https://github.com/openvinotoolkit/openvino_notebooks/tree/latest/notebooks/llm-rag-langchain)."
]
}
],

@ -50,7 +50,41 @@
"from langchain_community.llms import Predibase\n",
"\n",
"model = Predibase(\n",
" model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
" model=\"mistral-7b\",\n",
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import Predibase\n",
"\n",
"# With a fine-tuned adapter hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).\n",
"model = Predibase(\n",
" model=\"mistral-7b\",\n",
" adapter_id=\"e2e_nlg\",\n",
" adapter_version=1,\n",
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.llms import Predibase\n",
"\n",
"# With a fine-tuned adapter hosted at HuggingFace (adapter_version does not apply and will be ignored).\n",
"model = Predibase(\n",
" model=\"mistral-7b\",\n",
" adapter_id=\"predibase/e2e_nlg\",\n",
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
")"
]
},
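{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of calling one of the models above directly (the prompt is illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"response = model.invoke(\"Can you recommend me a nice dry wine?\")\n",
"print(response)"
]
},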
@ -66,19 +100,62 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"source": [
"## Chain Call Setup"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"from langchain_community.llms import Predibase\n",
"\n",
"model = Predibase(\n",
" model=\"mistral-7b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"# With a fine-tuned adapter hosted at Predibase (adapter_version can be specified; omitting it is equivalent to the most recent version).\n",
"model = Predibase(\n",
" model=\"mistral-7b\",\n",
" adapter_id=\"e2e_nlg\",\n",
" adapter_version=1,\n",
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# With a fine-tuned adapter hosted at HuggingFace (adapter_version does not apply and will be ignored).\n",
"llm = Predibase(\n",
" model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
" model=\"mistral-7b\",\n",
" adapter_id=\"predibase/e2e_nlg\",\n",
" predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\"),\n",
")"
]
},
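{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch of composing one of the models above into a chain (the prompt template is illustrative):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import PromptTemplate\n",
"\n",
"prompt = PromptTemplate.from_template(\"Tell me a joke about {topic}.\")\n",
"chain = prompt | model\n",
"print(chain.invoke({\"topic\": \"wine\"}))"
]
},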
@ -169,7 +246,12 @@
"from langchain_community.llms import Predibase\n",
"\n",
"model = Predibase(\n",
" model=\"my-finetuned-LLM\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
" model=\"my-base-LLM\",\n",
" adapter_id=\"my-finetuned-adapter-id\", # Supports both, Predibase-hosted and HuggingFace-hosted model repositories.\n",
" # adapter_version=1, # optional (returns the latest, if omitted)\n",
" predibase_api_key=os.environ.get(\n",
" \"PREDIBASE_API_TOKEN\"\n",
" ), # Adapter argument is optional.\n",
")\n",
"# replace my-finetuned-LLM with the name of your model in Predibase"
]
