updated langchain/docs/modules/models/llms/integrations/ notebooks (#3041)

- Updated `langchain/docs/modules/models/llms/integrations/` notebooks:
added links to the original sites, install information, etc.
- Added the `nlpcloud` notebook.
- Removed "Example" from the titles of some notebooks, so all notebook
titles are consistent.
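
The updated notebooks share a common setup pattern: install the provider package, read the API key with `getpass` instead of hard-coding it, and hand it to the LLM wrapper (directly or via an environment variable). A minimal provider-agnostic sketch, using the `OpenAI` wrapper only as an example; package, environment-variable, and wrapper class names vary per provider:

```python
# Sketch of the setup pattern applied across the integration notebooks.
# OpenAI is used here only as an example; other notebooks swap in
# AI21, Cohere, NLPCloud, Writer, etc. with their own packages and keys.

# !pip install openai

import os
from getpass import getpass

from langchain import LLMChain, PromptTemplate
from langchain.llms import OpenAI

# Prompt for the key instead of committing "YOUR_KEY_HERE" to the notebook
OPENAI_API_KEY = getpass()
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

llm = OpenAI()  # most wrappers also accept the key as a constructor argument
llm_chain = LLMChain(prompt=prompt, llm=llm)

question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
print(llm_chain.run(question))
```

Reading keys via `getpass` keeps real tokens out of the committed notebook outputs.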

@ -6,14 +6,55 @@
"metadata": {},
"source": [
"# AI21\n",
"This example goes over how to use LangChain to interact with AI21 models"
"\n",
"[AI21 Studio](https://docs.ai21.com/) provides API access to `Jurassic-2` large language models.\n",
"\n",
"This example goes over how to use LangChain to interact with [AI21 models](https://docs.ai21.com/docs/jurassic-2-models)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "02be122d-04e8-4ec6-84d1-f1d8961d6828",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# install the package:\n",
"!pip install ai21"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "4229227e-6ca2-41ad-a3c3-5f29e3559091",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# get AI21_API_KEY. Use https://studio.ai21.com/account/account\n",
"\n",
"from getpass import getpass\n",
"AI21_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "6fb585dd",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import AI21\n",
@ -22,9 +63,11 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 9,
"id": "035dea0f",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -36,19 +79,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 10,
"id": "3f3458d9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = AI21()"
"llm = AI21(ai21_api_key=AI21_API_KEY)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 11,
"id": "a641dbd9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
@ -56,10 +103,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "9f0b1960",
"metadata": {},
"outputs": [],
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"'\\n1. What year was Justin Bieber born?\\nJustin Bieber was born in 1994.\\n2. What team won the Super Bowl in 1994?\\nThe Dallas Cowboys won the Super Bowl in 1994.'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
@ -91,7 +151,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
}
},
"nbformat": 4,

@ -1,20 +1,61 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# Aleph Alpha\n",
"\n",
"[The Luminous series](https://docs.aleph-alpha.com/docs/introduction/luminous/) is a family of large language models.\n",
"\n",
"This example goes over how to use LangChain to interact with Aleph Alpha models"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "fe1bf9fb-e9fa-49f3-a768-8f603225ccce",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Install the package\n",
"!pip install aleph-alpha-client"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0cb0f937-b610-42a2-b765-336eed037031",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# create a new token: https://docs.aleph-alpha.com/docs/account/#create-a-new-token\n",
"\n",
"from getpass import getpass\n",
"\n",
"ALEPH_ALPHA_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6fb585dd",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import AlephAlpha\n",
@ -23,9 +64,11 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 7,
"id": "f81a230d",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Q: {question}\n",
@ -37,19 +80,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 8,
"id": "f0d26e48",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = AlephAlpha(model=\"luminous-extended\", maximum_tokens=20, stop_sequences=[\"Q:\"])"
"llm = AlephAlpha(model=\"luminous-extended\", maximum_tokens=20, stop_sequences=[\"Q:\"], aleph_alpha_api_key=ALEPH_ALPHA_API_KEY)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 9,
"id": "6811d621",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
@ -57,9 +104,11 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 10,
"id": "3058e63f",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
@ -67,7 +116,7 @@
"' Artificial Intelligence (AI) is the simulation of human intelligence processes by machines, especially computer systems.\\n'"
]
},
"execution_count": 5,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -81,7 +130,7 @@
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@ -95,7 +144,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.10.6"
},
"vscode": {
"interpreter": {

@ -6,14 +6,46 @@
"metadata": {},
"source": [
"# Anthropic\n",
"This example goes over how to use LangChain to interact with Anthropic models"
"\n",
"[Anthropic](https://console.anthropic.com/docs) is creator of the `Claude` LLM.\n",
"\n",
"This example goes over how to use LangChain to interact with Anthropic models."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e55c0f2e-63e1-4e83-ac44-ffcc1dfeacc8",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Install the package\n",
"!pip install anthropic"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cec62d45-afa2-422a-95ef-57f8ab41a6f9",
"metadata": {},
"outputs": [],
"source": [
"# get a new token: https://www.anthropic.com/earlyaccess\n",
"\n",
"from getpass import getpass\n",
"\n",
"ANTHROPIC_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6fb585dd",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import Anthropic\n",
@ -24,7 +56,9 @@
"cell_type": "code",
"execution_count": 2,
"id": "035dea0f",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -36,12 +70,14 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "3f3458d9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = Anthropic()"
"llm = Anthropic(, anthropic_api_key=ANTHROPIC_API_KEY)"
]
},
{
@ -102,7 +138,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
}
},
"nbformat": 4,

@ -5,7 +5,7 @@
"id": "9e9b7651",
"metadata": {},
"source": [
"# Azure OpenAI LLM Example\n",
"# Azure OpenAI\n",
"\n",
"This notebook goes over how to use Langchain with [Azure OpenAI](https://aka.ms/azure-openai).\n",
"\n",
@ -49,6 +49,18 @@
"```\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "89fdb593-5a42-4098-87b7-1496fa511b1c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install openai"
]
},
{
"cell_type": "code",
"execution_count": 1,
@ -146,7 +158,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
"version": "3.10.6"
},
"vscode": {
"interpreter": {

@ -5,19 +5,50 @@
"metadata": {},
"source": [
"# Banana\n",
"\n",
"\n",
"[Banana](https://www.banana.dev/about-us) is focused on building the machine learning infrastructure.\n",
"\n",
"This example goes over how to use LangChain to interact with Banana models"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Install the package https://docs.banana.dev/banana-docs/core-concepts/sdks/python\n",
"!pip install banana-dev"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# get new tokens: https://app.banana.dev/\n",
"# We need two tokens, not just an `api_key`: `BANANA_API_KEY` and `YOUR_MODEL_KEY`\n",
"\n",
"import os\n",
"from getpass import getpass\n",
"\n",
"os.environ[\"BANANA_API_KEY\"] = \"YOUR_API_KEY\"\n",
"# OR\n",
"# BANANA_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import Banana\n",
"from langchain import PromptTemplate, LLMChain\n",
"os.environ[\"BANANA_API_KEY\"] = \"YOUR_API_KEY\""
"from langchain import PromptTemplate, LLMChain"
]
},
{
@ -65,15 +96,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -81,5 +119,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -4,7 +4,10 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# CerebriumAI LLM Example\n",
"# CerebriumAI\n",
"\n",
"`Cerebrium` is an AWS Sagemaker alternative. It also provides API access to [several LLM models](https://docs.cerebrium.ai/cerebrium/prebuilt-models/deploymen).\n",
"\n",
"This notebook goes over how to use Langchain with [CerebriumAI](https://docs.cerebrium.ai/introduction)."
]
},
@ -13,7 +16,7 @@
"metadata": {},
"source": [
"## Install cerebrium\n",
"The `cerebrium` package is required to use the CerebriumAI API. Install `cerebrium` using `pip3 install cerebrium`."
"The `cerebrium` package is required to use the `CerebriumAI` API. Install `cerebrium` using `pip3 install cerebrium`."
]
},
{
@ -22,7 +25,8 @@
"metadata": {},
"outputs": [],
"source": [
"$ pip3 install cerebrium"
"# Install the package\n",
"!pip3 install cerebrium"
]
},
{
@ -48,7 +52,7 @@
"metadata": {},
"source": [
"## Set the Environment API Key\n",
"Make sure to get your API key from CerebriumAI. You are given a 1 hour free of serverless GPU compute to test different models."
"Make sure to get your API key from CerebriumAI. See [here](https://dashboard.cerebrium.ai/login). You are given a 1 hour free of serverless GPU compute to test different models."
]
},
{
@ -136,15 +140,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -152,5 +163,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -5,15 +5,57 @@
"id": "9597802c",
"metadata": {},
"source": [
"# Cohere\n",
"# Install the package\n",
"\n",
"[Cohere](https://cohere.ai/about) is a Canadian startup that provides natural language processing models that help companies improve human-machine interactions.\n",
"\n",
"This example goes over how to use LangChain to interact with Cohere models"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"id": "91ea14ce-831d-409a-a88f-30353acdabd1",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Install the package\n",
"!pip install cohere"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "3f5dc9d7-65e3-4b5b-9086-3327d016cfe0",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# get a new token: https://dashboard.cohere.ai/\n",
"\n",
"from getpass import getpass\n",
"\n",
"COHERE_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6fb585dd",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import Cohere\n",
@ -22,9 +64,11 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "035dea0f",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -36,19 +80,23 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 6,
"id": "3f3458d9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = Cohere()"
"llm = Cohere(cohere_api_key=COHERE_API_KEY)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 7,
"id": "a641dbd9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
@ -102,7 +150,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
}
},
"nbformat": 4,

@ -1,11 +1,13 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# DeepInfra LLM Example\n",
"# DeepInfra\n",
"\n",
"`DeepInfra` provides [several LLMs](https://deepinfra.com/models).\n",
"\n",
"This notebook goes over how to use Langchain with [DeepInfra](https://deepinfra.com)."
]
},
@ -18,8 +20,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
@ -32,17 +36,44 @@
"metadata": {},
"source": [
"## Set the Environment API Key\n",
"Make sure to get your API key from DeepInfra. You are given a 1 hour free of serverless GPU compute to test different models.\n",
"Make sure to get your API key from DeepInfra. You have to [Login](https://deepinfra.com/login?from=%2Fdash) and get a new token.\n",
"\n",
"You are given a 1 hour free of serverless GPU compute to test different models. (see [here](https://github.com/deepinfra/deepctl#deepctl))\n",
"You can print your token with `deepctl auth token`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# get a new token: https://deepinfra.com/login?from=%2Fdash\n",
"\n",
"from getpass import getpass\n",
"\n",
"DEEPINFRA_API_TOKEN = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"os.environ[\"DEEPINFRA_API_TOKEN\"] = \"YOUR_KEY_HERE\""
"os.environ[\"DEEPINFRA_API_TOKEN\"] = DEEPINFRA_API_TOKEN"
]
},
{
@ -50,7 +81,7 @@
"metadata": {},
"source": [
"## Create the DeepInfra instance\n",
"Make sure to deploy your model first via `deepctl deploy create -m google/flat-t5-xl` (for example)"
"Make sure to deploy your model first via `deepctl deploy create -m google/flat-t5-xl` (see [here](https://github.com/deepinfra/deepctl#deepctl))"
]
},
{
@ -121,15 +152,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -137,5 +175,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -4,8 +4,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# ForefrontAI LLM Example\n",
"This notebook goes over how to use Langchain with [ForefrontAI](https://www.forefront.ai/)."
"# ForefrontAI\n",
"\n",
"\n",
"The `Forefront` platform gives you the ability to fine-tune and use [open source large language models](https://docs.forefront.ai/forefront/master/models).\n",
"\n",
"This notebook goes over how to use Langchain with [ForefrontAI](https://www.forefront.ai/).\n"
]
},
{
@ -40,7 +44,20 @@
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"FOREFRONTAI_API_KEY\"] = \"YOUR_KEY_HERE\""
"# get a new token: https://docs.forefront.ai/forefront/api-reference/authentication\n",
"\n",
"from getpass import getpass\n",
"\n",
"FOREFRONTAI_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"FOREFRONTAI_API_KEY\"] = FOREFRONTAI_API_KEY"
]
},
{
@ -119,15 +136,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -135,5 +159,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -4,8 +4,11 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# GooseAI LLM Example\n",
"This notebook goes over how to use Langchain with [GooseAI](https://goose.ai/)."
"# GooseAI\n",
"\n",
"`GooseAI` is a fully managed NLP-as-a-Service, delivered via API. GooseAI provides access to [these models](https://goose.ai/docs/models).\n",
"\n",
"This notebook goes over how to use Langchain with [GooseAI](https://goose.ai/).\n"
]
},
{
@ -57,7 +60,18 @@
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"GOOSEAI_API_KEY\"] = \"YOUR_KEY_HERE\""
"from getpass import getpass\n",
"\n",
"GOOSEAI_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"GOOSEAI_API_KEY\"] = GOOSEAI_API_KEY"
]
},
{
@ -136,15 +150,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -152,5 +173,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -6,22 +6,36 @@
"source": [
"# GPT4All\n",
"\n",
"This example goes over how to use LangChain to interact with GPT4All models"
"[GitHub:nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) an ecosystem of open-source chatbots trained on a massive collections of clean assistant data including code, stories and dialogue.\n",
"\n",
"This example goes over how to use LangChain to interact with `GPT4All` models."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install pyllamacpp > /dev/null"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 1,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain import PromptTemplate, LLMChain\n",
@ -32,8 +46,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -51,6 +67,10 @@
"\n",
"To run locally, download a compatible ggml-formatted model. For more info, visit https://github.com/nomic-ai/pyllamacpp\n",
"\n",
"For full installation instructions go [here](https://gpt4all.io/index.html).\n",
"\n",
"The GPT4All Chat installer needs to decompress a 3GB LLM model during the installation process!\n",
"\n",
"Note that new models are uploaded regularly - check the link above for the most recent `.bin` URL"
]
},
@ -146,9 +166,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.2"
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -7,9 +7,57 @@
"source": [
"# Hugging Face Hub\n",
"\n",
"The [Hugging Face Hub](https://huggingface.co/docs/hub/index) is a platform with over 120k models, 20k datasets, and 50k demo apps (Spaces), all open source and publicly available, in an online platform where people can easily collaborate and build ML together.\n",
"\n",
"This example showcases how to connect to the Hugging Face Hub."
]
},
{
"cell_type": "markdown",
"id": "4c1b8450-5eaf-4d34-8341-2d785448a1ff",
"metadata": {
"tags": []
},
"source": [
"To use, you should have the ``huggingface_hub`` python [package installed](https://huggingface.co/docs/huggingface_hub/installation)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d772b637-de00-4663-bd77-9bc96d798db2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install huggingface_hub"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d597a792-354c-4ca5-b483-5965eec5d63d",
"metadata": {},
"outputs": [],
"source": [
"# get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token\n",
"\n",
"from getpass import getpass\n",
"\n",
"HUGGINGFACEHUB_API_TOKEN = getpass()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b8c5b88c-e4b8-4d0d-9a35-6e8f106452c2",
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"HUGGINGFACEHUB_API_TOKEN\"] = HUGGINGFACEHUB_API_TOKEN"
]
},
{
"cell_type": "code",
"execution_count": 41,
@ -63,7 +111,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.12"
"version": "3.10.6"
}
},
"nbformat": 4,

@ -6,22 +6,38 @@
"source": [
"# Llama-cpp\n",
"\n",
"This notebook goes over how to run llama-cpp within LangChain"
"[llama-cpp](https://github.com/abetlen/llama-cpp-python) is a Python binding for [llama.cpp](https://github.com/ggerganov/llama.cpp). \n",
"It supports [several LLMs](https://github.com/ggerganov/llama.cpp).\n",
"\n",
"This notebook goes over how to run `llama-cpp` within LangChain."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install llama-cpp-python"
]
},
{
"cell_type": "code",
"execution_count": 2,
"cell_type": "markdown",
"metadata": {},
"source": [
"Make sure you are following all instructions to [install all necessary model files](https://github.com/ggerganov/llama.cpp).\n",
"\n",
"You don't need an `API_TOKEN`!"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import LlamaCpp\n",
@ -30,8 +46,10 @@
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"execution_count": 4,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -44,7 +62,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = LlamaCpp(model_path=\"./ggml-model-q4_0.bin\")"
@ -98,9 +118,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -15,14 +15,30 @@
"id": "59fcaebc",
"metadata": {},
"source": [
"For more detailed information on `manifest`, and how to use it with local hugginface models like in this example, see https://github.com/HazyResearch/manifest"
"For more detailed information on `manifest`, and how to use it with local hugginface models like in this example, see https://github.com/HazyResearch/manifest\n",
"\n",
"Another example of [using Manifest with Langchain](https://github.com/HazyResearch/manifest/blob/main/examples/langchain_chatgpt.ipynb)."
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"id": "1205d1e4-e6da-4d67-a0c7-b7e8fd1e98d5",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install manifest-ml"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "04a0170a",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from manifest import Manifest\n",
@ -31,18 +47,12 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "de250a6a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'model_name': 'bigscience/T0_3B', 'model_path': 'bigscience/T0_3B'}\n"
]
}
],
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"manifest = Manifest(\n",
" client_name = \"huggingface\",\n",
@ -202,7 +212,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
},
"vscode": {
"interpreter": {

@ -5,7 +5,60 @@
"metadata": {},
"source": [
"# Modal\n",
"This example goes over how to use LangChain to interact with Modal models"
"\n",
"The [Modal Python Library](https://modal.com/docs/guide) provides convenient, on-demand access to serverless cloud compute from Python scripts on your local computer. \n",
"The `Modal` itself does not provide any LLMs but only the infrastructure.\n",
"\n",
"This example goes over how to use LangChain to interact with `Modal`.\n",
"\n",
"[Here](https://modal.com/docs/guide/ex/potus_speech_qanda) is another example how to use LangChain to interact with `Modal`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install modal-client"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[?25lLaunching login page in your browser window\u001b[33m...\u001b[0m\n",
"\u001b[2KIf this is not showing up, please copy this URL into your web browser manually:\n",
"\u001b[2Km⠙\u001b[0m Waiting for authentication in the web browser...\n",
"\u001b]8;id=417802;https://modal.com/token-flow/tf-ptEuGecm7T1T5YQe42kwM1\u001b\\\u001b[4;94mhttps://modal.com/token-flow/tf-ptEuGecm7T1T5YQe42kwM1\u001b[0m\u001b]8;;\u001b\\\n",
"\n",
"\u001b[2K\u001b[32m⠙\u001b[0m Waiting for authentication in the web browser...\n",
"\u001b[1A\u001b[2K^C\n",
"\n",
"\u001b[31mAborted.\u001b[0m\n"
]
}
],
"source": [
"# register and get a new token\n",
"\n",
"!modal token new"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Follow [these instructions](https://modal.com/docs/guide/secrets) to deal with secrets."
]
},
{
@ -63,15 +116,22 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -79,5 +139,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -0,0 +1,171 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "9597802c",
"metadata": {},
"source": [
"# NLP Cloud\n",
"\n",
"The [NLP Cloud](https://nlpcloud.io) serves high performance pre-trained or custom models for NER, sentiment-analysis, classification, summarization, paraphrasing, grammar and spelling correction, keywords and keyphrases extraction, chatbot, product description and ad generation, intent classification, text generation, image generation, blog post generation, code generation, question answering, automatic speech recognition, machine translation, language detection, semantic search, semantic similarity, tokenization, POS tagging, embeddings, and dependency parsing. It is ready for production, served through a REST API.\n",
"\n",
"\n",
"This example goes over how to use LangChain to interact with `NLP Cloud` [models](https://docs.nlpcloud.com/#models)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e94b1ca-6e84-44c4-91ca-df7364c007f0",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install nlpcloud"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ea7adb58-cabe-4a2c-b0a2-988fc3aac012",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# get a token: https://docs.nlpcloud.com/#authentication\n",
"\n",
"from getpass import getpass\n",
"\n",
"NLPCLOUD_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "9cc2d68f-52a8-4a11-ba34-bb6c068e0b6a",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"NLPCLOUD_API_KEY\"] = NLPCLOUD_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6fb585dd",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import NLPCloud\n",
"from langchain import PromptTemplate, LLMChain"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "035dea0f",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
"\n",
"Answer: Let's think step by step.\"\"\"\n",
"\n",
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "3f3458d9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = NLPCloud()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "a641dbd9",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "9f844993",
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"' Justin Bieber was born in 1994, so the team that won the Super Bowl that year was the San Francisco 49ers.'"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -6,14 +6,57 @@
"metadata": {},
"source": [
"# OpenAI\n",
"This example goes over how to use LangChain to interact with OpenAI models"
"\n",
"[OpenAI](https://platform.openai.com/docs/introduction) offers a spectrum of models with different levels of power suitable for different tasks.\n",
"\n",
"This example goes over how to use LangChain to interact with `OpenAI` [models](https://platform.openai.com/docs/models)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "5d71df86-8a17-4283-83d7-4e46e7c06c44",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# get a token: https://platform.openai.com/account/api-keys\n",
"\n",
"from getpass import getpass\n",
"\n",
"OPENAI_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "5472a7cd-af26-48ca-ae9b-5f6ae73c74d2",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 6,
"id": "6fb585dd",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
@ -22,9 +65,11 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 7,
"id": "035dea0f",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -36,9 +81,11 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 8,
"id": "3f3458d9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = OpenAI()"
@ -46,9 +93,11 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 9,
"id": "a641dbd9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
@ -56,17 +105,19 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 10,
"id": "9f844993",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"' Justin Bieber was born in 1994, so the NFL team that won the Super Bowl in that year was the Dallas Cowboys.'"
"' Justin Bieber was born in 1994, so we are looking for the Super Bowl winner from that year. The Super Bowl in 1994 was Super Bowl XXVIII, and the winner was the Dallas Cowboys.'"
]
},
"execution_count": 5,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -94,7 +145,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
},
"vscode": {
"interpreter": {

@ -4,7 +4,10 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Petals LLM Example\n",
"# Petals\n",
"\n",
"`Petals` runs 100B+ language models at home, BitTorrent-style.\n",
"\n",
"This notebook goes over how to use Langchain with [Petals](https://github.com/bigscience-workshop/petals)."
]
},
@ -22,7 +25,7 @@
"metadata": {},
"outputs": [],
"source": [
"$ pip3 install petals"
"!pip3 install petals"
]
},
{
@ -34,7 +37,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@ -48,16 +51,37 @@
"metadata": {},
"source": [
"## Set the Environment API Key\n",
"Make sure to get your API key from Huggingface."
"Make sure to get [your API key](https://huggingface.co/docs/api-inference/quicktour#get-your-api-token) from Huggingface."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"from getpass import getpass\n",
"\n",
"HUGGINGFACE_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"os.environ[\"HUGGINGFACE_API_KEY\"] = \"YOUR_KEY_HERE\""
"os.environ[\"HUGGINGFACE_API_KEY\"] = HUGGINGFACE_API_KEY"
]
},
{
@ -72,8 +96,18 @@
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Downloading: 1%|▏ | 40.8M/7.19G [00:24<15:44, 7.57MB/s]"
]
}
],
"source": [
"# this can take several minutes to download big files!\n",
"\n",
"llm = Petals(model_name=\"bigscience/bloom-petals\")"
]
},
@ -150,7 +184,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
},
"vscode": {
"interpreter": {
@ -159,5 +193,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -1,18 +1,23 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"id": "959300d4",
"metadata": {},
"source": [
"# PromptLayer OpenAI\n",
"\n",
"This example showcases how to connect to [PromptLayer](https://www.promptlayer.com) to start recording your OpenAI requests."
"`PromptLayer` is the first platform that allows you to track, manage, and share your GPT prompt engineering. `PromptLayer` acts a middleware between your code and `OpenAIs` python library.\n",
"\n",
"`PromptLayer` records all your `OpenAI API` requests, allowing you to search and explore request history in the `PromptLayer` dashboard.\n",
"\n",
"\n",
"This example showcases how to connect to [PromptLayer](https://www.promptlayer.com) to start recording your OpenAI requests.\n",
"\n",
"Another example is [here](https://python.langchain.com/en/latest/ecosystem/promptlayer.html)."
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "6a45943e",
"metadata": {},
@ -26,13 +31,14 @@
"execution_count": null,
"id": "dbe09bd8",
"metadata": {
"tags": [],
"vscode": {
"languageId": "powershell"
}
},
"outputs": [],
"source": [
"pip install promptlayer"
"!pip install promptlayer"
]
},
{
@ -45,9 +51,11 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"id": "c16da3b5",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
@ -56,7 +64,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "8564ce7d",
"metadata": {},
@ -64,21 +71,80 @@
"## Set the Environment API Key\n",
"You can create a PromptLayer API Key at [www.promptlayer.com](https://www.promptlayer.com) by clicking the settings cog in the navbar.\n",
"\n",
"Set it as an environment variable called `PROMPTLAYER_API_KEY`."
"Set it as an environment variable called `PROMPTLAYER_API_KEY`.\n",
"\n",
"You also need an OpenAI Key, called `OPENAI_API_KEY`."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"id": "1df96674-a9fb-4126-bb87-541082782240",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"from getpass import getpass\n",
"\n",
"PROMPTLAYER_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "46ba25dc",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"os.environ[\"PROMPTLAYER_API_KEY\"] = PROMPTLAYER_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "9aa68c46-4d88-45ba-8a83-18fa41b4daed",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"from getpass import getpass\n",
"\n",
"OPENAI_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "6023b6fa-d9db-49d6-b713-0e19686119b0",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"os.environ[\"PROMPTLAYER_API_KEY\"] = \"********\""
"os.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "bf0294de",
"metadata": {},
@ -89,28 +155,18 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "3acf0069",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"' to go outside\\n\\nUnfortunately, cats cannot go outside without being supervised by a human. Going outside can be dangerous for cats, as they may come into contact with cars, other animals, or other dangers. If you want to go outside, ask your human to take you on a supervised walk or to a safe, enclosed outdoor space.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = PromptLayerOpenAI(pl_tags=[\"langchain\"])\n",
"llm(\"I am a cat and I want\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "a2d76826",
"metadata": {},
@ -119,7 +175,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "05e9e2fe",
"metadata": {},
@ -144,7 +199,6 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "7eb19139",
"metadata": {},
@ -156,7 +210,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "base",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@ -170,7 +224,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8 (default, Apr 13 2021, 12:59:45) \n[Clang 10.0.0 ]"
"version": "3.10.6"
},
"vscode": {
"interpreter": {

@ -5,20 +5,10 @@
"metadata": {},
"source": [
"# Replicate\n",
"This example goes over how to use LangChain to interact with Replicate models"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from langchain.llms import Replicate\n",
"from langchain import PromptTemplate, LLMChain\n",
"\n",
"os.environ[\"REPLICATE_API_TOKEN\"] = \"YOUR REPLICATE API TOKEN\""
">[Replicate](https://replicate.com/blog/machine-learning-needs-better-tools) runs machine learning models in the cloud. We have a library of open-source models that you can run with a few lines of code. If you're building your own machine learning models, Replicate makes it easy to deploy them at scale.\n",
"\n",
"This example goes over how to use LangChain to interact with `Replicate` [models](https://replicate.com/explore)"
]
},
{
@ -35,6 +25,65 @@
"To run this notebook, you'll need to create a [replicate](https://replicate.com) account and install the [replicate python client](https://github.com/replicate/replicate-python)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install replicate"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"# get a token: https://replicate.com/account\n",
"\n",
"from getpass import getpass\n",
"\n",
"REPLICATE_API_TOKEN = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import Replicate\n",
"from langchain import PromptTemplate, LLMChain"
]
},
{
"cell_type": "markdown",
"metadata": {},
@ -58,8 +107,10 @@
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"execution_count": 5,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = Replicate(model=\"daanelson/flan-t5:04e422a9b85baed86a4f24981d7f9953e20c5fd82f6103b74ebc431588e1cec8\")"
@ -339,7 +390,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
},
"vscode": {
"interpreter": {
@ -348,5 +399,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -6,22 +6,56 @@
"source": [
"# SageMakerEndpoint\n",
"\n",
"This notebooks goes over how to use an LLM hosted on a SageMaker endpoint."
"[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a system that can build, train, and deploy machine learning (ML) models for any use case with fully managed infrastructure, tools, and workflows.\n",
"\n",
"This notebooks goes over how to use an LLM hosted on a `SageMaker endpoint`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip3 install langchain boto3"
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"metadata": {},
"source": [
"## Set up"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You have to set up following required parameters of the `SagemakerEndpoint` call:\n",
"- `endpoint_name`: The name of the endpoint from the deployed Sagemaker model.\n",
" Must be unique within an AWS Region.\n",
"- `credentials_profile_name`: The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which\n",
" has either access keys or role information specified.\n",
" If not specified, the default credential profile or, if on an EC2 instance,\n",
" credentials from IMDS will be used.\n",
" See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Example"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.docstore.document import Document"
@ -29,8 +63,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"example_doc_1 = \"\"\"\n",
@ -49,7 +85,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from typing import Dict\n",
@ -118,7 +156,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.10.6"
},
"vscode": {
"interpreter": {
@ -127,5 +165,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -5,18 +5,43 @@
"id": "9597802c",
"metadata": {},
"source": [
"# Self-Hosted Models via Runhouse\n",
"# Runhouse\n",
"\n",
"The [Runhouse](https://github.com/run-house/runhouse) allows remote compute and data across environments and users. See the [Runhouse docs](https://runhouse-docs.readthedocs-hosted.com/en/latest/).\n",
"\n",
"This example goes over how to use LangChain and [Runhouse](https://github.com/run-house/runhouse) to interact with models hosted on your own GPU, or on-demand GPUs on AWS, GCP, AWS, or Lambda.\n",
"\n",
"For more information, see [Runhouse](https://github.com/run-house/runhouse) or the [Runhouse docs](https://runhouse-docs.readthedocs-hosted.com/en/latest/)."
"**Note**: Code uses `SelfHosted` name instead of the `Runhouse`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fb585dd",
"metadata": {},
"id": "6066fede-2300-4173-9722-6f01f4fa34b4",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"!pip install runhouse"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "6fb585dd",
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO | 2023-04-17 16:47:36,173 | No auth token provided, so not using RNS API to save and load configs\n"
]
}
],
"source": [
"from langchain.llms import SelfHostedPipeline, SelfHostedHuggingFaceLLM\n",
"from langchain import PromptTemplate, LLMChain\n",
@ -25,9 +50,11 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"id": "06d6866e",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# For an on-demand A100 with GCP, Azure, or Lambda\n",
@ -44,9 +71,11 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"id": "035dea0f",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -60,7 +89,9 @@
"cell_type": "code",
"execution_count": null,
"id": "3f3458d9",
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = SelfHostedHuggingFaceLLM(model_id=\"gpt2\", hardware=gpu, model_reqs=[\"pip:./\", \"transformers\", \"torch\"])"
@ -288,7 +319,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.10.6"
}
},
"nbformat": 4,

@ -5,13 +5,78 @@
"metadata": {},
"source": [
"# StochasticAI\n",
"This example goes over how to use LangChain to interact with StochasticAI models"
"\n",
">[Stochastic Acceleration Platform](https://docs.stochastic.ai/docs/introduction/) aims to simplify the life cycle of a Deep Learning model. From uploading and versioning the model, through training, compression and acceleration to putting it into production.\n",
"\n",
"This example goes over how to use LangChain to interact with `StochasticAI` models."
]
},
{
"cell_type": "code",
"execution_count": null,
"cell_type": "markdown",
"metadata": {},
"source": [
"You have to get the API_KEY and the API_URL [here](https://app.stochastic.ai/workspace/profile/settings?tab=profile)."
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"from getpass import getpass\n",
"\n",
"STOCHASTICAI_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"STOCHASTICAI_API_KEY\"] = STOCHASTICAI_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"YOUR_API_URL = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import StochasticAI\n",
@ -20,8 +85,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 6,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -33,17 +100,21 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 11,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm = StochasticAI(api_url=\"YOUR_API_URL\")"
"llm = StochasticAI(api_url=YOUR_API_URL)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 12,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
@ -51,27 +122,54 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"execution_count": 13,
"metadata": {
"tags": []
},
"outputs": [
{
"data": {
"text/plain": [
"\"\\n\\nStep 1: In 1999, the St. Louis Rams won the Super Bowl.\\n\\nStep 2: In 1999, Beiber was born.\\n\\nStep 3: The Rams were in Los Angeles at the time.\\n\\nStep 4: So they didn't play in the Super Bowl that year.\\n\""
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -79,5 +177,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}

@ -5,13 +5,54 @@
"metadata": {},
"source": [
"# Writer\n",
"This example goes over how to use LangChain to interact with Writer models"
"\n",
"[Writer](https://writer.com/) is a platform to generate different language content.\n",
"\n",
"This example goes over how to use LangChain to interact with `Writer` [models](https://dev.writer.com/docs/models).\n",
"\n",
"You have to get the WRITER_API_KEY [here](https://dev.writer.com/docs)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 4,
"metadata": {
"tags": []
},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"source": [
"from getpass import getpass\n",
"\n",
"WRITER_API_KEY = getpass()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"import os\n",
"\n",
"os.environ[\"WRITER_API_KEY\"] = WRITER_API_KEY"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"from langchain.llms import Writer\n",
@ -20,8 +61,10 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 7,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"template = \"\"\"Question: {question}\n",
@ -33,17 +76,23 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 14,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# If you get an error, probably, you need to set up the \"base_url\" parameter that can be taken from the error log.\n",
"\n",
"llm = Writer()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"execution_count": 15,
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"llm_chain = LLMChain(prompt=prompt, llm=llm)"
@ -52,26 +101,42 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"question = \"What NFL team won the Super Bowl in the year Justin Beiber was born?\"\n",
"\n",
"llm_chain.run(question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.12 ('palm')",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"version": "3.9.12"
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "a0a0263b650d907a3bfe41c0f8d6a63a071b884df3cfdc1579f00cdc1aed6b03"
@ -79,5 +144,5 @@
}
},
"nbformat": 4,
"nbformat_minor": 2
"nbformat_minor": 4
}
