{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Predibase\n",
    "\n",
    "[Predibase](https://predibase.com/) allows you to train, fine-tune, and deploy any ML model, from linear regression to large language models.\n",
    "\n",
    "This example demonstrates how to use LangChain with models deployed on Predibase."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup\n",
    "\n",
    "To run this notebook, you'll need a [Predibase account](https://predibase.com/free-trial/?utm_source=langchain) and an [API key](https://docs.predibase.com/sdk-guide/intro).\n",
    "\n",
    "You'll also need to install the Predibase Python package:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install predibase\n",
    "import os\n",
    "\n",
    "# Set your Predibase API token (replace the placeholder below with your own token).\n",
    "os.environ[\"PREDIBASE_API_TOKEN\"] = \"{PREDIBASE_API_TOKEN}\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Initial Call"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import Predibase\n",
    "\n",
    "model = Predibase(\n",
    "    model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "response = model(\"Can you recommend me a nice dry wine?\")\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Chain Call Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "llm = Predibase(\n",
    "    model=\"vicuna-13b\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## SequentialChain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chains import LLMChain\n",
    "from langchain.prompts import PromptTemplate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is an LLMChain to write a synopsis given the title of a play.\n",
    "template = \"\"\"You are a playwright. Given the title of a play, it is your job to write a synopsis for that title.\n",
    "\n",
    "Title: {title}\n",
    "Playwright: This is a synopsis for the above play:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"title\"], template=template)\n",
    "synopsis_chain = LLMChain(llm=llm, prompt=prompt_template)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is an LLMChain to write a review of a play given a synopsis.\n",
    "template = \"\"\"You are a play critic from the New York Times. Given the synopsis of a play, it is your job to write a review for that play.\n",
    "\n",
    "Play Synopsis:\n",
    "{synopsis}\n",
    "Review from a New York Times play critic of the above play:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"synopsis\"], template=template)\n",
    "review_chain = LLMChain(llm=llm, prompt=prompt_template)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is the overall chain where we run these two chains in sequence.\n",
    "from langchain.chains import SimpleSequentialChain\n",
    "\n",
    "overall_chain = SimpleSequentialChain(\n",
    "    chains=[synopsis_chain, review_chain], verbose=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "review = overall_chain.run(\"Tragedy at sunset on the beach\")"
   ]
  },
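  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional (not in the original notebook): inspect the final output of the\n",
    "# chain. `review` holds the review text returned by overall_chain.run above;\n",
    "# the intermediate steps were already printed because verbose=True.\n",
    "print(review)"
   ]
  },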
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Fine-tuned LLM (Use your own fine-tuned LLM from Predibase)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.llms import Predibase\n",
    "\n",
    "# Replace my-finetuned-LLM with the name of your model in Predibase.\n",
    "model = Predibase(\n",
    "    model=\"my-finetuned-LLM\", predibase_api_key=os.environ.get(\"PREDIBASE_API_TOKEN\")\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# response = model(\"Can you help categorize the following emails into positive, negative, and neutral?\")"
   ]
  },
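  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A minimal sketch (not part of the original notebook): a fine-tuned Predibase\n",
    "# model can be dropped into the same LLMChain pattern used earlier. The email\n",
    "# sentiment prompt below is a hypothetical example.\n",
    "template = \"\"\"Classify the sentiment of the following email as positive, negative, or neutral.\n",
    "\n",
    "Email:\n",
    "{email}\n",
    "Sentiment:\"\"\"\n",
    "prompt_template = PromptTemplate(input_variables=[\"email\"], template=template)\n",
    "sentiment_chain = LLMChain(llm=model, prompt=prompt_template)\n",
    "# sentiment_chain.run(\"Thanks for the quick turnaround, this looks great!\")  # needs your deployed fine-tuned model"
   ]
  }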
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.8.9 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.9"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}