Merge pull request #5 from openai/ted-at-openai/openai-cookbook-updates

Ted at openai/openai cookbook updates
Ted Sanders committed 2 years ago via GitHub
commit 627fdd8c84

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

@@ -28,8 +28,11 @@
"source": [
"import openai\n",
"\n",
"embedding = openai.Embedding.create(input=\"Sample document text goes here\", engine=\"text-similarity-davinci-001\")['data'][0]['embedding']\n",
"len(embedding)"
"embedding = openai.Embedding.create(\n",
" input=\"Sample document text goes here\",\n",
" engine=\"text-similarity-davinci-001\"\n",
")[\"data\"][0][\"embedding\"]\n",
"len(embedding)\n"
]
},
{
@@ -49,21 +52,23 @@
"import openai\n",
"from tenacity import retry, wait_random_exponential, stop_after_attempt\n",
"\n",
"\n",
"@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\n",
"def get_embedding(text: str, engine=\"text-similarity-davinci-001\") -> List[float]:\n",
"def get_embedding(text: str, engine=\"text-similarity-davinci-001\") -> list[float]:\n",
"\n",
" # replace newlines, which can negatively affect performance.\n",
" text = text.replace(\"\\n\", \" \")\n",
"\n",
" return openai.Embedding.create(input=[text], engine=engine)[\"data\"][0][\"embedding\"]\n",
"\n",
"\n",
"embedding = get_embedding(\"Sample query text goes here\", engine=\"text-search-ada-query-001\")\n",
"print(len(embedding))"
"print(len(embedding))\n"
]
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -76,16 +81,14 @@
],
"source": [
"embedding = get_embedding(\"Sample document text goes here\", engine=\"text-search-ada-doc-001\")\n",
"print(len(embedding))"
"print(len(embedding))\n"
]
}
],
"metadata": {
"interpreter": {
"hash": "be4b5d5b73a21c599de40d6deb1129796d12dc1cc33a738f7bac13269cfcafe8"
},
"kernelspec": {
"display_name": "Python 3.7.3 64-bit ('base': conda)",
"display_name": "Python 3.9.9 ('openai')",
"language": "python",
"name": "python3"
},
"language_info": {
@@ -98,9 +101,14 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
"version": "3.9.9"
},
"orig_nbformat": 4
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
}
}
},
"nbformat": 4,
"nbformat_minor": 2

@@ -45,57 +45,48 @@
" <th>Text</th>\n",
" <th>combined</th>\n",
" </tr>\n",
" <tr>\n",
" <th>Id</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1303862400</td>\n",
" <td>B001E4KFG0</td>\n",
" <td>A3SGXH7AUHU8GW</td>\n",
" <th>0</th>\n",
" <td>1351123200</td>\n",
" <td>B003XPF9BO</td>\n",
" <td>A3R7JR3FMEBXQB</td>\n",
" <td>5</td>\n",
" <td>Good Quality Dog Food</td>\n",
" <td>I have bought several of the Vitality canned d...</td>\n",
" <td>Title: Good Quality Dog Food; Content: I have ...</td>\n",
" <td>where does one start...and stop... with a tre...</td>\n",
" <td>Wanted to save some to bring to my Chicago fam...</td>\n",
" <td>Title: where does one start...and stop... wit...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>1346976000</td>\n",
" <td>B00813GRG4</td>\n",
" <td>A1D87F6ZCVE5NK</td>\n",
" <th>1</th>\n",
" <td>1351123200</td>\n",
" <td>B003JK537S</td>\n",
" <td>A3JBPC3WFUT5ZP</td>\n",
" <td>1</td>\n",
" <td>Not as Advertised</td>\n",
" <td>Product arrived labeled as Jumbo Salted Peanut...</td>\n",
" <td>Title: Not as Advertised; Content: Product arr...</td>\n",
" <td>Arrived in pieces</td>\n",
" <td>Not pleased at all. When I opened the box, mos...</td>\n",
" <td>Title: Arrived in pieces; Content: Not pleased...</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Time ProductId UserId Score Summary \\\n",
"Id \n",
"1 1303862400 B001E4KFG0 A3SGXH7AUHU8GW 5 Good Quality Dog Food \n",
"2 1346976000 B00813GRG4 A1D87F6ZCVE5NK 1 Not as Advertised \n",
" Time ProductId UserId Score \\\n",
"0 1351123200 B003XPF9BO A3R7JR3FMEBXQB 5 \n",
"1 1351123200 B003JK537S A3JBPC3WFUT5ZP 1 \n",
"\n",
" Text \\\n",
"Id \n",
"1 I have bought several of the Vitality canned d... \n",
"2 Product arrived labeled as Jumbo Salted Peanut... \n",
" Summary \\\n",
"0 where does one start...and stop... with a tre... \n",
"1 Arrived in pieces \n",
"\n",
" combined \n",
"Id \n",
"1 Title: Good Quality Dog Food; Content: I have ... \n",
"2 Title: Not as Advertised; Content: Product arr... "
" Text \\\n",
"0 Wanted to save some to bring to my Chicago fam... \n",
"1 Not pleased at all. When I opened the box, mos... \n",
"\n",
" combined \n",
"0 Title: where does one start...and stop... wit... \n",
"1 Title: Arrived in pieces; Content: Not pleased... "
]
},
"execution_count": 1,
@@ -106,7 +97,8 @@
"source": [
"import pandas as pd\n",
"\n",
"df = pd.read_csv('input/Reviews.csv', index_col=0)\n",
"input_datapath = 'data/fine_food_reviews_1k.csv' # to save space, we provide a pre-filtered dataset\n",
"df = pd.read_csv(input_datapath, index_col=0)\n",
"df = df[['Time', 'ProductId', 'UserId', 'Score', 'Summary', 'Text']]\n",
"df = df.dropna()\n",
"df['combined'] = \"Title: \" + df.Summary.str.strip() + \"; Content: \" + df.Text.str.strip()\n",
@@ -161,16 +153,14 @@
"# This will take just under 10 minutes\n",
"df['babbage_similarity'] = df.combined.apply(lambda x: get_embedding(x, engine='text-similarity-babbage-001'))\n",
"df['babbage_search'] = df.combined.apply(lambda x: get_embedding(x, engine='text-search-babbage-doc-001'))\n",
"df.to_csv('output/embedded_1k_reviews.csv')"
"df.to_csv('data/fine_food_reviews_with_embeddings_1k.csv')"
]
}
],
"metadata": {
"interpreter": {
"hash": "be4b5d5b73a21c599de40d6deb1129796d12dc1cc33a738f7bac13269cfcafe8"
},
"kernelspec": {
"display_name": "Python 3.7.3 64-bit ('base': conda)",
"display_name": "Python 3.9.9 ('openai')",
"language": "python",
"name": "python3"
},
"language_info": {
@@ -185,7 +175,12 @@
"pygments_lexer": "ipython3",
"version": "3.9.9"
},
"orig_nbformat": 4
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
}
}
},
"nbformat": 4,
"nbformat_minor": 2

@@ -14,7 +14,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 1,
"id": "9e3839a6-9146-4f60-b74b-19abbc24278d",
"metadata": {},
"outputs": [],
@@ -38,7 +38,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "a167516c-7c19-4bda-afa5-031aa0ae13bb",
"metadata": {},
"outputs": [
@@ -48,7 +48,7 @@
"\"The 2020 Summer Olympics men's high jump was won by Mariusz Przybylski of Poland.\""
]
},
"execution_count": 5,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -83,7 +83,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"id": "a5451371-17fe-4ef3-aa02-affcf4edb0e0",
"metadata": {},
"outputs": [
@@ -93,7 +93,7 @@
"\"Sorry, I don't know.\""
]
},
"execution_count": 6,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@@ -125,7 +125,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 4,
"id": "fceaf665-2602-4788-bc44-9eb256a6f955",
"metadata": {},
"outputs": [
@@ -135,7 +135,7 @@
"\"Gianmarco Tamberi and Mutaz Essa Barshim won the 2020 Summer Olympics men's high jump.\""
]
},
"execution_count": 7,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -200,7 +200,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 5,
"id": "cc9c8d69-e234-48b4-87e3-935970e1523a",
"metadata": {},
"outputs": [
@@ -245,57 +245,58 @@
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>Volleyball at the 2020 Summer Olympics Women's tournament</th>\n",
" <th>Format</th>\n",
" <td>The preliminary round was a competition betwee...</td>\n",
" <td>132</td>\n",
" <th>Nordic combined at the 2016 Winter Youth Olympics</th>\n",
" <th>Summary</th>\n",
" <td>Nordic combined at the 2016 Winter Youth Olymp...</td>\n",
" <td>56</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Rugby sevens at the 2020 Summer Olympics Women's qualification</th>\n",
" <th>South America</th>\n",
" <td>Sudamérica Rugby held a tournament on 12 June...</td>\n",
" <td>56</td>\n",
" <th>Morocco at the 2020 Summer Olympics</th>\n",
" <th>Judo</th>\n",
" <td>Morocco qualified two female judoka for each o...</td>\n",
" <td>106</td>\n",
" </tr>\n",
" <tr>\n",
" <th rowspan=\"2\" valign=\"top\">Canoeing at the 2020 Summer Olympics Men's slalom C-1</th>\n",
" <th>Competition format</th>\n",
" <td>Slalom canoeing uses a three-round format, wit...</td>\n",
" <td>136</td>\n",
" <th>Guinea-Bissau at the 2020 Summer Olympics</th>\n",
" <th>Wrestling</th>\n",
" <td>Guinea-Bissau qualified two wrestlers for each...</td>\n",
" <td>69</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Qualification</th>\n",
" <td>A National Olympic Committee (NOC) entered onl...</td>\n",
" <td>171</td>\n",
" <th>Rome bid for the 2020 Summer Olympics</th>\n",
" <th>History</th>\n",
" <td>The Italian National Olympic Committee (CONI) ...</td>\n",
" <td>738</td>\n",
" </tr>\n",
" <tr>\n",
" <th>Tunisia at the 2020 Summer Olympics</th>\n",
" <th>Athletics</th>\n",
" <td>Tunisian athletes further achieved the entry s...</td>\n",
" <td>48</td>\n",
" <th>Italy at the 2020 Summer Olympics</th>\n",
" <th>Slalom</th>\n",
" <td>Italian canoeists qualified one boat for each ...</td>\n",
" <td>76</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" content \\\n",
"title heading \n",
"Volleyball at the 2020 Summer Olympics Women'... Format The preliminary round was a competition betwee... \n",
"Rugby sevens at the 2020 Summer Olympics Wome... South America Sudamérica Rugby held a tournament on 12 June... \n",
"Canoeing at the 2020 Summer Olympics Men's sl... Competition format Slalom canoeing uses a three-round format, wit... \n",
" Qualification A National Olympic Committee (NOC) entered onl... \n",
"Tunisia at the 2020 Summer Olympics Athletics Tunisian athletes further achieved the entry s... \n",
" content \\\n",
"title heading \n",
"Nordic combined at the 2016 Winter Youth Olympics Summary Nordic combined at the 2016 Winter Youth Olymp... \n",
"Morocco at the 2020 Summer Olympics Judo Morocco qualified two female judoka for each o... \n",
"Guinea-Bissau at the 2020 Summer Olympics Wrestling Guinea-Bissau qualified two wrestlers for each... \n",
"Rome bid for the 2020 Summer Olympics History The Italian National Olympic Committee (CONI) ... \n",
"Italy at the 2020 Summer Olympics Slalom Italian canoeists qualified one boat for each ... \n",
"\n",
" tokens \n",
"title heading \n",
"Volleyball at the 2020 Summer Olympics Women'... Format 132 \n",
"Rugby sevens at the 2020 Summer Olympics Wome... South America 56 \n",
"Canoeing at the 2020 Summer Olympics Men's sl... Competition format 136 \n",
" Qualification 171 \n",
"Tunisia at the 2020 Summer Olympics Athletics 48 "
" tokens \n",
"title heading \n",
"Nordic combined at the 2016 Winter Youth Olympics Summary 56 \n",
"Morocco at the 2020 Summer Olympics Judo 106 \n",
"Guinea-Bissau at the 2020 Summer Olympics Wrestling 69 \n",
"Rome bid for the 2020 Summer Olympics History 738 \n",
"Italy at the 2020 Summer Olympics Slalom 76 "
]
},
"execution_count": 8,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -324,7 +325,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 6,
"id": "4b874907-5109-4eef-ad9a-add4367925a3",
"metadata": {},
"outputs": [],
@@ -337,7 +338,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 7,
"id": "ba475f30-ef7f-431c-b60d-d5970b62ad09",
"metadata": {},
"outputs": [],
@@ -368,7 +369,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 8,
"id": "737266aa-cbe7-4691-87c1-fce8a31632f1",
"metadata": {},
"outputs": [],
@@ -398,7 +399,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 9,
"id": "ab50bfca-cb02-41c6-b338-4400abe1d86e",
"metadata": {},
"outputs": [],
@@ -412,7 +413,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 10,
"id": "b9a8c713-c8a9-47dc-85a4-871ee1395566",
"metadata": {},
"outputs": [
@@ -446,7 +447,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 11,
"id": "dcd680e9-f194-4180-b14f-fc357498eb92",
"metadata": {},
"outputs": [],
@@ -476,7 +477,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 12,
"id": "e3a27d73-f47f-480d-b336-079414f749cb",
"metadata": {},
"outputs": [
@@ -495,7 +496,7 @@
" (\"Athletics at the 2020 Summer Olympics Women's long jump\", 'Summary'))]"
]
},
"execution_count": 15,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -506,7 +507,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 13,
"id": "729c2ce7-8540-4ab2-bb3a-76c4dfcb689c",
"metadata": {},
"outputs": [
@@ -525,7 +526,7 @@
" (\"Athletics at the 2020 Summer Olympics Women's pole vault\", 'Summary'))]"
]
},
"execution_count": 16,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@@ -554,7 +555,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 14,
"id": "b763ace2-1946-48e0-8ff1-91ba335d47a0",
"metadata": {},
"outputs": [
@@ -564,7 +565,7 @@
"'Context separator contains 3 tokens'"
]
},
"execution_count": 18,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
@@ -581,7 +582,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 15,
"id": "0c5c0509-eeb9-4552-a5d4-6ace04ef73dd",
"metadata": {},
"outputs": [],
@@ -618,7 +619,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 16,
"id": "f614045a-3917-4b28-9643-7e0c299ec1a7",
"metadata": {},
"outputs": [
@@ -670,7 +671,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 17,
"id": "b0edfec7-9243-4573-92e0-253d31c771ad",
"metadata": {},
"outputs": [],
@@ -685,7 +686,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 18,
"id": "9c1c9a69-848e-4099-a90d-c8da36c153d5",
"metadata": {},
"outputs": [],
@@ -715,7 +716,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 19,
"id": "c233e449-bf33-4c9e-b095-6a4dd278c8fd",
"metadata": {},
"outputs": [
@@ -735,7 +736,7 @@
"'Gianmarco Tamberi and Mutaz Essa Barshim emerged as joint winners of the event following a tie between both of them as they cleared 2.37m.'"
]
},
"execution_count": 24,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -760,7 +761,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 20,
"id": "1127867b-2884-44bb-9439-0e8ae171c835",
"metadata": {},
"outputs": [
@@ -785,7 +786,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 21,
"id": "720d9e0b-b189-4101-91ee-babf736199e6",
"metadata": {},
"outputs": [
@@ -810,7 +811,7 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 22,
"id": "4e8e51cc-e4eb-4557-9e09-2929d4df5b7f",
"metadata": {},
"outputs": [
@@ -837,7 +838,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 23,
"id": "37c83519-e3c6-4c44-8b4a-98cbb3a5f5ba",
"metadata": {},
"outputs": [
@@ -870,7 +871,7 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 24,
"id": "26a1a9ef-e1ee-4f80-a1b1-6164ccfa5bac",
"metadata": {},
"outputs": [
@@ -897,7 +898,7 @@
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": 25,
"id": "9fba8a63-eb81-4661-ae17-59bb5e2933d6",
"metadata": {},
"outputs": [
@@ -928,7 +929,7 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 26,
"id": "2d4c693b-cdb9-4f4c-bd1b-f77b29097a1f",
"metadata": {},
"outputs": [
@@ -961,7 +962,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "Python 3.9.9 ('openai')",
"language": "python",
"name": "python3"
},
@@ -976,6 +977,11 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.9"
},
"vscode": {
"interpreter": {
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
}
}
},
"nbformat": 4,

File diff suppressed because it is too large

@@ -13,14 +13,14 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Babbage similarity embedding performance on 1k Amazon reviews: mse=0.38, mae=0.39\n"
"Babbage similarity embedding performance on 1k Amazon reviews: mse=0.39, mae=0.38\n"
]
}
],
@@ -32,39 +32,41 @@
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import mean_squared_error, mean_absolute_error\n",
"\n",
"df = pd.read_csv('output/embedded_1k_reviews.csv')\n",
"df['babbage_similarity'] = df.babbage_similarity.apply(eval).apply(np.array)\n",
"datafile_path = \"https://cdn.openai.com/API/examples/data/fine_food_reviews_with_embeddings_1k.csv\" # for your convenience, we precomputed the embeddings\n",
"df = pd.read_csv(datafile_path)\n",
"df[\"babbage_similarity\"] = df.babbage_similarity.apply(eval).apply(np.array)\n",
"\n",
"X_train, X_test, y_train, y_test = train_test_split(list(df.babbage_similarity.values), df.Score, test_size = 0.2, random_state=42)\n",
"X_train, X_test, y_train, y_test = train_test_split(list(df.babbage_similarity.values), df.Score, test_size=0.2, random_state=42)\n",
"\n",
"rfr = RandomForestRegressor(n_estimators=100)\n",
"rfr.fit(X_train, y_train)\n",
"preds = rfr.predict(X_test)\n",
"\n",
"\n",
"mse = mean_squared_error(y_test, preds)\n",
"mae = mean_absolute_error(y_test, preds)\n",
"\n",
"print(f\"Babbage similarity embedding performance on 1k Amazon reviews: mse={mse:.2f}, mae={mae:.2f}\")"
"print(f\"Babbage similarity embedding performance on 1k Amazon reviews: mse={mse:.2f}, mae={mae:.2f}\")\n"
]
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Dummy mean prediction performance on Amazon reviews: mse=1.77, mae=1.04\n"
"Dummy mean prediction performance on Amazon reviews: mse=1.81, mae=1.08\n"
]
}
],
"source": [
"bmse = mean_squared_error(y_test, np.repeat(y_test.mean(), len(y_test)))\n",
"bmae = mean_absolute_error(y_test, np.repeat(y_test.mean(), len(y_test)))\n",
"print(f\"Dummy mean prediction performance on Amazon reviews: mse={bmse:.2f}, mae={bmae:.2f}\")"
"print(\n",
" f\"Dummy mean prediction performance on Amazon reviews: mse={bmse:.2f}, mae={bmae:.2f}\"\n",
")\n"
]
},
{
@@ -83,11 +85,9 @@
}
],
"metadata": {
"interpreter": {
"hash": "be4b5d5b73a21c599de40d6deb1129796d12dc1cc33a738f7bac13269cfcafe8"
},
"kernelspec": {
"display_name": "Python 3.7.3 64-bit ('base': conda)",
"display_name": "Python 3.9.9 ('openai')",
"language": "python",
"name": "python3"
},
"language_info": {
@@ -100,9 +100,14 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
"version": "3.9.9"
},
"orig_nbformat": 4
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
}
}
},
"nbformat": 4,
"nbformat_minor": 2

@@ -18,9 +18,9 @@
"import pandas as pd\n",
"import numpy as np\n",
"\n",
"\n",
"df = pd.read_csv('output/embedded_1k_reviews.csv')\n",
"df['babbage_search'] = df.babbage_search.apply(eval).apply(np.array)"
"datafile_path = \"https://cdn.openai.com/API/examples/data/fine_food_reviews_with_embeddings_1k.csv\" # for your convenience, we precomputed the embeddings\n",
"df = pd.read_csv(datafile_path)\n",
"df[\"babbage_search\"] = df.babbage_search.apply(eval).apply(np.array)\n"
]
},
{
@@ -39,11 +39,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Jamaican Blue beans: Excellent coffee bean for roasting. Our family just purchased another 5 pounds for more roasting. Plenty of flavor and mild on acidity when roasted to a dark brown bean and befor\n",
"Fantastic Instant Refried beans: Fantastic Instant Refried Beans have been a staple for my family now for nearly 20 years. All 7 of us love it and my grown kids are passing on the tradition.\n",
"\n",
"Good Buy: I liked the beans. They were vacuum sealed, plump and moist. Would recommend them for any use. I personally split and stuck them in some vodka to make vanilla extract. Yum!\n",
"Jamaican Blue beans: Excellent coffee bean for roasting. Our family just purchased another 5 pounds for more roasting. Plenty of flavor and mild on acidity when roasted to a dark brown bean and befor\n",
"\n",
"Fantastic Instant Refried beans: Fantastic Instant Refried Beans have been a staple for my family now for nearly 20 years. All 7 of us love it and my grown kids are passing on the tradition.\n",
"Delicious!: I enjoy this white beans seasoning, it gives a rich flavor to the beans I just love it, my mother in law didn't know about this Zatarain's brand and now she is traying different seasoning\n",
"\n"
]
}
@@ -53,16 +53,26 @@
"\n",
"# search through the reviews for a specific product\n",
"def search_reviews(df, product_description, n=3, pprint=True):\n",
" embedding = get_embedding(product_description, engine='text-search-babbage-query-001')\n",
" df['similarities'] = df.babbage_search.apply(lambda x: cosine_similarity(x, embedding))\n",
" embedding = get_embedding(\n",
" product_description,\n",
" engine=\"text-search-babbage-query-001\"\n",
" )\n",
" df[\"similarities\"] = df.babbage_search.apply(lambda x: cosine_similarity(x, embedding))\n",
"\n",
" res = df.sort_values('similarities', ascending=False).head(n).combined.str.replace('Title: ','').str.replace('; Content:', ': ')\n",
" res = (\n",
" df.sort_values(\"similarities\", ascending=False)\n",
" .head(n)\n",
" .combined.str.replace(\"Title: \", \"\")\n",
" .str.replace(\"; Content:\", \": \")\n",
" )\n",
" if pprint:\n",
" for r in res:\n",
" print(r[:200])\n",
" print()\n",
" return res\n",
"res = search_reviews(df, 'delicious beans', n=3)\n"
"\n",
"\n",
"res = search_reviews(df, \"delicious beans\", n=3)\n"
]
},
{
@@ -74,17 +84,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Rustichella ROCKS!: Anything this company makes is worthwhile eating! My favorite is their Trenne.<br />Their whole wheat pasta is the best I have ever had.\n",
"\n",
"sooo good: tastes so good. Worth the money. My boyfriend hates wheat pasta and LOVES this. cooks fast tastes great.I love this brand and started buying more of their pastas. Bulk is best.\n",
"\n",
"Wonderful: Came quickly. Was plentiful and delicious and cheaper than in the store. You will enjoy it if you like thick pasta.\n",
"Tasty and Quick Pasta: Barilla Whole Grain Fusilli with Vegetable Marinara is tasty and has an excellent chunky vegetable marinara. I just wish there was more of it. If you aren't starving or on a \n",
"\n",
"Rustichella ROCKS!: Anything this company makes is worthwhile eating! My favorite is their Trenne.<br />Their whole wheat pasta is the best I have ever had.\n",
"\n"
]
}
],
"source": [
"res = search_reviews(df, 'whole wheat pasta', n=3)"
"res = search_reviews(df, \"whole wheat pasta\", n=3)\n"
]
},
{
@@ -109,7 +119,7 @@
}
],
"source": [
"res = search_reviews(df, 'bad delivery', n=1)"
"res = search_reviews(df, \"bad delivery\", n=1)\n"
]
},
{
@@ -134,7 +144,7 @@
}
],
"source": [
"res = search_reviews(df, 'spoilt', n=1)"
"res = search_reviews(df, \"spoilt\", n=1)\n"
]
},
{
@@ -148,22 +158,20 @@
"text": [
"Good food: The only dry food my queen cat will eat. Helps prevent hair balls. Good packaging. Arrives promptly. Recommended by a friend who sells pet food.\n",
"\n",
"A great deal on Greenies: Paid only $22 with free shipping for 96 teenies compared to about $35 at the pet store. How can you go wrong with a deal like that? The dog begs for his daily Greenie. Got \n",
"Good product: I like that this is a better product for my pets but really for the price of it I couldn't afford to buy this all the time. My cat isn't very picky usually and she ate this, we usually \n",
"\n"
]
}
],
"source": [
"res = search_reviews(df, 'pet food', n=2)"
"res = search_reviews(df, \"pet food\", n=2)\n"
]
}
],
"metadata": {
"interpreter": {
"hash": "be4b5d5b73a21c599de40d6deb1129796d12dc1cc33a738f7bac13269cfcafe8"
},
"kernelspec": {
"display_name": "Python 3.7.3 64-bit ('base': conda)",
"display_name": "Python 3.9.9 ('openai')",
"language": "python",
"name": "python3"
},
"language_info": {
@@ -176,9 +184,14 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
"version": "3.9.9"
},
"orig_nbformat": 4
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
}
}
},
"nbformat": 4,
"nbformat_minor": 2

@@ -39,7 +39,7 @@
"import numpy as np\n",
"from sklearn.model_selection import train_test_split\n",
"\n",
"df = pd.read_csv('output/embedded_babbage_similarity_50k.csv', index_col=0)\n",
"df = pd.read_csv('output/embedded_babbage_similarity_50k.csv', index_col=0) # note that you will need to generate this file to run the code below\n",
"df['babbage_similarity'] = df.babbage_similarity.apply(eval).apply(np.array)\n",
"X_train, X_test, y_train, y_test = train_test_split(df, df.Score, test_size = 0.2, random_state=42)\n",
"\n",

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@@ -0,0 +1,196 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure embeddings example\n",
"In this example we'll try to go over all operations for embeddings that can be done using the Azure endpoints. \\\n",
"This example focuses on finetuning but touches on the majority of operations that are also available using the API. This example is meant to be a quick way of showing simple operations and is not meant as a tutorial."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"from openai import cli"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"In the following section the endpoint and key need to be set up of the next sections to work. \\\n",
"Please go to https://portal.azure.com, find your resource and then under \"Resource Management\" -> \"Keys and Endpoints\" look for the \"Endpoint\" value and one of the Keys. They will act as api_base and api_key in the code below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"openai.api_key = '' # Please add your api key here\n",
"openai.api_base = '' # Please add your endpoint here\n",
"\n",
"openai.api_type = 'azure'\n",
"openai.api_version = '2022-03-01-preview' # this may change in the future"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deployments\n",
"In this section we are going to create a deployment using the finetune model that we just adapted and then used the deployment to create a simple completion operation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deployments: Create Manually\n",
"Let's create a deployment using the text-similarity-curie-001 engine. You can create a new deployment by going to your Resource in your portal under \"Resource Management\" -> \"Deployments\"."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### (Optional) Deployments: Create Programatically\n",
"We can also create a deployment using code:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = \"text-similarity-curie-001\"\n",
"\n",
"# Now let's create the deployment\n",
"print(f'Creating a new deployment with model: {model}')\n",
"result = openai.Deployment.create(model=model, scale_settings={\"scale_type\":\"manual\", \"capacity\": 1})\n",
"deployment_id = result[\"id\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### (Optional) Deployments: Retrieving\n",
"Now let's check the status of the newly created deployment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f'Checking for deployment status.')\n",
"resp = openai.Deployment.retrieve(id=deployment_id)\n",
"status = resp[\"status\"]\n",
"print(f'Deployment {deployment_id} is with status: {status}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deployments: Listing\n",
"Now because creating a new deployment takes a long time, let's look in the subscription for an already finished deployment that succeeded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('While deployment running, selecting a completed one.')\n",
"deployment_id = None\n",
"result = openai.Deployment.list()\n",
"for deployment in result.data:\n",
" if deployment[\"status\"] == \"succeeded\":\n",
" deployment_id = deployment[\"id\"]\n",
" break\n",
"\n",
"if not deployment_id:\n",
" print('No deployment with status: succeeded found.')\n",
"else:\n",
" print(f'Found a successful deployment with id: {deployment_id}.')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Embeddings\n",
"Now let's send a sample embedding to the deployment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"embeddings = openai.Embedding.create(engine=deployment_id,\n",
" input=\"The food was delicious and the waiter...\")\n",
" \n",
"print(embeddings)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### (Optional) Deployments: Delete\n",
"Finally let's delete the deployment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f'Deleting deployment: {deployment_id}')\n",
"openai.Deployment.delete(sid=deployment_id)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.9 ('openai')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
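The notebook above embeds a single string and prints the raw response. A small follow-on sketch comparing two texts against the same deployment; it assumes `openai.api_*` and `deployment_id` are configured as in the cells above, and it embeds one text per call to sidestep any batch-size limits on the endpoint:

```python
import numpy as np
import openai

# deployment_id = "<your-deployment-id>"  # assumed to be set by the cells above

def embed(text: str) -> np.ndarray:
    resp = openai.Embedding.create(engine=deployment_id, input=text)
    return np.array(resp["data"][0]["embedding"])

a = embed("The food was delicious and the waiter...")
b = embed("The food was bland and the service was slow.")
print("cosine similarity:", float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b))))
```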

@@ -0,0 +1,475 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Azure Fune tuning example\n",
"In this example we'll try to go over all operations that can be done using the Azure endpoints and their differences with the openAi endpoints (if any).<br>\n",
"This example focuses on finetuning but touches on the majority of operations that are also available using the API. This example is meant to be a quick way of showing simple operations and is not meant as a finetune model adaptation tutorial.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import openai\n",
"from openai import cli"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup\n",
"In the following section the endpoint and key need to be set up of the next sections to work.<br> Please go to https://portal.azure.com, find your resource and then under \"Resource Management\" -> \"Keys and Endpoints\" look for the \"Endpoint\" value and one of the Keys. They will act as api_base and api_key in the code below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"openai.api_key = '' # Please add your api key here\n",
"openai.api_base = '' # Please add your endpoint here\n",
"\n",
"openai.api_type = 'azure'\n",
"openai.api_version = '2022-03-01-preview' # this may change in the future"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Microsoft Active Directory Authentication\n",
"Instead of key based authentication, you can use Active Directory to authenticate using credential tokens. Uncomment the next code section to use credential based authentication:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\"\"\"\n",
"from azure.identity import DefaultAzureCredential\n",
"\n",
"default_credential = DefaultAzureCredential()\n",
"token = default_credential.get_token(\"https://cognitiveservices.azure.com\")\n",
"\n",
"openai.api_type = 'azure_ad'\n",
"openai.api_key = token.token\n",
"openai.api_version = '2022-03-01-preview' # this may change in the future\n",
"\n",
"\n",
"openai.api_base = '' # Please add your endpoint here\n",
"\"\"\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Files\n",
"In the next section we will focus on the files operations: importing, listing, retrieving, deleting. For this we need to create 2 temporary files with some sample data. For the sake of simplicity, we will use the same data for training and validation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import shutil\n",
"import json\n",
"\n",
"training_file_name = 'training.jsonl'\n",
"validation_file_name = 'validation.jsonl'\n",
"\n",
"sample_data = [{\"prompt\": \"When I go to the store, I want an\", \"completion\": \"apple\"},\n",
" {\"prompt\": \"When I go to work, I want a\", \"completion\": \"coffe\"},\n",
" {\"prompt\": \"When I go home, I want a\", \"completion\": \"soda\"}]\n",
"\n",
"print(f'Generating the training file: {training_file_name}')\n",
"with open(training_file_name, 'w') as training_file:\n",
" for entry in sample_data:\n",
" json.dump(entry, training_file)\n",
" training_file.write('\\n')\n",
"\n",
"print(f'Copying the training file to the validation file')\n",
"shutil.copy(training_file_name, validation_file_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Files: Listing\n",
"List all of the uploaded files and check for the ones that are named \"training.jsonl\" or \"validation.jsonl\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('Checking for existing uploaded files.')\n",
"results = []\n",
"files = openai.File.list().data\n",
"print(f'Found {len(files)} total uploaded files in the subscription.')\n",
"for item in files:\n",
" if item[\"filename\"] in [training_file_name, validation_file_name]:\n",
" results.append(item[\"id\"])\n",
"print(f'Found {len(results)} already uploaded files that match our names.')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Files: Deleting\n",
"Let's now delete those found files (if any) since we're going to be re-uploading them next."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f'Deleting already uploaded files.')\n",
"for id in results:\n",
" openai.File.delete(sid = id)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Files: Importing & Retrieving\n",
"Now, let's import our two files ('training.jsonl' and 'validation.jsonl') and keep those IDs since we're going to use them later for finetuning.<br>\n",
"For this operation we are going to use the cli wrapper which does a bit more checks before uploading and also gives us progress. In addition, after uploading we're going to check the status our import until it has succeeded (or failed if something goes wrong)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"def check_status(training_id, validation_id):\n",
" train_status = openai.File.retrieve(training_id)[\"status\"]\n",
" valid_status = openai.File.retrieve(validation_id)[\"status\"]\n",
" print(f'Status (training_file | validation_file): {train_status} | {valid_status}')\n",
" return (train_status, valid_status)\n",
"\n",
"#importing our two files\n",
"training_id = cli.FineTune._get_or_upload(training_file_name, True)\n",
"validation_id = cli.FineTune._get_or_upload(validation_file_name, True)\n",
"\n",
"#checking the status of the imports\n",
"(train_status, valid_status) = check_status(training_id, validation_id)\n",
"\n",
"while train_status not in [\"succeeded\", \"failed\"] or valid_status not in [\"succeeded\", \"failed\"]:\n",
" time.sleep(1)\n",
" (train_status, valid_status) = check_status(training_id, validation_id)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Files: Downloading\n",
"Now let's download one of the files, the training file for example, to check that everything was in order during importing and all bits are there."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f'Downloading training file: {training_id}')\n",
"result = openai.File.download(training_id)\n",
"print(result)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Finetune\n",
"In this section we are going to use the two training and validation files that we imported in the previous section, to train a finetune model."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Finetune: Adapt\n",
"First let's create the finetune adaptation job."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"create_args = {\n",
" \"training_file\": training_id,\n",
" \"validation_file\": validation_id,\n",
" \"model\": \"curie\",\n",
" \"compute_classification_metrics\": True,\n",
" \"classification_n_classes\": 3\n",
"}\n",
"resp = openai.FineTune.create(**create_args)\n",
"job_id = resp[\"id\"]\n",
"status = resp[\"status\"]\n",
"\n",
"print(f'Fine-tunning model with jobID: {job_id}.')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Finetune: Streaming\n",
"While the job runs, we can subscribe to the streaming events to check the progress of the operation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import signal\n",
"import datetime\n",
"\n",
"def signal_handler(sig, frame):\n",
" status = openai.FineTune.retrieve(job_id).status\n",
" print(f\"Stream interrupted. Job is still {status}.\")\n",
" return\n",
"\n",
"print('Streaming events for the fine-tuning job: {job_id}')\n",
"signal.signal(signal.SIGINT, signal_handler)\n",
"\n",
"events = openai.FineTune.stream_events(job_id)\n",
"try:\n",
" for event in events:\n",
" print(f'{datetime.datetime.fromtimestamp(event[\"created_at\"])} {event[\"message\"]}')\n",
"\n",
"except Exception:\n",
" print(\"Stream interrupted (client disconnected).\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Finetune: Listing and Retrieving\n",
"Now let's check that our operation was successful and in addition we can look at all of the finetuning operations using a list operation."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"status = openai.FineTune.retrieve(id=job_id)[\"status\"]\n",
"if status not in [\"succeeded\", \"failed\"]:\n",
" print(f'Job not in terminal status: {status}. Waiting.')\n",
" while status not in [\"succeeded\", \"failed\"]:\n",
" time.sleep(2)\n",
" status = openai.FineTune.retrieve(id=job_id)[\"status\"]\n",
" print(f'Status: {status}')\n",
"else:\n",
" print(f'Finetune job {job_id} finished with status: {status}')\n",
"\n",
"print('Checking other finetune jobs in the subscription.')\n",
"result = openai.FineTune.list()\n",
"print(f'Found {len(result)} finetune jobs.')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Finetune: Deleting\n",
"Finally we can delete our finetune job.<br>\n",
"WARNING: Please skip this step if you want to continue with the next section as the finetune model is needed. (The delete code is commented out by default)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# openai.FineTune.delete(sid=job_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Deployments\n",
"In this section we are going to create a deployment using the finetune model that we just adapted and then used the deployment to create a simple completion operation."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deployments: Create\n",
"Let's create a deployment using the fine-tune model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#Fist let's get the model of the previous job:\n",
"result = openai.FineTune.retrieve(id=job_id)\n",
"if result[\"status\"] == 'succeeded':\n",
" model = result[\"fine_tuned_model\"]\n",
"\n",
"# Now let's create the deployment\n",
"print(f'Creating a new deployment with model: {model}')\n",
"result = openai.Deployment.create(model=model, scale_settings={\"scale_type\":\"manual\", \"capacity\": 1})\n",
"deployment_id = result[\"id\"]\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deployments: Retrieving\n",
"Now let's check the status of the newly created deployment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f'Checking for deployment status.')\n",
"resp = openai.Deployment.retrieve(id=deployment_id)\n",
"status = resp[\"status\"]\n",
"print(f'Deployment {deployment_id} is with status: {status}')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deployments: Listing\n",
"Now because creating a new deployment takes a long time, let's look in the subscription for an already finished deployment that succeeded."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('While deployment running, selecting a completed one.')\n",
"deployment_id = None\n",
"result = openai.Deployment.list()\n",
"for deployment in result.data:\n",
" if deployment[\"status\"] == \"succeeded\":\n",
" deployment_id = deployment[\"id\"]\n",
" break\n",
"\n",
"if not deployment_id:\n",
" print('No deployment with status: succeeded found.')\n",
"else:\n",
" print(f'Found a successful deployment with id: {deployment_id}.')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Completions\n",
"Now let's send a sample completion to the deployment."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print('Sending a test completion job')\n",
"start_phrase = 'When I go to the store, I want a'\n",
"response = openai.Completion.create(engine=deployment_id, prompt=start_phrase, max_tokens=4)\n",
"text = response['choices'][0]['text'].replace('\\n', '').replace(' .', '.').strip()\n",
"print(f'\"{start_phrase} {text}\"')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Deployments: Delete\n",
"Finally let's delete the deployment"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(f'Deleting deployment: {deployment_id}')\n",
"openai.Deployment.delete(sid=deployment_id)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Thank you"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.9 64-bit ('3.9.9')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.9"
},
"orig_nbformat": 4,
"vscode": {
"interpreter": {
"hash": "cb9817b186a29e4e9713184d901f26c1ee05ad25243d878baff7f31bb1fef480"
}
}
},
"nbformat": 4,
"nbformat_minor": 2
}
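The notebook polls file import status and fine-tune job status with two slightly different loops. A small generic helper can cover both; a sketch assuming the same openai 0.x resource objects used throughout the notebook:

```python
import time

def wait_for_status(retrieve, resource_id, terminal=("succeeded", "failed"), interval=2):
    """Poll retrieve(resource_id)["status"] until it reaches a terminal state."""
    status = retrieve(resource_id)["status"]
    while status not in terminal:
        print(f"Status: {status}")
        time.sleep(interval)
        status = retrieve(resource_id)["status"]
    return status

# Hypothetical usage with the objects from the notebook:
# wait_for_status(openai.File.retrieve, training_id)
# wait_for_status(lambda i: openai.FineTune.retrieve(id=i), job_id)
```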

File diff suppressed because it is too large

File diff suppressed because it is too large