mirror of
https://github.com/openai/openai-cookbook
synced 2024-11-11 13:11:02 +00:00
119 lines
3.1 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"attachments": {},
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"## Using embeddings\n",
|
|
"\n",
|
|
"This notebook contains some helpful snippets you can use to embed text with the 'text-embedding-ada-002' model via the OpenAI API."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"1536"
|
|
]
|
|
},
|
|
"execution_count": 1,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"# Embed a single text string and inspect the result.\n",
|
|
"import openai\n",
|
|
"\n",
|
|
"# Embedding.create returns a response whose 'data' list holds one\n",
|
|
"# embedding object per input; take the vector for our single input.\n",
|
|
"embedding = openai.Embedding.create(\n",
|
|
" input=\"Your text goes here\", model=\"text-embedding-ada-002\"\n",
|
|
")[\"data\"][0][\"embedding\"]\n",
|
|
"# Length of the embedding vector -- 1536 dimensions for\n",
|
|
"# text-embedding-ada-002, per this cell's recorded output.\n",
|
|
"len(embedding)\n"
|
|
]
|
|
},
|
|
{
|
|
"attachments": {},
|
|
"cell_type": "markdown",
|
|
"metadata": {},
|
|
"source": [
|
|
"It's recommended to use the 'tenacity' package or another exponential backoff implementation to better manage API rate limits, since sending too many requests too quickly can trigger them. Using the following function helps you get your embeddings as fast as possible."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"# Negative example (slow and rate-limited)\n",
|
|
"import openai\n",
|
|
"\n",
|
|
"num_embeddings = 10000 # Some large number\n",
|
|
"# One synchronous request per text with no backoff: a burst of calls\n",
|
|
"# like this can trigger the API's rate limits (see the note above).\n",
|
|
"for i in range(num_embeddings):\n",
|
|
" embedding = openai.Embedding.create(\n",
|
|
" input=\"Your text goes here\", model=\"text-embedding-ada-002\"\n",
|
|
" )[\"data\"][0][\"embedding\"]\n",
|
|
" print(len(embedding))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 2,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"1536\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"# Best practice\n",
|
|
"import openai\n",
|
|
"from tenacity import retry, wait_random_exponential, stop_after_attempt\n",
|
|
"\n",
|
|
"# Retry up to 6 times with exponential backoff, starting at 1 second and maxing out at 20 seconds delay\n",
|
|
"@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\n",
|
|
"def get_embedding(text: str, model=\"text-embedding-ada-002\") -> list[float]:\n",
|
|
" \"\"\"Return the embedding vector for `text`, retrying failed API calls with backoff.\"\"\"\n",
|
|
" return openai.Embedding.create(input=[text], model=model)[\"data\"][0][\"embedding\"]\n",
|
|
"\n",
|
|
"embedding = get_embedding(\"Your text goes here\", model=\"text-embedding-ada-002\")\n",
|
|
"print(len(embedding))"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3.9.9 ('openai')",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.9.9"
|
|
},
|
|
"orig_nbformat": 4,
|
|
"vscode": {
|
|
"interpreter": {
|
|
"hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
|
|
}
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 2
|
|
}
|