updates to coloring, and intro paragraph (#945)

pull/947/head
jhills20 authored 5 months ago, committed by GitHub
parent d3f79a2130
commit 1abc529895

@@ -26,7 +26,10 @@
"* `logprobs` can assist with self-evaluation in retrieval applications. In the Q&A example, the model outputs a contrived `has_sufficient_context_for_answer` boolean, which can serve as a confidence score of whether the answer is contained in the retrieved content. Evaluations of this type can reduce retrieval-based hallucinations and enhance accuracy.\n",
"\n",
"3. Autocomplete\n",
"* `logprobs` could help us decide how to suggest words as a user is typing."
"* `logprobs` could help us decide how to suggest words as a user is typing.\n",
"\n",
"4. Token highlighting and outputting bytes\\n\",\n",
"* Users can easily create a token highlighter using the built in tokenization that comes with enabling `logprobs`. Additionally, the bytes parameter includes the ASCII encoding of each output character, which is particularly useful for reproducing emojis and special characters.\""
]
},
{
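Throughout the notebook, each logprob is converted to a linear probability with exp() before printing. A minimal sketch of that conversion, checked against the 74.91% classification output further down in this diff:

from math import exp

# A logprob is the natural log of the token's probability, so
# exponentiating recovers the linear probability.
def to_linear_probability(logprob: float) -> float:
    return round(exp(logprob) * 100, 2)

print(to_linear_probability(-0.28890297))  # 74.91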
@@ -38,20 +41,22 @@
},
{
"cell_type": "code",
"execution_count": 122,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from openai import OpenAI\n",
"from math import exp\n",
"import numpy as np\n",
"from colorama import init, Fore\n",
"\n",
"\n",
"client = OpenAI()"
"client = OpenAI()\n"
]
},
{
"cell_type": "code",
"execution_count": 123,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -80,7 +85,7 @@
" params[\"tools\"] = tools\n",
"\n",
" completion = client.chat.completions.create(**params)\n",
" return completion"
" return completion\n"
]
},
{
@@ -108,15 +113,15 @@
},
{
"cell_type": "code",
"execution_count": 124,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"CLASSIFICATION_PROMPT = \"\"\"You will be given a headline of a news article. \n",
"CLASSIFICATION_PROMPT = \"\"\"You will be given a headline of a news article.\n",
"Classify the article into one of the following categories: Technology, Politics, Sports, and Art.\n",
"Return only the name of the category, and nothing else. \n",
"MAKE SURE your output is one of the four categories stated. \n",
"Article headline: {headline}\"\"\""
"Return only the name of the category, and nothing else.\n",
"MAKE SURE your output is one of the four categories stated.\n",
"Article headline: {headline}\"\"\"\n"
]
},
{
@@ -128,7 +133,7 @@
},
{
"cell_type": "code",
"execution_count": 125,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -136,12 +141,12 @@
" \"Tech Giant Unveils Latest Smartphone Model with Advanced Photo-Editing Features.\",\n",
" \"Local Mayor Launches Initiative to Enhance Urban Public Transport.\",\n",
" \"Tennis Champion Showcases Hidden Talents in Symphony Orchestra Debut\",\n",
"]"
"]\n"
]
},
{
"cell_type": "code",
"execution_count": 126,
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -182,7 +187,7 @@
},
{
"cell_type": "code",
"execution_count": 127,
"execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -191,20 +196,20 @@
"text": [
"\n",
"Headline: Tech Giant Unveils Latest Smartphone Model with Advanced Photo-Editing Features.\n",
"\u001b[96mOutput token 1:\u001b[0m Technology, \u001b[93mlogprobs:\u001b[0m -4.246537e-06, \u001b[95mlinear probability:\u001b[0m 100.0%\n",
"\u001b[96mOutput token 2:\u001b[0m Techn, \u001b[93mlogprobs:\u001b[0m -13.093754, \u001b[95mlinear probability:\u001b[0m 0.0%\n",
"\u001b[36mOutput token 1:\u001b[39m Technology, \u001b[33mlogprobs:\u001b[39m -1.9816675e-06, \u001b[35mlinear probability:\u001b[39m 100.0%\n",
"\u001b[36mOutput token 2:\u001b[39m Techn, \u001b[33mlogprobs:\u001b[39m -14.062502, \u001b[35mlinear probability:\u001b[39m 0.0%\n",
"\n",
"\n",
"\n",
"Headline: Local Mayor Launches Initiative to Enhance Urban Public Transport.\n",
"\u001b[96mOutput token 1:\u001b[0m Politics, \u001b[93mlogprobs:\u001b[0m -2.577686e-06, \u001b[95mlinear probability:\u001b[0m 100.0%\n",
"\u001b[96mOutput token 2:\u001b[0m Polit, \u001b[93mlogprobs:\u001b[0m -13.703128, \u001b[95mlinear probability:\u001b[0m 0.0%\n",
"\u001b[36mOutput token 1:\u001b[39m Politics, \u001b[33mlogprobs:\u001b[39m -3.650519e-06, \u001b[35mlinear probability:\u001b[39m 100.0%\n",
"\u001b[36mOutput token 2:\u001b[39m Technology, \u001b[33mlogprobs:\u001b[39m -13.015629, \u001b[35mlinear probability:\u001b[39m 0.0%\n",
"\n",
"\n",
"\n",
"Headline: Tennis Champion Showcases Hidden Talents in Symphony Orchestra Debut\n",
"\u001b[96mOutput token 1:\u001b[0m Art, \u001b[93mlogprobs:\u001b[0m -0.28890297, \u001b[95mlinear probability:\u001b[0m 74.91%\n",
"\u001b[96mOutput token 2:\u001b[0m Sports, \u001b[93mlogprobs:\u001b[0m -1.382653, \u001b[95mlinear probability:\u001b[0m 25.09%\n",
"\u001b[36mOutput token 1:\u001b[39m Art, \u001b[33mlogprobs:\u001b[39m -0.19579042, \u001b[35mlinear probability:\u001b[39m 82.22%\n",
"\u001b[36mOutput token 2:\u001b[39m Sports, \u001b[33mlogprobs:\u001b[39m -1.7270404, \u001b[35mlinear probability:\u001b[39m 17.78%\n",
"\n",
"\n"
]
@@ -222,11 +227,11 @@
" top_two_logprobs = API_RESPONSE.choices[0].logprobs.content[0].top_logprobs\n",
" for i, logprob in enumerate(top_two_logprobs, start=1):\n",
" print(\n",
" f\"\\033[96mOutput token {i}:\\033[0m {logprob.token}, \"\n",
" f\"\\033[93mlogprobs:\\033[0m {logprob.logprob}, \"\n",
" f\"\\033[95mlinear probability:\\033[0m {np.round(np.exp(logprob.logprob)*100,2)}%\"\n",
" f\"{Fore.CYAN}Output token {i}:{Fore.RESET} {logprob.token}, \"\n",
" f\"{Fore.YELLOW}logprobs:{Fore.RESET} {logprob.logprob}, \"\n",
" f\"{Fore.MAGENTA}linear probability:{Fore.RESET} {np.round(np.exp(logprob.logprob)*100,2)}%\"\n",
" )\n",
" print(\"\\n\")"
" print(\"\\n\")\n"
]
},
{
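This commit swaps the hard-coded ANSI escapes (\033[96m ... \033[0m) for colorama's Fore constants, which emit equivalent codes (Fore.CYAN is \x1b[36m, Fore.RESET is \x1b[39m) while keeping the format strings readable. A minimal sketch; note that init(), imported above, must be called once for the codes to render on Windows (elsewhere it is a no-op), and the cells shown here don't appear to call it:

from colorama import init, Fore

init()  # enables ANSI handling on Windows terminals; harmless elsewhere
print(Fore.CYAN + "Output token 1:" + Fore.RESET + " Technology")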
@@ -261,7 +266,7 @@
},
{
"cell_type": "code",
"execution_count": 128,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -282,9 +287,9 @@
"\n",
"# Questions that are not fully covered in the article\n",
"medium_questions = [\n",
" \"Did Lovelace collaborate closely with Charles Dickens\",\n",
" \"Did Lovelace collaborate with Charles Dickens\",\n",
" \"What concepts did Lovelace build with Charles Babbage\",\n",
"]"
"]\n"
]
},
{
@@ -296,49 +301,49 @@
},
{
"cell_type": "code",
"execution_count": 129,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"PROMPT = \"\"\"You retrieved this article: {article}. The question is: {question}. \n",
"PROMPT = \"\"\"You retrieved this article: {article}. The question is: {question}.\n",
"Before even answering the question, consider whether you have sufficient information in the article to answer the question fully.\n",
"Your output should JUST be the boolean true or false, of if you have sufficient information in the article to answer the question.\n",
"Respond with just one word, the boolean true or false. You must output the word 'True', or the word 'False', nothing else.\n",
"\"\"\""
"\"\"\"\n"
]
},
{
"cell_type": "code",
"execution_count": 130,
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1mQuestions clearly answered in article\u001b[0m\n",
"Questions clearly answered in article\n",
"\n",
"\u001b[92mQuestion:\u001b[0m What nationality was Ada Lovelace?\n",
"\u001b[96mhas_sufficient_context_for_answer:\u001b[0m True, \u001b[93mlogprobs:\u001b[0m -3.1281633e-07, \u001b[95mlinear probability:\u001b[0m 100.0% \n",
"\u001b[32mQuestion:\u001b[39m What nationality was Ada Lovelace?\n",
"\u001b[36mhas_sufficient_context_for_answer:\u001b[39m True, \u001b[33mlogprobs:\u001b[39m -3.1281633e-07, \u001b[35mlinear probability:\u001b[39m 100.0% \n",
"\n",
"\u001b[92mQuestion:\u001b[0m What was an important finding from Lovelace's seventh note?\n",
"\u001b[96mhas_sufficient_context_for_answer:\u001b[0m True, \u001b[93mlogprobs:\u001b[0m -2.1008714e-06, \u001b[95mlinear probability:\u001b[0m 100.0% \n",
"\u001b[32mQuestion:\u001b[39m What was an important finding from Lovelace's seventh note?\n",
"\u001b[36mhas_sufficient_context_for_answer:\u001b[39m True, \u001b[33mlogprobs:\u001b[39m -6.704273e-07, \u001b[35mlinear probability:\u001b[39m 100.0% \n",
"\n",
"\n",
"\n",
"\u001b[1mQuestions only partially covered in the article\u001b[0m\n",
"Questions only partially covered in the article\n",
"\n",
"\u001b[92mQuestion:\u001b[0m Did Lovelace collaborate closely with Charles Dickens\n",
"\u001b[96mhas_sufficient_context_for_answer:\u001b[0m False, \u001b[93mlogprobs:\u001b[0m -0.0076062703, \u001b[95mlinear probability:\u001b[0m 99.24% \n",
"\u001b[32mQuestion:\u001b[39m Did Lovelace collaborate with Charles Dickens\n",
"\u001b[36mhas_sufficient_context_for_answer:\u001b[39m True, \u001b[33mlogprobs:\u001b[39m -0.07655343, \u001b[35mlinear probability:\u001b[39m 92.63% \n",
"\n",
"\u001b[92mQuestion:\u001b[0m What concepts did Lovelace build with Charles Babbage\n",
"\u001b[96mhas_sufficient_context_for_answer:\u001b[0m True, \u001b[93mlogprobs:\u001b[0m -0.0065100174, \u001b[95mlinear probability:\u001b[0m 99.35% \n",
"\u001b[32mQuestion:\u001b[39m What concepts did Lovelace build with Charles Babbage\n",
"\u001b[36mhas_sufficient_context_for_answer:\u001b[39m True, \u001b[33mlogprobs:\u001b[39m -0.0699371, \u001b[35mlinear probability:\u001b[39m 93.25% \n",
"\n"
]
}
],
"source": [
"print(\"\\033[1mQuestions clearly answered in article\\033[0m\\n\")\n",
"print(\"Questions clearly answered in article\" + \"\\n\")\n",
"\n",
"for question in easy_questions:\n",
" API_RESPONSE = get_completion(\n",
@@ -353,14 +358,14 @@
" model=\"gpt-4\",\n",
" logprobs=True,\n",
" )\n",
" print(\"\\033[92mQuestion:\\033[0m\", question)\n",
" print(Fore.GREEN + \"Question:\" + Fore.RESET, question)\n",
" for logprob in API_RESPONSE.choices[0].logprobs.content:\n",
" print(\n",
" f\"\\033[96mhas_sufficient_context_for_answer:\\033[0m {logprob.token}, \\033[93mlogprobs:\\033[0m {logprob.logprob}, \\033[95mlinear probability:\\033[0m {np.round(np.exp(logprob.logprob)*100,2)}%\",\n",
" Fore.CYAN + \"has_sufficient_context_for_answer:\" + Fore.RESET + f\" {logprob.token}, \" + Fore.YELLOW + \"logprobs:\" + Fore.RESET + f\" {logprob.logprob}, \" + Fore.MAGENTA + \"linear probability:\" + Fore.RESET + f\" {np.round(np.exp(logprob.logprob)*100,2)}%\",\n",
" \"\\n\",\n",
" )\n",
"\n",
"print(\"\\n\\n\\033[1mQuestions only partially covered in the article\\033[0m\\n\")\n",
"print(\"\\n\\n\" + \"Questions only partially covered in the article\" + \"\\n\")\n",
"\n",
"for question in medium_questions:\n",
" API_RESPONSE = get_completion(\n",
@@ -376,12 +381,12 @@
" logprobs=True,\n",
" top_logprobs=3,\n",
" )\n",
" print(\"\\033[92mQuestion:\\033[0m\", question)\n",
" print(Fore.GREEN + \"Question:\" + Fore.RESET, question)\n",
" for logprob in API_RESPONSE.choices[0].logprobs.content:\n",
" print(\n",
" f\"\\033[96mhas_sufficient_context_for_answer:\\033[0m {logprob.token}, \\033[93mlogprobs:\\033[0m {logprob.logprob}, \\033[95mlinear probability:\\033[0m {np.round(np.exp(logprob.logprob)*100,2)}%\",\n",
" Fore.CYAN + \"has_sufficient_context_for_answer:\" + Fore.RESET + f\" {logprob.token}, \" + Fore.YELLOW + \"logprobs:\" + Fore.RESET + f\" {logprob.logprob}, \" + Fore.MAGENTA + \"linear probability:\" + Fore.RESET + f\" {np.round(np.exp(logprob.logprob)*100,2)}%\",\n",
" \"\\n\",\n",
" )"
" )\n"
]
},
{
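The has_sufficient_context_for_answer outputs above lend themselves to a simple guardrail: only trust the retrieved context when the model emits "True" with high probability. A hypothetical helper (the threshold is illustrative), assuming the response shape used throughout this notebook:

import numpy as np

def has_sufficient_context(api_response, threshold: float = 0.98) -> bool:
    # The first output token carries the model's True/False self-evaluation.
    first = api_response.choices[0].logprobs.content[0]
    return first.token == "True" and np.exp(first.logprob) >= threshold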
@@ -416,7 +421,7 @@
},
{
"cell_type": "code",
"execution_count": 131,
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
@@ -428,7 +433,7 @@
" \"My least favorite TV show\",\n",
" \"My least favorite TV show is\",\n",
" \"My least favorite TV show is Breaking Bad\",\n",
"]"
"]\n"
]
},
{
@@ -440,53 +445,53 @@
},
{
"cell_type": "code",
"execution_count": 132,
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sentence: My\n",
"\u001b[96mPredicted next token:\u001b[0m favorite, \u001b[93mlogprobs:\u001b[0m -0.18245785, \u001b[95mlinear probability:\u001b[0m 83.32%\n",
"\u001b[96mPredicted next token:\u001b[0m dog, \u001b[93mlogprobs:\u001b[0m -2.397172, \u001b[95mlinear probability:\u001b[0m 9.1%\n",
"\u001b[96mPredicted next token:\u001b[0m ap, \u001b[93mlogprobs:\u001b[0m -3.8732424, \u001b[95mlinear probability:\u001b[0m 2.08%\n",
"\u001b[36mSentence:\u001b[39m My\n",
"\u001b[36mPredicted next token:\u001b[39m favorite, \u001b[33mlogprobs:\u001b[39m -0.18245785, \u001b[35mlinear probability:\u001b[39m 83.32%\n",
"\u001b[36mPredicted next token:\u001b[39m dog, \u001b[33mlogprobs:\u001b[39m -2.397172, \u001b[35mlinear probability:\u001b[39m 9.1%\n",
"\u001b[36mPredicted next token:\u001b[39m ap, \u001b[33mlogprobs:\u001b[39m -3.8732424, \u001b[35mlinear probability:\u001b[39m 2.08%\n",
"\n",
"\n",
"Sentence: My least\n",
"\u001b[96mPredicted next token:\u001b[0m favorite, \u001b[93mlogprobs:\u001b[0m -0.013642592, \u001b[95mlinear probability:\u001b[0m 98.65%\n",
"\u001b[96mPredicted next token:\u001b[0m My, \u001b[93mlogprobs:\u001b[0m -4.3126197, \u001b[95mlinear probability:\u001b[0m 1.34%\n",
"\u001b[96mPredicted next token:\u001b[0m favorite, \u001b[93mlogprobs:\u001b[0m -9.684484, \u001b[95mlinear probability:\u001b[0m 0.01%\n",
"\u001b[36mSentence:\u001b[39m My least\n",
"\u001b[36mPredicted next token:\u001b[39m favorite, \u001b[33mlogprobs:\u001b[39m -0.01722952, \u001b[35mlinear probability:\u001b[39m 98.29%\n",
"\u001b[36mPredicted next token:\u001b[39m My, \u001b[33mlogprobs:\u001b[39m -4.079079, \u001b[35mlinear probability:\u001b[39m 1.69%\n",
"\u001b[36mPredicted next token:\u001b[39m favorite, \u001b[33mlogprobs:\u001b[39m -9.6813755, \u001b[35mlinear probability:\u001b[39m 0.01%\n",
"\n",
"\n",
"Sentence: My least favorite\n",
"\u001b[96mPredicted next token:\u001b[0m food, \u001b[93mlogprobs:\u001b[0m -0.919063, \u001b[95mlinear probability:\u001b[0m 39.89%\n",
"\u001b[96mPredicted next token:\u001b[0m color, \u001b[93mlogprobs:\u001b[0m -1.2944425, \u001b[95mlinear probability:\u001b[0m 27.41%\n",
"\u001b[96mPredicted next token:\u001b[0m My, \u001b[93mlogprobs:\u001b[0m -1.5212015, \u001b[95mlinear probability:\u001b[0m 21.84%\n",
"\u001b[36mSentence:\u001b[39m My least favorite\n",
"\u001b[36mPredicted next token:\u001b[39m food, \u001b[33mlogprobs:\u001b[39m -0.9481721, \u001b[35mlinear probability:\u001b[39m 38.74%\n",
"\u001b[36mPredicted next token:\u001b[39m My, \u001b[33mlogprobs:\u001b[39m -1.3447137, \u001b[35mlinear probability:\u001b[39m 26.06%\n",
"\u001b[36mPredicted next token:\u001b[39m color, \u001b[33mlogprobs:\u001b[39m -1.3887696, \u001b[35mlinear probability:\u001b[39m 24.94%\n",
"\n",
"\n",
"Sentence: My least favorite TV\n",
"\u001b[96mPredicted next token:\u001b[0m show, \u001b[93mlogprobs:\u001b[0m -0.0007898556, \u001b[95mlinear probability:\u001b[0m 99.92%\n",
"\u001b[96mPredicted next token:\u001b[0m My, \u001b[93mlogprobs:\u001b[0m -7.711523, \u001b[95mlinear probability:\u001b[0m 0.04%\n",
"\u001b[96mPredicted next token:\u001b[0m series, \u001b[93mlogprobs:\u001b[0m -9.348547, \u001b[95mlinear probability:\u001b[0m 0.01%\n",
"\u001b[36mSentence:\u001b[39m My least favorite TV\n",
"\u001b[36mPredicted next token:\u001b[39m show, \u001b[33mlogprobs:\u001b[39m -0.0007898556, \u001b[35mlinear probability:\u001b[39m 99.92%\n",
"\u001b[36mPredicted next token:\u001b[39m My, \u001b[33mlogprobs:\u001b[39m -7.711523, \u001b[35mlinear probability:\u001b[39m 0.04%\n",
"\u001b[36mPredicted next token:\u001b[39m series, \u001b[33mlogprobs:\u001b[39m -9.348547, \u001b[35mlinear probability:\u001b[39m 0.01%\n",
"\n",
"\n",
"Sentence: My least favorite TV show\n",
"\u001b[96mPredicted next token:\u001b[0m is, \u001b[93mlogprobs:\u001b[0m -0.2851253, \u001b[95mlinear probability:\u001b[0m 75.19%\n",
"\u001b[96mPredicted next token:\u001b[0m of, \u001b[93mlogprobs:\u001b[0m -1.55335, \u001b[95mlinear probability:\u001b[0m 21.15%\n",
"\u001b[96mPredicted next token:\u001b[0m My, \u001b[93mlogprobs:\u001b[0m -3.4928775, \u001b[95mlinear probability:\u001b[0m 3.04%\n",
"\u001b[36mSentence:\u001b[39m My least favorite TV show\n",
"\u001b[36mPredicted next token:\u001b[39m is, \u001b[33mlogprobs:\u001b[39m -0.18602066, \u001b[35mlinear probability:\u001b[39m 83.03%\n",
"\u001b[36mPredicted next token:\u001b[39m of, \u001b[33mlogprobs:\u001b[39m -2.0780265, \u001b[35mlinear probability:\u001b[39m 12.52%\n",
"\u001b[36mPredicted next token:\u001b[39m My, \u001b[33mlogprobs:\u001b[39m -3.271426, \u001b[35mlinear probability:\u001b[39m 3.8%\n",
"\n",
"\n",
"Sentence: My least favorite TV show is\n",
"\u001b[96mPredicted next token:\u001b[0m \"My, \u001b[93mlogprobs:\u001b[0m -0.69349754, \u001b[95mlinear probability:\u001b[0m 49.98%\n",
"\u001b[96mPredicted next token:\u001b[0m \"The, \u001b[93mlogprobs:\u001b[0m -1.2899293, \u001b[95mlinear probability:\u001b[0m 27.53%\n",
"\u001b[96mPredicted next token:\u001b[0m My, \u001b[93mlogprobs:\u001b[0m -2.4170141, \u001b[95mlinear probability:\u001b[0m 8.92%\n",
"\u001b[36mSentence:\u001b[39m My least favorite TV show is\n",
"\u001b[36mPredicted next token:\u001b[39m \"My, \u001b[33mlogprobs:\u001b[39m -0.77423567, \u001b[35mlinear probability:\u001b[39m 46.11%\n",
"\u001b[36mPredicted next token:\u001b[39m \"The, \u001b[33mlogprobs:\u001b[39m -1.2854586, \u001b[35mlinear probability:\u001b[39m 27.65%\n",
"\u001b[36mPredicted next token:\u001b[39m My, \u001b[33mlogprobs:\u001b[39m -2.2629042, \u001b[35mlinear probability:\u001b[39m 10.4%\n",
"\n",
"\n",
"Sentence: My least favorite TV show is Breaking Bad\n",
"\u001b[96mPredicted next token:\u001b[0m because, \u001b[93mlogprobs:\u001b[0m -0.15848404, \u001b[95mlinear probability:\u001b[0m 85.34%\n",
"\u001b[96mPredicted next token:\u001b[0m ,, \u001b[93mlogprobs:\u001b[0m -2.4646707, \u001b[95mlinear probability:\u001b[0m 8.5%\n",
"\u001b[96mPredicted next token:\u001b[0m ., \u001b[93mlogprobs:\u001b[0m -3.3132863, \u001b[95mlinear probability:\u001b[0m 3.64%\n",
"\u001b[36mSentence:\u001b[39m My least favorite TV show is Breaking Bad\n",
"\u001b[36mPredicted next token:\u001b[39m because, \u001b[33mlogprobs:\u001b[39m -0.16519119, \u001b[35mlinear probability:\u001b[39m 84.77%\n",
"\u001b[36mPredicted next token:\u001b[39m ,, \u001b[33mlogprobs:\u001b[39m -2.430881, \u001b[35mlinear probability:\u001b[39m 8.8%\n",
"\u001b[36mPredicted next token:\u001b[39m ., \u001b[33mlogprobs:\u001b[39m -3.2097907, \u001b[35mlinear probability:\u001b[39m 4.04%\n",
"\n",
"\n"
]
@@ -504,12 +509,11 @@
" logprobs=True,\n",
" top_logprobs=3,\n",
" )\n",
" # for next_token in API_RESPONSE.choices[0].logprobs.content[0]:\n",
" print(\"Sentence:\", sentence)\n",
" print(Fore.CYAN + \"Sentence:\" + Fore.RESET, sentence)\n",
" first_token = True\n",
" for token in API_RESPONSE.choices[0].logprobs.content[0].top_logprobs:\n",
" print(\n",
" f\"\\033[96mPredicted next token:\\033[0m {token.token}, \\033[93mlogprobs:\\033[0m {token.logprob}, \\033[95mlinear probability:\\033[0m {np.round(np.exp(token.logprob)*100,2)}%\"\n",
" Fore.CYAN + \"Predicted next token:\" + Fore.RESET + f\" {token.token}, \" + Fore.YELLOW + \"logprobs:\" + Fore.RESET + f\" {token.logprob}, \" + Fore.MAGENTA + \"linear probability:\" + Fore.RESET + f\" {np.round(np.exp(token.logprob)*100,2)}%\"\n",
" )\n",
" if first_token:\n",
" if np.exp(token.logprob) > 0.95:\n",
@@ -517,7 +521,7 @@
" if np.exp(token.logprob) < 0.60:\n",
" low_prob_completions[sentence] = token.token\n",
" first_token = False\n",
" print(\"\\n\")"
" print(\"\\n\")\n"
]
},
{
@@ -529,7 +533,7 @@
},
{
"cell_type": "code",
"execution_count": 133,
"execution_count": 18,
"metadata": {},
"outputs": [
{
@@ -538,13 +542,13 @@
"{'My least': 'favorite', 'My least favorite TV': 'show'}"
]
},
"execution_count": 133,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"high_prob_completions"
"high_prob_completions\n"
]
},
{
@@ -556,7 +560,7 @@
},
{
"cell_type": "code",
"execution_count": 134,
"execution_count": 19,
"metadata": {},
"outputs": [
{
@@ -565,13 +569,13 @@
"{'My least favorite': 'food', 'My least favorite TV show is': '\"My'}"
]
},
"execution_count": 134,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"low_prob_completions"
"low_prob_completions\n"
]
},
{
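The two buckets above suggest how an autocomplete UI could use this signal: surface a suggestion only when the top candidate clears the same 95% bar the notebook uses, and stay quiet otherwise. A hypothetical sketch:

import numpy as np

def suggest_next_token(api_response, min_prob: float = 0.95):
    # Candidates in top_logprobs are ordered from most to least likely.
    top = api_response.choices[0].logprobs.content[0].top_logprobs[0]
    return top.token if np.exp(top.logprob) > min_prob else None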
@@ -598,7 +602,7 @@
},
{
"cell_type": "code",
"execution_count": 135,
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
@@ -608,47 +612,45 @@
" [{\"role\": \"user\", \"content\": PROMPT}], model=\"gpt-4\", logprobs=True, top_logprobs=5\n",
")\n",
"\n",
"# Function to highlight each token\n",
"def highlight_text(api_response):\n",
" # ANSI codes for different colors\n",
" colors = [\n",
" \"\\033[95m\", # Purple\n",
" \"\\033[92m\", # Green\n",
" \"\\033[93m\", # Orange\n",
" \"\\033[91m\", # Red\n",
" \"\\033[94m\", # Blue\n",
" Fore.MAGENTA,\n",
" Fore.GREEN,\n",
" Fore.YELLOW,\n",
" Fore.RED,\n",
" Fore.BLUE,\n",
" ]\n",
" reset_color = \"\\033[0m\" # ANSI code to reset color\n",
" tokens = api_response.choices[0].logprobs.content # Extract tokens from the API response\n",
" reset_color = Fore.RESET\n",
" tokens = api_response.choices[0].logprobs.content\n",
"\n",
" color_idx = 0 # Initialize color index\n",
" for t in tokens:\n",
" token_str = bytes(t.bytes).decode(\"utf-8\") # Decode bytes to string\n",
" # Print each token in a different color\n",
"\n",
" print(f\"{colors[color_idx]}{token_str}{reset_color}\", end=\"\")\n",
"\n",
" # Move to the next color in the sequence, wrapping around if necessary\n",
" # Move to the next color\n",
" color_idx = (color_idx + 1) % len(colors)\n",
" print() # Print a newline for readability\n",
" print(f\"Total number of tokens: {len(tokens)}\") # Print the total number of tokens"
" print()\n",
" print(f\"Total number of tokens: {len(tokens)}\")\n"
]
},
{
"cell_type": "code",
"execution_count": 136,
"execution_count": 21,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[95mThe\u001b[0m\u001b[92m longest\u001b[0m\u001b[93m word\u001b[0m\u001b[91m in\u001b[0m\u001b[94m the\u001b[0m\u001b[95m English\u001b[0m\u001b[92m language\u001b[0m\u001b[93m,\u001b[0m\u001b[91m according\u001b[0m\u001b[94m to\u001b[0m\u001b[95m the\u001b[0m\u001b[92m Guinness\u001b[0m\u001b[93m World\u001b[0m\u001b[91m Records\u001b[0m\u001b[94m,\u001b[0m\u001b[95m is\u001b[0m\u001b[92m '\u001b[0m\u001b[93mp\u001b[0m\u001b[91mne\u001b[0m\u001b[94mum\u001b[0m\u001b[95mon\u001b[0m\u001b[92moul\u001b[0m\u001b[93mtram\u001b[0m\u001b[91micro\u001b[0m\u001b[94msc\u001b[0m\u001b[95mop\u001b[0m\u001b[92mics\u001b[0m\u001b[93mil\u001b[0m\u001b[91mic\u001b[0m\u001b[94mov\u001b[0m\u001b[95mol\u001b[0m\u001b[92mcano\u001b[0m\u001b[93mcon\u001b[0m\u001b[91miosis\u001b[0m\u001b[94m'.\u001b[0m\u001b[95m It\u001b[0m\u001b[92m is\u001b[0m\u001b[93m a\u001b[0m\u001b[91m type\u001b[0m\u001b[94m of\u001b[0m\u001b[95m lung\u001b[0m\u001b[92m disease\u001b[0m\u001b[93m caused\u001b[0m\u001b[91m by\u001b[0m\u001b[94m inh\u001b[0m\u001b[95maling\u001b[0m\u001b[92m ash\u001b[0m\u001b[93m and\u001b[0m\u001b[91m sand\u001b[0m\u001b[94m dust\u001b[0m\u001b[95m.\u001b[0m\n",
"\u001b[35mThe\u001b[39m\u001b[32m longest\u001b[39m\u001b[33m word\u001b[39m\u001b[31m in\u001b[39m\u001b[34m the\u001b[39m\u001b[35m English\u001b[39m\u001b[32m language\u001b[39m\u001b[33m,\u001b[39m\u001b[31m according\u001b[39m\u001b[34m to\u001b[39m\u001b[35m the\u001b[39m\u001b[32m Guinness\u001b[39m\u001b[33m World\u001b[39m\u001b[31m Records\u001b[39m\u001b[34m,\u001b[39m\u001b[35m is\u001b[39m\u001b[32m '\u001b[39m\u001b[33mp\u001b[39m\u001b[31mne\u001b[39m\u001b[34mum\u001b[39m\u001b[35mon\u001b[39m\u001b[32moul\u001b[39m\u001b[33mtram\u001b[39m\u001b[31micro\u001b[39m\u001b[34msc\u001b[39m\u001b[35mop\u001b[39m\u001b[32mics\u001b[39m\u001b[33mil\u001b[39m\u001b[31mic\u001b[39m\u001b[34mov\u001b[39m\u001b[35mol\u001b[39m\u001b[32mcano\u001b[39m\u001b[33mcon\u001b[39m\u001b[31miosis\u001b[39m\u001b[34m'.\u001b[39m\u001b[35m It\u001b[39m\u001b[32m is\u001b[39m\u001b[33m a\u001b[39m\u001b[31m type\u001b[39m\u001b[34m of\u001b[39m\u001b[35m lung\u001b[39m\u001b[32m disease\u001b[39m\u001b[33m caused\u001b[39m\u001b[31m by\u001b[39m\u001b[34m inh\u001b[39m\u001b[35maling\u001b[39m\u001b[32m ash\u001b[39m\u001b[33m and\u001b[39m\u001b[31m sand\u001b[39m\u001b[34m dust\u001b[39m\u001b[35m.\u001b[39m\n",
"Total number of tokens: 51\n"
]
}
],
"source": [
"highlight_text(API_RESPONSE)"
"highlight_text(API_RESPONSE)\n"
]
},
{
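highlight_text reassembles each token from its bytes field before printing; the same trick recovers multi-byte characters, such as emoji, whose UTF-8 bytes are split across tokens. A minimal sketch using the blue-heart bytes from the next cell:

# UTF-8 bytes for the blue heart emoji, split across two tokens below.
emoji_bytes = [240, 159, 146, 153]
print(bytes(emoji_bytes).decode("utf-8"))  # 💙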
@@ -660,7 +662,7 @@
},
{
"cell_type": "code",
"execution_count": 137,
"execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -668,8 +670,8 @@
"output_type": "stream",
"text": [
"Token: \\xf0\\x9f\\x92\n",
"Log prob: -0.00012856863\n",
"Linear prob: 99.99 %\n",
"Log prob: -0.0003056686\n",
"Linear prob: 99.97 %\n",
"Bytes: [240, 159, 146] \n",
"\n",
"Token: \\x99\n",
@@ -678,23 +680,23 @@
"Bytes: [153] \n",
"\n",
"Token: -\n",
"Log prob: -0.009299593\n",
"Linear prob: 99.07 %\n",
"Log prob: -0.011257432\n",
"Linear prob: 98.88 %\n",
"Bytes: [32, 45] \n",
"\n",
"Token: Blue\n",
"Log prob: -0.0002700377\n",
"Linear prob: 99.97 %\n",
"Log prob: -0.0004397287\n",
"Linear prob: 99.96 %\n",
"Bytes: [32, 66, 108, 117, 101] \n",
"\n",
"Token: Heart\n",
"Log prob: -5.3596854e-05\n",
"Log prob: -7.1954215e-05\n",
"Linear prob: 99.99 %\n",
"Bytes: [32, 72, 101, 97, 114, 116] \n",
"\n",
"Bytes array: [240, 159, 146, 153, 32, 45, 32, 66, 108, 117, 101, 32, 72, 101, 97, 114, 116]\n",
"Decoded bytes: 💙 - Blue Heart\n",
"Joint prob: 99.03 %\n"
"Joint prob: 98.8 %\n"
]
}
],
@@ -725,7 +727,7 @@
"# Print the results\n",
"print(\"Bytes array:\", aggregated_bytes)\n",
"print(f\"Decoded bytes: {aggregated_text}\")\n",
"print(\"Joint prob:\", np.round(exp(joint_logprob) * 100, 2), \"%\")"
"print(\"Joint prob:\", np.round(exp(joint_logprob) * 100, 2), \"%\")\n"
]
},
{
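The joint probability above follows from log(ab) = log(a) + log(b): sum the per-token logprobs and exponentiate. A sketch using the updated values from this diff (the 0.0 for the second token comes from the unchanged context lines):

from math import exp

token_logprobs = [-0.0003056686, 0.0, -0.011257432, -0.0004397287, -7.1954215e-05]
print(round(exp(sum(token_logprobs)) * 100, 2), "%")  # 98.8 %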
@@ -788,7 +790,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.9.16"
}
},
"nbformat": 4,
