Migrate all notebooks to API V1 (#914)
Co-authored-by: ayush rajgor <ayushrajgorar@gmail.com>
parent d891437737
commit 2c441ab9a2
@@ -1,189 +0,0 @@
from typing import List, Tuple, Union

from smokey import Smokey

import openai


def get_candidates(
    prompt: str,
    stop: List[str],
    temperature: float,
    priming_prefix: str,
    engine: str,
    n: int = 5,
) -> List[str]:
    """
    Generate N candidate completions based on the prompt, generated with a specific temperature.

    :param prompt: The prompt to start the conversation with.
    :param stop: A list of tokens that indicate the end of the generation.
    :param temperature: The temperature of the generation.
    :param priming_prefix: The prefix to use for the priming.
    :param engine: The engine to use for the generation.
    :param n: The number of completions to generate.
    :return: A list of completions.
    """
    response = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        temperature=temperature,
        max_tokens=150,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        stop=stop,
        n=n,
    )
    responses = [priming_prefix + choice.text for choice in response.choices]
    return responses


def rindex(lst: List, value: str) -> int:
    """
    Return the index of the last occurrence of a value in a list.

    :param lst: The list to search in.
    :param value: The value to search for.
    :return: The index of the last occurrence of the value.
    """
    try:
        return len(lst) - lst[::-1].index(value) - 1
    except ValueError:
        raise ValueError(f"Answer start token `{value}` not found in the eval template")


def eval_candidate(
    candidate_answer: str,
    original_instruction: str,
    eval_template: str,
    answer_start_token: str,
    engine: str,
) -> float:
    """
    Evaluate a candidate answer by calculating the average log probability
    of the original instruction, given the candidate answer with a specific
    evaluation template, aimed at reconstructing the original instruction.

    :param candidate_answer: The candidate answer to evaluate.
    :param original_instruction: The original instruction.
    :param eval_template: The template to use for the evaluation.
    :param answer_start_token: The token to use to indicate the start of the answer.
    :param engine: The engine to use for the evaluation.
    :return: The evaluation of the candidate answer.
    """
    response = openai.Completion.create(
        engine=engine,
        prompt=eval_template.format(candidate_answer, original_instruction),
        temperature=0,
        max_tokens=0,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        logprobs=1,
        echo=True,
    )

    answer_start = rindex(
        response["choices"][0]["logprobs"]["tokens"], answer_start_token
    )
    logprobs = response["choices"][0]["logprobs"]["token_logprobs"][answer_start + 1 :]
    return sum(logprobs) / len(logprobs)


def backtranslation(
    prompt_template: str,
    additional_info: str,
    instruction: str,
    eval_template: str,
    priming_prefix: str = "SELECT",
    stop1: List[str] = ["#", ";"],
    answer_start_token: str = "--",
    n: int = 5,
    temperature: float = 0.5,
    return_all_results: bool = False,
    engine: str = "davinci-codex",
) -> Union[str, List[Tuple[str, float]]]:
    """
    Generate a number of SQL queries given a natural language instruction,
    and pick the best one based on the average log probability of explaining the
    candidate SQL query with the exact original instruction, when prompted for
    a natural language explanation of the candidate SQL query.

    :param prompt_template: The template to use for the prompt to generate SQL.
    :param additional_info: Additional information to include in the prompt
        (SQL Tables, and their properties).
    :param instruction: The instruction in natural language.
    :param eval_template: The template to use for the evaluation.
    :param priming_prefix: The prefix to use for the priming of the SQL query.
    :param stop1: A list of tokens that indicate the end of the generation.
    :param answer_start_token: The token to use to indicate the start of the
        natural answer.
    :param n: The number of candidates to generate.
    :param temperature: The temperature of the generation.
    :param return_all_results: Whether to return all results or just the best one.
    :param engine: The engine to use for the generation and evaluation.
    :return: The best SQL query, or a list of all scored generated SQL queries.
    """
    prompt_template = prompt_template.format(
        additional_info, instruction, priming_prefix
    )

    candidates = []
    responses = get_candidates(
        prompt_template, stop1, temperature, priming_prefix, engine=engine, n=n
    )
    for i in range(n):
        quality = eval_candidate(
            responses[i],
            instruction,
            eval_template,
            answer_start_token,
            engine=engine,
        )
        candidates.append((responses[i], quality))

    candidates.sort(key=lambda x: x[1], reverse=True)
    if return_all_results:
        return candidates
    return candidates[0][0]


def main(
    nl_query: str = "Return the name of each department that had more than 10 employees in June 2021",
    eval_template: str = "{};\n-- Explanation of the above query in human readable format\n-- {}",
    table_definitions: str = "# Employee(id, name, department_id)\n# Department(id, name, address)\n# Salary_Payments(id, employee_id, amount, date)\n",
    prompt_template: str = "### Postgres SQL tables, with their properties:\n#\n{}#\n### {}\n{}",
    n: int = 3,
    temperature: float = 0.3,
    engine: str = "davinci-codex",
):
    """
    Generate a number of SQL queries given a natural language instruction,
    and pick the best one based on the highest backtranslation score.

    :param nl_query: The natural language query.
    :param eval_template: The template to use for the evaluation.
    :param table_definitions: The definitions of the tables used in the query.
    :param prompt_template: The template to use for the prompt to generate SQL.
    :param n: The number of candidates to generate.
    :param temperature: The temperature of the generation.
    :param engine: The engine to use for the generation and evaluation.
    :return: The best SQL query, or a list of all scored generated SQL queries.
    """

    result = backtranslation(
        prompt_template,
        table_definitions,
        nl_query,
        eval_template,
        priming_prefix="SELECT",
        temperature=temperature,
        n=n,
        engine=engine,
    )
    print(result)


if __name__ == "__main__":
    Smokey(main)
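The file above uses the pre-1.0 module-level `openai.Completion.create` call with an `engine` argument. Under the v1 SDK that this PR migrates the notebooks to, the same request goes through a client object and a `model` argument. A minimal sketch of what the `get_candidates` call would look like after migration, assuming `gpt-3.5-turbo-instruct` as a stand-in for the retired `davinci-codex`:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# v1 equivalent of the Completion.create call in get_candidates above:
# `engine` becomes `model`, and the response is a typed object, not a dict.
response = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # assumption: stand-in for the retired davinci-codex
    prompt="### Postgres SQL tables, with their properties:\n#\n# Employee(id, name, department_id)\n#\n### Return the names of all employees\nSELECT",
    temperature=0.5,
    max_tokens=150,
    stop=["#", ";"],
    n=5,
)
candidates = ["SELECT" + choice.text for choice in response.choices]
```

The `choice.text` attribute access is unchanged between the two SDK versions, so the rest of the function needs only the client construction and the `engine` to `model` rename.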
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,452 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Unit test writing using a multi-step prompt (with the older API)\n",
    "\n",
    "Complex tasks, such as writing unit tests, can benefit from multi-step prompts. In contrast to a single prompt, a multi-step prompt generates text from GPT-3 and then feeds that text back into subsequent prompts. This can help in cases where you want GPT-3 to explain its reasoning before answering, or brainstorm a plan before executing it.\n",
    "\n",
    "In this notebook, we use a 3-step prompt to write unit tests in Python using the following steps:\n",
    "\n",
    "1. Given a Python function, we first prompt GPT-3 to explain what the function is doing.\n",
    "2. Second, we prompt GPT-3 to plan a set of unit tests for the function.\n",
    "    - If the plan is too short, we ask GPT-3 to elaborate with more ideas for unit tests.\n",
    "3. Finally, we prompt GPT-3 to write the unit tests.\n",
    "\n",
    "The code example illustrates a few optional embellishments on the chained, multi-step prompt:\n",
    "\n",
    "- Conditional branching (e.g., only asking for elaboration if the first plan is too short)\n",
    "- Different models for different steps (e.g., `text-davinci-002` for the text planning steps and `code-davinci-002` for the code writing step)\n",
    "- A check that re-runs the function if the output is unsatisfactory (e.g., if the output code cannot be parsed by Python's `ast` module)\n",
    "- Streaming output so that you can start reading the output before it's fully generated (useful for long, multi-step outputs)\n",
    "\n",
    "The full 3-step prompt looks like this (using as an example `pytest` for the unit test framework and `is_palindrome` as the function):\n",
    "\n",
    "    # How to write great unit tests with pytest\n",
    "\n",
    "    In this advanced tutorial for experts, we'll use Python 3.9 and `pytest` to write a suite of unit tests to verify the behavior of the following function.\n",
    "    ```python\n",
    "    def is_palindrome(s):\n",
    "        return s == s[::-1]\n",
    "    ```\n",
    "\n",
    "    Before writing any unit tests, let's review what each element of the function is doing exactly and what the author's intentions may have been.\n",
    "    - First,{GENERATED IN STEP 1}\n",
    "    \n",
    "    A good unit test suite should aim to:\n",
    "    - Test the function's behavior for a wide range of possible inputs\n",
    "    - Test edge cases that the author may not have foreseen\n",
    "    - Take advantage of the features of `pytest` to make the tests easy to write and maintain\n",
    "    - Be easy to read and understand, with clean code and descriptive names\n",
    "    - Be deterministic, so that the tests always pass or fail in the same way\n",
    "\n",
    "    `pytest` has many convenient features that make it easy to write and maintain unit tests. We'll use them to write unit tests for the function above.\n",
    "\n",
    "    For this particular function, we'll want our unit tests to handle the following diverse scenarios (and under each scenario, we include a few examples as sub-bullets):\n",
    "    -{GENERATED IN STEP 2}\n",
    "\n",
    "    [OPTIONALLY APPENDED]In addition to the scenarios above, we'll also want to make sure we don't forget to test rare or unexpected edge cases (and under each edge case, we include a few examples as sub-bullets):\n",
    "    -{GENERATED IN STEP 2B}\n",
    "\n",
    "    Before going into the individual tests, let's first look at the complete suite of unit tests as a cohesive whole. We've added helpful comments to explain what each line does.\n",
    "    ```python\n",
    "    import pytest  # used for our unit tests\n",
    "\n",
    "    def is_palindrome(s):\n",
    "        return s == s[::-1]\n",
    "\n",
    "    #Below, each test case is represented by a tuple passed to the @pytest.mark.parametrize decorator\n",
    "    {GENERATED IN STEP 3}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# imports needed to run the code in this notebook\n",
    "import ast  # used for detecting whether generated Python code is valid\n",
    "import openai  # used for calling the OpenAI API\n",
    "\n",
    "# example of a function that uses a multi-step prompt to write unit tests\n",
    "def unit_test_from_function(\n",
    "    function_to_test: str,  # Python function to test, as a string\n",
    "    unit_test_package: str = \"pytest\",  # unit testing package; use the name as it appears in the import statement\n",
    "    approx_min_cases_to_cover: int = 7,  # minimum number of test case categories to cover (approximate)\n",
    "    print_text: bool = False,  # optionally prints text; helpful for understanding the function & debugging\n",
    "    text_model: str = \"text-davinci-002\",  # model used to generate text plans in steps 1, 2, and 2b\n",
    "    code_model: str = \"code-davinci-002\",  # if you don't have access to code models, you can use text models here instead\n",
    "    max_tokens: int = 1000,  # can set this high, as generations should be stopped earlier by stop sequences\n",
    "    temperature: float = 0.4,  # temperature = 0 can sometimes get stuck in repetitive loops, so we use 0.4\n",
    "    reruns_if_fail: int = 1,  # if the output code cannot be parsed, this will re-run the function up to N times\n",
    ") -> str:\n",
    "    \"\"\"Outputs a unit test for a given Python function, using a 3-step GPT-3 prompt.\"\"\"\n",
    "\n",
    "    # Step 1: Generate an explanation of the function\n",
    "\n",
    "    # create a markdown-formatted prompt that asks GPT-3 to complete an explanation of the function, formatted as a bullet list\n",
    "    prompt_to_explain_the_function = f\"\"\"# How to write great unit tests with {unit_test_package}\n",
    "\n",
    "In this advanced tutorial for experts, we'll use Python 3.9 and `{unit_test_package}` to write a suite of unit tests to verify the behavior of the following function.\n",
    "```python\n",
    "{function_to_test}\n",
    "```\n",
    "\n",
    "Before writing any unit tests, let's review what each element of the function is doing exactly and what the author's intentions may have been.\n",
    "- First,\"\"\"\n",
    "    if print_text:\n",
    "        text_color_prefix = \"\\\\033[30m\"  # black; if you read against a dark background \\\\033[97m is white\n",
    "        print(text_color_prefix + prompt_to_explain_the_function, end=\"\")  # end='' prevents a newline from being printed\n",
    "\n",
    "    # send the prompt to the API, using \\\\n\\\\n as a stop sequence to stop at the end of the bullet list\n",
    "    explanation_response = openai.Completion.create(\n",
    "        model=text_model,\n",
    "        prompt=prompt_to_explain_the_function,\n",
    "        stop=[\"\\\\n\\\\n\", \"\\\\n\\\\t\\\\n\", \"\\\\n \\\\n\"],\n",
    "        max_tokens=max_tokens,\n",
    "        temperature=temperature,\n",
    "        stream=True,\n",
    "    )\n",
    "    explanation_completion = \"\"\n",
    "    if print_text:\n",
    "        completion_color_prefix = \"\\\\033[92m\"  # green\n",
    "        print(completion_color_prefix, end=\"\")\n",
    "    for event in explanation_response:\n",
    "        event_text = event[\"choices\"][0][\"text\"]\n",
    "        explanation_completion += event_text\n",
    "        if print_text:\n",
    "            print(event_text, end=\"\")\n",
    "\n",
    "    # Step 2: Generate a plan to write a unit test\n",
    "\n",
    "    # create a markdown-formatted prompt that asks GPT-3 to complete a plan for writing unit tests, formatted as a bullet list\n",
    "    prompt_to_explain_a_plan = f\"\"\"\n",
    " \n",
    "A good unit test suite should aim to:\n",
    "- Test the function's behavior for a wide range of possible inputs\n",
    "- Test edge cases that the author may not have foreseen\n",
    "- Take advantage of the features of `{unit_test_package}` to make the tests easy to write and maintain\n",
    "- Be easy to read and understand, with clean code and descriptive names\n",
    "- Be deterministic, so that the tests always pass or fail in the same way\n",
    "\n",
    "`{unit_test_package}` has many convenient features that make it easy to write and maintain unit tests. We'll use them to write unit tests for the function above.\n",
    "\n",
    "For this particular function, we'll want our unit tests to handle the following diverse scenarios (and under each scenario, we include a few examples as sub-bullets):\n",
    "-\"\"\"\n",
    "    if print_text:\n",
    "        print(text_color_prefix + prompt_to_explain_a_plan, end=\"\")\n",
    "\n",
    "    # append this planning prompt to the results from step 1\n",
    "    prior_text = prompt_to_explain_the_function + explanation_completion\n",
    "    full_plan_prompt = prior_text + prompt_to_explain_a_plan\n",
    "\n",
    "    # send the prompt to the API, using \\\\n\\\\n as a stop sequence to stop at the end of the bullet list\n",
    "    plan_response = openai.Completion.create(\n",
    "        model=text_model,\n",
    "        prompt=full_plan_prompt,\n",
    "        stop=[\"\\\\n\\\\n\", \"\\\\n\\\\t\\\\n\", \"\\\\n \\\\n\"],\n",
    "        max_tokens=max_tokens,\n",
    "        temperature=temperature,\n",
    "        stream=True,\n",
    "    )\n",
    "    plan_completion = \"\"\n",
    "    if print_text:\n",
    "        print(completion_color_prefix, end=\"\")\n",
    "    for event in plan_response:\n",
    "        event_text = event[\"choices\"][0][\"text\"]\n",
    "        plan_completion += event_text\n",
    "        if print_text:\n",
    "            print(event_text, end=\"\")\n",
    "\n",
    "    # Step 2b: If the plan is short, ask GPT-3 to elaborate further\n",
    "    # this counts top-level bullets (e.g., categories), but not sub-bullets (e.g., test cases)\n",
    "    elaboration_needed = plan_completion.count(\"\\\\n-\") + 1 < approx_min_cases_to_cover  # adds 1 because the first bullet is not counted\n",
    "    if elaboration_needed:\n",
    "        prompt_to_elaborate_on_the_plan = f\"\"\"\n",
    "\n",
    "In addition to the scenarios above, we'll also want to make sure we don't forget to test rare or unexpected edge cases (and under each edge case, we include a few examples as sub-bullets):\n",
    "-\"\"\"\n",
    "        if print_text:\n",
    "            print(text_color_prefix + prompt_to_elaborate_on_the_plan, end=\"\")\n",
    "\n",
    "        # append this elaboration prompt to the results from step 2\n",
    "        prior_text = full_plan_prompt + plan_completion\n",
    "        full_elaboration_prompt = prior_text + prompt_to_elaborate_on_the_plan\n",
    "\n",
    "        # send the prompt to the API, using \\\\n\\\\n as a stop sequence to stop at the end of the bullet list\n",
    "        elaboration_response = openai.Completion.create(\n",
    "            model=text_model,\n",
    "            prompt=full_elaboration_prompt,\n",
    "            stop=[\"\\\\n\\\\n\", \"\\\\n\\\\t\\\\n\", \"\\\\n \\\\n\"],\n",
    "            max_tokens=max_tokens,\n",
    "            temperature=temperature,\n",
    "            stream=True,\n",
    "        )\n",
    "        elaboration_completion = \"\"\n",
    "        if print_text:\n",
    "            print(completion_color_prefix, end=\"\")\n",
    "        for event in elaboration_response:\n",
    "            event_text = event[\"choices\"][0][\"text\"]\n",
    "            elaboration_completion += event_text\n",
    "            if print_text:\n",
    "                print(event_text, end=\"\")\n",
    "\n",
    "    # Step 3: Generate the unit test\n",
    "\n",
    "    # create a markdown-formatted prompt that asks GPT-3 to complete a unit test\n",
    "    starter_comment = \"\"\n",
    "    if unit_test_package == \"pytest\":\n",
    "        starter_comment = \"Below, each test case is represented by a tuple passed to the @pytest.mark.parametrize decorator\"\n",
    "    prompt_to_generate_the_unit_test = f\"\"\"\n",
    "\n",
    "Before going into the individual tests, let's first look at the complete suite of unit tests as a cohesive whole. We've added helpful comments to explain what each line does.\n",
    "```python\n",
    "import {unit_test_package}  # used for our unit tests\n",
    "\n",
    "{function_to_test}\n",
    "\n",
    "#{starter_comment}\"\"\"\n",
    "    if print_text:\n",
    "        print(text_color_prefix + prompt_to_generate_the_unit_test, end=\"\")\n",
    "\n",
    "    # append this unit test prompt to the results from steps 2 and 2b\n",
    "    if elaboration_needed:\n",
    "        prior_text = full_elaboration_prompt + elaboration_completion\n",
    "    else:\n",
    "        prior_text = full_plan_prompt + plan_completion\n",
    "    full_unit_test_prompt = prior_text + prompt_to_generate_the_unit_test\n",
    "\n",
    "    # send the prompt to the API, using ``` as a stop sequence to stop at the end of the code block\n",
    "    unit_test_response = openai.Completion.create(\n",
    "        model=code_model,\n",
    "        prompt=full_unit_test_prompt,\n",
    "        stop=\"```\",\n",
    "        max_tokens=max_tokens,\n",
    "        temperature=temperature,\n",
    "        stream=True,\n",
    "    )\n",
    "    unit_test_completion = \"\"\n",
    "    if print_text:\n",
    "        print(completion_color_prefix, end=\"\")\n",
    "    for event in unit_test_response:\n",
    "        event_text = event[\"choices\"][0][\"text\"]\n",
    "        unit_test_completion += event_text\n",
    "        if print_text:\n",
    "            print(event_text, end=\"\")\n",
    "\n",
    "    # check the output for errors\n",
    "    code_start_index = prompt_to_generate_the_unit_test.find(\"```python\\\\n\") + len(\"```python\\\\n\")\n",
    "    code_output = prompt_to_generate_the_unit_test[code_start_index:] + unit_test_completion\n",
    "    try:\n",
    "        ast.parse(code_output)\n",
    "    except SyntaxError as e:\n",
    "        print(f\"Syntax error in generated code: {e}\")\n",
    "        if reruns_if_fail > 0:\n",
    "            print(\"Rerunning...\")\n",
    "            return unit_test_from_function(\n",
    "                function_to_test=function_to_test,\n",
    "                unit_test_package=unit_test_package,\n",
    "                approx_min_cases_to_cover=approx_min_cases_to_cover,\n",
    "                print_text=print_text,\n",
    "                text_model=text_model,\n",
    "                code_model=code_model,\n",
    "                max_tokens=max_tokens,\n",
    "                temperature=temperature,\n",
    "                reruns_if_fail=reruns_if_fail - 1,  # decrement rerun counter when calling again\n",
    "            )\n",
    "\n",
    "    # return the unit test as a string\n",
    "    return unit_test_completion\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[30m# How to write great unit tests with pytest\n",
      "\n",
      "In this advanced tutorial for experts, we'll use Python 3.9 and `pytest` to write a suite of unit tests to verify the behavior of the following function.\n",
      "```python\n",
      "def is_palindrome(s):\n",
      "    return s == s[::-1]\n",
      "```\n",
      "\n",
      "Before writing any unit tests, let's review what each element of the function is doing exactly and what the author's intentions may have been.\n",
      "- First,\u001b[92m we have a function definition. This is where we give the function a name, `is_palindrome`, and specify the arguments that the function accepts. In this case, the function accepts a single string argument, `s`.\n",
      "- Next, we have a return statement. This is where we specify the value that the function returns. In this case, the function returns `s == s[::-1]`.\n",
      "- Finally, we have a function call. This is where we actually call the function with a specific set of arguments. In this case, we're calling the function with the string `\"racecar\"`.\u001b[30m\n",
      " \n",
      "A good unit test suite should aim to:\n",
      "- Test the function's behavior for a wide range of possible inputs\n",
      "- Test edge cases that the author may not have foreseen\n",
      "- Take advantage of the features of `pytest` to make the tests easy to write and maintain\n",
      "- Be easy to read and understand, with clean code and descriptive names\n",
      "- Be deterministic, so that the tests always pass or fail in the same way\n",
      "\n",
      "`pytest` has many convenient features that make it easy to write and maintain unit tests. We'll use them to write unit tests for the function above.\n",
      "\n",
      "For this particular function, we'll want our unit tests to handle the following diverse scenarios (and under each scenario, we include a few examples as sub-bullets):\n",
      "-\u001b[92m The input is a palindrome\n",
      "    - `\"racecar\"`\n",
      "    - `\"madam\"`\n",
      "    - `\"anna\"`\n",
      "- The input is not a palindrome\n",
      "    - `\"python\"`\n",
      "    - `\"test\"`\n",
      "    - `\"1234\"`\n",
      "- The input is an empty string\n",
      "    - `\"\"`\n",
      "- The input is `None`\n",
      "- The input is not a string\n",
      "    - `1`\n",
      "    - `1.0`\n",
      "    - `True`\n",
      "    - `False`\n",
      "    - `[]`\n",
      "    - `{}`\u001b[30m\n",
      "\n",
      "In addition to the scenarios above, we'll also want to make sure we don't forget to test rare or unexpected edge cases (and under each edge case, we include a few examples as sub-bullets):\n",
      "-\u001b[92m The input is a palindrome with spaces\n",
      "    - `\"race car\"`\n",
      "    - `\" madam \"`\n",
      "    - `\" anna \"`\n",
      "- The input is not a palindrome with spaces\n",
      "    - `\" python \"`\n",
      "    - `\" test \"`\n",
      "    - `\" 1234 \"`\n",
      "- The input is a palindrome with punctuation\n",
      "    - `\"racecar!\"`\n",
      "    - `\"Madam, I'm Adam.\"`\n",
      "    - `\"Anna's\"`\n",
      "- The input is not a palindrome with punctuation\n",
      "    - `\"python!\"`\n",
      "    - `\"test.\"`\n",
      "    - `\"1234!\"`\n",
      "- The input is a palindrome with mixed case\n",
      "    - `\"Racecar\"`\n",
      "    - `\"Madam\"`\n",
      "    - `\"Anna\"`\n",
      "- The input is not a palindrome with mixed case\n",
      "    - `\"Python\"`\n",
      "    - `\"Test\"`\n",
      "    - `\"1234\"`\u001b[30m\n",
      "\n",
      "Before going into the individual tests, let's first look at the complete suite of unit tests as a cohesive whole. We've added helpful comments to explain what each line does.\n",
      "```python\n",
      "import pytest  # used for our unit tests\n",
      "\n",
      "def is_palindrome(s):\n",
      "    return s == s[::-1]\n",
      "\n",
      "#Below, each test case is represented by a tuple passed to the @pytest.mark.parametrize decorator\u001b[92m.\n",
      "#The first element of the tuple is a name for the test case, and the second element is a list of arguments for the test case.\n",
      "#The @pytest.mark.parametrize decorator will generate a separate test function for each test case.\n",
      "#The generated test function will be named test_is_palindrome_<name> where <name> is the name of the test case.\n",
      "#The generated test function will be given the arguments specified in the list of arguments for the test case.\n",
      "#The generated test function will be given the fixture specified in the decorator, in this case the function itself.\n",
      "#The generated test function will call the function with the arguments and assert that the result is equal to the expected value.\n",
      "@pytest.mark.parametrize(\n",
      "    \"name,args,expected\",\n",
      "    [\n",
      "        # Test the function's behavior for a wide range of possible inputs\n",
      "        (\"palindrome\", [\"racecar\"], True),\n",
      "        (\"palindrome\", [\"madam\"], True),\n",
      "        (\"palindrome\", [\"anna\"], True),\n",
      "        (\"non-palindrome\", [\"python\"], False),\n",
      "        (\"non-palindrome\", [\"test\"], False),\n",
      "        (\"non-palindrome\", [\"1234\"], False),\n",
      "        (\"empty string\", [\"\"], True),\n",
      "        (\"None\", [None], False),\n",
      "        (\"non-string\", [1], False),\n",
      "        (\"non-string\", [1.0], False),\n",
      "        (\"non-string\", [True], False),\n",
      "        (\"non-string\", [False], False),\n",
      "        (\"non-string\", [[]], False),\n",
      "        (\"non-string\", [{}], False),\n",
      "        # Test edge cases that the author may not have foreseen\n",
      "        (\"palindrome with spaces\", [\"race car\"], True),\n",
      "        (\"palindrome with spaces\", [\" madam \"], True),\n",
      "        (\"palindrome with spaces\", [\" anna \"], True),\n",
      "        (\"non-palindrome with spaces\", [\" python \"], False),\n",
      "        (\"non-palindrome with spaces\", [\" test \"], False),\n",
      "        (\"non-palindrome with spaces\", [\" 1234 \"], False),\n",
      "        (\"palindrome with punctuation\", [\"racecar!\"], True),\n",
      "        (\"palindrome with punctuation\", [\"Madam, I'm Adam.\"], True),\n",
      "        (\"palindrome with punctuation\", [\"Anna's\"], True),\n",
      "        (\"non-palindrome with punctuation\", [\"python!\"], False),\n",
      "        (\"non-palindrome with punctuation\", [\"test.\"], False),\n",
      "        (\"non-palindrome with punctuation\", [\"1234!\"], False),\n",
      "        (\"palindrome with mixed case\", [\"Racecar\"], True),\n",
      "        (\"palindrome with mixed case\", [\"Madam\"], True),\n",
      "        (\"palindrome with mixed case\", [\"Anna\"], True),\n",
      "        (\"non-palindrome with mixed case\", [\"Python\"], False),\n",
      "        (\"non-palindrome with mixed case\", [\"Test\"], False),\n",
      "        (\"non-palindrome with mixed case\", [\"1234\"], False),\n",
      "    ],\n",
      ")\n",
      "def test_is_palindrome(is_palindrome, args, expected):\n",
      "    assert is_palindrome(*args) == expected\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'.\\n#The first element of the tuple is a name for the test case, and the second element is a list of arguments for the test case.\\n#The @pytest.mark.parametrize decorator will generate a separate test function for each test case.\\n#The generated test function will be named test_is_palindrome_<name> where <name> is the name of the test case.\\n#The generated test function will be given the arguments specified in the list of arguments for the test case.\\n#The generated test function will be given the fixture specified in the decorator, in this case the function itself.\\n#The generated test function will call the function with the arguments and assert that the result is equal to the expected value.\\n@pytest.mark.parametrize(\\n    \"name,args,expected\",\\n    [\\n        # Test the function\\'s behavior for a wide range of possible inputs\\n        (\"palindrome\", [\"racecar\"], True),\\n        (\"palindrome\", [\"madam\"], True),\\n        (\"palindrome\", [\"anna\"], True),\\n        (\"non-palindrome\", [\"python\"], False),\\n        (\"non-palindrome\", [\"test\"], False),\\n        (\"non-palindrome\", [\"1234\"], False),\\n        (\"empty string\", [\"\"], True),\\n        (\"None\", [None], False),\\n        (\"non-string\", [1], False),\\n        (\"non-string\", [1.0], False),\\n        (\"non-string\", [True], False),\\n        (\"non-string\", [False], False),\\n        (\"non-string\", [[]], False),\\n        (\"non-string\", [{}], False),\\n        # Test edge cases that the author may not have foreseen\\n        (\"palindrome with spaces\", [\"race car\"], True),\\n        (\"palindrome with spaces\", [\" madam \"], True),\\n        (\"palindrome with spaces\", [\" anna \"], True),\\n        (\"non-palindrome with spaces\", [\" python \"], False),\\n        (\"non-palindrome with spaces\", [\" test \"], False),\\n        (\"non-palindrome with spaces\", [\" 1234 \"], False),\\n        (\"palindrome with punctuation\", [\"racecar!\"], True),\\n        (\"palindrome with punctuation\", [\"Madam, I\\'m Adam.\"], True),\\n        (\"palindrome with punctuation\", [\"Anna\\'s\"], True),\\n        (\"non-palindrome with punctuation\", [\"python!\"], False),\\n        (\"non-palindrome with punctuation\", [\"test.\"], False),\\n        (\"non-palindrome with punctuation\", [\"1234!\"], False),\\n        (\"palindrome with mixed case\", [\"Racecar\"], True),\\n        (\"palindrome with mixed case\", [\"Madam\"], True),\\n        (\"palindrome with mixed case\", [\"Anna\"], True),\\n        (\"non-palindrome with mixed case\", [\"Python\"], False),\\n        (\"non-palindrome with mixed case\", [\"Test\"], False),\\n        (\"non-palindrome with mixed case\", [\"1234\"], False),\\n    ],\\n)\\ndef test_is_palindrome(is_palindrome, args, expected):\\n    assert is_palindrome(*args) == expected\\n'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "example_function = \"\"\"def is_palindrome(s):\n",
    "    return s == s[::-1]\"\"\"\n",
    "\n",
    "unit_test_from_function(example_function, print_text=True)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.9.9 ('openai')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.9"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "365536dcbde60510dc9073d6b991cd35db2d9bac356a11f5b64279a5e6708b97"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
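The notebook above accumulates streamed completions by iterating dict-style `event["choices"][0]["text"]` events from the old API. Under the v1 SDK this PR migrates to, a streamed completion yields typed chunk objects instead. A minimal sketch of the same accumulate-and-print pattern, assuming `gpt-3.5-turbo-instruct` in place of the retired `text-davinci-002`:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Stream a completion and build up the text chunk by chunk,
# mirroring the explanation/plan/test loops in the notebook above.
stream = client.completions.create(
    model="gpt-3.5-turbo-instruct",  # assumption: stand-in for text-davinci-002
    prompt="# How to write great unit tests with pytest\n",
    stop=["\n\n"],
    max_tokens=1000,
    temperature=0.4,
    stream=True,
)
completion = ""
for chunk in stream:
    if chunk.choices:  # guard against chunks that carry no choices
        text = chunk.choices[0].text
        completion += text
        print(text, end="")
```

The chained-prompt structure itself (explain, plan, elaborate, write) is unchanged by the migration; only the transport call and the chunk access differ.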
@@ -1,320 +0,0 @@
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Azure completions example\n",
    "\n",
    "This example will cover completions using the Azure OpenAI service. It also includes information on content filtering."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Setup\n",
    "\n",
    "First, we install the necessary dependencies and import the libraries we will be using."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "! pip install \"openai>=1.0.0,<2.0.0\"\n",
    "! pip install python-dotenv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import openai\n",
    "import dotenv\n",
    "\n",
    "dotenv.load_dotenv()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Authentication\n",
    "\n",
    "The Azure OpenAI service supports multiple authentication mechanisms that include API keys and Azure Active Directory token credentials."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "use_azure_active_directory = False  # Set this flag to True if you are using Azure Active Directory"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Authentication using API key\n",
    "\n",
    "To set up the OpenAI SDK to use an *Azure API Key*, we need to set `api_key` to a key associated with your endpoint (you can find this key in *\"Keys and Endpoints\"* under *\"Resource Management\"* in the [Azure Portal](https://portal.azure.com)). You'll also find the endpoint for your resource here."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "if not use_azure_active_directory:\n",
    "    endpoint = os.environ[\"AZURE_OPENAI_ENDPOINT\"]\n",
    "    api_key = os.environ[\"AZURE_OPENAI_API_KEY\"]\n",
    "\n",
    "    client = openai.AzureOpenAI(\n",
    "        azure_endpoint=endpoint,\n",
    "        api_key=api_key,\n",
    "        api_version=\"2023-09-01-preview\"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Authentication using Azure Active Directory\n",
    "\n",
    "Let's now see how we can authenticate via Azure Active Directory. We'll start by installing the `azure-identity` library. This library will provide the token credentials we need to authenticate and help us build a token credential provider through the `get_bearer_token_provider` helper function. It's recommended to use `get_bearer_token_provider` over providing a static token to `AzureOpenAI` because this API will automatically cache and refresh tokens for you.\n",
    "\n",
    "For more information on how to set up Azure Active Directory authentication with Azure OpenAI, see the [documentation](https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "! pip install \"azure-identity>=1.15.0\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n",
    "\n",
    "if use_azure_active_directory:\n",
    "    endpoint = os.environ[\"AZURE_OPENAI_ENDPOINT\"]\n",
    "    api_key = os.environ[\"AZURE_OPENAI_API_KEY\"]\n",
    "\n",
    "    client = openai.AzureOpenAI(\n",
    "        azure_endpoint=endpoint,\n",
    "        azure_ad_token_provider=get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"),\n",
    "        api_version=\"2023-09-01-preview\"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "> Note: the `AzureOpenAI` client infers the following arguments from their corresponding environment variables if they are not provided:\n",
    "\n",
    "- `api_key` from `AZURE_OPENAI_API_KEY`\n",
    "- `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN`\n",
    "- `api_version` from `OPENAI_API_VERSION`\n",
    "- `azure_endpoint` from `AZURE_OPENAI_ENDPOINT`\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Deployments\n",
    "\n",
    "In this section we are going to create a deployment of a model that we can use to create completions."
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Deployments: Create in the Azure OpenAI Studio\n",
    "\n",
    "Let's deploy a model to use with completions. Go to https://portal.azure.com, find your Azure OpenAI resource, and then navigate to the Azure OpenAI Studio. Click on the \"Deployments\" tab and then create a deployment for the model you want to use for completions. The deployment name that you give the model will be used in the code below."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "deployment = \"\"  # Fill in the deployment name from the portal here"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Completions\n",
    "\n",
    "Now let's create a completion using the client we built."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt = \"The food was delicious and the waiter\"\n",
    "completion = client.completions.create(\n",
    "    model=deployment,\n",
    "    prompt=prompt,\n",
    "    stop=\".\",\n",
    "    temperature=0\n",
    ")\n",
    "\n",
    "print(f\"{prompt}{completion.choices[0].text}.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Create a streaming completion\n",
    "\n",
    "We can also stream the response."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "prompt = \"The food was delicious and the waiter\"\n",
    "response = client.completions.create(\n",
    "    model=deployment,\n",
    "    prompt=prompt,\n",
    "    stream=True,\n",
    ")\n",
    "for completion in response:\n",
    "    if len(completion.choices) > 0:\n",
    "        print(f\"{completion.choices[0].text}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Content filtering\n",
    "\n",
    "Azure OpenAI service includes content filtering of prompts and completion responses. You can learn more about content filtering and how to configure it [here](https://learn.microsoft.com/azure/ai-services/openai/concepts/content-filter).\n",
    "\n",
    "If the prompt is flagged by the content filter, the library will raise a `BadRequestError` exception with a `content_filter` error code. Otherwise, you can access the `prompt_filter_results` and `content_filter_results` on the response to see the results of the content filtering and what categories were flagged."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Prompt flagged by content filter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "try:\n",
    "    completion = client.completions.create(\n",
    "        prompt=\"<text violating the content policy>\",\n",
    "        model=deployment,\n",
    "    )\n",
    "except openai.BadRequestError as e:\n",
    "    err = json.loads(e.response.text)\n",
    "    if err[\"error\"][\"code\"] == \"content_filter\":\n",
    "        print(\"Content filter triggered!\")\n",
    "        content_filter_result = err[\"error\"][\"innererror\"][\"content_filter_result\"]\n",
    "        for category, details in content_filter_result.items():\n",
    "            print(f\"{category}:\\n filtered={details['filtered']}\\n severity={details['severity']}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Checking the result of the content filter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "completion = client.completions.create(\n",
    "    prompt=\"What's the biggest city in Washington?\",\n",
    "    model=deployment,\n",
    ")\n",
    "\n",
    "print(f\"Answer: {completion.choices[0].text}\")\n",
    "\n",
    "# prompt content filter result in \"model_extra\" for azure\n",
    "prompt_filter_result = completion.model_extra[\"prompt_filter_results\"][0][\"content_filter_results\"]\n",
    "print(\"\\nPrompt content filter results:\")\n",
    "for category, details in prompt_filter_result.items():\n",
    "    print(f\"{category}:\\n filtered={details['filtered']}\\n severity={details['severity']}\")\n",
    "\n",
    "# completion content filter result\n",
    "print(\"\\nCompletion content filter results:\")\n",
    "completion_filter_result = completion.choices[0].model_extra[\"content_filter_results\"]\n",
    "for category, details in completion_filter_result.items():\n",
    "    print(f\"{category}:\\n filtered={details['filtered']}\\n severity={details['severity']}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  },
  "vscode": {
   "interpreter": {
    "hash": "3a5103089ab7e7c666b279eeded403fcec76de49a40685dbdfe9f9c78ad97c17"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
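As the note in the notebook above points out, the `AzureOpenAI` client falls back to environment variables for any constructor argument not passed explicitly. A minimal sketch relying on that fallback; the deployment name `my-completions-deployment` is hypothetical:

```python
import openai

# With AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and OPENAI_API_VERSION
# set in the environment, no explicit constructor arguments are needed.
client = openai.AzureOpenAI()

completion = client.completions.create(
    model="my-completions-deployment",  # hypothetical deployment name
    prompt="The food was delicious and the waiter",
    stop=".",
    temperature=0,
)
print(completion.choices[0].text)
```

Note that on Azure the `model` argument carries the deployment name you chose in the portal, not the underlying model name.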
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long