From fde2a6474db03bf61d8def356fead62784fe45e9 Mon Sep 17 00:00:00 2001 From: gusmally Date: Fri, 22 Sep 2023 10:49:07 -0700 Subject: [PATCH] add note about legacy fine tuning (#729) --- examples/Chat_finetuning_data_prep.ipynb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/Chat_finetuning_data_prep.ipynb b/examples/Chat_finetuning_data_prep.ipynb index 31236ee2..a0318d55 100644 --- a/examples/Chat_finetuning_data_prep.ipynb +++ b/examples/Chat_finetuning_data_prep.ipynb @@ -9,7 +9,9 @@ "# Data preparation and analysis for chat model fine-tuning\n", "\n", "This notebook serves as a tool to preprocess and analyze the chat dataset used for fine-tuning a chat model. \n", - "It checks for format errors, provides basic statistics, and estimates token counts for fine-tuning costs.\n" + "It checks for format errors, provides basic statistics, and estimates token counts for fine-tuning costs.\n", + "The chat message format used here corresponds to [the current fine-tuning method](https://platform.openai.com/docs/guides/fine-tuning) for models like gpt-3.5-turbo.\n", + "For fine-tuning completion models like babbage-002 and davinci-002, see [the legacy fine-tuning guide](https://platform.openai.com/docs/guides/legacy-fine-tuning)." ] }, {