From 5002f3ae35070f2ba903bccb7b1028595e3c626a Mon Sep 17 00:00:00 2001
From: Tommaso De Lorenzo
Date: Tue, 23 May 2023 20:18:03 +0200
Subject: [PATCH] solving #2887 (#5127)

# Allowing OpenAI fine-tuned models

A simple fix that checks whether an OpenAI `model_name` is a fine-tuned model
when loading `context_size` and when computing a call's cost in the
`openai_callback`.

Fixes #2887

---------

Co-authored-by: Dev 2049
---
 langchain/callbacks/openai_info.py | 8 ++++++++
 langchain/llms/openai.py           | 4 ++++
 2 files changed, 12 insertions(+)

diff --git a/langchain/callbacks/openai_info.py b/langchain/callbacks/openai_info.py
index b3d5e2d5..9b0af085 100644
--- a/langchain/callbacks/openai_info.py
+++ b/langchain/callbacks/openai_info.py
@@ -24,12 +24,20 @@ MODEL_COST_PER_1K_TOKENS = {
     "text-davinci-003": 0.02,
     "text-davinci-002": 0.02,
     "code-davinci-002": 0.02,
+    "ada-finetuned": 0.0016,
+    "babbage-finetuned": 0.0024,
+    "curie-finetuned": 0.0120,
+    "davinci-finetuned": 0.1200,
 }


 def get_openai_token_cost_for_model(
     model_name: str, num_tokens: int, is_completion: bool = False
 ) -> float:
+    # handling finetuned models
+    if "ft-" in model_name:
+        model_name = f"{model_name.split(':')[0]}-finetuned"
+
     suffix = "-completion" if is_completion and model_name.startswith("gpt-4") else ""
     model = model_name.lower() + suffix
     if model not in MODEL_COST_PER_1K_TOKENS:
diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py
index d9dca1bb..0376f900 100644
--- a/langchain/llms/openai.py
+++ b/langchain/llms/openai.py
@@ -512,6 +512,10 @@ class BaseOpenAI(BaseLLM):
             "code-cushman-001": 2048,
         }

+        # handling finetuned models
+        if "ft-" in modelname:
+            modelname = modelname.split(":")[0]
+
         context_size = model_token_mapping.get(modelname, None)

         if context_size is None:
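
For reference (not part of the patch), a rough sketch of how the two changes normalize a fine-tuned model name; the model name used below is hypothetical:

```python
# Hypothetical fine-tuned model name; any name containing "ft-" takes this path.
model_name = "davinci:ft-your-org-2023-05-23"

if "ft-" in model_name:
    # Cost lookup (callbacks/openai_info.py): collapse the name to the new
    # "<base>-finetuned" key added to MODEL_COST_PER_1K_TOKENS.
    cost_key = f"{model_name.split(':')[0]}-finetuned"  # -> "davinci-finetuned"

    # Context-size lookup (llms/openai.py): fall back to the base model's
    # entry in model_token_mapping.
    base_model = model_name.split(":")[0]  # -> "davinci"
```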