Finetuned OpenAI models cost calculation #11715 (#12190)

**Description:**
Add cost calculation for fine-tuned models (new and legacy). This is
needed now that OpenAI has added new models for fine-tuning and bills
input and output tokens separately for fine-tuned models. The relevant
unit tests are updated as well.
See https://platform.openai.com/docs/guides/fine-tuning for more
information.
Issue: https://github.com/langchain-ai/langchain/issues/11715

  - **Issue:** 11715
  - **Twitter handle:** @nirkopler
Author: Nir Kopler (committed by GitHub)
commit d3744175bf
parent a2840a2b42
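For context, a minimal sketch of the user-facing effect. The model id and prompt are hypothetical, and `ChatOpenAI` / `get_openai_callback` are the usual entry points of that era; this snippet is illustrative rather than taken from the PR:

```python
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI

# Hypothetical fine-tuned model id in the new "ft:<base>:<org>:..." format.
llm = ChatOpenAI(
    model_name="ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg"
)

with get_openai_callback() as cb:
    llm.predict("Say hello")

# Before this change the fine-tuned name never matched the cost table and
# total_cost stayed 0; now prompt and completion tokens are priced separately.
print(cb.total_cost)
```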

@@ -57,10 +57,19 @@ MODEL_COST_PER_1K_TOKENS = {
     "text-davinci-003": 0.02,
     "text-davinci-002": 0.02,
     "code-davinci-002": 0.02,
-    "ada-finetuned": 0.0016,
-    "babbage-finetuned": 0.0024,
-    "curie-finetuned": 0.012,
-    "davinci-finetuned": 0.12,
+    # Fine Tuned input
+    "babbage-002-finetuned": 0.0016,
+    "davinci-002-finetuned": 0.012,
+    "gpt-3.5-turbo-0613-finetuned": 0.012,
+    # Fine Tuned output
+    "babbage-002-finetuned-completion": 0.0016,
+    "davinci-002-finetuned-completion": 0.012,
+    "gpt-3.5-turbo-0613-finetuned-completion": 0.016,
+    # Legacy fine-tuned models
+    "ada-finetuned-legacy": 0.0016,
+    "babbage-finetuned-legacy": 0.0024,
+    "curie-finetuned-legacy": 0.012,
+    "davinci-finetuned-legacy": 0.12,
 }
 
 
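As a sanity check on the new rates, assuming the module's existing `get_openai_token_cost_for_model` helper, which multiplies the per-1K rate by `num_tokens / 1000`:

```python
from langchain.callbacks.openai_info import get_openai_token_cost_for_model

# Fine-tuned gpt-3.5-turbo-0613: input billed at 0.012/1K, output at 0.016/1K.
assert get_openai_token_cost_for_model("gpt-3.5-turbo-0613-finetuned", 1000) == 0.012
assert (
    get_openai_token_cost_for_model(
        "gpt-3.5-turbo-0613-finetuned", 1000, is_completion=True
    )
    == 0.016
)
```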
@@ -82,11 +91,14 @@ def standardize_model_name(
     """
     model_name = model_name.lower()
     if "ft-" in model_name:
-        return model_name.split(":")[0] + "-finetuned"
+        return model_name.split(":")[0] + "-finetuned-legacy"
+    if "ft:" in model_name:
+        return model_name.split(":")[1] + "-finetuned"
     elif is_completion and (
         model_name.startswith("gpt-4")
         or model_name.startswith("gpt-3.5")
         or model_name.startswith("gpt-35")
+        or ("finetuned" in model_name and "legacy" not in model_name)
     ):
         return model_name + "-completion"
     else:
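To make the branches above concrete, roughly how the standardized names come out (a sketch derived from the diff; the example ids mirror the tests below):

```python
from langchain.callbacks.openai_info import standardize_model_name

# Legacy ids ("<base>:ft-<org>:...") map to the "-finetuned-legacy" rates.
assert (
    standardize_model_name("ada:ft-your-org:custom-model-name-2022-02-15-04-21-04")
    == "ada-finetuned-legacy"
)

# New ids ("ft:<base>:<org>:...") map to the "-finetuned" (input) rates.
assert (
    standardize_model_name("ft:babbage-002:your-org:custom-model-name:1abcdefg")
    == "babbage-002-finetuned"
)

# On a second pass, completion tokens of a non-legacy fine-tuned name
# pick up the "-completion" (output) rates via the new `or` clause.
assert (
    standardize_model_name("gpt-3.5-turbo-0613-finetuned", is_completion=True)
    == "gpt-3.5-turbo-0613-finetuned-completion"
)
```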

@@ -49,7 +49,21 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
     assert handler.total_cost == 0
 
 
-def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
+@pytest.mark.parametrize(
+    "model_name",
+    [
+        "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
+        "babbage:ft-your-org:custom-model-name-2022-02-15-04-21-04",
+        "curie:ft-your-org:custom-model-name-2022-02-15-04-21-04",
+        "davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04",
+        "ft:babbage-002:your-org:custom-model-name:1abcdefg",
+        "ft:davinci-002:your-org:custom-model-name:1abcdefg",
+        "ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg",
+    ],
+)
+def test_on_llm_end_finetuned_model(
+    handler: OpenAICallbackHandler, model_name: str
+) -> None:
     response = LLMResult(
         generations=[],
         llm_output={
@@ -58,7 +72,7 @@ def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
                 "completion_tokens": 1,
                 "total_tokens": 3,
             },
-            "model_name": "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
+            "model_name": model_name,
         },
     )
     handler.on_llm_end(response)
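A self-contained version of what one parametrized case exercises, with the pytest fixture spelled out (a sketch rather than the test file verbatim):

```python
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.schema import LLMResult

handler = OpenAICallbackHandler()
response = LLMResult(
    generations=[],
    llm_output={
        "token_usage": {
            "prompt_tokens": 2,
            "completion_tokens": 1,
            "total_tokens": 3,
        },
        "model_name": "ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg",
    },
)
handler.on_llm_end(response)
assert handler.total_cost > 0  # 2 input tokens + 1 output token, now priced
```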
