From 3b7c4c51d50380ee480a8d1b0bc4d9cd496b854f Mon Sep 17 00:00:00 2001
From: matias-biatoz <111389463+matias-biatoz@users.noreply.github.com>
Date: Sun, 18 Jun 2023 22:32:20 -0300
Subject: [PATCH] Added gpt-3.5-turbo 0613 16k and 16k-0613 pricing (#6287)

@agola11
Issue #6193

I added the new pricing for the new models. Also, gpt-3.5-turbo pricing is now
split into separate "input" and "output" rates, which the cost callback did not
previously support.
---
 langchain/callbacks/openai_info.py | 34 +++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 10 deletions(-)

diff --git a/langchain/callbacks/openai_info.py b/langchain/callbacks/openai_info.py
index 765b8e34..63925631 100644
--- a/langchain/callbacks/openai_info.py
+++ b/langchain/callbacks/openai_info.py
@@ -5,21 +5,33 @@ from langchain.callbacks.base import BaseCallbackHandler
 from langchain.schema import AgentAction, AgentFinish, LLMResult
 
 MODEL_COST_PER_1K_TOKENS = {
+    # GPT-4 input
     "gpt-4": 0.03,
     "gpt-4-0314": 0.03,
-    "gpt-4-completion": 0.06,
-    "gpt-4-0314-completion": 0.06,
+    "gpt-4-0613": 0.03,
     "gpt-4-32k": 0.06,
     "gpt-4-32k-0314": 0.06,
+    "gpt-4-32k-0613": 0.06,
+    # GPT-4 output
+    "gpt-4-completion": 0.06,
+    "gpt-4-0314-completion": 0.06,
+    "gpt-4-0613-completion": 0.06,
     "gpt-4-32k-completion": 0.12,
     "gpt-4-32k-0314-completion": 0.12,
-    "gpt-4-0613": 0.06,
-    "gpt-4-32k-0613": 0.12,
-    "gpt-3.5-turbo": 0.002,
-    "gpt-3.5-turbo-0301": 0.002,
-    "gpt-3.5-turbo-16k": 0.004,
-    "gpt-3.5-turbo-0613": 0.002,
-    "gpt-3.5-turbo-16k-0613": 0.004,
+    "gpt-4-32k-0613-completion": 0.12,
+    # GPT-3.5 input
+    "gpt-3.5-turbo": 0.0015,
+    "gpt-3.5-turbo-0301": 0.0015,
+    "gpt-3.5-turbo-0613": 0.0015,
+    "gpt-3.5-turbo-16k": 0.003,
+    "gpt-3.5-turbo-16k-0613": 0.003,
+    # GPT-3.5 output
+    "gpt-3.5-turbo-completion": 0.002,
+    "gpt-3.5-turbo-0301-completion": 0.002,
+    "gpt-3.5-turbo-0613-completion": 0.002,
+    "gpt-3.5-turbo-16k-completion": 0.004,
+    "gpt-3.5-turbo-16k-0613-completion": 0.004,
+    # Others
     "text-ada-001": 0.0004,
     "ada": 0.0004,
     "text-babbage-001": 0.0005,
@@ -43,7 +55,9 @@ def standardize_model_name(
     model_name = model_name.lower()
     if "ft-" in model_name:
         return model_name.split(":")[0] + "-finetuned"
-    elif is_completion and model_name.startswith("gpt-4"):
+    elif is_completion and (
+        model_name.startswith("gpt-4") or model_name.startswith("gpt-3.5")
+    ):
         return model_name + "-completion"
     else:
         return model_name
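
For reference, a minimal sketch (not part of the patch) of how the split pricing is
looked up after this change. It uses only the MODEL_COST_PER_1K_TOKENS dict and the
standardize_model_name() helper edited above, and assumes standardize_model_name()
accepts the is_completion flag referenced in the elif branch of the diff. The
cost_per_call() helper and its example values are illustrative, not part of langchain.

# Sketch: price a call against the updated table.
from langchain.callbacks.openai_info import (
    MODEL_COST_PER_1K_TOKENS,
    standardize_model_name,
)


def cost_per_call(model_name: str, num_tokens: int, is_completion: bool = False) -> float:
    # For output tokens, standardize_model_name() maps e.g. "gpt-3.5-turbo-0613"
    # to "gpt-3.5-turbo-0613-completion" (assumed from the diff), which selects
    # the output rate; input tokens use the bare model name.
    name = standardize_model_name(model_name, is_completion=is_completion)
    return MODEL_COST_PER_1K_TOKENS[name] * (num_tokens / 1000)


# gpt-3.5-turbo-0613 now has distinct input and output rates:
print(cost_per_call("gpt-3.5-turbo-0613", 1000))                      # 0.0015
print(cost_per_call("gpt-3.5-turbo-0613", 1000, is_completion=True))  # 0.002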