# aichat/models.yaml
# notes:
# - do not submit pull requests to add new models; this list will be updated in batches with new releases.
# - do not add any open-source LLMs except for the following: Mixtral, LLama-3, Gemma, Qwen, Phi-3, DeepSeek, Command-R, dbrx, Yi.
- platform: openai
  # docs:
  # - https://platform.openai.com/docs/models
  # - https://openai.com/pricing
  # - https://platform.openai.com/docs/api-reference/chat
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: gpt-3.5-turbo
      max_input_tokens: 16385
      max_output_tokens: 4096
      input_price: 0.5
      output_price: 1.5
    - name: gpt-3.5-turbo-1106
      max_input_tokens: 16385
      max_output_tokens: 4096
      input_price: 1
      output_price: 2
    - name: gpt-4o
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 5
      output_price: 15
      supports_vision: true
    - name: gpt-4-turbo
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 10
      output_price: 30
      supports_vision: true
    - name: gpt-4-turbo-preview
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 10
      output_price: 30
    - name: gpt-4-1106-preview
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 10
      output_price: 30
    - name: gpt-4-vision-preview
      max_input_tokens: 128000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 10
      output_price: 30
      supports_vision: true
    - name: gpt-4
      max_input_tokens: 8192
      max_output_tokens: 4096
      input_price: 30
      output_price: 60
    - name: gpt-4-32k
      max_input_tokens: 32768
      max_output_tokens: 4096
      input_price: 60
      output_price: 120
- platform: gemini
  # docs:
  # - https://ai.google.dev/models/gemini
  # - https://ai.google.dev/pricing
  # - https://ai.google.dev/api/rest/v1beta/models/streamGenerateContent
  # notes:
  # - get max_output_tokens info from list models api
  models:
    - name: gemini-1.0-pro-latest
      max_input_tokens: 30720
      max_output_tokens: 2048
      input_price: 0.5
      output_price: 1.5
    - name: gemini-1.0-pro-vision-latest
      max_input_tokens: 12288
      max_output_tokens: 4096
      input_price: 0.5
      output_price: 1.5
      supports_vision: true
    - name: gemini-1.5-pro-latest
      max_input_tokens: 1048576
      max_output_tokens: 8192
      input_price: 7
      output_price: 21
      supports_vision: true
- platform: claude
  # docs:
  # - https://docs.anthropic.com/claude/docs/models-overview
  # - https://docs.anthropic.com/claude/reference/messages-streaming
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: claude-3-opus-20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: claude-3-sonnet-20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: claude-3-haiku-20240307
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
- platform: mistral
  # docs:
  # - https://docs.mistral.ai/getting-started/models/
  # - https://mistral.ai/technology/#pricing
  # - https://docs.mistral.ai/api/
  # notes:
  # - unable to get max_output_tokens info
  models:
    - name: open-mistral-7b
      max_input_tokens: 32000
      input_price: 0.25
      output_price: 0.25
    - name: open-mixtral-8x7b
      max_input_tokens: 32000
      input_price: 0.7
      output_price: 0.7
    - name: open-mixtral-8x22b
      max_input_tokens: 64000
      input_price: 2
      output_price: 6
    - name: mistral-small-latest
      max_input_tokens: 32000
      input_price: 2
      output_price: 6
    - name: mistral-large-latest
      max_input_tokens: 32000
      input_price: 8
      output_price: 24
- platform: cohere
  # docs:
  # - https://docs.cohere.com/docs/command-r
  # - https://cohere.com/pricing
  # - https://docs.cohere.com/reference/chat
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: command-r
      max_input_tokens: 128000
      max_output_tokens: 4000
      input_price: 0.5
      output_price: 1.5
    - name: command-r-plus
      max_input_tokens: 128000
      max_output_tokens: 4000
      input_price: 3
      output_price: 15
- platform: perplexity
  # docs:
  # - https://docs.perplexity.ai/docs/model-cards
  # - https://docs.perplexity.ai/docs/pricing
  # - https://docs.perplexity.ai/reference/post_chat_completions
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: llama-3-sonar-small-32k-chat
      max_input_tokens: 32768
      max_output_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: llama-3-sonar-large-32k-chat
      max_input_tokens: 32768
      max_output_tokens: 32768
      input_price: 0.6
      output_price: 0.6
    - name: llama-3-8b-instruct
      max_input_tokens: 8192
      max_output_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: llama-3-70b-instruct
      max_input_tokens: 8192
      max_output_tokens: 8192
      input_price: 1
      output_price: 1
    - name: mixtral-8x7b-instruct
      max_input_tokens: 16384
      max_output_tokens: 16384
      input_price: 0.6
      output_price: 0.6
- platform: groq
  # docs:
  # - https://console.groq.com/docs/models
  # - https://wow.groq.com
  # - https://console.groq.com/docs/text-chat
  # notes:
  # - get max_output_tokens info from playground
  # - all models are free with rate limits
  models:
    - name: llama3-8b-8192
      max_input_tokens: 8192
      max_output_tokens: 8192
      input_price: 0.05
      output_price: 0.10
    - name: llama3-70b-8192
      max_input_tokens: 8192
      max_output_tokens: 8192
      input_price: 0.59
      output_price: 0.79
    - name: mixtral-8x7b-32768
      max_input_tokens: 32768
      max_output_tokens: 32768
      input_price: 0.27
      output_price: 0.27
    - name: gemma-7b-it
      max_input_tokens: 8192
      max_output_tokens: 8192
      input_price: 0.10
      output_price: 0.10
- platform: vertexai
  # docs:
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
  # - https://cloud.google.com/vertex-ai/generative-ai/pricing
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: gemini-1.0-pro
      max_input_tokens: 24568
      max_output_tokens: 8192
      input_price: 0.125
      output_price: 0.375
    - name: gemini-1.0-pro-vision
      max_input_tokens: 14336
      max_output_tokens: 2048
      input_price: 0.125
      output_price: 0.375
      supports_vision: true
    - name: gemini-1.5-pro-preview-0409
      max_input_tokens: 1000000
      max_output_tokens: 8192
      input_price: 2.5
      output_price: 7.5
      supports_vision: true
- platform: vertexai-claude
  # docs:
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude
  # notes:
  # - get max_output_tokens info from models doc
  # - claude models have not been tested
  models:
    - name: claude-3-opus@20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: claude-3-sonnet@20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: claude-3-haiku@20240307
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
- platform: bedrock
  # docs:
  # - https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
  # - https://aws.amazon.com/bedrock/pricing/
  # notes:
  # - get max_output_tokens info from playground
  # - claude/llama models have not been tested
  models:
    - name: anthropic.claude-3-opus-20240229-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: anthropic.claude-3-sonnet-20240229-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: anthropic.claude-3-haiku-20240307-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
    - name: meta.llama3-8b-instruct-v1:0
      max_input_tokens: 8192
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.4
      output_price: 0.6
    - name: meta.llama3-70b-instruct-v1:0
      max_input_tokens: 8192
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 2.65
      output_price: 3.5
    - name: mistral.mistral-7b-instruct-v0:2
      max_input_tokens: 32000
      max_output_tokens: 8192
      pass_max_tokens: true
      input_price: 0.15
      output_price: 0.2
    - name: mistral.mixtral-8x7b-instruct-v0:1
      max_input_tokens: 32000
      max_output_tokens: 8192
      pass_max_tokens: true
      input_price: 0.45
      output_price: 0.7
    - name: mistral.mistral-large-2402-v1:0
      max_input_tokens: 32000
      max_output_tokens: 8192
      pass_max_tokens: true
      input_price: 8
      # was 2.4 — Bedrock lists Mistral Large 2402 at $0.024/1K output tokens,
      # i.e. 24 per million, matching the 8/24 ratio used elsewhere in this file
      output_price: 24
- platform: cloudflare
  # docs:
  # - https://developers.cloudflare.com/workers-ai/models/
  # - https://developers.cloudflare.com/workers-ai/platform/pricing/
  # notes:
  # - unable to get max_output_tokens info
  models:
    - name: '@cf/meta/llama-3-8b-instruct'
      max_input_tokens: 4096
      max_output_tokens: 4096
      pass_max_tokens: true
    - name: '@cf/mistral/mistral-7b-instruct-v0.2-lora'
      max_input_tokens: 4096
      max_output_tokens: 4096
      pass_max_tokens: true
    - name: '@cf/google/gemma-7b-it-lora'
      max_input_tokens: 4096
      max_output_tokens: 4096
      pass_max_tokens: true
    - name: '@cf/qwen/qwen1.5-14b-chat-awq'
      max_input_tokens: 4096
      max_output_tokens: 4096
      pass_max_tokens: true
    - name: '@hf/thebloke/deepseek-coder-6.7b-instruct-awq'
      max_input_tokens: 4096
      max_output_tokens: 4096
      pass_max_tokens: true
- platform: replicate
  # docs:
  # - https://replicate.com/docs
  # - https://replicate.com/pricing
  # notes:
  # - max_output_tokens is required but unknown
  models:
    - name: meta/meta-llama-3-70b-instruct
      max_input_tokens: 8192
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.65
      output_price: 2.75
    - name: meta/meta-llama-3-8b-instruct
      max_input_tokens: 8192
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.05
      output_price: 0.25
    - name: mistralai/mistral-7b-instruct-v0.2
      max_input_tokens: 32000
      max_output_tokens: 8192
      pass_max_tokens: true
      input_price: 0.05
      output_price: 0.25
    - name: mistralai/mixtral-8x7b-instruct-v0.1
      max_input_tokens: 32000
      max_output_tokens: 8192
      pass_max_tokens: true
      input_price: 0.3
      output_price: 1
- platform: ernie
  # docs:
  # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
  # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: ernie-4.0-8k-preview
      max_input_tokens: 5120
      max_output_tokens: 2048
      pass_max_tokens: true
      input_price: 16.8
      output_price: 16.8
    - name: ernie-3.5-8k-preview
      max_input_tokens: 5120
      max_output_tokens: 2048
      pass_max_tokens: true
      input_price: 1.68
      output_price: 1.68
    - name: ernie-speed-128k
      max_input_tokens: 124000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.56
      output_price: 1.12
    - name: ernie-lite-8k
      max_input_tokens: 7168
      max_output_tokens: 2048
      pass_max_tokens: true
      input_price: 0.42
      output_price: 0.84
    - name: ernie-tiny-8k
      max_input_tokens: 7168
      max_output_tokens: 2048
      pass_max_tokens: true
      input_price: 0.14
      output_price: 0.14
- platform: qianwen
  # docs:
  # - https://help.aliyun.com/zh/dashscope/developer-reference/tongyiqianwen-large-language-models/
  # - https://help.aliyun.com/zh/dashscope/developer-reference/qwen-vl-plus/
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: qwen-turbo
      max_input_tokens: 6000
      max_output_tokens: 1500
      input_price: 1.12
      output_price: 1.12
    - name: qwen-plus
      max_input_tokens: 30000
      max_output_tokens: 2000
      input_price: 2.8
      output_price: 2.8
    - name: qwen-max
      max_input_tokens: 6000
      max_output_tokens: 2000
      input_price: 16.8
      output_price: 16.8
    - name: qwen-max-longcontext
      max_input_tokens: 28000
      max_output_tokens: 2000
    - name: qwen-vl-plus
      input_price: 1.12
      output_price: 1.12
      supports_vision: true
    - name: qwen-vl-max
      input_price: 2.8
      output_price: 2.8
      supports_vision: true
- platform: moonshot
  # docs:
  # - https://platform.moonshot.cn/docs/intro
  # - https://platform.moonshot.cn/docs/pricing
  # - https://platform.moonshot.cn/docs/api-reference
  # notes:
  # - unable to get max_output_tokens info
  models:
    - name: moonshot-v1-8k
      max_input_tokens: 8000
      input_price: 1.68
      output_price: 1.68
    - name: moonshot-v1-32k
      max_input_tokens: 32000
      input_price: 3.36
      output_price: 3.36
    - name: moonshot-v1-128k
      max_input_tokens: 128000
      input_price: 8.4
      output_price: 8.4
- platform: deepseek
  # docs:
  # - https://platform.deepseek.com/api-docs/
  # - https://platform.deepseek.com/api-docs/pricing
  models:
    - name: deepseek-chat
      max_input_tokens: 32768
      input_price: 0.14
      output_price: 0.28
    - name: deepseek-coder
      max_input_tokens: 16384
      input_price: 0.14
      output_price: 0.28
- platform: zhipuai
  # docs:
  # - https://open.bigmodel.cn/dev/howuse/model
  # - https://open.bigmodel.cn/pricing
  models:
    - name: glm-4
      max_input_tokens: 128000
      input_price: 14
      output_price: 14
    - name: glm-4v
      max_input_tokens: 2048
      input_price: 14
      output_price: 14
      supports_vision: true
    - name: glm-3-turbo
      max_input_tokens: 128000
      input_price: 0.7
      output_price: 0.7
- platform: anyscale
  # docs:
  # - https://docs.endpoints.anyscale.com/text-generation/query-a-model/
  # - https://docs.endpoints.anyscale.com/pricing
  models:
    - name: meta-llama/Meta-Llama-3-8B-Instruct
      max_input_tokens: 8192
      input_price: 0.15
      output_price: 0.15
    - name: meta-llama/Meta-Llama-3-70B-Instruct
      max_input_tokens: 8192
      input_price: 1.0
      output_price: 1.0
    - name: codellama/CodeLlama-70b-Instruct-hf
      max_input_tokens: 4096
      input_price: 1.0
      output_price: 1.0
    - name: mistralai/Mistral-7B-Instruct-v0.1
      max_input_tokens: 16384
      input_price: 0.15
      output_price: 0.15
    - name: mistralai/Mixtral-8x7B-Instruct-v0.1
      max_input_tokens: 32768
      input_price: 0.50
      output_price: 0.50
    - name: mistralai/Mixtral-8x22B-Instruct-v0.1
      max_input_tokens: 65536
      input_price: 0.90
      output_price: 0.90
    - name: google/gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.15
      output_price: 0.15
- platform: deepinfra
  # docs:
  # - https://deepinfra.com/models
  # - https://deepinfra.com/pricing
  models:
    - name: meta-llama/Meta-Llama-3-8B-Instruct
      max_input_tokens: 8192
      input_price: 0.08
      output_price: 0.08
    - name: meta-llama/Meta-Llama-3-70B-Instruct
      max_input_tokens: 8192
      input_price: 0.59
      output_price: 0.79
    - name: mistralai/Mistral-7B-Instruct-v0.2
      max_input_tokens: 32768
      input_price: 0.07
      output_price: 0.07
    - name: mistralai/Mixtral-8x7B-Instruct-v0.1
      max_input_tokens: 32768
      input_price: 0.24
      output_price: 0.24
    - name: mistralai/Mixtral-8x22B-Instruct-v0.1
      max_input_tokens: 65536
      input_price: 0.65
      output_price: 0.65
    - name: google/gemma-1.1-7b-it
      max_input_tokens: 8192
      input_price: 0.07
      output_price: 0.07
    - name: databricks/dbrx-instruct
      max_input_tokens: 32768
      input_price: 0.6
      output_price: 0.6
    - name: 01-ai/Yi-34B-Chat
      max_input_tokens: 4096
      input_price: 0.6
      output_price: 0.6
- platform: fireworks
  # docs:
  # - https://fireworks.ai/models
  # - https://fireworks.ai/pricing
  models:
    - name: accounts/fireworks/models/llama-v3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/llama-v3-70b-instruct
      max_input_tokens: 8192
      input_price: 0.9
      output_price: 0.9
    - name: accounts/fireworks/models/mistral-7b-instruct-v0p2
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/mixtral-8x7b-instruct
      max_input_tokens: 32768
      input_price: 0.5
      output_price: 0.5
    - name: accounts/fireworks/models/mixtral-8x22b-instruct
      max_input_tokens: 65536
      input_price: 0.9
      output_price: 0.9
    - name: accounts/fireworks/models/qwen-72b-chat
      max_input_tokens: 4096
      input_price: 0.9
      output_price: 0.9
    - name: accounts/fireworks/models/gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/dbrx-instruct
      max_input_tokens: 32768
      input_price: 1.6
      output_price: 1.6
- platform: openrouter
  # docs:
  # - https://openrouter.ai/docs#models
  models:
    - name: meta-llama/llama-3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.1
      output_price: 0.1
    - name: meta-llama/llama-3-8b-instruct:nitro
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: meta-llama/llama-3-8b-instruct:extended
      max_input_tokens: 16384
      input_price: 0.275
      output_price: 0.283
    - name: meta-llama/llama-3-70b-instruct
      max_input_tokens: 8192
      input_price: 0.81
      output_price: 0.81
    - name: meta-llama/llama-3-70b-instruct:nitro
      max_input_tokens: 8192
      input_price: 0.9
      output_price: 0.9
    - name: mistralai/mistral-7b-instruct:free
      max_input_tokens: 32768
      input_price: 0.0
      output_price: 0.0
    - name: codellama/codellama-70b-instruct
      max_input_tokens: 2048
      input_price: 0.81
      output_price: 0.81
    - name: google/gemma-7b-it:free
      max_input_tokens: 8192
      input_price: 0.0
      output_price: 0.0
    - name: 01-ai/yi-34b-chat
      max_input_tokens: 4096
      input_price: 0.72
      output_price: 0.72
    - name: openai/gpt-3.5-turbo
      max_input_tokens: 16385
      input_price: 0.5
      output_price: 1.5
    - name: openai/gpt-4o
      max_input_tokens: 128000
      input_price: 5
      output_price: 15
      supports_vision: true
    - name: openai/gpt-4-turbo
      max_input_tokens: 128000
      input_price: 10
      output_price: 30
      supports_vision: true
    - name: openai/gpt-4-turbo-preview
      max_input_tokens: 128000
      input_price: 10
      output_price: 30
    - name: openai/gpt-4-vision-preview
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 10
      output_price: 30
      supports_vision: true
    - name: openai/gpt-4
      max_input_tokens: 8192
      input_price: 30
      output_price: 60
    - name: openai/gpt-4-32k
      max_input_tokens: 32768
      input_price: 60
      output_price: 120
    - name: google/gemini-pro
      max_input_tokens: 91728
      input_price: 0.125
      output_price: 0.375
    - name: google/gemini-pro-vision
      max_input_tokens: 45875
      input_price: 0.125
      output_price: 0.375
      supports_vision: true
    - name: google/gemini-pro-1.5
      max_input_tokens: 2800000
      input_price: 2.5
      output_price: 7.5
      supports_vision: true
    - name: anthropic/claude-3-opus
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: anthropic/claude-3-sonnet
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: anthropic/claude-3-haiku
      max_input_tokens: 200000
      max_output_tokens: 4096
      pass_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
    - name: mistralai/mixtral-8x7b-instruct
      max_input_tokens: 32768
      input_price: 0.24
      output_price: 0.24
    - name: mistralai/mixtral-8x22b-instruct
      max_input_tokens: 65536
      input_price: 0.65
      output_price: 0.65
    - name: mistralai/mistral-small
      max_input_tokens: 32000
      input_price: 2
      output_price: 6
    - name: mistralai/mistral-large
      max_input_tokens: 32000
      input_price: 8
      output_price: 24
    - name: databricks/dbrx-instruct
      max_input_tokens: 32768
      input_price: 0.6
      output_price: 0.6
    - name: cohere/command-r
      max_input_tokens: 128000
      input_price: 0.5
      output_price: 1.5
    - name: cohere/command-r-plus
      max_input_tokens: 128000
      input_price: 3
      output_price: 15
- platform: octoai
  # docs:
  # - https://octo.ai/docs/getting-started/inference-models
  # - https://octo.ai/pricing/text-gen-solution/
  models:
    - name: meta-llama-3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.13
      output_price: 0.13
    - name: meta-llama-3-70b-instruct
      max_input_tokens: 8192
      input_price: 0.86
      output_price: 0.86
    - name: mistral-7b-instruct
      max_input_tokens: 32768
      input_price: 0.13
      output_price: 0.13
    - name: mixtral-8x7b-instruct
      max_input_tokens: 32768
      input_price: 0.34
      output_price: 0.34
    - name: mixtral-8x22b-instruct
      max_input_tokens: 65536
      input_price: 0.86
      output_price: 0.86
- platform: together
  # docs:
  # - https://docs.together.ai/docs/inference-models
  # - https://www.together.ai/pricing
  models:
    - name: meta-llama/Llama-3-8b-chat-hf
      max_input_tokens: 8000
      input_price: 0.2
      output_price: 0.2
    - name: meta-llama/Llama-3-70b-chat-hf
      max_input_tokens: 8000
      input_price: 0.9
      output_price: 0.9
    - name: mistralai/Mistral-7B-Instruct-v0.2
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: mistralai/Mixtral-8x7B-Instruct-v0.1
      max_input_tokens: 32768
      input_price: 0.9
      output_price: 0.9
    - name: mistralai/Mixtral-8x22B-Instruct-v0.1
      max_input_tokens: 65536
      input_price: 1.2
      output_price: 1.2
    - name: google/gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: Qwen/Qwen1.5-72B-Chat
      max_input_tokens: 32768
      input_price: 0.9
      output_price: 0.9
    - name: databricks/dbrx-instruct
      max_input_tokens: 32768
      input_price: 1.2
      output_price: 1.2
    - name: zero-one-ai/Yi-34B-Chat
      max_input_tokens: 4096
      input_price: 0.8
      output_price: 0.8
    - name: deepseek-ai/deepseek-llm-67b-chat
      max_input_tokens: 4096
      input_price: 0.9
      output_price: 0.9
    - name: deepseek-ai/deepseek-coder-33b-instruct
      max_input_tokens: 16384
      input_price: 0.8
      output_price: 0.8