# NOTES:
# - This model list is scheduled to be updated with each new aichat release. Please do not submit PRs to add new models.
# - This model list does not include models officially marked as legacy or beta.

- type: openai
  # docs:
  # - https://platform.openai.com/docs/models
  # - https://openai.com/pricing
  # - https://platform.openai.com/docs/api-reference/chat
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: gpt-3.5-turbo
      max_input_tokens: 16385
      max_output_tokens?: 4096
      input_price: 0.5
      output_price: 1.5
    - name: gpt-3.5-turbo-1106
      max_input_tokens: 16385
      max_output_tokens?: 4096
      input_price: 1
      output_price: 2
    - name: gpt-4-turbo
      max_input_tokens: 128000
      max_output_tokens?: 4096
      input_price: 10
      output_price: 30
      supports_vision: true
    - name: gpt-4-turbo-preview
      max_input_tokens: 128000
      max_output_tokens?: 4096
      input_price: 10
      output_price: 30
    - name: gpt-4-1106-preview
      max_input_tokens: 128000
      max_output_tokens?: 4096
      input_price: 10
      output_price: 30
    - name: gpt-4-vision-preview
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 10
      output_price: 30
      supports_vision: true
    - name: gpt-4
      max_input_tokens: 8192
      max_output_tokens?: 4096
      input_price: 30
      output_price: 60
    - name: gpt-4-32k
      max_input_tokens: 32768
      max_output_tokens?: 4096
      input_price: 60
      output_price: 120

- type: gemini
  # docs:
  # - https://ai.google.dev/models/gemini
  # - https://ai.google.dev/pricing
  # - https://ai.google.dev/api/rest/v1beta/models/streamGenerateContent
  # notes:
  # - get max_output_tokens info from list models api
  models:
    - name: gemini-1.0-pro-latest
      max_input_tokens: 30720
      max_output_tokens?: 2048
      input_price: 0.5
      output_price: 1.5
    - name: gemini-1.0-pro-vision-latest
      max_input_tokens: 12288
      max_output_tokens?: 4096
      input_price: 0.5
      output_price: 1.5
      supports_vision: true
    - name: gemini-1.5-pro-latest
      max_input_tokens: 1048576
      max_output_tokens?: 8192
      input_price: 7
      output_price: 21
      supports_vision: true

- type: claude
  # docs:
  # - https://docs.anthropic.com/claude/docs/models-overview
  # - https://docs.anthropic.com/claude/reference/messages-streaming
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: claude-3-opus-20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: claude-3-sonnet-20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: claude-3-haiku-20240307
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 0.25
      output_price: 1.25
      supports_vision: true

- type: mistral
  # docs:
  # - https://docs.mistral.ai/getting-started/models/
  # - https://mistral.ai/technology/#pricing
  # - https://docs.mistral.ai/api/
  # notes:
  # - unable to get max_output_tokens info
  models:
    - name: open-mistral-7b
      max_input_tokens: 32000
      input_price: 0.25
      output_price: 0.25
    - name: open-mixtral-8x7b
      max_input_tokens: 32000
      input_price: 0.7
      output_price: 0.7
    - name: open-mixtral-8x22b
      max_input_tokens: 64000
      input_price: 2
      output_price: 6
    - name: mistral-small-latest
      max_input_tokens: 32000
      input_price: 2
      output_price: 6
    - name: mistral-medium-latest
      max_input_tokens: 32000
      input_price: 2.7
      output_price: 8.1
    - name: mistral-large-latest
      max_input_tokens: 32000
      input_price: 8
      output_price: 24

- type: cohere
  # docs:
  # - https://docs.cohere.com/docs/command-r
  # - https://cohere.com/pricing
  # - https://docs.cohere.com/reference/chat
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: command-r
      max_input_tokens: 128000
      max_output_tokens?: 4000
      input_price: 0.5
      output_price: 1.5
    - name: command-r-plus
      max_input_tokens: 128000
      max_output_tokens?: 4000
      input_price: 3
      output_price: 15

- type: perplexity
  # docs:
  # - https://docs.perplexity.ai/docs/model-cards
  # - https://docs.perplexity.ai/docs/pricing
  # - https://docs.perplexity.ai/reference/post_chat_completions
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: sonar-small-chat
      max_input_tokens: 16384
      max_output_tokens?: 16384
    - name: sonar-small-online
      max_input_tokens: 12000
      max_output_tokens?: 12288
    - name: sonar-medium-chat
      max_input_tokens: 16384
      max_output_tokens?: 16384
    - name: sonar-medium-online
      max_input_tokens: 12000
      max_output_tokens?: 12288
    - name: llama-3-8b-instruct
      max_input_tokens: 8192
      max_output_tokens?: 8192
      input_price: 0.2
      output_price: 0.2
    - name: llama-3-70b-instruct
      max_input_tokens: 8192
      max_output_tokens?: 8192
      input_price: 1
      output_price: 1
    - name: codellama-70b-instruct
      max_input_tokens: 16384
      max_output_tokens?: 16384
      input_price: 1
      output_price: 1
    - name: mistral-7b-instruct
      max_input_tokens: 16384
      max_output_tokens?: 16384
      input_price: 0.2
      output_price: 0.2
    - name: mixtral-8x7b-instruct
      max_input_tokens: 16384
      max_output_tokens?: 16384
      input_price: 0.6
      output_price: 0.6
    - name: mixtral-8x22b-instruct
      max_input_tokens: 16384
      max_output_tokens?: 16384
      input_price: 1
      output_price: 1

- type: groq
  # docs:
  # - https://console.groq.com/docs/models
  # - https://wow.groq.com
  # - https://console.groq.com/docs/text-chat
  # notes:
  # - get max_output_tokens info from playground
  # - all models are free with rate limits
  models:
    - name: llama3-8b-8192
      max_input_tokens: 8192
      max_output_tokens?: 8192
      input_price: 0.05
      output_price: 0.10
    - name: llama3-70b-8192
      max_input_tokens: 8192
      max_output_tokens?: 8192
      input_price: 0.59
      output_price: 0.79
    - name: mixtral-8x7b-32768
      max_input_tokens: 32768
      max_output_tokens?: 32768
      input_price: 0.27
      output_price: 0.27
    - name: gemma-7b-it
      max_input_tokens: 8192
      max_output_tokens?: 8192
      input_price: 0.10
      output_price: 0.10

- type: vertexai
  # docs:
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
  # - https://cloud.google.com/vertex-ai/generative-ai/pricing
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini
  # notes:
  # - get max_output_tokens info from models doc
  # - claude models have not been tested
  models:
    - name: gemini-1.0-pro
      max_input_tokens: 24568
      max_output_tokens?: 8192
      input_price: 0.125
      output_price: 0.375
    - name: gemini-1.0-pro-vision
      max_input_tokens: 14336
      max_output_tokens?: 2048
      input_price: 0.125
      output_price: 0.375
      supports_vision: true
    - name: gemini-1.5-pro-preview-0409
      max_input_tokens: 1000000
      max_output_tokens?: 8192
      input_price: 2.5
      output_price: 7.5
      supports_vision: true
    - name: claude-3-opus@20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: claude-3-sonnet@20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: claude-3-haiku@20240307
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 0.25
      output_price: 1.25
      supports_vision: true

- type: bedrock
  # docs:
  # - https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
  # - https://aws.amazon.com/bedrock/pricing/
  # notes:
  # - get max_output_tokens info from playground
  # - claude/llama models have not been tested
  models:
    - name: anthropic.claude-3-opus-20240229-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 15
      output_price: 75
      supports_vision: true
    - name: anthropic.claude-3-sonnet-20240229-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 3
      output_price: 15
      supports_vision: true
    - name: anthropic.claude-3-haiku-20240307-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
    - name: meta.llama2-13b-chat-v1
      max_input_tokens: 4096
      max_output_tokens: 2048
      input_price: 0.75
      output_price: 1
    - name: meta.llama2-70b-chat-v1
      max_input_tokens: 4096
      max_output_tokens: 2048
      input_price: 1.95
      output_price: 2.56
    - name: meta.llama3-8b-instruct-v1:0
      max_input_tokens: 8192
      max_output_tokens: 4096
      input_price: 0.4
      output_price: 0.6
    - name: meta.llama3-70b-instruct-v1:0
      max_input_tokens: 8192
      max_output_tokens: 4096
      input_price: 2.65
      output_price: 3.5
    - name: mistral.mistral-7b-instruct-v0:2
      max_input_tokens: 32000
      max_output_tokens: 8192
      input_price: 0.15
      output_price: 0.2
    - name: mistral.mixtral-8x7b-instruct-v0:1
      max_input_tokens: 32000
      max_output_tokens: 8192
      input_price: 0.45
      output_price: 0.7
    - name: mistral.mistral-large-2402-v1:0
      max_input_tokens: 32000
      max_output_tokens: 8192
      input_price: 8
      output_price: 24

- type: cloudflare
  # docs:
  # - https://developers.cloudflare.com/workers-ai/models/
  # - https://developers.cloudflare.com/workers-ai/platform/pricing/
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: '@cf/meta/llama-2-7b-chat-fp16'
      max_input_tokens: 3072
      max_output_tokens: 2500
      input_price: 0.56
      output_price: 6.6
    - name: '@cf/meta/llama-2-7b-chat-int8'
      max_input_tokens: 2048
      max_output_tokens: 1800
      input_price: 0.16
      output_price: 0.24
    - name: '@cf/mistral/mistral-7b-instruct-v0.1'
      input_price: 0.11
      output_price: 0.19

- type: ernie
  # docs:
  # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
  # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: ernie-4.0-8k-preview
      max_input_tokens: 5120
      max_output_tokens: 2048
      input_price: 16.8
      output_price: 16.8
    - name: ernie-3.5-8k-preview
      max_input_tokens: 5120
      max_output_tokens: 2048
      input_price: 1.68
      output_price: 1.68
    - name: ernie-speed-128k
      max_input_tokens: 124000
      max_output_tokens: 4096
      input_price: 0.56
      output_price: 1.12
    - name: ernie-lite-8k
      max_input_tokens: 7168
      max_output_tokens: 2048
      input_price: 0.42
      output_price: 0.84
    - name: ernie-tiny-8k
      max_input_tokens: 7168
      max_output_tokens: 2048
      input_price: 0.14
      output_price: 0.14

- type: qianwen
  # docs:
  # - https://help.aliyun.com/zh/dashscope/developer-reference/tongyiqianwen-large-language-models/
  # - https://help.aliyun.com/zh/dashscope/developer-reference/qwen-vl-plus/
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: qwen-turbo
      max_input_tokens: 6000
      max_output_tokens?: 1500
      input_price: 1.12
      output_price: 1.12
    - name: qwen-plus
      max_input_tokens: 30000
      max_output_tokens?: 2000
      input_price: 2.8
      output_price: 2.8
    - name: qwen-max
      max_input_tokens: 6000
      max_output_tokens?: 2000
      input_price: 16.8
      output_price: 16.8
    - name: qwen-max-longcontext
      max_input_tokens: 28000
      max_output_tokens?: 2000
    - name: qwen-vl-plus
      input_price: 1.12
      output_price: 1.12
      supports_vision: true
    - name: qwen-vl-max
      input_price: 2.8
      output_price: 2.8
      supports_vision: true

- type: moonshot
  # docs:
  # - https://platform.moonshot.cn/docs/intro
  # - https://platform.moonshot.cn/docs/pricing
  # - https://platform.moonshot.cn/docs/api-reference
  # notes:
  # - unable to get max_output_tokens info
  models:
    - name: moonshot-v1-8k
      max_input_tokens: 8000
      input_price: 1.68
      output_price: 1.68
    - name: moonshot-v1-32k
      max_input_tokens: 32000
      input_price: 3.36
      output_price: 3.36
    - name: moonshot-v1-128k
      max_input_tokens: 128000
      input_price: 8.4
      output_price: 8.4
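
# Example (an illustrative sketch, not part of the model list): assuming the
# standard aichat config.yaml layout, a model from an entry above is referenced
# as `<type>:<name>` once a client of that type is configured. The api_key
# value below is a placeholder, not a real credential.
#
#   model: openai:gpt-4-turbo
#   clients:
#     - type: openai
#       api_key: sk-xxx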