# notes:
# - do not submit pull requests to add new models; this list will be updated in batches with new releases.

- platform: openai
  # docs:
  # - https://platform.openai.com/docs/models
  # - https://openai.com/pricing
  # - https://platform.openai.com/docs/api-reference/chat
  # notes:
  # - get max_output_tokens info from api error
  models:
    - name: gpt-4o
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 5
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: gpt-4-turbo
      max_input_tokens: 128000
      max_output_tokens: 4096
      input_price: 10
      output_price: 30
      supports_vision: true
      supports_function_calling: true
    - name: gpt-3.5-turbo
      max_input_tokens: 16385
      max_output_tokens: 4096
      input_price: 0.5
      output_price: 1.5
      supports_function_calling: true
    - name: text-embedding-3-large
      type: embedding
      max_input_tokens: 8191
      input_price: 0.13
      output_vector_size: 3072
      default_chunk_size: 3000
      max_batch_size: 100
    - name: text-embedding-3-small
      type: embedding
      max_input_tokens: 8191
      input_price: 0.02
      output_vector_size: 1536
      default_chunk_size: 3000
      max_batch_size: 100

- platform: gemini
  # docs:
  # - https://ai.google.dev/models/gemini
  # - https://ai.google.dev/pricing
  # - https://ai.google.dev/api/rest/v1beta/models/streamGenerateContent
  # notes:
  # - get max_output_tokens info from list models api
  models:
    - name: gemini-1.5-pro-latest
      max_input_tokens: 1048576
      max_output_tokens: 8192
      input_price: 3.5
      output_price: 10.5
      supports_vision: true
      supports_function_calling: true
    - name: gemini-1.5-flash-latest
      max_input_tokens: 1048576
      max_output_tokens: 8192
      input_price: 0.35
      output_price: 1.05
      supports_vision: true
      supports_function_calling: true
    - name: gemini-1.0-pro-latest
      max_input_tokens: 30720
      max_output_tokens: 2048
      input_price: 0.5
      output_price: 1.5
      supports_function_calling: true
    - name: text-embedding-004
      type: embedding
      max_input_tokens: 2048
      default_chunk_size: 1500
      max_batch_size: 5

- platform: claude
  # docs:
  # - https://docs.anthropic.com/claude/docs/models-overview
  # - https://docs.anthropic.com/claude/reference/messages-streaming
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: claude-3-5-sonnet-20240620
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: claude-3-opus-20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
      supports_function_calling: true
    - name: claude-3-sonnet-20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: claude-3-haiku-20240307
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
      supports_function_calling: true

- platform: mistral
  # docs:
  # - https://docs.mistral.ai/getting-started/models/
  # - https://mistral.ai/technology/#pricing
  # - https://docs.mistral.ai/api/
  # notes:
  # - unable to get max_output_tokens info
  models:
    - name: open-mistral-7b
      max_input_tokens: 32000
      input_price: 0.25
      output_price: 0.25
    - name: open-mixtral-8x7b
      max_input_tokens: 32000
      input_price: 0.7
      output_price: 0.7
    - name: open-mixtral-8x22b
      max_input_tokens: 64000
      input_price: 2
      output_price: 6
    - name: mistral-small-latest
      max_input_tokens: 32000
      input_price: 1
      output_price: 3
    - name: mistral-large-latest
      max_input_tokens: 32000
      input_price: 4
      output_price: 12
    - name: codestral-latest
      max_input_tokens: 32000
      input_price: 1
      output_price: 3
    - name: mistral-embed
      type: embedding
      input_price: 0.1
      output_vector_size: 1024
      max_input_tokens: 8092
      default_chunk_size: 2000

- platform: cohere
  # docs:
  # - https://docs.cohere.com/docs/command-r
  # - https://cohere.com/pricing
  # - https://docs.cohere.com/reference/chat
  models:
    - name: command-r
      max_input_tokens: 128000
      input_price: 0.5
      output_price: 1.5
      supports_function_calling: true
    - name: command-r-plus
      max_input_tokens: 128000
      input_price: 3
      output_price: 15
      supports_function_calling: true
    - name: embed-english-v3.0
      type: embedding
      max_input_tokens: 512
      input_price: 0.1
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 96
    - name: embed-multilingual-v3.0
      type: embedding
      max_input_tokens: 512
      input_price: 0.1
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 96
    - name: rerank-english-v3.0
      type: rerank
      max_input_tokens: 4096
    - name: rerank-multilingual-v3.0
      type: rerank
      max_input_tokens: 4096

- platform: perplexity
  # docs:
  # - https://docs.perplexity.ai/docs/model-cards
  # - https://docs.perplexity.ai/docs/pricing
  # - https://docs.perplexity.ai/reference/post_chat_completions
  models:
    - name: llama-3-sonar-small-32k-chat
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: llama-3-sonar-small-32k-online
      max_input_tokens: 28000
      input_price: 0.2
      output_price: 0.2
    - name: llama-3-sonar-large-32k-chat
      max_input_tokens: 32768
      input_price: 1
      output_price: 1
    - name: llama-3-sonar-large-32k-online
      max_input_tokens: 28000
      input_price: 1
      output_price: 1
    - name: llama-3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: llama-3-70b-instruct
      max_input_tokens: 8192
      input_price: 1
      output_price: 1
    - name: mixtral-8x7b-instruct
      max_input_tokens: 16384
      input_price: 0.6
      output_price: 0.6

- platform: groq
  # docs:
  # - https://console.groq.com/docs/models
  # - https://wow.groq.com
  # - https://console.groq.com/docs/text-chat
  # notes:
  # - all models are free with rate limits
  models:
    - name: llama3-8b-8192
      max_input_tokens: 8192
      input_price: 0.05
      output_price: 0.08
      supports_function_calling: true
    - name: llama3-70b-8192
      max_input_tokens: 8192
      input_price: 0.59
      output_price: 0.79
      supports_function_calling: true
    - name: mixtral-8x7b-32768
      max_input_tokens: 32768
      input_price: 0.24
      output_price: 0.24
    - name: gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.07
      output_price: 0.07

- platform: vertexai
  # docs:
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
  # - https://cloud.google.com/vertex-ai/generative-ai/pricing
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/gemini
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: gemini-1.5-pro-001
      max_input_tokens: 1000000
      max_output_tokens: 8192
      input_price: 1.25
      output_price: 3.75
      supports_vision: true
      supports_function_calling: true
    - name: gemini-1.5-flash-001
      max_input_tokens: 1000000
      max_output_tokens: 8192
      input_price: 0.125
      output_price: 0.375
      supports_vision: true
      supports_function_calling: true
    - name: gemini-1.0-pro-002
      max_input_tokens: 24568
      max_output_tokens: 8192
      input_price: 0.125
      output_price: 0.375
      supports_function_calling: true
    - name: text-embedding-004
      type: embedding
      max_input_tokens: 3072
      input_price: 0.025
      output_vector_size: 768
      default_chunk_size: 1500
      max_batch_size: 5
    - name: text-multilingual-embedding-002
      type: embedding
      max_input_tokens: 3072
      input_price: 0.2
      output_vector_size: 768
      default_chunk_size: 1500
      max_batch_size: 5

- platform: vertexai-claude
  # docs:
  # - https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude
  # notes:
  # - get max_output_tokens info from models doc
  models:
    - name: claude-3-5-sonnet@20240620
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: claude-3-opus@20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
      supports_function_calling: true
    - name: claude-3-sonnet@20240229
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: claude-3-haiku@20240307
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
      supports_function_calling: true

- platform: bedrock
  # docs:
  # - https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
  # - https://aws.amazon.com/bedrock/pricing/
  # notes:
  # - get max_output_tokens info from playground
  models:
    - name: anthropic.claude-3-5-sonnet-20240620-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: anthropic.claude-3-opus-20240229-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
      supports_function_calling: true
    - name: anthropic.claude-3-sonnet-20240229-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: anthropic.claude-3-haiku-20240307-v1:0
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
      supports_function_calling: true
    - name: meta.llama3-8b-instruct-v1:0
      max_input_tokens: 8192
      max_output_tokens: 2048
      require_max_tokens: true
      input_price: 0.4
      output_price: 0.6
    - name: meta.llama3-70b-instruct-v1:0
      max_input_tokens: 8192
      max_output_tokens: 2048
      require_max_tokens: true
      input_price: 2.65
      output_price: 3.5
    - name: mistral.mistral-7b-instruct-v0:2
      max_input_tokens: 32000
      max_output_tokens: 8192
      require_max_tokens: true
      input_price: 0.15
      output_price: 0.2
    - name: mistral.mixtral-8x7b-instruct-v0:1
      max_input_tokens: 32000
      max_output_tokens: 8192
      require_max_tokens: true
      input_price: 0.45
      output_price: 0.7
    - name: mistral.mistral-large-2402-v1:0
      max_input_tokens: 32000
      max_output_tokens: 8192
      require_max_tokens: true
      input_price: 8
      output_price: 24

- platform: cloudflare
  # docs:
  # - https://developers.cloudflare.com/workers-ai/models/
  # - https://developers.cloudflare.com/workers-ai/platform/pricing/
  models:
    - name: '@cf/meta/llama-3-8b-instruct'
      max_input_tokens: 6144
      max_output_tokens: 2048
      require_max_tokens: true
      input_price: 0
      output_price: 0
    - name: '@hf/mistral/mistral-7b-instruct-v0.2'
      max_input_tokens: 6144
      max_output_tokens: 2048
      require_max_tokens: true
      input_price: 0
      output_price: 0
    - name: '@cf/qwen/qwen1.5-14b-chat-awq'
      max_input_tokens: 6144
      max_output_tokens: 2048
      require_max_tokens: true
      input_price: 0
      output_price: 0
    - name: '@cf/google/gemma-7b-it'
      max_input_tokens: 6144
      max_output_tokens: 2048
      require_max_tokens: true
      input_price: 0
      output_price: 0
    - name: '@cf/baai/bge-base-en-v1.5'
      type: embedding
      max_input_tokens: 512
      input_price: 0
      output_vector_size: 768
      default_chunk_size: 1000
      max_batch_size: 100
    - name: '@cf/baai/bge-large-en-v1.5'
      type: embedding
      max_input_tokens: 512
      input_price: 0
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100

- platform: replicate
  # docs:
  # - https://replicate.com/explore
  # - https://replicate.com/pricing
  # - https://replicate.com/docs/reference/http
  models:
    - name: meta/meta-llama-3-70b-instruct
      max_input_tokens: 8192
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 0.65
      output_price: 2.75
    - name: meta/meta-llama-3-8b-instruct
      max_input_tokens: 8192
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 0.05
      output_price: 0.25
    - name: mistralai/mistral-7b-instruct-v0.2
      max_input_tokens: 32000
      max_output_tokens: 8192
      require_max_tokens: true
      input_price: 0.05
      output_price: 0.25
    - name: mistralai/mixtral-8x7b-instruct-v0.1
      max_input_tokens: 32000
      max_output_tokens: 8192
      require_max_tokens: true
      input_price: 0.3
      output_price: 1

- platform: ernie
  # docs:
  # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
  # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
  models:
    - name: ernie-4.0-8k-0613
      max_input_tokens: 8192
      input_price: 16.8
      output_price: 16.8
      supports_function_calling: true
    - name: ernie-3.5-8k-0613
      max_input_tokens: 8192
      input_price: 1.68
      output_price: 1.68
      supports_function_calling: true
    - name: ernie-3.5-128k
      max_input_tokens: 128000
      input_price: 6.72
      output_price: 13.44
      supports_function_calling: true
    - name: ernie-speed-128k
      max_input_tokens: 128000
      input_price: 0
      output_price: 0
    - name: bge_large_zh
      type: embedding
      max_input_tokens: 512
      input_price: 0.28
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 16
    - name: bge_large_en
      type: embedding
      max_input_tokens: 512
      input_price: 0.28
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 16
    - name: tao_8k
      type: embedding
      max_input_tokens: 8192
      input_price: 0.28
      output_vector_size: 1024
      default_chunk_size: 2000
      max_batch_size: 1
    - name: bce_reranker_base
      type: rerank
      max_input_tokens: 1024

- platform: qianwen
  # docs:
  # - https://help.aliyun.com/zh/dashscope/developer-reference/tongyiqianwen-large-language-models/
  # - https://help.aliyun.com/zh/dashscope/developer-reference/qwen-vl-plus/
  models:
    - name: qwen-long
      max_input_tokens: 1000000
      input_price: 0.07
      output_price: 0.28
    - name: qwen-turbo
      max_input_tokens: 8000
      input_price: 0.28
      output_price: 0.84
      supports_function_calling: true
    - name: qwen-plus
      max_input_tokens: 32000
      input_price: 0.56
      output_price: 1.68
      supports_function_calling: true
    - name: qwen-max
      max_input_tokens: 8000
      input_price: 5.6
      output_price: 16.8
      supports_function_calling: true
    - name: qwen-max-longcontext
      input_price: 5.6
      output_price: 16.8
      max_input_tokens: 30000
    - name: qwen-vl-plus
      input_price: 1.12
      output_price: 1.12
      supports_vision: true
    - name: qwen-vl-max
      input_price: 2.8
      output_price: 2.8
      supports_vision: true
    - name: text-embedding-v2
      type: embedding
      max_input_tokens: 2048
      input_price: 0.1
      output_vector_size: 1536
      default_chunk_size: 1500
      max_batch_size: 25

- platform: moonshot
  # docs:
  # - https://platform.moonshot.cn/docs/intro
  # - https://platform.moonshot.cn/docs/pricing
  # - https://platform.moonshot.cn/docs/api-reference
  models:
    - name: moonshot-v1-8k
      max_input_tokens: 8000
      input_price: 1.68
      output_price: 1.68
      supports_function_calling: true
    - name: moonshot-v1-32k
      max_input_tokens: 32000
      input_price: 3.36
      output_price: 3.36
      supports_function_calling: true
    - name: moonshot-v1-128k
      max_input_tokens: 128000
      input_price: 8.4
      output_price: 8.4
      supports_function_calling: true

- platform: deepseek
  # docs:
  # - https://platform.deepseek.com/api-docs/
  # - https://platform.deepseek.com/api-docs/pricing
  models:
    - name: deepseek-chat
      max_input_tokens: 32768
      input_price: 0.14
      output_price: 0.28
    - name: deepseek-coder
      max_input_tokens: 32768
      input_price: 0.14
      output_price: 0.28

- platform: zhipuai
  # docs:
  # - https://open.bigmodel.cn/dev/howuse/model
  # - https://open.bigmodel.cn/pricing
  models:
    - name: glm-4-0520
      max_input_tokens: 128000
      input_price: 14
      output_price: 14
      supports_function_calling: true
    - name: glm-4-airx
      max_input_tokens: 8092
      input_price: 1.4
      output_price: 1.4
      supports_function_calling: true
    - name: glm-4-air
      max_input_tokens: 128000
      input_price: 0.14
      output_price: 0.14
      supports_function_calling: true
    - name: glm-4-flash
      max_input_tokens: 128000
      input_price: 0.014
      output_price: 0.014
      supports_function_calling: true
    - name: glm-4v
      max_input_tokens: 2048
      input_price: 7
      output_price: 7
      supports_vision: true
    - name: embedding-2
      type: embedding
      max_input_tokens: 512
      input_price: 0.07
      output_vector_size: 1024
      default_chunk_size: 1000

- platform: lingyiwanwu
  # docs:
  # - https://platform.lingyiwanwu.com/docs#%E6%A8%A1%E5%9E%8B
  # - https://platform.lingyiwanwu.com/docs#%E8%AE%A1%E8%B4%B9%E5%8D%95%E5%85%83
  models:
    - name: yi-large
      max_input_tokens: 32768
      input_price: 2.8
      output_price: 2.8
    - name: yi-large-turbo
      max_input_tokens: 16384
      input_price: 1.68
      output_price: 1.68
    - name: yi-large-rag
      max_input_tokens: 16384
      input_price: 3.5
      output_price: 3.5
    - name: yi-vision
      max_input_tokens: 4096
      input_price: 0.84
      output_price: 0.84
      supports_vision: true
    - name: yi-medium
      max_input_tokens: 16384
      input_price: 0.35
      output_price: 0.35
    - name: yi-medium-200k
      max_input_tokens: 200000
      input_price: 1.68
      output_price: 1.68
    - name: yi-spark
      max_input_tokens: 16384
      input_price: 0.14
      output_price: 0.14

- platform: anyscale
  # docs:
  # - https://docs.anyscale.com/endpoints/text-generation/query-a-model
  # - https://www.anyscale.com/pricing-detail
  models:
    - name: meta-llama/Meta-Llama-3-8B-Instruct
      max_input_tokens: 8192
      input_price: 0.15
      output_price: 0.15
    - name: meta-llama/Meta-Llama-3-70B-Instruct
      max_input_tokens: 8192
      input_price: 1.0
      output_price: 1.0
    - name: mistralai/Mistral-7B-Instruct-v0.1
      max_input_tokens: 16384
      input_price: 0.15
      output_price: 0.15
    - name: mistralai/Mixtral-8x7B-Instruct-v0.1
      max_input_tokens: 32768
      input_price: 0.50
      output_price: 0.50
    - name: mistralai/Mixtral-8x22B-Instruct-v0.1
      max_input_tokens: 65536
      input_price: 0.90
      output_price: 0.90
    - name: google/gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.15
      output_price: 0.15
    - name: BAAI/bge-large-en-v1.5
      type: embedding
      max_input_tokens: 512
      input_price: 0.05
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 30
    - name: thenlper/gte-large
      type: embedding
      max_input_tokens: 512
      input_price: 0.05
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 30

- platform: deepinfra
  # docs:
  # - https://deepinfra.com/models
  # - https://deepinfra.com/pricing
  models:
    - name: meta-llama/Meta-Llama-3-8B-Instruct
      max_input_tokens: 8192
      input_price: 0.08
      output_price: 0.08
      supports_function_calling: true
    - name: meta-llama/Meta-Llama-3-70B-Instruct
      max_input_tokens: 8192
      input_price: 0.59
      output_price: 0.79
      supports_function_calling: true
    - name: mistralai/Mistral-7B-Instruct-v0.3
      max_input_tokens: 32768
      input_price: 0.07
      output_price: 0.07
    - name: mistralai/Mixtral-8x7B-Instruct-v0.1
      max_input_tokens: 32768
      input_price: 0.24
      output_price: 0.24
      supports_function_calling: true
    - name: mistralai/Mixtral-8x22B-Instruct-v0.1
      max_input_tokens: 65536
      input_price: 0.65
      output_price: 0.65
      supports_function_calling: true
    - name: google/gemma-1.1-7b-it
      max_input_tokens: 8192
      input_price: 0.07
      output_price: 0.07
    - name: Qwen/Qwen2-72B-Instruct
      max_input_tokens: 32768
      input_price: 0.59
      output_price: 0.79
    - name: microsoft/Phi-3-medium-4k-instruct
      max_input_tokens: 4096
      input_price: 0.14
      output_price: 0.14
    - name: BAAI/bge-large-en-v1.5
      type: embedding
      max_input_tokens: 512
      input_price: 0.01
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: BAAI/bge-base-en-v1.5
      type: embedding
      max_input_tokens: 512
      input_price: 0.005
      output_vector_size: 768
      default_chunk_size: 1000
      max_batch_size: 100
    - name: BAAI/bge-m3
      type: embedding
      max_input_tokens: 8192
      input_price: 0.01
      output_vector_size: 1024
      default_chunk_size: 2000
      max_batch_size: 100
    - name: intfloat/e5-base-v2
      type: embedding
      max_input_tokens: 512
      input_price: 0.005
      output_vector_size: 768
      default_chunk_size: 1000
      max_batch_size: 100
    - name: intfloat/e5-large-v2
      type: embedding
      max_input_tokens: 512
      input_price: 0.01
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: intfloat/multilingual-e5-large
      type: embedding
      max_input_tokens: 512
      input_price: 0.01
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: thenlper/gte-base
      type: embedding
      max_input_tokens: 512
      input_price: 0.005
      output_vector_size: 768
      default_chunk_size: 1000
      max_batch_size: 100
    - name: thenlper/gte-large
      type: embedding
      max_input_tokens: 512
      input_price: 0.01
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100

- platform: fireworks
  # docs:
  # - https://fireworks.ai/models
  # - https://fireworks.ai/pricing
  models:
    - name: accounts/fireworks/models/firellava-13b
      max_input_tokens: 4096
      input_price: 0.2
      output_price: 0.2
      supports_vision: true
    - name: accounts/fireworks/models/firefunction-v2
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
      supports_function_calling: true
    - name: accounts/fireworks/models/llama-v3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/llama-v3-70b-instruct
      max_input_tokens: 8192
      input_price: 0.9
      output_price: 0.9
    - name: accounts/fireworks/models/mistral-7b-instruct-v3
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/mixtral-8x7b-instruct
      max_input_tokens: 32768
      input_price: 0.5
      output_price: 0.5
    - name: accounts/fireworks/models/mixtral-8x22b-instruct
      max_input_tokens: 65536
      input_price: 0.9
      output_price: 0.9
    - name: accounts/fireworks/models/gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/qwen2-72b-instruct
      max_input_tokens: 32768
      input_price: 0.9
      output_price: 0.9
    - name: accounts/fireworks/models/phi-3-mini-128k-instruct
      max_input_tokens: 131072
      input_price: 0.2
      output_price: 0.2
    - name: accounts/fireworks/models/phi-3-vision-128k-instruct
      max_input_tokens: 131072
      input_price: 0.2
      output_price: 0.2
      supports_vision: true
    - name: nomic-ai/nomic-embed-text-v1.5
      type: embedding
      max_input_tokens: 8192
      input_price: 0.008
      output_vector_size: 768
      default_chunk_size: 1500
      max_batch_size: 100
    - name: WhereIsAI/UAE-Large-V1
      type: embedding
      max_input_tokens: 512
      input_price: 0.016
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: thenlper/gte-large
      type: embedding
      max_input_tokens: 512
      input_price: 0.016
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: thenlper/gte-base
      type: embedding
      max_input_tokens: 512
      input_price: 0.008
      output_vector_size: 768
      default_chunk_size: 1000
      max_batch_size: 100

- platform: openrouter
  # docs:
  # - https://openrouter.ai/docs#models
  models:
    - name: meta-llama/llama-3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.07
      output_price: 0.07
    - name: meta-llama/llama-3-70b-instruct
      max_input_tokens: 8192
      input_price: 0.59
      output_price: 0.79
    - name: microsoft/phi-3-mini-128k-instruct
      max_input_tokens: 128000
      input_price: 0.1
      output_price: 0.1
    - name: microsoft/phi-3-medium-4k-instruct
      max_input_tokens: 4000
      input_price: 0.14
      output_price: 0.14
    - name: microsoft/phi-3-medium-128k-instruct
      max_input_tokens: 128000
      input_price: 1
      output_price: 1
    - name: qwen/qwen-2-72b-instruct
      max_input_tokens: 32768
      input_price: 0.9
      output_price: 0.9
    - name: openai/gpt-4o
      max_input_tokens: 128000
      input_price: 5
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: openai/gpt-4-turbo
      max_input_tokens: 128000
      input_price: 10
      output_price: 30
      supports_vision: true
      supports_function_calling: true
    - name: openai/gpt-3.5-turbo
      max_input_tokens: 16385
      input_price: 0.5
      output_price: 1.5
      supports_function_calling: true
    - name: google/gemini-pro-1.5
      max_input_tokens: 2800000
      input_price: 2.5
      output_price: 7.5
      supports_vision: true
      supports_function_calling: true
    - name: google/gemini-flash-1.5
      max_input_tokens: 2800000
      input_price: 0.25
      output_price: 0.75
      supports_vision: true
      supports_function_calling: true
    - name: google/gemini-pro
      max_input_tokens: 91728
      input_price: 0.125
      output_price: 0.375
      supports_function_calling: true
    - name: anthropic/claude-3.5-sonnet
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: anthropic/claude-3-opus
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 15
      output_price: 75
      supports_vision: true
      supports_function_calling: true
    - name: anthropic/claude-3-sonnet
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 3
      output_price: 15
      supports_vision: true
      supports_function_calling: true
    - name: anthropic/claude-3-haiku
      max_input_tokens: 200000
      max_output_tokens: 4096
      require_max_tokens: true
      input_price: 0.25
      output_price: 1.25
      supports_vision: true
      supports_function_calling: true
    - name: mistralai/mistral-7b-instruct-v0.3
      max_input_tokens: 32768
      input_price: 0.07
      output_price: 0.07
    - name: mistralai/mixtral-8x7b-instruct
      max_input_tokens: 32768
      input_price: 0.24
      output_price: 0.24
    - name: mistralai/mixtral-8x22b-instruct
      max_input_tokens: 65536
      input_price: 0.65
      output_price: 0.65
    - name: mistralai/mistral-small
      max_input_tokens: 32000
      input_price: 2
      output_price: 6
    - name: mistralai/mistral-large
      max_input_tokens: 32000
      input_price: 8
      output_price: 24
    - name: cohere/command-r
      max_input_tokens: 128000
      input_price: 0.5
      output_price: 1.5
      supports_function_calling: true
    - name: cohere/command-r-plus
      max_input_tokens: 128000
      input_price: 3
      output_price: 15
      supports_function_calling: true
    - name: deepseek/deepseek-chat
      max_input_tokens: 32768
      input_price: 0.14
      output_price: 0.28
    - name: deepseek/deepseek-coder
      max_input_tokens: 32768
      input_price: 0.14
      output_price: 0.28
    - name: perplexity/llama-3-sonar-small-32k-chat
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: perplexity/llama-3-sonar-small-32k-online
      max_input_tokens: 28000
      input_price: 0.2
      output_price: 0.2
    - name: perplexity/llama-3-sonar-large-32k-chat
      max_input_tokens: 32768
      input_price: 1
      output_price: 1
    - name: perplexity/llama-3-sonar-large-32k-online
      max_input_tokens: 28000
      input_price: 1
      output_price: 1

- platform: octoai
  # docs:
  # - https://octo.ai/docs/getting-started/inference-models
  # - https://octo.ai/pricing/text-gen-solution/
  models:
    - name: meta-llama-3-8b-instruct
      max_input_tokens: 8192
      input_price: 0.13
      output_price: 0.13
    - name: meta-llama-3-70b-instruct
      max_input_tokens: 8192
      input_price: 0.86
      output_price: 0.86
    - name: mistral-7b-instruct
      max_input_tokens: 32768
      input_price: 0.13
      output_price: 0.13
    - name: mixtral-8x7b-instruct
      max_input_tokens: 32768
      input_price: 0.34
      output_price: 0.34
    - name: mixtral-8x22b-instruct
      max_input_tokens: 65536
      input_price: 0.86
      output_price: 0.86
    - name: thenlper/gte-large
      type: embedding
      max_input_tokens: 512
      input_price: 0.05
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100

- platform: together
  # docs:
  # - https://docs.together.ai/docs/inference-models
  # - https://docs.together.ai/docs/embedding-models
  # - https://www.together.ai/pricing
  models:
    - name: meta-llama/Llama-3-8b-chat-hf
      max_input_tokens: 8000
      input_price: 0.2
      output_price: 0.2
    - name: meta-llama/Llama-3-70b-chat-hf
      max_input_tokens: 8000
      input_price: 0.9
      output_price: 0.9
    - name: mistralai/Mistral-7B-Instruct-v0.3
      max_input_tokens: 32768
      input_price: 0.2
      output_price: 0.2
    - name: mistralai/Mixtral-8x7B-Instruct-v0.1
      max_input_tokens: 32768
      input_price: 0.9
      output_price: 0.9
    - name: mistralai/Mixtral-8x22B-Instruct-v0.1
      max_input_tokens: 65536
      input_price: 1.2
      output_price: 1.2
    - name: google/gemma-7b-it
      max_input_tokens: 8192
      input_price: 0.2
      output_price: 0.2
    - name: Qwen/Qwen2-72B-Instruct
      max_input_tokens: 32768
      input_price: 0.9
      output_price: 0.9
    - name: WhereIsAI/UAE-Large-V1
      type: embedding
      max_input_tokens: 512
      input_price: 0.016
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: BAAI/bge-large-en-v1.5
      type: embedding
      max_input_tokens: 512
      input_price: 0.016
      output_vector_size: 1024
      default_chunk_size: 1000
      max_batch_size: 100
    - name: BAAI/bge-base-en-v1.5
      type: embedding
      max_input_tokens: 512
      input_price: 0.008
      output_vector_size: 768
      default_chunk_size: 1000
      max_batch_size: 100