model: openai:gpt-3.5-turbo # Specify the language model to use
temperature: null # Set default temperature parameter
top_p: null # Set default top-p parameter
save: true # Indicates whether to persist messages
save_session: null # Controls the persistence of the session; if null, the user is asked
highlight: true # Controls syntax highlighting
light_theme: false # Activates a light color theme when true
wrap: no # Controls text wrapping (no, auto, <max-width>)
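# Illustrative only (assumed width): a number sets the wrap column, e.g.
# wrap: 120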
wrap_code: false # Enables or disables wrapping of code blocks
auto_copy: false # Enables or disables automatically copying the last LLM response to the clipboard
keybindings: emacs # Choose keybinding style (emacs, vi)
prelude: null # Set a default role or session to start with (role:<name>, session:<name>)
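# Illustrative prelude values (hypothetical role and session names):
# prelude: role:coder
# prelude: session:work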

# Command that will be used to edit the current line buffer with ctrl+o
# If unset, falls back to $EDITOR and $VISUAL
buffer_editor: null
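# Illustrative value (assumes vim is on PATH):
# buffer_editor: vim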

# Compress session when token count reaches or exceeds this threshold (must be at least 1000)
compress_threshold: 1000
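# Illustrative alternative (assumed value); a higher threshold compresses less often:
# compress_threshold: 4000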
# Text prompt used for creating a concise summary of session messages
summarize_prompt: 'Summarize the discussion briefly in 200 words or less to use as a prompt for future context.'
# Text prompt used for including the summary of the entire session
summary_prompt: 'This is a summary of the chat history as a recap: '

# Custom REPL prompt, see https://github.com/sigoden/aichat/wiki/Custom-REPL-Prompt
left_prompt: '{color.green}{?session {session}{?role /}}{role}{color.cyan}{?session )}{!session >}{color.reset} '
right_prompt: '{color.purple}{?session {?consume_tokens {consume_tokens}({consume_percent}%)}{!consume_tokens {consume_tokens}}}{color.reset}'

clients:
  # All clients have the following configuration:
  # - type: xxxx
  #   name: xxxx # Only used to distinguish clients with the same client type. Optional
  #   models:
  #     - name: xxxx # The model name
  #       max_input_tokens: 100000 # Optional field
  #       max_output_tokens: 4096 # Optional field
  #       capabilities: text,vision # Optional field, supported capabilities: text, vision
  #       extra_fields: # Optional field, set custom parameters; merged into the request body JSON
  #         key: value
  #   extra:
  #     proxy: socks5://127.0.0.1:1080 # Specify an https/socks5 proxy server. Note HTTPS_PROXY/ALL_PROXY also work.
  #     connect_timeout: 10 # Set a timeout in seconds for connecting to the server

  # See https://platform.openai.com/docs/quickstart
  - type: openai
    api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    api_base: https://api.openai.com/v1 # Optional field
    organization_id: org-xxxxxxxxxxxxxxxxxxxxxxxx # Optional field

  # See https://ai.google.dev/docs
  - type: gemini
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # Optional field, possible values: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
    block_threshold: BLOCK_NONE

  # See https://docs.anthropic.com/claude/reference/getting-started-with-the-api
  - type: claude
    api_key: sk-ant-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # See https://docs.mistral.ai/
  - type: mistral
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # See https://docs.cohere.com/docs/the-cohere-platform
  - type: cohere
    api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # See https://console.groq.com/docs/quickstart
  - type: groq
    api_key: gsk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # Any openai-compatible API provider
  - type: openai-compatible
    name: localai
    api_base: http://localhost:8080/v1
    api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    chat_endpoint: /chat/completions # Optional field
    models:
      - name: llama2
        max_input_tokens: 8192
        extra_fields: # Optional field, set custom parameters
          key: value
      - name: llava
        max_input_tokens: 8192
        capabilities: text,vision # Optional field, choices: text, vision

  # See https://github.com/jmorganca/ollama
  - type: ollama
    api_base: http://localhost:11434
    api_key: Basic xxx # Set authorization header
    chat_endpoint: /api/chat # Optional field
    models:
      - name: llama2
        max_input_tokens: 8192

  # See https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart
  - type: azure-openai
    api_base: https://{RESOURCE}.openai.azure.com
    api_key: xxx
    models:
      - name: MyGPT4 # Model deployment name
        max_input_tokens: 8192

  # See https://cloud.google.com/vertex-ai
  - type: vertexai
    api_base: https://{REGION}-aiplatform.googleapis.com/v1/projects/{PROJECT_ID}/locations/{REGION}/publishers/google/models
    # Specifies an application-default-credentials (ADC) file. Optional field
    # Run `gcloud auth application-default login` to initialize the ADC file
    # See https://cloud.google.com/docs/authentication/external/set-up-adc
    adc_file: <path-to/gcloud/application_default_credentials.json>
    # Optional field, possible values: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
    block_threshold: BLOCK_ONLY_HIGH

  # See https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html
  - type: ernie
    api_key: xxxxxxxxxxxxxxxxxxxxxxxx
    secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

  # See https://help.aliyun.com/zh/dashscope/
  - type: qianwen
    api_key: sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx