Mirror of https://github.com/nomic-ai/gpt4all, synced 2024-11-02 09:40:42 +00:00
8aba2c9009
* feat: local inference server
* fix: source to use bash + vars
* chore: isort and black
* fix: make file + inference mode
* chore: logging
* refactor: remove old links
* fix: add new env vars
* feat: hf inference server
* refactor: remove old links
* test: batch and single response
* chore: black + isort
* separate gpu and cpu dockerfiles
* moved gpu to separate dockerfile
* Fixed test endpoints
* Edits to API. server won't start due to failed instantiation error
* Method signature
* fix: gpu_infer
* tests: fix tests
---------
Co-authored-by: Andriy Mulyar <andriy.mulyar@gmail.com>
19 lines · 472 B · YAML
version: "3.8"

services:
  gpt4all_api:
    image: gpt4all_api
    container_name: gpt4all_api
    restart: always # restart on error (usually code compilation from save during bad state)
    ports:
      - "4891:4891"
    environment:
      - APP_ENVIRONMENT=dev
      - WEB_CONCURRENCY=2
      - LOGLEVEL=debug
      - PORT=4891
      - model=ggml-mpt-7b-chat.bin
      - inference_mode=cpu
    volumes:
      - './gpt4all_api/app:/app'
    command: ["/start-reload.sh"]
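
A minimal usage sketch, assuming this file is saved as docker-compose.yaml in the repository and the gpt4all_api image is built from the CPU Dockerfile mentioned in the commit above; the /v1/completions path is an assumed OpenAI-style endpoint for the local inference server, not confirmed by this file:

    # Build the image and start the API container in the background
    docker compose up --build -d

    # Send a test completion request to the published port (endpoint path assumed)
    curl -X POST http://localhost:4891/v1/completions \
      -H "Content-Type: application/json" \
      -d '{"model": "ggml-mpt-7b-chat.bin", "prompt": "Hello", "max_tokens": 16}'

The bind mount of ./gpt4all_api/app into /app together with /start-reload.sh suggests a hot-reload development setup, which is why restart: always is used to recover from reloads triggered on a broken intermediate state. For GPU inference, the commit adds a separate GPU Dockerfile; presumably inference_mode would be set to gpu and the image built from that Dockerfile instead, though the exact GPU compose configuration is not shown here.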