---
# Docker Compose definition for the text-generation-webui container:
# builds the image from the local Dockerfile, exposes the web UI, mounts
# config directories, caps log growth, and reserves one NVIDIA GPU.
version: "3"

services:
  text-generation-webui-docker:
    build:
      context: .
      target: default  # Specify the variant (Dockerfile stage) to build
      # args:
      #   - LCL_SRC_DIR=text-generation-webui  # Developers - see Dockerfile app_base
    container_name: text-generation-webui
    environment:
      # Custom launch args (e.g., --model MODEL_NAME).
      # NOTE(review): in list-form `environment`, the inner double quotes are
      # part of the value delivered to the container — presumably the image's
      # entrypoint strips or eval-expands them; confirm before "fixing".
      - EXTRA_LAUNCH_ARGS="--listen --verbose"
    ports:
      # Quoted to avoid the YAML 1.1 digits-and-colon (sexagesimal) trap.
      - "7860:7860"  # Default web port
      # - "5000:5000"  # Default API port
      # - "5005:5005"  # Default streaming port
      # - "5001:5001"  # Default OpenAI API extension port
    volumes:
      # Host-side ./config/* directories mapped into the app's data dirs.
      - ./config/loras:/app/loras
      - ./config/models:/app/models
      - ./config/presets:/app/presets
      - ./config/prompts:/app/prompts
      - ./config/softprompts:/app/softprompts
      - ./config/training:/app/training
    logging:
      driver: json-file
      options:
        max-file: "3"    # number of rotated log files to keep
        max-size: "10m"  # max size per log file before rotation
    deploy:
      resources:
        reservations:
          devices:
            # Reserve GPU 0; device_ids entries must be strings.
            - driver: nvidia
              device_ids: ['0']
              capabilities: [gpu]