diff --git a/.github/workflows/docker-nightly.yml b/.github/workflows/docker-nightly.yml
new file mode 100644
index 0000000..52e4850
--- /dev/null
+++ b/.github/workflows/docker-nightly.yml
@@ -0,0 +1,41 @@
+name: Docker Nightly Build
+on:
+# push:
+  schedule:
+    - cron: '22 12 * * *'
+
+jobs:
+  nightly-build-and-push:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Maximize build space
+        uses: easimon/maximize-build-space@master
+        with:
+          overprovision-lvm: 'true'
+          remove-dotnet: 'true'
+          remove-android: 'true'
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      -
+        name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      -
+        name: Set build date
+        run: echo "BUILD_DATE=NIGHTLY $(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_ENV
+
+      -
+        name: Build and push
+        uses: docker/build-push-action@v4
+        with:
+          push: true
+          target: default
+          build-args: BUILD_DATE=${{ env.BUILD_DATE }}
+          tags: atinoda/text-generation-webui:default-nightly
diff --git a/README.md b/README.md
index 09d0389..5db6416 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ Each variant has the 'extras' incuded in `default` but has some changes made as
 | Variant | Description |
 |---|---|
 | `default` | Implementation of the vanilla deployment from source. Plus pre-installed `ExLlAMA` library from `turboderp/exllama`, and CUDA GPU offloading enabled for `llama-cpp`. *This version is recommended for most users.* |
+| `default-nightly` | Automated nightly build of the `default` variant. This image is built and pushed automatically - it is untested and may be unstable. *Suitable when more frequent updates are required and instability is not an issue.* |
 | `triton` | Updated `GPTQ-for-llama` using the latest `triton` branch from `qwopqwop200/GPTQ-for-LLaMa`. Suitable for Linux only. *This version is accurate but a little slow.* |
 | `cuda` | Updated `GPTQ-for-llama` using the latest `cuda` branch from `qwopqwop200/GPTQ-for-LLaMa`. *This version is very slow!* |
 | `monkey-patch` | Use LoRAs in 4-Bit `GPTQ-for-llama` mode. ***DEPRECATION WARNING:** This version is outdated, but will remain for now.* |
@@ -79,7 +80,7 @@ Then recreate the container:
 
 `docker compose up`
 
-*When the container is launched, it will print out how many commits behind origin the current build is, so you can decide if you want to update it. Docker hub images will be periodically updated, but if you need bleeding edge versions you must build locally.*
+*When the container is launched, it will print out how many commits behind origin the current build is, so you can decide if you want to update it. Docker hub images will be periodically updated. The `default-nightly` image is built every day but it is not manually tested. If you need bleeding edge versions you must build locally.*
 
 ### Build (optional)
 The provided `docker-compose.yml.build` shows how to build the image locally. You can use it as a reference to modify the original `docker-compose.yml`, or you can rename it and use it as-is. Choose the desired variant to build by setting the build `target` and then run:
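
For illustration, a minimal sketch of how the `default-nightly` tag published by this workflow might be referenced in a user's `docker-compose.yml`; the service name and port mapping are assumptions rather than values taken from this repository.

```yml
# Hypothetical docker-compose.yml excerpt, illustrative only and not part of this PR:
# it selects the automatically built nightly tag instead of the manually tested one.
services:
  text-generation-webui:
    image: atinoda/text-generation-webui:default-nightly  # tag pushed by the nightly workflow above
    ports:
      - "7860:7860"  # assumed host:container port for the web UI
```

With a tag like this in place, `docker compose pull` followed by `docker compose up` would fetch and run the most recent nightly image.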